Diffstat (limited to 'compiler'): 189 files changed, 4197 insertions, 2859 deletions
diff --git a/compiler/rustc/Cargo.toml b/compiler/rustc/Cargo.toml index 277cf0f51d3..696c003a587 100644 --- a/compiler/rustc/Cargo.toml +++ b/compiler/rustc/Cargo.toml @@ -15,11 +15,7 @@ version = '0.4.0' optional = true features = ['unprefixed_malloc_on_supported_platforms'] -[dependencies.tikv-jemallocator] -version = '0.4.0' -optional = true - [features] -jemalloc = ['tikv-jemalloc-sys', 'tikv-jemallocator'] +jemalloc = ['tikv-jemalloc-sys'] llvm = ['rustc_driver/llvm'] max_level_info = ['rustc_driver/max_level_info'] diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index 6f9ecb9cd21..3928d70c0ed 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -45,24 +45,24 @@ pub struct TypedArena<T> { end: Cell<*mut T>, /// A vector of arena chunks. - chunks: RefCell<Vec<TypedArenaChunk<T>>>, + chunks: RefCell<Vec<ArenaChunk<T>>>, /// Marker indicating that dropping the arena causes its owned /// instances of `T` to be dropped. _own: PhantomData<T>, } -struct TypedArenaChunk<T> { +struct ArenaChunk<T = u8> { /// The raw storage for the arena chunk. storage: Box<[MaybeUninit<T>]>, /// The number of valid entries in the chunk. entries: usize, } -impl<T> TypedArenaChunk<T> { +impl<T> ArenaChunk<T> { #[inline] - unsafe fn new(capacity: usize) -> TypedArenaChunk<T> { - TypedArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 } + unsafe fn new(capacity: usize) -> ArenaChunk<T> { + ArenaChunk { storage: Box::new_uninit_slice(capacity), entries: 0 } } /// Destroys this arena chunk. @@ -125,6 +125,11 @@ impl<I, T> IterExt<T> for I where I: IntoIterator<Item = T>, { + // This default collects into a `SmallVec` and then allocates by copying + // from it. The specializations below for types like `Vec` are more + // efficient, copying directly without the intermediate collecting step. + // This default could be made more efficient, like + // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother. #[inline] default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] { let vec: SmallVec<[_; 8]> = self.into_iter().collect(); @@ -139,7 +144,7 @@ impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> { if len == 0 { return &mut []; } - // Move the content to the arena by copying and then forgetting it + // Move the content to the arena by copying and then forgetting it. unsafe { let start_ptr = arena.alloc_raw_slice(len); self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len); @@ -156,7 +161,7 @@ impl<T> IterExt<T> for Vec<T> { if len == 0 { return &mut []; } - // Move the content to the arena by copying and then forgetting it + // Move the content to the arena by copying and then forgetting it. unsafe { let start_ptr = arena.alloc_raw_slice(len); self.as_ptr().copy_to_nonoverlapping(start_ptr, len); @@ -173,7 +178,7 @@ impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> { if len == 0 { return &mut []; } - // Move the content to the arena by copying and then forgetting it + // Move the content to the arena by copying and then forgetting it. unsafe { let start_ptr = arena.alloc_raw_slice(len); self.as_ptr().copy_to_nonoverlapping(start_ptr, len); @@ -272,7 +277,7 @@ impl<T> TypedArena<T> { // Also ensure that this chunk can fit `additional`. 
new_cap = cmp::max(additional, new_cap); - let mut chunk = TypedArenaChunk::<T>::new(new_cap); + let mut chunk = ArenaChunk::<T>::new(new_cap); self.ptr.set(chunk.start()); self.end.set(chunk.end()); chunks.push(chunk); @@ -281,7 +286,7 @@ impl<T> TypedArena<T> { // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other // chunks. - fn clear_last_chunk(&self, last_chunk: &mut TypedArenaChunk<T>) { + fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) { // Determine how much was filled. let start = last_chunk.start() as usize; // We obtain the value of the pointer to the first uninitialized element. @@ -340,7 +345,7 @@ pub struct DroplessArena { end: Cell<*mut u8>, /// A vector of arena chunks. - chunks: RefCell<Vec<TypedArenaChunk<u8>>>, + chunks: RefCell<Vec<ArenaChunk>>, } unsafe impl Send for DroplessArena {} @@ -378,7 +383,7 @@ impl DroplessArena { // Also ensure that this chunk can fit `additional`. new_cap = cmp::max(additional, new_cap); - let mut chunk = TypedArenaChunk::<u8>::new(new_cap); + let mut chunk = ArenaChunk::new(new_cap); self.start.set(chunk.start()); self.end.set(chunk.end()); chunks.push(chunk); @@ -520,10 +525,19 @@ impl DroplessArena { } } -// Declare an `Arena` containing one dropless arena and many typed arenas (the -// types of the typed arenas are specified by the arguments). The dropless -// arena will be used for any types that impl `Copy`, and also for any of the -// specified types that satisfy `!mem::needs_drop`. +/// Declare an `Arena` containing one dropless arena and many typed arenas (the +/// types of the typed arenas are specified by the arguments). +/// +/// There are three cases of interest. +/// - Types that are `Copy`: these need not be specified in the arguments. They +/// will use the `DroplessArena`. +/// - Types that are `!Copy` and `!Drop`: these must be specified in the +/// arguments. An empty `TypedArena` will be created for each one, but the +/// `DroplessArena` will always be used and the `TypedArena` will stay empty. +/// This is odd but harmless, because an empty arena allocates no memory. +/// - Types that are `!Copy` and `Drop`: these must be specified in the +/// arguments. The `TypedArena` will be used for them. +/// #[rustc_macro_transparency = "semitransparent"] pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { #[derive(Default)] @@ -532,7 +546,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { $($name: $crate::TypedArena<$ty>,)* } - pub trait ArenaAllocatable<'tcx, T = Self>: Sized { + pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized { fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self; fn allocate_from_iter<'a>( arena: &'a Arena<'tcx>, @@ -541,7 +555,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { } // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`. 
- impl<'tcx, T: Copy> ArenaAllocatable<'tcx, ()> for T { + impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T { #[inline] fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self { arena.dropless.alloc(self) @@ -555,7 +569,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { } } $( - impl<'tcx> ArenaAllocatable<'tcx, $ty> for $ty { + impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty { #[inline] fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self { if !::std::mem::needs_drop::<Self>() { @@ -581,7 +595,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { impl<'tcx> Arena<'tcx> { #[inline] - pub fn alloc<T: ArenaAllocatable<'tcx, U>, U>(&self, value: T) -> &mut T { + pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&self, value: T) -> &mut T { value.allocate_on(self) } @@ -594,7 +608,7 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { self.dropless.alloc_slice(value) } - pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, U>, U>( + pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>( &'a self, iter: impl ::std::iter::IntoIterator<Item = T>, ) -> &'a mut [T] { @@ -603,5 +617,10 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) { } } +// Marker types that let us give different behaviour for arenas allocating +// `Copy` types vs `!Copy` types. +pub struct IsCopy; +pub struct IsNotCopy; + #[cfg(test)] mod tests; diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs index 438168f4fcc..7c19559ed91 100644 --- a/compiler/rustc_ast/src/ast.rs +++ b/compiler/rustc_ast/src/ast.rs @@ -2418,8 +2418,9 @@ impl<S: Encoder> rustc_serialize::Encodable<S> for AttrId { } impl<D: Decoder> rustc_serialize::Decodable<D> for AttrId { - fn decode(d: &mut D) -> Result<AttrId, D::Error> { - d.read_nil().map(|_| crate::attr::mk_attr_id()) + fn decode(d: &mut D) -> AttrId { + d.read_unit(); + crate::attr::mk_attr_id() } } diff --git a/compiler/rustc_ast/src/ptr.rs b/compiler/rustc_ast/src/ptr.rs index 9fe87a0a637..70dbda82224 100644 --- a/compiler/rustc_ast/src/ptr.rs +++ b/compiler/rustc_ast/src/ptr.rs @@ -115,8 +115,8 @@ impl<T> fmt::Pointer for P<T> { } impl<D: Decoder, T: 'static + Decodable<D>> Decodable<D> for P<T> { - fn decode(d: &mut D) -> Result<P<T>, D::Error> { - Decodable::decode(d).map(P) + fn decode(d: &mut D) -> P<T> { + P(Decodable::decode(d)) } } @@ -204,8 +204,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for P<[T]> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for P<[T]> { - fn decode(d: &mut D) -> Result<P<[T]>, D::Error> { - Ok(P::from_vec(Decodable::decode(d)?)) + fn decode(d: &mut D) -> P<[T]> { + P::from_vec(Decodable::decode(d)) } } diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs index 51cabb50cd3..2174378a560 100644 --- a/compiler/rustc_ast/src/tokenstream.rs +++ b/compiler/rustc_ast/src/tokenstream.rs @@ -163,7 +163,7 @@ impl<S: Encoder> Encodable<S> for LazyTokenStream { } impl<D: Decoder> Decodable<D> for LazyTokenStream { - fn decode(_d: &mut D) -> Result<Self, D::Error> { + fn decode(_d: &mut D) -> Self { panic!("Attempted to decode LazyTokenStream"); } } diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index f04dc85b28a..17bc8d7591b 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -625,18 +625,18 @@ impl<'hir> LoweringContext<'_, 'hir> { /// } /// } /// ``` - fn lower_expr_await(&mut self, await_span: Span, 
expr: &Expr) -> hir::ExprKind<'hir> { - let dot_await_span = expr.span.shrink_to_hi().to(await_span); + fn lower_expr_await(&mut self, dot_await_span: Span, expr: &Expr) -> hir::ExprKind<'hir> { + let full_span = expr.span.to(dot_await_span); match self.generator_kind { Some(hir::GeneratorKind::Async(_)) => {} Some(hir::GeneratorKind::Gen) | None => { let mut err = struct_span_err!( self.sess, - await_span, + dot_await_span, E0728, "`await` is only allowed inside `async` functions and blocks" ); - err.span_label(await_span, "only allowed inside `async` functions and blocks"); + err.span_label(dot_await_span, "only allowed inside `async` functions and blocks"); if let Some(item_sp) = self.current_item { err.span_label(item_sp, "this is not `async`"); } @@ -646,7 +646,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let span = self.mark_span_with_reason(DesugaringKind::Await, dot_await_span, None); let gen_future_span = self.mark_span_with_reason( DesugaringKind::Await, - await_span, + full_span, self.allow_gen_future.clone(), ); let expr = self.lower_expr_mut(expr); @@ -699,9 +699,9 @@ impl<'hir> LoweringContext<'_, 'hir> { let loop_hir_id = self.lower_node_id(loop_node_id); let ready_arm = { let x_ident = Ident::with_dummy_span(sym::result); - let (x_pat, x_pat_hid) = self.pat_ident(span, x_ident); - let x_expr = self.expr_ident(span, x_ident, x_pat_hid); - let ready_field = self.single_pat_field(span, x_pat); + let (x_pat, x_pat_hid) = self.pat_ident(gen_future_span, x_ident); + let x_expr = self.expr_ident(gen_future_span, x_ident, x_pat_hid); + let ready_field = self.single_pat_field(gen_future_span, x_pat); let ready_pat = self.pat_lang_item_variant( span, hir::LangItem::PollReady, @@ -711,7 +711,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let break_x = self.with_loop_scope(loop_node_id, move |this| { let expr_break = hir::ExprKind::Break(this.lower_loop_destination(None), Some(x_expr)); - this.arena.alloc(this.expr(span, expr_break, ThinVec::new())) + this.arena.alloc(this.expr(gen_future_span, expr_break, ThinVec::new())) }); self.arm(ready_pat, break_x) }; @@ -783,7 +783,7 @@ impl<'hir> LoweringContext<'_, 'hir> { // `match ::std::future::IntoFuture::into_future(<expr>) { ... 
}` let into_future_span = self.mark_span_with_reason( DesugaringKind::Await, - await_span, + dot_await_span, self.allow_into_future.clone(), ); let into_future_expr = self.expr_call_lang_item_fn( diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 47b61067055..32cec3a295a 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -479,6 +479,20 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let attrs = std::mem::take(&mut self.attrs); let mut bodies = std::mem::take(&mut self.bodies); let local_node_ids = std::mem::take(&mut self.local_node_ids); + + let local_id_to_def_id = local_node_ids + .iter() + .filter_map(|&node_id| { + let hir_id = self.node_id_to_hir_id[node_id]?; + if hir_id.local_id == hir::ItemLocalId::new(0) { + None + } else { + let def_id = self.resolver.opt_local_def_id(node_id)?; + Some((hir_id.local_id, def_id)) + } + }) + .collect(); + let trait_map = local_node_ids .into_iter() .filter_map(|node_id| { @@ -501,7 +515,13 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let (hash_including_bodies, hash_without_bodies) = self.hash_owner(node, &bodies); let (nodes, parenting) = index::index_hir(self.sess, self.resolver.definitions(), node, &bodies); - let nodes = hir::OwnerNodes { hash_including_bodies, hash_without_bodies, nodes, bodies }; + let nodes = hir::OwnerNodes { + hash_including_bodies, + hash_without_bodies, + nodes, + bodies, + local_id_to_def_id, + }; let attrs = { let mut hcx = self.resolver.create_stable_hashing_context(); let mut stable_hasher = StableHasher::new(); diff --git a/compiler/rustc_ast_pretty/src/pp.rs b/compiler/rustc_ast_pretty/src/pp.rs index 82c40868d18..e1f43cb20dc 100644 --- a/compiler/rustc_ast_pretty/src/pp.rs +++ b/compiler/rustc_ast_pretty/src/pp.rs @@ -147,6 +147,22 @@ pub enum Breaks { } #[derive(Clone, Copy)] +enum IndentStyle { + /// Vertically aligned under whatever column this block begins at. + /// + /// fn demo(arg1: usize, + /// arg2: usize); + Visual, + /// Indented relative to the indentation level of the previous line. 
+ /// + /// fn demo( + /// arg1: usize, + /// arg2: usize, + /// ); + Block { offset: isize }, +} + +#[derive(Clone, Copy)] pub struct BreakToken { offset: isize, blank_space: isize, @@ -154,7 +170,7 @@ pub struct BreakToken { #[derive(Clone, Copy)] pub struct BeginToken { - offset: isize, + indent: IndentStyle, breaks: Breaks, } @@ -178,7 +194,7 @@ impl Token { #[derive(Copy, Clone)] enum PrintFrame { Fits, - Broken { offset: isize, breaks: Breaks }, + Broken { indent: usize, breaks: Breaks }, } const SIZE_INFINITY: isize = 0xffff; @@ -204,6 +220,8 @@ pub struct Printer { scan_stack: VecDeque<usize>, /// Stack of blocks-in-progress being flushed by print print_stack: Vec<PrintFrame>, + /// Level of indentation of current line + indent: usize, /// Buffered indentation to avoid writing trailing whitespace pending_indentation: isize, /// The token most recently popped from the left boundary of the @@ -229,6 +247,7 @@ impl Printer { right_total: 0, scan_stack: VecDeque::new(), print_stack: Vec::new(), + indent: 0, pending_indentation: 0, last_printed: None, } @@ -368,38 +387,41 @@ impl Printer { *self .print_stack .last() - .unwrap_or(&PrintFrame::Broken { offset: 0, breaks: Breaks::Inconsistent }) + .unwrap_or(&PrintFrame::Broken { indent: 0, breaks: Breaks::Inconsistent }) } fn print_begin(&mut self, token: BeginToken, size: isize) { if size > self.space { - let col = self.margin - self.space + token.offset; - self.print_stack.push(PrintFrame::Broken { offset: col, breaks: token.breaks }); + self.print_stack.push(PrintFrame::Broken { indent: self.indent, breaks: token.breaks }); + self.indent = match token.indent { + IndentStyle::Block { offset } => (self.indent as isize + offset) as usize, + IndentStyle::Visual => (self.margin - self.space) as usize, + }; } else { self.print_stack.push(PrintFrame::Fits); } } fn print_end(&mut self) { - self.print_stack.pop().unwrap(); + if let PrintFrame::Broken { indent, .. } = self.print_stack.pop().unwrap() { + self.indent = indent; + } } fn print_break(&mut self, token: BreakToken, size: isize) { - let break_offset = - match self.get_top() { - PrintFrame::Fits => None, - PrintFrame::Broken { offset, breaks: Breaks::Consistent } => Some(offset), - PrintFrame::Broken { offset, breaks: Breaks::Inconsistent } => { - if size > self.space { Some(offset) } else { None } - } - }; - if let Some(offset) = break_offset { - self.out.push('\n'); - self.pending_indentation = offset + token.offset; - self.space = self.margin - (offset + token.offset); - } else { + let fits = match self.get_top() { + PrintFrame::Fits => true, + PrintFrame::Broken { breaks: Breaks::Consistent, .. } => false, + PrintFrame::Broken { breaks: Breaks::Inconsistent, .. 
} => size <= self.space, + }; + if fits { self.pending_indentation += token.blank_space; self.space -= token.blank_space; + } else { + self.out.push('\n'); + let indent = self.indent as isize + token.offset; + self.pending_indentation = indent; + self.space = self.margin - indent; } } @@ -422,7 +444,10 @@ impl Printer { /// "raw box" pub fn rbox(&mut self, indent: usize, breaks: Breaks) { - self.scan_begin(BeginToken { offset: indent as isize, breaks }) + self.scan_begin(BeginToken { + indent: IndentStyle::Block { offset: indent as isize }, + breaks, + }) } /// Inconsistent breaking box @@ -435,6 +460,10 @@ impl Printer { self.rbox(indent, Breaks::Consistent) } + pub fn visual_align(&mut self) { + self.scan_begin(BeginToken { indent: IndentStyle::Visual, breaks: Breaks::Consistent }); + } + pub fn break_offset(&mut self, n: usize, off: isize) { self.scan_break(BreakToken { offset: off, blank_space: n as isize }) } @@ -457,7 +486,7 @@ impl Printer { self.break_offset(n, 0) } - crate fn zerobreak(&mut self) { + pub fn zerobreak(&mut self) { self.spaces(0) } diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index 1cbc3162d43..b575dc21961 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -315,7 +315,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere self.word(cmnt.lines[0].clone()); self.hardbreak() } else { - self.ibox(0); + self.visual_align(); for line in &cmnt.lines { if !line.is_empty() { self.word(line.clone()); @@ -607,7 +607,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere &mut self, macro_def: &ast::MacroDef, ident: &Ident, - sp: &Span, + sp: Span, print_visibility: impl FnOnce(&mut Self), ) { let (kw, has_bang) = if macro_def.macro_rules { @@ -623,7 +623,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere macro_def.body.delim(), ¯o_def.body.inner_tokens(), true, - *sp, + sp, ); if macro_def.body.need_semicolon() { self.word(";"); @@ -655,7 +655,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere // Outer-box is consistent. self.cbox(INDENT_UNIT); // Head-box is inconsistent. - self.ibox(w.len() + 1); + self.ibox(0); // Keyword that starts the head. 
if !w.is_empty() { self.word_nbsp(w); diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs index 956200d60f5..6a5bba30b8b 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs @@ -320,7 +320,9 @@ impl<'a> State<'a> { self.print_ident(label.ident); self.word_space(":"); } - self.head("while"); + self.cbox(0); + self.ibox(0); + self.word_nbsp("while"); self.print_expr_as_cond(test); self.space(); self.print_block_with_attrs(blk, attrs); @@ -330,7 +332,9 @@ impl<'a> State<'a> { self.print_ident(label.ident); self.word_space(":"); } - self.head("for"); + self.cbox(0); + self.ibox(0); + self.word_nbsp("for"); self.print_pat(pat); self.space(); self.word_space("in"); @@ -343,12 +347,14 @@ impl<'a> State<'a> { self.print_ident(label.ident); self.word_space(":"); } - self.head("loop"); + self.cbox(0); + self.ibox(0); + self.word_nbsp("loop"); self.print_block_with_attrs(blk, attrs); } ast::ExprKind::Match(ref expr, ref arms) => { - self.cbox(INDENT_UNIT); - self.ibox(INDENT_UNIT); + self.cbox(0); + self.ibox(0); self.word_nbsp("match"); self.print_expr_as_cond(expr); self.space(); @@ -388,7 +394,7 @@ impl<'a> State<'a> { self.word_space(":"); } // containing cbox, will be closed by print-block at } - self.cbox(INDENT_UNIT); + self.cbox(0); // head-box, will be closed by print-block after { self.ibox(0); self.print_block_with_attrs(blk, attrs); @@ -397,7 +403,7 @@ impl<'a> State<'a> { self.word_nbsp("async"); self.print_capture_clause(capture_clause); // cbox/ibox in analogy to the `ExprKind::Block` arm above - self.cbox(INDENT_UNIT); + self.cbox(0); self.ibox(0); self.print_block_with_attrs(blk, attrs); } @@ -500,7 +506,9 @@ impl<'a> State<'a> { self.word("?") } ast::ExprKind::TryBlock(ref blk) => { - self.head("try"); + self.cbox(0); + self.ibox(0); + self.word_nbsp("try"); self.print_block_with_attrs(blk, attrs) } ast::ExprKind::Err => { diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs index c756b946b1e..dac84ae9d5f 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state/item.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs @@ -1,5 +1,5 @@ use crate::pp::Breaks::Inconsistent; -use crate::pprust::state::{AnnNode, PrintState, State, INDENT_UNIT}; +use crate::pprust::state::{AnnNode, PrintState, State}; use rustc_ast as ast; use rustc_ast::GenericBound; @@ -347,7 +347,7 @@ impl<'a> State<'a> { } } ast::ItemKind::MacroDef(ref macro_def) => { - self.print_mac_def(macro_def, &item.ident, &item.span, |state| { + self.print_mac_def(macro_def, &item.ident, item.span, |state| { state.print_visibility(&item.vis) }); } @@ -377,7 +377,7 @@ impl<'a> State<'a> { self.space_if_not_bol(); self.maybe_print_comment(v.span.lo()); self.print_outer_attributes(&v.attrs); - self.ibox(INDENT_UNIT); + self.ibox(0); self.print_variant(v); self.word(","); self.end(); diff --git a/compiler/rustc_borrowck/src/constraints/mod.rs b/compiler/rustc_borrowck/src/constraints/mod.rs index 98378a98684..d41143ee763 100644 --- a/compiler/rustc_borrowck/src/constraints/mod.rs +++ b/compiler/rustc_borrowck/src/constraints/mod.rs @@ -72,7 +72,7 @@ impl<'tcx> Index<OutlivesConstraintIndex> for OutlivesConstraintSet<'tcx> { } } -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Clone, PartialEq, Eq)] pub struct OutlivesConstraint<'tcx> { // NB. 
The ordering here is not significant for correctness, but // it is for convenience. Before we dump the constraints in the diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs index 8f4e574fbd6..8ed50075ecb 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs @@ -798,7 +798,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { .map(|assoc_items| { assoc_items .in_definition_order() - .map(|assoc_item_def| assoc_item_def.ident) + .map(|assoc_item_def| assoc_item_def.ident(self.infcx.tcx)) .filter(|&ident| { let original_method_ident = path_segment.ident; original_method_ident != ident diff --git a/compiler/rustc_borrowck/src/region_infer/dump_mir.rs b/compiler/rustc_borrowck/src/region_infer/dump_mir.rs index cfd3acb6bde..97233b930c3 100644 --- a/compiler/rustc_borrowck/src/region_infer/dump_mir.rs +++ b/compiler/rustc_borrowck/src/region_infer/dump_mir.rs @@ -72,7 +72,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } let mut constraints: Vec<_> = self.constraints.outlives().iter().collect(); - constraints.sort(); + constraints.sort_by_key(|c| (c.sup, c.sub)); for constraint in &constraints { let OutlivesConstraint { sup, sub, locations, category, variance_info: _ } = constraint; let (name, arg) = match locations { diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index b39a28f79aa..d9120ff2457 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -612,7 +612,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { fn propagate_constraints(&mut self, _body: &Body<'tcx>) { debug!("constraints={:#?}", { let mut constraints: Vec<_> = self.constraints.outlives().iter().collect(); - constraints.sort(); + constraints.sort_by_key(|c| (c.sup, c.sub)); constraints .into_iter() .map(|c| (c, self.constraint_sccs.scc(c.sup), self.constraint_sccs.scc(c.sub))) diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index b6f5f4998a6..73103643e3e 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -758,6 +758,7 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { }, ProjectionElem::Field(field, fty) => { let fty = self.sanitize_type(place, fty); + let fty = self.cx.normalize(fty, location); match self.field_ty(place, base, field, location) { Ok(ty) => { let ty = self.cx.normalize(ty, location); diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs index b986df403f9..16a903d5e59 100644 --- a/compiler/rustc_borrowck/src/universal_regions.rs +++ b/compiler/rustc_borrowck/src/universal_regions.rs @@ -180,8 +180,9 @@ pub enum RegionClassification { /// anywhere. There is only one, `'static`. Global, - /// An **external** region is only relevant for closures. In that - /// case, it refers to regions that are free in the closure type + /// An **external** region is only relevant for + /// closures, generators, and inline consts. In that + /// case, it refers to regions that are free in the type /// -- basically, something bound in the surrounding context. /// /// Consider this example: @@ -198,8 +199,8 @@ pub enum RegionClassification { /// Here, the lifetimes `'a` and `'b` would be **external** to the /// closure. 
/// - /// If we are not analyzing a closure, there are no external - /// lifetimes. + /// If we are not analyzing a closure/generator/inline-const, + /// there are no external lifetimes. External, /// A **local** lifetime is one about which we know the full set @@ -424,22 +425,30 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { let typeck_root_def_id = self.infcx.tcx.typeck_root_def_id(self.mir_def.did.to_def_id()); - // If this is a closure or generator, then the late-bound regions from the enclosing - // function are actually external regions to us. For example, here, 'a is not local - // to the closure c (although it is local to the fn foo): - // fn foo<'a>() { - // let c = || { let x: &'a u32 = ...; } - // } - if self.mir_def.did.to_def_id() != typeck_root_def_id { + // If this is a 'root' body (not a closure/generator/inline const), then + // there are no extern regions, so the local regions start at the same + // position as the (empty) sub-list of extern regions + let first_local_index = if self.mir_def.did.to_def_id() == typeck_root_def_id { + first_extern_index + } else { + // If this is a closure, generator, or inline-const, then the late-bound regions from the enclosing + // function are actually external regions to us. For example, here, 'a is not local + // to the closure c (although it is local to the fn foo): + // fn foo<'a>() { + // let c = || { let x: &'a u32 = ...; } + // } self.infcx - .replace_late_bound_regions_with_nll_infer_vars(self.mir_def.did, &mut indices) - } - - let bound_inputs_and_output = self.compute_inputs_and_output(&indices, defining_ty); + .replace_late_bound_regions_with_nll_infer_vars(self.mir_def.did, &mut indices); + // Any regions created during the execution of `defining_ty` or during the above + // late-bound region replacement are all considered 'extern' regions + self.infcx.num_region_vars() + }; // "Liberate" the late-bound regions. These correspond to // "local" free regions. - let first_local_index = self.infcx.num_region_vars(); + + let bound_inputs_and_output = self.compute_inputs_and_output(&indices, defining_ty); + let inputs_and_output = self.infcx.replace_bound_regions_with_nll_infer_vars( FR, self.mir_def.did, diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs index d1393528d1c..41b56cca1dc 100644 --- a/compiler/rustc_builtin_macros/src/format.rs +++ b/compiler/rustc_builtin_macros/src/format.rs @@ -791,7 +791,7 @@ impl<'a, 'b> Context<'a, 'b> { // Thus in the not nicely ordered case we emit the following instead: // // match (&$arg0, &$arg1, …) { - // _args => [ArgumentV1::new(_args.$i, …), ArgumentV1::new(_args.$j, …), …] + // args => [ArgumentV1::new(args.$i, …), ArgumentV1::new(args.$j, …), …] // } // // for the sequence of indices $i, $j, … governed by fmt_arg_index_and_ty. 
@@ -804,7 +804,7 @@ impl<'a, 'b> Context<'a, 'b> { self.ecx.expr_addr_of(expansion_span, P(e.take())) } else { let def_site = self.ecx.with_def_site_ctxt(span); - let args_tuple = self.ecx.expr_ident(def_site, Ident::new(sym::_args, def_site)); + let args_tuple = self.ecx.expr_ident(def_site, Ident::new(sym::args, def_site)); let member = Ident::new(sym::integer(arg_index), def_site); self.ecx.expr(def_site, ast::ExprKind::Field(args_tuple, member)) }; @@ -828,7 +828,7 @@ impl<'a, 'b> Context<'a, 'b> { .map(|e| self.ecx.expr_addr_of(e.span.with_ctxt(self.macsp.ctxt()), e)) .collect(); - let pat = self.ecx.pat_ident(self.macsp, Ident::new(sym::_args, self.macsp)); + let pat = self.ecx.pat_ident(self.macsp, Ident::new(sym::args, self.macsp)); let arm = self.ecx.arm(self.macsp, pat, args_array); let head = self.ecx.expr(self.macsp, ast::ExprKind::Tup(heads)); self.ecx.expr_match(self.macsp, head, vec![arm]) @@ -877,11 +877,21 @@ impl<'a, 'b> Context<'a, 'b> { return ecx.expr_call_global(macsp, path, vec![arg]); } }; + let new_fn_name = match trait_ { + "Display" => "new_display", + "Debug" => "new_debug", + "LowerExp" => "new_lower_exp", + "UpperExp" => "new_upper_exp", + "Octal" => "new_octal", + "Pointer" => "new_pointer", + "Binary" => "new_binary", + "LowerHex" => "new_lower_hex", + "UpperHex" => "new_upper_hex", + _ => unreachable!(), + }; - let path = ecx.std_path(&[sym::fmt, Symbol::intern(trait_), sym::fmt]); - let format_fn = ecx.path_global(sp, path); - let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, sym::new]); - ecx.expr_call_global(macsp, path, vec![arg, ecx.expr_path(format_fn)]) + let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, Symbol::intern(new_fn_name)]); + ecx.expr_call_global(sp, path, vec![arg]) } } diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock index 65e142a00f8..faed52727c8 100644 --- a/compiler/rustc_codegen_cranelift/Cargo.lock +++ b/compiler/rustc_codegen_cranelift/Cargo.lock @@ -172,9 +172,9 @@ checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg", "hashbrown", diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml index 3be4250296e..2d19040b509 100644 --- a/compiler/rustc_codegen_cranelift/Cargo.toml +++ b/compiler/rustc_codegen_cranelift/Cargo.toml @@ -19,7 +19,7 @@ gimli = { version = "0.25.0", default-features = false, features = ["write"]} object = { version = "0.27.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] } ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" } -indexmap = "1.0.2" +indexmap = "1.8.0" libloading = { version = "0.6.0", optional = true } smallvec = "1.6.1" diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs index b4213da6e05..8a74c4c07e0 100644 --- a/compiler/rustc_codegen_gcc/src/asm.rs +++ b/compiler/rustc_codegen_gcc/src/asm.rs @@ -560,6 +560,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister { InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => 
unimplemented!(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(), + InlineAsmRegClass::Msp430(_) => unimplemented!(), InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(), InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(), InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(), @@ -622,6 +623,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), + InlineAsmRegClass::Msp430(_) => unimplemented!(), InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(), InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(), InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(), @@ -729,6 +731,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option InlineAsmRegClass::Bpf(_) => unimplemented!(), InlineAsmRegClass::Hexagon(_) => unimplemented!(), InlineAsmRegClass::Mips(_) => unimplemented!(), + InlineAsmRegClass::Msp430(_) => unimplemented!(), InlineAsmRegClass::Nvptx(_) => unimplemented!(), InlineAsmRegClass::PowerPC(_) => unimplemented!(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 379c88bbd40..ffb77e16a14 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -1256,7 +1256,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { aggregate_value } - fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> { + fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { + // TODO(antoyo) + } + + fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> { let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1"); let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1"); let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]); @@ -1267,11 +1271,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort. 
} - fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) { - // TODO(antoyo) - } - - fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> { + fn resume(&mut self, _exn: RValue<'gcc>) { unimplemented!(); } @@ -1279,7 +1279,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> { + fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) { unimplemented!(); } @@ -1287,18 +1287,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { unimplemented!(); } - fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> { + fn catch_switch( + &mut self, + _parent: Option<RValue<'gcc>>, + _unwind: Option<Block<'gcc>>, + _handlers: &[Block<'gcc>], + ) -> RValue<'gcc> { unimplemented!(); } - fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) { - unimplemented!(); - } - - fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { - // TODO(antoyo) - } - // Atomic Operations fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> { let expected = self.current_func().new_local(None, cmp.get_type(), "expected"); diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 8b696dc6fba..e22bec24951 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -232,6 +232,9 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { InlineAsmArch::SpirV => {} InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {} InlineAsmArch::Bpf => {} + InlineAsmArch::Msp430 => { + constraints.push("~{sr}".to_string()); + } } } if !options.contains(InlineAsmOptions::NOMEM) { @@ -580,6 +583,7 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e", InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r", InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } @@ -666,6 +670,7 @@ fn modifier_to_llvm( }, InlineAsmRegClass::Avr(_) => None, InlineAsmRegClass::S390x(_) => None, + InlineAsmRegClass::Msp430(_) => None, InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } @@ -734,6 +739,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &' InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(), InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 8a9450c20dd..c9a04e6280f 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -956,29 +956,24 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { unsafe { 
llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) } } - fn landing_pad( - &mut self, - ty: &'ll Type, - pers_fn: &'ll Value, - num_clauses: usize, - ) -> &'ll Value { - // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts while, - // LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12). The - // personality lives on the parent function anyway. - self.set_personality_fn(pers_fn); + fn set_personality_fn(&mut self, personality: &'ll Value) { unsafe { - llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED) + llvm::LLVMSetPersonalityFn(self.llfn(), personality); } } - fn set_cleanup(&mut self, landing_pad: &'ll Value) { + fn cleanup_landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value) -> &'ll Value { + let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } + landing_pad } - fn resume(&mut self, exn: &'ll Value) -> &'ll Value { - unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } + fn resume(&mut self, exn: &'ll Value) { + unsafe { + llvm::LLVMBuildResume(self.llbuilder, exn); + } } fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> { @@ -995,14 +990,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Funclet::new(ret.expect("LLVM does not have support for cleanuppad")) } - fn cleanup_ret( - &mut self, - funclet: &Funclet<'ll>, - unwind: Option<&'ll BasicBlock>, - ) -> &'ll Value { - let ret = - unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) }; - ret.expect("LLVM does not have support for cleanupret") + fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) { + unsafe { + llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) + .expect("LLVM does not have support for cleanupret"); + } } fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> { @@ -1023,7 +1015,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { &mut self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, - num_handlers: usize, + handlers: &[&'ll BasicBlock], ) -> &'ll Value { let name = cstr!("catchswitch"); let ret = unsafe { @@ -1031,23 +1023,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { self.llbuilder, parent, unwind, - num_handlers as c_uint, + handlers.len() as c_uint, name.as_ptr(), ) }; - ret.expect("LLVM does not have support for catchswitch") - } - - fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { - unsafe { - llvm::LLVMRustAddHandler(catch_switch, handler); - } - } - - fn set_personality_fn(&mut self, personality: &'ll Value) { - unsafe { - llvm::LLVMSetPersonalityFn(self.llfn(), personality); + let ret = ret.expect("LLVM does not have support for catchswitch"); + for handler in handlers { + unsafe { + llvm::LLVMRustAddHandler(ret, handler); + } } + ret } // Atomic Operations @@ -1478,4 +1464,19 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { None } } + + pub(crate) fn landing_pad( + &mut self, + ty: &'ll Type, + pers_fn: &'ll Value, + num_clauses: usize, + ) -> &'ll Value { + // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts while, + // LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12). The + // personality lives on the parent function anyway. 
+ self.set_personality_fn(pers_fn); + unsafe { + llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED) + } + } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index bb16bc5dccd..8672459b5da 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -215,16 +215,19 @@ pub unsafe fn create_module<'ll>( // to ensure intrinsic calls don't use it. if !sess.needs_plt() { let avoid_plt = "RtLibUseGOT\0".as_ptr().cast(); - llvm::LLVMRustAddModuleFlag(llmod, avoid_plt, 1); + llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1); } if sess.is_sanitizer_cfi_enabled() { // FIXME(rcvalle): Add support for non canonical jump tables. let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast(); - // FIXME(rcvalle): Add it with Override behavior flag--LLVMRustAddModuleFlag adds it with - // Warning behavior flag. Add support for specifying the behavior flag to - // LLVMRustAddModuleFlag. - llvm::LLVMRustAddModuleFlag(llmod, canonical_jump_tables, 1); + // FIXME(rcvalle): Add it with Override behavior flag. + llvm::LLVMRustAddModuleFlag( + llmod, + llvm::LLVMModFlagBehavior::Warning, + canonical_jump_tables, + 1, + ); } // Control Flow Guard is currently only supported by the MSVC linker on Windows. @@ -233,11 +236,21 @@ pub unsafe fn create_module<'ll>( CFGuard::Disabled => {} CFGuard::NoChecks => { // Set `cfguard=1` module flag to emit metadata only. - llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 1) + llvm::LLVMRustAddModuleFlag( + llmod, + llvm::LLVMModFlagBehavior::Warning, + "cfguard\0".as_ptr() as *const _, + 1, + ) } CFGuard::Checks => { // Set `cfguard=2` module flag to emit metadata and checks. 
- llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 2) + llvm::LLVMRustAddModuleFlag( + llmod, + llvm::LLVMModFlagBehavior::Warning, + "cfguard\0".as_ptr() as *const _, + 2, + ) } } } @@ -247,24 +260,28 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlag( llmod, + llvm::LLVMModFlagBehavior::Error, "branch-target-enforcement\0".as_ptr().cast(), bti.into(), ); llvm::LLVMRustAddModuleFlag( llmod, + llvm::LLVMModFlagBehavior::Error, "sign-return-address\0".as_ptr().cast(), pac.is_some().into(), ); let pac_opts = pac.unwrap_or(PacRet { leaf: false, key: PAuthKey::A }); llvm::LLVMRustAddModuleFlag( llmod, + llvm::LLVMModFlagBehavior::Error, "sign-return-address-all\0".as_ptr().cast(), pac_opts.leaf.into(), ); let is_bkey = if pac_opts.key == PAuthKey::A { false } else { true }; llvm::LLVMRustAddModuleFlag( llmod, + llvm::LLVMModFlagBehavior::Error, "sign-return-address-with-bkey\0".as_ptr().cast(), is_bkey.into(), ); diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index 32f18419753..3014d2f1930 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -9,6 +9,7 @@ use rustc_data_structures::fx::FxIndexSet; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefIdSet; use rustc_llvm::RustString; +use rustc_middle::bug; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::coverage::CodeRegion; use rustc_middle::ty::TyCtxt; @@ -76,10 +77,18 @@ pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| { mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer); }); - debug_assert!( - !coverage_mapping_buffer.is_empty(), - "Every `FunctionCoverage` should have at least one counter" - ); + + if coverage_mapping_buffer.is_empty() { + if function_coverage.is_used() { + bug!( + "A used function should have had coverage mapping data but did not: {}", + mangled_function_name + ); + } else { + debug!("unused function had no coverage mapping data: {}", mangled_function_name); + continue; + } + } function_data.push((mangled_function_name, source_hash, is_used, coverage_mapping_buffer)); } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 3d5fd2f354e..1266b540aae 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -10,6 +10,8 @@ use super::CrateDebugContext; use crate::abi; use crate::common::CodegenCx; +use crate::debuginfo::utils::fat_pointer_kind; +use crate::debuginfo::utils::FatPtrKind; use crate::llvm; use crate::llvm::debuginfo::{ DIArray, DICompositeType, DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, @@ -376,22 +378,24 @@ macro_rules! return_if_metadata_created_in_meantime { }; } -fn fixed_vec_metadata<'ll, 'tcx>( +/// Creates debuginfo for a fixed size array (e.g. `[u64; 123]`). +/// For slices (that is, "arrays" of unknown size) use [slice_type_metadata]. 
+fn fixed_size_array_metadata<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, unique_type_id: UniqueTypeId, - array_or_slice_type: Ty<'tcx>, - element_type: Ty<'tcx>, + array_type: Ty<'tcx>, ) -> MetadataCreationResult<'ll> { + let ty::Array(element_type, len) = array_type.kind() else { + bug!("fixed_size_array_metadata() called with non-ty::Array type `{:?}`", array_type) + }; + let element_type_metadata = type_metadata(cx, element_type); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let (size, align) = cx.size_and_align_of(array_or_slice_type); + let (size, align) = cx.size_and_align_of(array_type); - let upper_bound = match array_or_slice_type.kind() { - ty::Array(_, len) => len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong, - _ => -1, - }; + let upper_bound = len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong; let subrange = unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) }; @@ -410,55 +414,111 @@ fn fixed_vec_metadata<'ll, 'tcx>( MetadataCreationResult::new(metadata, false) } -fn vec_slice_metadata<'ll, 'tcx>( +/// Creates debuginfo for built-in pointer-like things: +/// +/// - ty::Ref +/// - ty::RawPtr +/// - ty::Adt in the case it's Box +/// +/// At some point we might want to remove the special handling of Box +/// and treat it the same as other smart pointers (like Rc, Arc, ...). +fn pointer_or_reference_metadata<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, - slice_ptr_type: Ty<'tcx>, - element_type: Ty<'tcx>, + ptr_type: Ty<'tcx>, + pointee_type: Ty<'tcx>, unique_type_id: UniqueTypeId, ) -> MetadataCreationResult<'ll> { - let data_ptr_type = cx.tcx.mk_imm_ptr(element_type); - - let data_ptr_metadata = type_metadata(cx, data_ptr_type); + let pointee_type_metadata = type_metadata(cx, pointee_type); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let slice_type_name = compute_debuginfo_type_name(cx.tcx, slice_ptr_type, true); - - let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type); - let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx.types.usize); - - let member_descriptions = vec![ - MemberDescription { - name: "data_ptr".to_owned(), - type_metadata: data_ptr_metadata, - offset: Size::ZERO, - size: pointer_size, - align: pointer_align, - flags: DIFlags::FlagZero, - discriminant: None, - source_info: None, - }, - MemberDescription { - name: "length".to_owned(), - type_metadata: type_metadata(cx, cx.tcx.types.usize), - offset: pointer_size, - size: usize_size, - align: usize_align, - flags: DIFlags::FlagZero, - discriminant: None, - source_info: None, - }, - ]; + let (thin_pointer_size, thin_pointer_align) = + cx.size_and_align_of(cx.tcx.mk_imm_ptr(cx.tcx.types.unit)); + let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true); - let metadata = composite_type_metadata( - cx, - slice_ptr_type, - &slice_type_name, - unique_type_id, - member_descriptions, - NO_SCOPE_METADATA, - ); - MetadataCreationResult::new(metadata, false) + let pointer_type_metadata = match fat_pointer_kind(cx, pointee_type) { + None => { + // This is a thin pointer. Create a regular pointer type and give it the correct name. + debug_assert_eq!( + (thin_pointer_size, thin_pointer_align), + cx.size_and_align_of(ptr_type) + ); + + unsafe { + llvm::LLVMRustDIBuilderCreatePointerType( + DIB(cx), + pointee_type_metadata, + thin_pointer_size.bits(), + thin_pointer_align.bits() as u32, + 0, // Ignore DWARF address space. 
+ ptr_type_debuginfo_name.as_ptr().cast(), + ptr_type_debuginfo_name.len(), + ) + } + } + Some(fat_pointer_kind) => { + let layout = cx.layout_of(ptr_type); + + let addr_field = layout.field(cx, abi::FAT_PTR_ADDR); + let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA); + + let (addr_field_name, extra_field_name) = match fat_pointer_kind { + FatPtrKind::Dyn => ("pointer", "vtable"), + FatPtrKind::Slice => ("data_ptr", "length"), + }; + + debug_assert_eq!(abi::FAT_PTR_ADDR, 0); + debug_assert_eq!(abi::FAT_PTR_EXTRA, 1); + + // The data pointer type is a regular, thin pointer, regardless of whether this is a slice + // or a trait object. + let data_ptr_type_metadata = unsafe { + llvm::LLVMRustDIBuilderCreatePointerType( + DIB(cx), + pointee_type_metadata, + addr_field.size.bits(), + addr_field.align.abi.bits() as u32, + 0, // Ignore DWARF address space. + std::ptr::null(), + 0, + ) + }; + + let member_descriptions = vec![ + MemberDescription { + name: addr_field_name.into(), + type_metadata: data_ptr_type_metadata, + offset: layout.fields.offset(abi::FAT_PTR_ADDR), + size: addr_field.size, + align: addr_field.align.abi, + flags: DIFlags::FlagZero, + discriminant: None, + source_info: None, + }, + MemberDescription { + name: extra_field_name.into(), + type_metadata: type_metadata(cx, extra_field.ty), + offset: layout.fields.offset(abi::FAT_PTR_EXTRA), + size: extra_field.size, + align: extra_field.align.abi, + flags: DIFlags::FlagZero, + discriminant: None, + source_info: None, + }, + ]; + + composite_type_metadata( + cx, + ptr_type, + &ptr_type_debuginfo_name, + unique_type_id, + member_descriptions, + NO_SCOPE_METADATA, + ) + } + }; + + MetadataCreationResult { metadata: pointer_type_metadata, already_stored_in_typemap: false } } fn subroutine_type_metadata<'ll, 'tcx>( @@ -495,83 +555,57 @@ fn subroutine_type_metadata<'ll, 'tcx>( ) } -// FIXME(1563): This is all a bit of a hack because 'trait pointer' is an ill- -// defined concept. For the case of an actual trait pointer (i.e., `Box<Trait>`, -// `&Trait`), `trait_object_type` should be the whole thing (e.g, `Box<Trait>`) and -// `trait_type` should be the actual trait (e.g., `Trait`). Where the trait is part -// of a DST struct, there is no `trait_object_type` and the results of this -// function will be a little bit weird. -fn trait_pointer_metadata<'ll, 'tcx>( +/// Create debuginfo for `dyn SomeTrait` types. Currently these are empty structs +/// we with the correct type name (e.g. "dyn SomeTrait<Foo, Item=u32> + Sync"). +fn dyn_type_metadata<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, - trait_type: Ty<'tcx>, - trait_object_type: Option<Ty<'tcx>>, + dyn_type: Ty<'tcx>, unique_type_id: UniqueTypeId, ) -> &'ll DIType { - // The implementation provided here is a stub. It makes sure that the trait - // type is assigned the correct name, size, namespace, and source location. - // However, it does not describe the trait's methods. - - let (containing_scope, trait_type_name) = match trait_object_type { - Some(trait_object_type) => match trait_object_type.kind() { - ty::Adt(def, _) => ( - Some(get_namespace_for_item(cx, def.did)), - compute_debuginfo_type_name(cx.tcx, trait_object_type, false), - ), - ty::RawPtr(_) | ty::Ref(..) => { - (NO_SCOPE_METADATA, compute_debuginfo_type_name(cx.tcx, trait_object_type, true)) - } - _ => { - bug!( - "debuginfo: unexpected trait-object type in \ - trait_pointer_metadata(): {:?}", - trait_object_type - ); - } - }, + if let ty::Dynamic(..) 
= dyn_type.kind() { + let type_name = compute_debuginfo_type_name(cx.tcx, dyn_type, true); + composite_type_metadata(cx, dyn_type, &type_name, unique_type_id, vec![], NO_SCOPE_METADATA) + } else { + bug!("Only ty::Dynamic is valid for dyn_type_metadata(). Found {:?} instead.", dyn_type) + } +} - // No object type, use the trait type directly (no scope here since the type - // will be wrapped in the dyn$ synthetic type). - None => (NO_SCOPE_METADATA, compute_debuginfo_type_name(cx.tcx, trait_type, true)), +/// Create debuginfo for `[T]` and `str`. These are unsized. +/// +/// NOTE: We currently emit just emit the debuginfo for the element type here +/// (i.e. `T` for slices and `u8` for `str`), so that we end up with +/// `*const T` for the `data_ptr` field of the corresponding fat-pointer +/// debuginfo of `&[T]`. +/// +/// It would be preferable and more accurate if we emitted a DIArray of T +/// without an upper bound instead. That is, LLVM already supports emitting +/// debuginfo of arrays of unknown size. But GDB currently seems to end up +/// in an infinite loop when confronted with such a type. +/// +/// As a side effect of the current encoding every instance of a type like +/// `struct Foo { unsized_field: [u8] }` will look like +/// `struct Foo { unsized_field: u8 }` in debuginfo. If the length of the +/// slice is zero, then accessing `unsized_field` in the debugger would +/// result in an out-of-bounds access. +fn slice_type_metadata<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + slice_type: Ty<'tcx>, + unique_type_id: UniqueTypeId, +) -> MetadataCreationResult<'ll> { + let element_type = match slice_type.kind() { + ty::Slice(element_type) => element_type, + ty::Str => cx.tcx.types.u8, + _ => { + bug!( + "Only ty::Slice is valid for slice_type_metadata(). Found {:?} instead.", + slice_type + ) + } }; - let layout = cx.layout_of(cx.tcx.mk_mut_ptr(trait_type)); - - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - - let data_ptr_field = layout.field(cx, 0); - let vtable_field = layout.field(cx, 1); - let member_descriptions = vec![ - MemberDescription { - name: "pointer".to_owned(), - type_metadata: type_metadata(cx, cx.tcx.mk_mut_ptr(cx.tcx.types.u8)), - offset: layout.fields.offset(0), - size: data_ptr_field.size, - align: data_ptr_field.align.abi, - flags: DIFlags::FlagArtificial, - discriminant: None, - source_info: None, - }, - MemberDescription { - name: "vtable".to_owned(), - type_metadata: type_metadata(cx, vtable_field.ty), - offset: layout.fields.offset(1), - size: vtable_field.size, - align: vtable_field.align.abi, - flags: DIFlags::FlagArtificial, - discriminant: None, - source_info: None, - }, - ]; - - composite_type_metadata( - cx, - trait_object_type.unwrap_or(trait_type), - &trait_type_name, - unique_type_id, - member_descriptions, - containing_scope, - ) + let element_type_metadata = type_metadata(cx, element_type); + return_if_metadata_created_in_meantime!(cx, unique_type_id); + MetadataCreationResult { metadata: element_type_metadata, already_stored_in_typemap: false } } pub fn type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { @@ -610,26 +644,6 @@ pub fn type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll debug!("type_metadata: {:?}", t); - let ptr_metadata = |ty: Ty<'tcx>| match *ty.kind() { - ty::Slice(typ) => Ok(vec_slice_metadata(cx, t, typ, unique_type_id)), - ty::Str => Ok(vec_slice_metadata(cx, t, cx.tcx.types.u8, unique_type_id)), - ty::Dynamic(..) 
=> Ok(MetadataCreationResult::new( - trait_pointer_metadata(cx, ty, Some(t), unique_type_id), - false, - )), - _ => { - let pointee_metadata = type_metadata(cx, ty); - - if let Some(metadata) = - debug_context(cx).type_map.borrow().find_metadata_for_unique_id(unique_type_id) - { - return Err(metadata); - } - - Ok(MetadataCreationResult::new(pointer_type_metadata(cx, t, pointee_metadata), false)) - } - }; - let MetadataCreationResult { metadata, already_stored_in_typemap } = match *t.kind() { ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) @@ -637,22 +651,20 @@ pub fn type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll ty::Tuple(elements) if elements.is_empty() => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } - ty::Array(typ, _) | ty::Slice(typ) => fixed_vec_metadata(cx, unique_type_id, t, typ), - ty::Str => fixed_vec_metadata(cx, unique_type_id, t, cx.tcx.types.i8), + ty::Array(..) => fixed_size_array_metadata(cx, unique_type_id, t), + ty::Slice(_) | ty::Str => slice_type_metadata(cx, t, unique_type_id), ty::Dynamic(..) => { - MetadataCreationResult::new(trait_pointer_metadata(cx, t, None, unique_type_id), false) + MetadataCreationResult::new(dyn_type_metadata(cx, t, unique_type_id), false) } ty::Foreign(..) => { MetadataCreationResult::new(foreign_type_metadata(cx, t, unique_type_id), false) } - ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => match ptr_metadata(ty) { - Ok(res) => res, - Err(metadata) => return metadata, - }, - ty::Adt(def, _) if def.is_box() => match ptr_metadata(t.boxed_ty()) { - Ok(res) => res, - Err(metadata) => return metadata, - }, + ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => { + pointer_or_reference_metadata(cx, t, pointee_type, unique_type_id) + } + ty::Adt(def, _) if def.is_box() => { + pointer_or_reference_metadata(cx, t, t.boxed_ty(), unique_type_id) + } ty::FnDef(..) | ty::FnPtr(_) => { if let Some(metadata) = debug_context(cx).type_map.borrow().find_metadata_for_unique_id(unique_type_id) @@ -694,7 +706,22 @@ pub fn type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll type_map.borrow_mut().remove_type(t); // This is actually a function pointer, so wrap it in pointer DI. - MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false) + let (pointer_size, pointer_align) = + cx.size_and_align_of(cx.tcx.mk_imm_ptr(cx.tcx.mk_unit())); + let name = compute_debuginfo_type_name(cx.tcx, t, false); + let md = unsafe { + llvm::LLVMRustDIBuilderCreatePointerType( + DIB(cx), + fn_metadata, + pointer_size.bits(), + pointer_align.bits() as u32, + 0, // Ignore DWARF address space. 
+ name.as_ptr().cast(), + name.len(), + ) + }; + + MetadataCreationResult::new(md, false) } ty::Closure(def_id, substs) => { let upvar_tys: Vec<_> = substs.as_closure().upvar_tys().collect(); @@ -959,26 +986,6 @@ fn foreign_type_metadata<'ll, 'tcx>( create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA, DIFlags::FlagZero) } -fn pointer_type_metadata<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, - pointer_type: Ty<'tcx>, - pointee_type_metadata: &'ll DIType, -) -> &'ll DIType { - let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type); - let name = compute_debuginfo_type_name(cx.tcx, pointer_type, false); - unsafe { - llvm::LLVMRustDIBuilderCreatePointerType( - DIB(cx), - pointee_type_metadata, - pointer_size.bits(), - pointer_align.bits() as u32, - 0, // Ignore DWARF address space. - name.as_ptr().cast(), - name.len(), - ) - } -} - fn param_type_metadata<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { debug!("param_type_metadata: {:?}", t); let name = format!("{:?}", t); @@ -1493,7 +1500,7 @@ fn generator_layout_and_saved_local_names<'tcx>( let state_arg = mir::Local::new(1); for var in &body.var_debug_info { - let place = if let mir::VarDebugInfoContents::Place(p) = var.value { p } else { continue }; + let mir::VarDebugInfoContents::Place(place) = &var.value else { continue }; if place.local != state_arg { continue; } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 61e49fab6ff..28eb8e2a0a4 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -108,18 +108,29 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { // This can be overridden using --llvm-opts -dwarf-version,N. // Android has the same issue (#22398) if let Some(version) = sess.target.dwarf_version { - llvm::LLVMRustAddModuleFlag(self.llmod, "Dwarf Version\0".as_ptr().cast(), version) + llvm::LLVMRustAddModuleFlag( + self.llmod, + llvm::LLVMModFlagBehavior::Warning, + "Dwarf Version\0".as_ptr().cast(), + version, + ) } // Indicate that we want CodeView debug information on MSVC if sess.target.is_like_msvc { - llvm::LLVMRustAddModuleFlag(self.llmod, "CodeView\0".as_ptr().cast(), 1) + llvm::LLVMRustAddModuleFlag( + self.llmod, + llvm::LLVMModFlagBehavior::Warning, + "CodeView\0".as_ptr().cast(), + 1, + ) } // Prevent bitcode readers from deleting the debug info. 
let ptr = "Debug Info Version\0".as_ptr(); llvm::LLVMRustAddModuleFlag( self.llmod, + llvm::LLVMModFlagBehavior::Warning, ptr.cast(), llvm::LLVMRustDebugMetadataVersion(), ); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs index 953b6765a48..6dd0d58efe3 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs @@ -4,7 +4,9 @@ use super::namespace::item_namespace; use super::CrateDebugContext; use rustc_hir::def_id::DefId; -use rustc_middle::ty::DefIdTree; +use rustc_middle::ty::layout::LayoutOf; +use rustc_middle::ty::{self, DefIdTree, Ty}; +use rustc_target::abi::VariantIdx; use crate::common::CodegenCx; use crate::llvm; @@ -46,3 +48,58 @@ pub fn DIB<'a, 'll>(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> { pub fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { item_namespace(cx, cx.tcx.parent(def_id).expect("get_namespace_for_item: missing parent?")) } + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum FatPtrKind { + Slice, + Dyn, +} + +/// Determines if `pointee_ty` is slice-like or trait-object-like, i.e. +/// if the second field of the fat pointer is a length or a vtable-pointer. +/// If `pointee_ty` does not require a fat pointer (because it is Sized) then +/// the function returns `None`. +pub(crate) fn fat_pointer_kind<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + pointee_ty: Ty<'tcx>, +) -> Option<FatPtrKind> { + let layout = cx.layout_of(pointee_ty); + + if !layout.is_unsized() { + return None; + } + + match *pointee_ty.kind() { + ty::Str | ty::Slice(_) => Some(FatPtrKind::Slice), + ty::Dynamic(..) => Some(FatPtrKind::Dyn), + ty::Adt(adt_def, _) => { + assert!(adt_def.is_struct()); + assert!(adt_def.variants.len() == 1); + let variant = &adt_def.variants[VariantIdx::from_usize(0)]; + assert!(!variant.fields.is_empty()); + let last_field_index = variant.fields.len() - 1; + + debug_assert!( + (0..last_field_index) + .all(|field_index| { !layout.field(cx, field_index).is_unsized() }) + ); + + let unsized_field = layout.field(cx, last_field_index); + assert!(unsized_field.is_unsized()); + fat_pointer_kind(cx, unsized_field.ty) + } + ty::Foreign(_) => { + // Assert that pointers to foreign types really are thin: + debug_assert_eq!( + cx.size_of(cx.tcx.mk_imm_ptr(pointee_ty)), + cx.size_of(cx.tcx.mk_imm_ptr(cx.tcx.types.u8)) + ); + None + } + _ => { + // For all other pointee types we should already have returned None + // at the beginning of the function. 
+ panic!("fat_pointer_kind() - Encountered unexpected `pointee_ty`: {:?}", pointee_ty) + } + } +} diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 5adfa18035a..f51d014bfb3 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -525,9 +525,8 @@ fn codegen_msvc_try<'ll>( normal.ret(bx.const_i32(0)); - let cs = catchswitch.catch_switch(None, None, 2); - catchswitch.add_handler(cs, catchpad_rust.llbb()); - catchswitch.add_handler(cs, catchpad_foreign.llbb()); + let cs = + catchswitch.catch_switch(None, None, &[catchpad_rust.llbb(), catchpad_foreign.llbb()]); // We can't use the TypeDescriptor defined in libpanic_unwind because it // might be in another DLL and the SEH encoding only supports specifying diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index cea4595fbbf..f0612eaba80 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -7,6 +7,7 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![feature(bool_to_option)] #![feature(crate_visibility_modifier)] +#![feature(let_else)] #![feature(extern_types)] #![feature(nll)] #![recursion_limit = "256"] diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index a1c7d2b4f61..2b102188790 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -61,6 +61,26 @@ pub enum LLVMMachineType { ARM = 0x01c0, } +/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h. +/// +/// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are +/// resolved according to the merge behaviors specified here. Flags differing only in merge +/// behavior are still considered to be in conflict. +/// +/// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably, +/// 'Error' and 'Warning' cannot be mixed for a given flag. +#[derive(Copy, Clone, PartialEq)] +#[repr(C)] +pub enum LLVMModFlagBehavior { + Error = 1, + Warning = 2, + Require = 3, + Override = 4, + Append = 5, + AppendUnique = 6, + Max = 7, +} + // Consts for the LLVM CallConv type, pre-cast to usize. /// LLVM CallingConv::ID. Should we wrap this? @@ -1895,7 +1915,16 @@ extern "C" { pub fn LLVMRustIsRustLLVM() -> bool; - pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32); + /// Add LLVM module flags. + /// + /// In order for Rust-C LTO to work, module flags must be compatible with Clang. What + /// "compatible" means depends on the merge behaviors involved. 
+ pub fn LLVMRustAddModuleFlag( + M: &Module, + merge_behavior: LLVMModFlagBehavior, + name: *const c_char, + value: u32, + ); pub fn LLVMRustMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index acf65259f61..7a13e424f9a 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -932,7 +932,7 @@ fn link_natively<'a, B: ArchiveBuilder<'a>>( but `link.exe` was not found", ); sess.note_without_error( - "please ensure that VS 2013, VS 2015, VS 2017 or VS 2019 \ + "please ensure that VS 2013, VS 2015, VS 2017, VS 2019 or VS 2022 \ was installed with the Visual C++ option", ); } @@ -1159,6 +1159,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { LinkerFlavor::Lld(_) => "lld", LinkerFlavor::PtxLinker => "rust-ptx-linker", LinkerFlavor::BpfLinker => "bpf-linker", + LinkerFlavor::L4Bender => "l4-bender", }), flavor, )), diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index 15d16e7d3d6..3fb56f42b8c 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -126,7 +126,6 @@ pub fn get_linker<'a>( // FIXME: Move `/LIBPATH` addition for uwp targets from the linker construction // to the linker args construction. assert!(cmd.get_args().is_empty() || sess.target.vendor == "uwp"); - match flavor { LinkerFlavor::Lld(LldFlavor::Link) | LinkerFlavor::Msvc => { Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker> @@ -149,6 +148,8 @@ pub fn get_linker<'a>( LinkerFlavor::PtxLinker => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>, LinkerFlavor::BpfLinker => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>, + + LinkerFlavor::L4Bender => Box::new(L4Bender::new(cmd, sess)) as Box<dyn Linker>, } } @@ -1355,6 +1356,157 @@ impl<'a> Linker for WasmLd<'a> { } } +/// Linker shepherd script for L4Re (Fiasco) +pub struct L4Bender<'a> { + cmd: Command, + sess: &'a Session, + hinted_static: bool, +} + +impl<'a> Linker for L4Bender<'a> { + fn link_dylib(&mut self, _lib: Symbol, _verbatim: bool, _as_needed: bool) { + bug!("dylibs are not supported on L4Re"); + } + fn link_staticlib(&mut self, lib: Symbol, _verbatim: bool) { + self.hint_static(); + self.cmd.arg(format!("-PC{}", lib)); + } + fn link_rlib(&mut self, lib: &Path) { + self.hint_static(); + self.cmd.arg(lib); + } + fn include_path(&mut self, path: &Path) { + self.cmd.arg("-L").arg(path); + } + fn framework_path(&mut self, _: &Path) { + bug!("frameworks are not supported on L4Re"); + } + fn output_filename(&mut self, path: &Path) { + self.cmd.arg("-o").arg(path); + } + + fn add_object(&mut self, path: &Path) { + self.cmd.arg(path); + } + + fn full_relro(&mut self) { + self.cmd.arg("-zrelro"); + self.cmd.arg("-znow"); + } + + fn partial_relro(&mut self) { + self.cmd.arg("-zrelro"); + } + + fn no_relro(&mut self) { + self.cmd.arg("-znorelro"); + } + + fn cmd(&mut self) -> &mut Command { + &mut self.cmd + } + + fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {} + + fn link_rust_dylib(&mut self, _: Symbol, _: &Path) { + panic!("Rust dylibs not supported"); + } + + fn link_framework(&mut self, _framework: Symbol, _as_needed: bool) { + bug!("frameworks not supported on L4Re"); + } + + fn link_whole_staticlib(&mut self, lib: Symbol, _verbatim: bool, _search_path: &[PathBuf]) { + self.hint_static(); + 
self.cmd.arg("--whole-archive").arg(format!("-l{}", lib)); + self.cmd.arg("--no-whole-archive"); + } + + fn link_whole_rlib(&mut self, lib: &Path) { + self.hint_static(); + self.cmd.arg("--whole-archive").arg(lib).arg("--no-whole-archive"); + } + + fn gc_sections(&mut self, keep_metadata: bool) { + if !keep_metadata { + self.cmd.arg("--gc-sections"); + } + } + + fn no_gc_sections(&mut self) { + self.cmd.arg("--no-gc-sections"); + } + + fn optimize(&mut self) { + // GNU-style linkers support optimization with -O. GNU ld doesn't + // need a numeric argument, but other linkers do. + if self.sess.opts.optimize == config::OptLevel::Default + || self.sess.opts.optimize == config::OptLevel::Aggressive + { + self.cmd.arg("-O1"); + } + } + + fn pgo_gen(&mut self) {} + + fn debuginfo(&mut self, strip: Strip) { + match strip { + Strip::None => {} + Strip::Debuginfo => { + self.cmd().arg("--strip-debug"); + } + Strip::Symbols => { + self.cmd().arg("--strip-all"); + } + } + } + + fn no_default_libraries(&mut self) { + self.cmd.arg("-nostdlib"); + } + + fn export_symbols(&mut self, _: &Path, _: CrateType, _: &[String]) { + // ToDo, not implemented, copy from GCC + self.sess.warn("exporting symbols not implemented yet for L4Bender"); + return; + } + + fn subsystem(&mut self, subsystem: &str) { + self.cmd.arg(&format!("--subsystem {}", subsystem)); + } + + fn reset_per_library_state(&mut self) { + self.hint_static(); // Reset to default before returning the composed command line. + } + + fn group_start(&mut self) { + self.cmd.arg("--start-group"); + } + + fn group_end(&mut self) { + self.cmd.arg("--end-group"); + } + + fn linker_plugin_lto(&mut self) {} + + fn control_flow_guard(&mut self) {} + + fn no_crt_objects(&mut self) {} +} + +impl<'a> L4Bender<'a> { + pub fn new(cmd: Command, sess: &'a Session) -> L4Bender<'a> { + L4Bender { cmd: cmd, sess: sess, hinted_static: false } + } + + fn hint_static(&mut self) { + if !self.hinted_static { + self.cmd.arg("-static"); + self.hinted_static = true; + } + } +} + pub(crate) fn exported_symbols(tcx: TyCtxt<'_>, crate_type: CrateType) -> Vec<String> { if let Some(ref exports) = tcx.sess.target.override_export_symbols { return exports.clone(); diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index 5ce4e606fd2..4c7a09ca1e9 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -477,6 +477,28 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup); } + fn codegen_abort_terminator( + &mut self, + helper: TerminatorCodegenHelper<'tcx>, + mut bx: Bx, + terminator: &mir::Terminator<'tcx>, + ) { + let span = terminator.source_info.span; + self.set_debug_loc(&mut bx, terminator.source_info); + + // Get the location information. + let location = self.get_caller_location(&mut bx, terminator.source_info).immediate(); + + // Obtain the panic entry point. + let def_id = common::langcall(bx.tcx(), Some(span), "", LangItem::PanicNoUnwind); + let instance = ty::Instance::mono(bx.tcx(), def_id); + let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty()); + let llfn = bx.get_fn_addr(instance); + + // Codegen the actual panic invoke/call. + helper.do_call(self, &mut bx, fn_abi, llfn, &[location], None, None); + } + /// Returns `true` if this is indeed a panic intrinsic and codegen is done. 
fn codegen_panic_intrinsic( &mut self, @@ -1014,10 +1036,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx), mir::TerminatorKind::Abort => { - bx.abort(); - // `abort` does not terminate the block, so we still need to generate - // an `unreachable` terminator after it. - bx.unreachable(); + self.codegen_abort_terminator(helper, bx, terminator); } mir::TerminatorKind::Goto { target } => { @@ -1327,8 +1346,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let mut cp_bx = self.new_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); - let cs = cs_bx.catch_switch(None, None, 1); - cs_bx.add_handler(cs, cp_bx.llbb()); + let cs = cs_bx.catch_switch(None, None, &[cp_bx.llbb()]); // The "null" here is actually a RTTI type descriptor for the // C++ personality function, but `catch (...)` has no type so @@ -1355,8 +1373,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); - let lp = bx.landing_pad(llretty, llpersonality, 1); - bx.set_cleanup(lp); + let lp = bx.cleanup_landing_pad(llretty, llpersonality); let slot = self.get_personality_slot(&mut bx); slot.storage_live(&mut bx); diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index 3657f80c2de..c654232c10a 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -369,6 +369,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } + sym::const_allocate => { + // returns a null pointer at runtime. + bx.const_null(bx.type_i8p()) + } + + sym::const_deallocate => { + // nop at runtime. 
+ return; + } + // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst name if name_str.starts_with("atomic_") => { diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 5a06fb46105..53fb21b269a 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -421,29 +421,22 @@ pub trait BuilderMethods<'a, 'tcx>: fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; - fn landing_pad( - &mut self, - ty: Self::Type, - pers_fn: Self::Value, - num_clauses: usize, - ) -> Self::Value; - fn set_cleanup(&mut self, landing_pad: Self::Value); - fn resume(&mut self, exn: Self::Value) -> Self::Value; + fn set_personality_fn(&mut self, personality: Self::Value); + + // These are used by everyone except msvc + fn cleanup_landing_pad(&mut self, ty: Self::Type, pers_fn: Self::Value) -> Self::Value; + fn resume(&mut self, exn: Self::Value); + + // These are used only by msvc fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet; - fn cleanup_ret( - &mut self, - funclet: &Self::Funclet, - unwind: Option<Self::BasicBlock>, - ) -> Self::Value; + fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>); fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; fn catch_switch( &mut self, parent: Option<Self::Value>, unwind: Option<Self::BasicBlock>, - num_handlers: usize, + handlers: &[Self::BasicBlock], ) -> Self::Value; - fn add_handler(&mut self, catch_switch: Self::Value, handler: Self::BasicBlock); - fn set_personality_fn(&mut self, personality: Self::Value); fn atomic_cmpxchg( &mut self, diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 3ec9f3ca3b8..9dc34260de7 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -7,7 +7,6 @@ use crate::interpret::{ }; use rustc_errors::ErrorReported; -use rustc_hir as hir; use rustc_hir::def::DefKind; use rustc_middle::mir; use rustc_middle::mir::interpret::ErrorHandled; @@ -216,7 +215,7 @@ pub fn eval_to_const_value_raw_provider<'tcx>( tcx: TyCtxt<'tcx>, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, ) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> { - assert!(key.param_env.constness() == hir::Constness::Const); + assert!(key.param_env.is_const()); // see comment in eval_to_allocation_raw_provider for what we're doing here if key.param_env.reveal() == Reveal::All { let mut key = key; @@ -251,7 +250,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>( tcx: TyCtxt<'tcx>, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, ) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> { - assert!(key.param_env.constness() == hir::Constness::Const); + assert!(key.param_env.is_const()); // Because the constant is computed twice (once per value of `Reveal`), we are at risk of // reporting the same error twice here. 
To resolve this, we check whether we can evaluate the // constant in the more restrictive `Reveal::UserFacing`, which most likely already was diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index 30e9cbe4403..89717b75f12 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -347,6 +347,33 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, )?; ecx.write_pointer(ptr, dest)?; } + sym::const_deallocate => { + let ptr = ecx.read_pointer(&args[0])?; + let size = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?; + let align = ecx.read_scalar(&args[2])?.to_machine_usize(ecx)?; + + let size = Size::from_bytes(size); + let align = match Align::from_bytes(align) { + Ok(a) => a, + Err(err) => throw_ub_format!("align has to be a power of 2, {}", err), + }; + + // If an allocation is created in an another const, + // we don't deallocate it. + let (alloc_id, _, _) = ecx.memory.ptr_get_alloc(ptr)?; + let is_allocated_in_another_const = matches!( + ecx.tcx.get_global_alloc(alloc_id), + Some(interpret::GlobalAlloc::Memory(_)) + ); + + if !is_allocated_in_another_const { + ecx.memory.deallocate( + ptr, + Some((size, align)), + interpret::MemoryKind::Machine(MemoryKind::Heap), + )?; + } + } _ => { return Err(ConstEvalErrKind::NeedsRfc(format!( "calling intrinsic `{}`", diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs index cf939aaa73f..91610b15eb9 100644 --- a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs @@ -4,11 +4,12 @@ use rustc_errors::ErrorReported; use rustc_infer::infer::TyCtxtInferExt; +use rustc_infer::traits::TraitEngine; use rustc_middle::mir::*; use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty}; use rustc_span::DUMMY_SP; use rustc_trait_selection::traits::{ - self, ImplSource, Obligation, ObligationCause, SelectionContext, + self, FulfillmentContext, ImplSource, Obligation, ObligationCause, SelectionContext, }; use super::ConstCx; @@ -145,15 +146,10 @@ impl Qualif for NeedsNonConstDrop { qualifs.needs_non_const_drop } - fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, mut ty: Ty<'tcx>) -> bool { - // Avoid selecting for simple cases. - match ty::util::needs_drop_components(ty, &cx.tcx.data_layout).as_deref() { - Ok([]) => return false, - Err(ty::util::AlwaysRequiresDrop) => return true, - // If we've got a single component, select with that - // to increase the chance that we hit the selection cache. - Ok([t]) => ty = t, - Ok([..]) => {} + fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool { + // Avoid selecting for simple cases, such as builtin types. + if ty::util::is_trivially_const_drop(ty) { + return false; } let Some(drop_trait) = cx.tcx.lang_items().drop_trait() else { @@ -161,28 +157,50 @@ impl Qualif for NeedsNonConstDrop { // without having the lang item present. 
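Taken together with the runtime lowering shown earlier (a null pointer for `const_allocate`, a no-op for `const_deallocate`), the intent of the interpreter logic above can be sketched as follows. This is an illustrative snippet, not part of the patch, and it assumes a nightly toolchain with the unstable `core_intrinsics` and `const_heap` features enabled:

```rust
#![feature(core_intrinsics, const_heap)]

use std::intrinsics::{const_allocate, const_deallocate};

const _: () = unsafe {
    // During const evaluation this returns a real allocation;
    // at runtime the same call lowers to a null pointer.
    let ptr: *mut u8 = const_allocate(4, 4);

    // Deallocation is a no-op at runtime and, per the interpreter
    // logic above, is skipped for allocations that were created in
    // another const; this allocation is local, so it is freed here.
    const_deallocate(ptr, 4, 4);
};

fn main() {}
```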
return false; }; - let trait_ref = - ty::TraitRef { def_id: drop_trait, substs: cx.tcx.mk_substs_trait(ty, &[]) }; + let obligation = Obligation::new( ObligationCause::dummy(), cx.param_env, ty::Binder::dummy(ty::TraitPredicate { - trait_ref, + trait_ref: ty::TraitRef { + def_id: drop_trait, + substs: cx.tcx.mk_substs_trait(ty, &[]), + }, constness: ty::BoundConstness::ConstIfConst, polarity: ty::ImplPolarity::Positive, }), ); - let implsrc = cx.tcx.infer_ctxt().enter(|infcx| { + cx.tcx.infer_ctxt().enter(|infcx| { let mut selcx = SelectionContext::new(&infcx); - selcx.select(&obligation) - }); - !matches!( - implsrc, - Ok(Some( + let Some(impl_src) = selcx.select(&obligation).ok().flatten() else { + // If we couldn't select a const drop candidate, then it's bad + return true; + }; + + if !matches!( + impl_src, ImplSource::ConstDrop(_) | ImplSource::Param(_, ty::BoundConstness::ConstIfConst) - )) - ) + ) { + // If our const drop candidate is not ConstDrop or implied by the param env, + // then it's bad + return true; + } + + if impl_src.borrow_nested_obligations().is_empty() { + return false; + } + + // If we successfully found one, then select all of the predicates + // implied by our const drop impl. + let mut fcx = FulfillmentContext::new(); + for nested in impl_src.nested_obligations() { + fcx.register_predicate_obligation(&infcx, nested); + } + + // If we had any errors, then it's bad + !fcx.select_all_or_error(&infcx).is_empty() + }) } fn in_adt_inherently<'tcx>( diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml index 2bb9d6c1893..ad296c97659 100644 --- a/compiler/rustc_data_structures/Cargo.toml +++ b/compiler/rustc_data_structures/Cargo.toml @@ -9,7 +9,7 @@ doctest = false [dependencies] arrayvec = { version = "0.7", default-features = false } ena = "0.14" -indexmap = "1.5.1" +indexmap = { version = "1.8.0", features = ["rustc-rayon"] } tracing = "0.1" jobserver_crate = { version = "0.1.13", package = "jobserver" } rustc_serialize = { path = "../rustc_serialize" } diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs index c9af35da4bc..525cd650dd2 100644 --- a/compiler/rustc_data_structures/src/fingerprint.rs +++ b/compiler/rustc_data_structures/src/fingerprint.rs @@ -149,10 +149,10 @@ impl<E: rustc_serialize::Encoder> Encodable<E> for Fingerprint { impl<D: rustc_serialize::Decoder> Decodable<D> for Fingerprint { #[inline] - fn decode(d: &mut D) -> Result<Self, D::Error> { + fn decode(d: &mut D) -> Self { let mut bytes = [0u8; 16]; - d.read_raw_bytes_into(&mut bytes)?; - Ok(Fingerprint::from_le_bytes(bytes)) + d.read_raw_bytes_into(&mut bytes); + Fingerprint::from_le_bytes(bytes) } } @@ -195,8 +195,8 @@ impl<E: rustc_serialize::Encoder> Encodable<E> for PackedFingerprint { impl<D: rustc_serialize::Decoder> Decodable<D> for PackedFingerprint { #[inline] - fn decode(d: &mut D) -> Result<Self, D::Error> { - Fingerprint::decode(d).map(PackedFingerprint) + fn decode(d: &mut D) -> Self { + Self(Fingerprint::decode(d)) } } diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs index 872b0eb7854..53062b9c20d 100644 --- a/compiler/rustc_data_structures/src/sip128.rs +++ b/compiler/rustc_data_structures/src/sip128.rs @@ -409,20 +409,6 @@ impl SipHasher128 { } } -macro_rules! 
dispatch_value { - ($target: expr, $value:expr) => { - let value = $value; - #[allow(unreachable_patterns)] - #[allow(overflowing_literals)] - match value { - 0..=0xFF => $target.short_write(value as u8), - 0x100..=0xFFFF => $target.short_write(value as u16), - 0x10000..=0xFFFFFFFF => $target.short_write(value as u32), - _ => $target.short_write(value as u64), - } - }; -} - impl Hasher for SipHasher128 { #[inline] fn write_u8(&mut self, i: u8) { @@ -436,7 +422,7 @@ impl Hasher for SipHasher128 { #[inline] fn write_u32(&mut self, i: u32) { - dispatch_value!(self, i); + self.short_write(i); } #[inline] @@ -466,7 +452,7 @@ impl Hasher for SipHasher128 { #[inline] fn write_i64(&mut self, i: i64) { - dispatch_value!(self, i as u64); + self.short_write(i as u64); } #[inline] diff --git a/compiler/rustc_data_structures/src/stable_hasher/tests.rs b/compiler/rustc_data_structures/src/stable_hasher/tests.rs index 391db67d29d..31190363eb6 100644 --- a/compiler/rustc_data_structures/src/stable_hasher/tests.rs +++ b/compiler/rustc_data_structures/src/stable_hasher/tests.rs @@ -98,3 +98,45 @@ fn test_hash_bit_matrix() { assert_ne!(a, b); assert_ne!(hash(&a), hash(&b)); } + +// Check that exchanging the value of two adjacent fields changes the hash. +#[test] +fn test_attribute_permutation() { + macro_rules! test_type { + ($ty: ty) => {{ + struct Foo { + a: $ty, + b: $ty, + } + + impl<CTX> HashStable<CTX> for Foo { + fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) { + self.a.hash_stable(hcx, hasher); + self.b.hash_stable(hcx, hasher); + } + } + + #[allow(overflowing_literals)] + let mut item = Foo { a: 0xFF, b: 0xFF_FF }; + let hash_a = hash(&item); + std::mem::swap(&mut item.a, &mut item.b); + let hash_b = hash(&item); + assert_ne!( + hash_a, + hash_b, + "The hash stayed the same after values were swapped for type `{}`!", + stringify!($ty) + ); + }}; + } + + test_type!(u16); + test_type!(u32); + test_type!(u64); + test_type!(u128); + + test_type!(i16); + test_type!(i32); + test_type!(i64); + test_type!(i128); +} diff --git a/compiler/rustc_data_structures/src/svh.rs b/compiler/rustc_data_structures/src/svh.rs index ce90fbacaa4..12ef286091c 100644 --- a/compiler/rustc_data_structures/src/svh.rs +++ b/compiler/rustc_data_structures/src/svh.rs @@ -55,8 +55,8 @@ impl<S: Encoder> Encodable<S> for Svh { } impl<D: Decoder> Decodable<D> for Svh { - fn decode(d: &mut D) -> Result<Svh, D::Error> { - d.read_u64().map(u64::from_le).map(Svh::new) + fn decode(d: &mut D) -> Svh { + Svh::new(u64::from_le(d.read_u64())) } } diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs index 19fa6812b45..2b64312dbef 100644 --- a/compiler/rustc_driver/src/lib.rs +++ b/compiler/rustc_driver/src/lib.rs @@ -597,10 +597,7 @@ impl RustcDefaultCalls { let rlink_data = fs::read_to_string(file).unwrap_or_else(|err| { sess.fatal(&format!("failed to read rlink file: {}", err)); }); - let codegen_results: CodegenResults = - json::decode(&rlink_data).unwrap_or_else(|err| { - sess.fatal(&format!("failed to decode rlink: {}", err)); - }); + let codegen_results: CodegenResults = json::decode(&rlink_data); let result = compiler.codegen_backend().link(sess, codegen_results, &outputs); abort_on_err(result, sess); } else { diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs index 79d9c55b547..c401f65edda 100644 --- a/compiler/rustc_error_codes/src/error_codes.rs +++ b/compiler/rustc_error_codes/src/error_codes.rs @@ -472,6 +472,7 @@ E0768: 
include_str!("./error_codes/E0768.md"), E0769: include_str!("./error_codes/E0769.md"), E0770: include_str!("./error_codes/E0770.md"), E0771: include_str!("./error_codes/E0771.md"), +E0772: include_str!("./error_codes/E0772.md"), E0773: include_str!("./error_codes/E0773.md"), E0774: include_str!("./error_codes/E0774.md"), E0775: include_str!("./error_codes/E0775.md"), @@ -486,6 +487,7 @@ E0783: include_str!("./error_codes/E0783.md"), E0784: include_str!("./error_codes/E0784.md"), E0785: include_str!("./error_codes/E0785.md"), E0786: include_str!("./error_codes/E0786.md"), +E0787: include_str!("./error_codes/E0787.md"), ; // E0006, // merged with E0005 // E0008, // cannot bind by-move into a pattern guard @@ -641,5 +643,4 @@ E0786: include_str!("./error_codes/E0786.md"), // E0723, // unstable feature in `const` context E0726, // non-explicit (not `'_`) elided lifetime in unsupported position // E0738, // Removed; errored on `#[track_caller] fn`s in `extern "Rust" { ... }`. - E0772, // `'static' obligation coming from `impl dyn Trait {}` or `impl Foo for dyn Bar {}`. } diff --git a/compiler/rustc_error_codes/src/error_codes/E0772.md b/compiler/rustc_error_codes/src/error_codes/E0772.md new file mode 100644 index 00000000000..262e52351ef --- /dev/null +++ b/compiler/rustc_error_codes/src/error_codes/E0772.md @@ -0,0 +1,89 @@ +A trait object has some specific lifetime `'1`, but it was used in a way that +requires it to have a `'static` lifetime. + +Example of erroneous code: + +```compile_fail,E0772 +trait BooleanLike {} +trait Person {} + +impl BooleanLike for bool {} + +impl dyn Person { + fn is_cool(&self) -> bool { + // hey you, you're pretty cool + true + } +} + +fn get_is_cool<'p>(person: &'p dyn Person) -> impl BooleanLike { + // error: `person` has an anonymous lifetime `'p` but calling + // `print_cool_fn` introduces an implicit `'static` lifetime + // requirement + person.is_cool() +} +``` + +The trait object `person` in the function `get_is_cool`, while already being +behind a reference with lifetime `'p`, also has it's own implicit lifetime, +`'2`. + +Lifetime `'2` represents the data the trait object might hold inside, for +example: + +``` +trait MyTrait {} + +struct MyStruct<'a>(&'a i32); + +impl<'a> MyTrait for MyStruct<'a> {} +``` + +With this scenario, if a trait object of `dyn MyTrait + '2` was made from +`MyStruct<'a>`, `'a` must live as long, if not longer than `'2`. This allows the +trait object's internal data to be accessed safely from any trait methods. This +rule also goes for any lifetime any struct made into a trait object may have. + +In the implementation for `dyn Person`, the `'2` lifetime representing the +internal data was ommitted, meaning that the compiler inferred the lifetime +`'static`. As a result, the implementation's `is_cool` is inferred by the +compiler to look like this: + +``` +# trait Person {} +# +# impl dyn Person { +fn is_cool<'a>(self: &'a (dyn Person + 'static)) -> bool {unimplemented!()} +# } +``` + +While the `get_is_cool` function is inferred to look like this: + +``` +# trait Person {} +# trait BooleanLike {} +# +fn get_is_cool<'p, R: BooleanLike>(person: &'p (dyn Person + 'p)) -> R { + unimplemented!() +} +``` + +Which brings us to the core of the problem; the assignment of type +`&'_ (dyn Person + '_)` to type `&'_ (dyn Person + 'static)` is impossible. + +Fixing it is as simple as being generic over lifetime `'2`, as to prevent the +compiler from inferring it as `'static`: + +``` +# trait Person {} +# +impl<'d> dyn Person + 'd {/* ... 
*/} + +// This works too, and is more elegant: +//impl dyn Person + '_ {/* ... */} +``` + +See the [Rust Reference on Trait Object Lifetime Bounds][trait-objects] for +more information on trait object lifetimes. + +[trait-object-lifetime-bounds]: https://doc.rust-lang.org/reference/types/trait-object.html#trait-object-lifetime-bounds diff --git a/compiler/rustc_error_codes/src/error_codes/E0787.md b/compiler/rustc_error_codes/src/error_codes/E0787.md new file mode 100644 index 00000000000..cee50829270 --- /dev/null +++ b/compiler/rustc_error_codes/src/error_codes/E0787.md @@ -0,0 +1,28 @@ +An unsupported naked function definition. + +Erroneous code example: + +```compile_fail,E0787 +#![feature(naked_functions)] + +#[naked] +pub extern "C" fn f() -> u32 { + 42 +} +``` + +The naked functions must be defined using a single inline assembly +block. + +The execution must never fall through past the end of the assembly +code so the block must use `noreturn` option. The asm block can also +use `att_syntax` and `raw` options, but others options are not allowed. + +The asm block must not contain any operands other than `const` and +`sym`. + +### Additional information + +For more information, please see [RFC 2972]. + +[RFC 2972]: https://github.com/rust-lang/rfcs/blob/master/text/2972-constrained-naked.md diff --git a/compiler/rustc_errors/src/json/tests.rs b/compiler/rustc_errors/src/json/tests.rs index d055937ac36..c5b3d204407 100644 --- a/compiler/rustc_errors/src/json/tests.rs +++ b/compiler/rustc_errors/src/json/tests.rs @@ -64,7 +64,7 @@ fn test_positions(code: &str, span: (u32, u32), expected_output: SpanTestData) { let bytes = output.lock().unwrap(); let actual_output = str::from_utf8(&bytes).unwrap(); - let actual_output: TestData = decode(actual_output).unwrap(); + let actual_output: TestData = decode(actual_output); assert_eq!(expected_output, actual_output) }) diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index a681298301a..16e9b265d69 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -99,8 +99,8 @@ impl Hash for ToolMetadata { // Doesn't really need to round-trip impl<D: Decoder> Decodable<D> for ToolMetadata { - fn decode(_d: &mut D) -> Result<Self, D::Error> { - Ok(ToolMetadata(None)) + fn decode(_d: &mut D) -> Self { + ToolMetadata(None) } } @@ -445,9 +445,6 @@ struct HandlerInner { deduplicated_warn_count: usize, future_breakage_diagnostics: Vec<Diagnostic>, - - /// If set to `true`, no warning or error will be emitted. - quiet: bool, } /// A key denoting where from a diagnostic was stashed. @@ -563,19 +560,10 @@ impl Handler { emitted_diagnostics: Default::default(), stashed_diagnostics: Default::default(), future_breakage_diagnostics: Vec::new(), - quiet: false, }), } } - pub fn with_disabled_diagnostic<T, F: FnOnce() -> T>(&self, f: F) -> T { - let prev = self.inner.borrow_mut().quiet; - self.inner.borrow_mut().quiet = true; - let ret = f(); - self.inner.borrow_mut().quiet = prev; - ret - } - // This is here to not allow mutation of flags; // as of this writing it's only used in tests in librustc_middle. 
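For contrast with the erroneous E0787 example, a definition that satisfies the constraints described there (a single `asm!` block, `options(noreturn)`, no operands other than `const` and `sym`) might look like the following. This is an illustrative x86-64 sketch for a nightly toolchain with `naked_functions` enabled, not text from the patch:

```rust
#![feature(naked_functions)]

use std::arch::asm;

#[naked]
pub extern "C" fn f() -> u32 {
    // The whole body is one asm! block; control never falls through
    // past the end of it, so `options(noreturn)` is required.
    unsafe {
        asm!(
            "mov eax, 42",
            "ret",
            options(noreturn),
        )
    }
}

fn main() {
    assert_eq!(f(), 42);
}
```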
pub fn can_emit_warnings(&self) -> bool { @@ -946,7 +934,7 @@ impl HandlerInner { } fn emit_diagnostic(&mut self, diagnostic: &Diagnostic) { - if diagnostic.cancelled() || self.quiet { + if diagnostic.cancelled() { return; } @@ -1170,9 +1158,6 @@ impl HandlerInner { } fn delay_as_bug(&mut self, diagnostic: Diagnostic) { - if self.quiet { - return; - } if self.flags.report_delayed_bugs { self.emit_diagnostic(&diagnostic); } diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index 07b5e20b2dd..258320aeb63 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -8,7 +8,7 @@ use rustc_ast::tokenstream::{CanSynthesizeMissingTokens, TokenStream}; use rustc_ast::visit::{AssocCtxt, Visitor}; use rustc_ast::{self as ast, AstLike, Attribute, Item, NodeId, PatKind}; use rustc_attr::{self as attr, Deprecation, Stability}; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::sync::{self, Lrc}; use rustc_errors::{Applicability, DiagnosticBuilder, ErrorReported}; use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT; @@ -920,8 +920,25 @@ pub trait ResolverExpand { /// we generated proc macros harnesses, so that we can map /// HIR proc macros items back to their harness items. fn declare_proc_macro(&mut self, id: NodeId); + + /// Tools registered with `#![register_tool]` and used by tool attributes and lints. + fn registered_tools(&self) -> &FxHashSet<Ident>; } +pub trait LintStoreExpand { + fn pre_expansion_lint( + &self, + sess: &Session, + registered_tools: &FxHashSet<Ident>, + node_id: NodeId, + attrs: &[Attribute], + items: &[P<Item>], + name: &str, + ); +} + +type LintStoreExpandDyn<'a> = Option<&'a (dyn LintStoreExpand + 'a)>; + #[derive(Clone, Default)] pub struct ModuleData { /// Path to the module starting from the crate name, like `my_crate::foo::bar`. @@ -956,9 +973,6 @@ pub struct ExpansionData { pub is_trailing_mac: bool, } -type OnExternModLoaded<'a> = - Option<&'a dyn Fn(Ident, Vec<Attribute>, Vec<P<Item>>, Span) -> (Vec<Attribute>, Vec<P<Item>>)>; - /// One of these is made during expansion and incrementally updated as we go; /// when a macro expansion occurs, the resulting nodes have the `backtrace() /// -> expn_data` of their expansion context stored into their span. @@ -973,10 +987,8 @@ pub struct ExtCtxt<'a> { /// (or during eager expansion, but that's a hack). pub force_mode: bool, pub expansions: FxHashMap<Span, Vec<String>>, - /// Called directly after having parsed an external `mod foo;` in expansion. - /// - /// `Ident` is the module name. - pub(super) extern_mod_loaded: OnExternModLoaded<'a>, + /// Used for running pre-expansion lints on freshly loaded modules. + pub(super) lint_store: LintStoreExpandDyn<'a>, /// When we 'expand' an inert attribute, we leave it /// in the AST, but insert it here so that we know /// not to expand it again. 
@@ -988,14 +1000,14 @@ impl<'a> ExtCtxt<'a> { sess: &'a Session, ecfg: expand::ExpansionConfig<'a>, resolver: &'a mut dyn ResolverExpand, - extern_mod_loaded: OnExternModLoaded<'a>, + lint_store: LintStoreExpandDyn<'a>, ) -> ExtCtxt<'a> { ExtCtxt { sess, ecfg, reduced_recursion_limit: None, resolver, - extern_mod_loaded, + lint_store, root_path: PathBuf::new(), current_expansion: ExpansionData { id: LocalExpnId::ROOT, diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs index 7604a464be2..9a4daa6d750 100644 --- a/compiler/rustc_expand/src/expand.rs +++ b/compiler/rustc_expand/src/expand.rs @@ -1097,7 +1097,7 @@ impl InvocationCollectorNode for P<ast::Item> { ModKind::Unloaded => { // We have an outline `mod foo;` so we need to parse the file. let old_attrs_len = attrs.len(); - let ParsedExternalMod { mut items, inner_span, file_path, dir_path, dir_ownership } = + let ParsedExternalMod { items, inner_span, file_path, dir_path, dir_ownership } = parse_external_mod( &ecx.sess, ident, @@ -1107,8 +1107,15 @@ impl InvocationCollectorNode for P<ast::Item> { &mut attrs, ); - if let Some(extern_mod_loaded) = ecx.extern_mod_loaded { - (attrs, items) = extern_mod_loaded(ident, attrs, items, inner_span); + if let Some(lint_store) = ecx.lint_store { + lint_store.pre_expansion_lint( + ecx.sess, + ecx.resolver.registered_tools(), + ecx.current_expansion.lint_node_id, + &attrs, + &items, + ident.name.as_str(), + ); } *mod_kind = ModKind::Loaded(items, Inline::No, inner_span); diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs index 5599c1df6d9..dfc07da9169 100644 --- a/compiler/rustc_expand/src/lib.rs +++ b/compiler/rustc_expand/src/lib.rs @@ -2,7 +2,6 @@ #![feature(associated_type_defaults)] #![feature(crate_visibility_modifier)] #![feature(decl_macro)] -#![cfg_attr(bootstrap, feature(destructuring_assignment))] #![feature(if_let_guard)] #![feature(let_else)] #![feature(proc_macro_diagnostic)] diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 723cc06864a..fc7f01f041d 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -625,7 +625,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ ), rustc_attr!( rustc_pass_by_value, Normal, - template!(Word), WarnFollowing, + template!(Word), ErrorFollowing, "#[rustc_pass_by_value] is used to mark types that must be passed by value instead of reference." ), BuiltinAttribute { @@ -697,6 +697,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ rustc_attr!(TEST, rustc_capture_analysis, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_insignificant_dtor, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_strict_coherence, Normal, template!(Word), WarnFollowing), + rustc_attr!(TEST, rustc_with_negative_coherence, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_variance, Normal, template!(Word), WarnFollowing), rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ..."), WarnFollowing), rustc_attr!(TEST, rustc_regions, Normal, template!(Word), WarnFollowing), diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs index e839f7fc777..74d6b05ca5f 100644 --- a/compiler/rustc_hir/src/definitions.rs +++ b/compiler/rustc_hir/src/definitions.rs @@ -107,8 +107,6 @@ pub struct Definitions { /// Their `HirId`s are defined by their position while lowering the enclosing owner. 
// FIXME(cjgillot) Some `LocalDefId`s from `use` items are dropped during lowering and lack a `HirId`. pub(super) def_id_to_hir_id: IndexVec<LocalDefId, Option<hir::HirId>>, - /// The reverse mapping of `def_id_to_hir_id`. - pub(super) hir_id_to_def_id: FxHashMap<hir::HirId, LocalDefId>, /// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`. expansions_that_defined: FxHashMap<LocalDefId, ExpnId>, @@ -330,11 +328,6 @@ impl Definitions { self.def_id_to_hir_id[id].unwrap() } - #[inline] - pub fn opt_hir_id_to_local_def_id(&self, hir_id: hir::HirId) -> Option<LocalDefId> { - self.hir_id_to_def_id.get(&hir_id).copied() - } - /// Adds a root definition (no parent) and a few other reserved definitions. pub fn new(stable_crate_id: StableCrateId, crate_span: Span) -> Definitions { let key = DefKey { @@ -362,7 +355,6 @@ impl Definitions { Definitions { table, def_id_to_hir_id: Default::default(), - hir_id_to_def_id: Default::default(), expansions_that_defined: Default::default(), def_id_to_span, stable_crate_id, @@ -425,12 +417,6 @@ impl Definitions { "trying to initialize `LocalDefId` <-> `HirId` mappings twice" ); - // Build the reverse mapping of `def_id_to_hir_id`. - self.hir_id_to_def_id = mapping - .iter_enumerated() - .filter_map(|(def_id, hir_id)| hir_id.map(|hir_id| (hir_id, def_id))) - .collect(); - self.def_id_to_hir_id = mapping; } diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index 4e6fac5eb28..a0ed72c9e9e 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -707,6 +707,8 @@ pub struct OwnerNodes<'tcx> { pub nodes: IndexVec<ItemLocalId, Option<ParentedNode<'tcx>>>, /// Content of local bodies. pub bodies: SortedMap<ItemLocalId, &'tcx Body<'tcx>>, + /// Non-owning definitions contained in this owner. + pub local_id_to_def_id: SortedMap<ItemLocalId, LocalDefId>, } /// Full information resulting from lowering an AST node. diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs index def0c1d0687..be4849d0b84 100644 --- a/compiler/rustc_hir/src/lang_items.rs +++ b/compiler/rustc_hir/src/lang_items.rs @@ -283,6 +283,7 @@ language_item_table! { PanicInfo, sym::panic_info, panic_info, Target::Struct, GenericRequirement::None; PanicLocation, sym::panic_location, panic_location, Target::Struct, GenericRequirement::None; PanicImpl, sym::panic_impl, panic_impl, Target::Fn, GenericRequirement::None; + PanicNoUnwind, sym::panic_no_unwind, panic_no_unwind, Target::Fn, GenericRequirement::Exact(0); /// libstd panic entry point. Necessary for const eval to be able to catch it BeginPanic, sym::begin_panic, begin_panic_fn, Target::Fn, GenericRequirement::None; diff --git a/compiler/rustc_hir/src/stable_hash_impls.rs b/compiler/rustc_hir/src/stable_hash_impls.rs index a43c1f9d9ae..b15054ae6d6 100644 --- a/compiler/rustc_hir/src/stable_hash_impls.rs +++ b/compiler/rustc_hir/src/stable_hash_impls.rs @@ -208,8 +208,13 @@ impl<'tcx, HirCtx: crate::HashStableContext> HashStable<HirCtx> for OwnerNodes<' fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) { // We ignore the `nodes` and `bodies` fields since these refer to information included in // `hash` which is hashed in the collector and used for the crate hash. 
- let OwnerNodes { hash_including_bodies, hash_without_bodies: _, nodes: _, bodies: _ } = - *self; + let OwnerNodes { + hash_including_bodies, + hash_without_bodies: _, + nodes: _, + bodies: _, + local_id_to_def_id: _, + } = *self; hash_including_bodies.hash_stable(hcx, hasher); } } diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs index 9e54122f8dd..a47ebaf1237 100644 --- a/compiler/rustc_hir_pretty/src/lib.rs +++ b/compiler/rustc_hir_pretty/src/lib.rs @@ -571,7 +571,7 @@ impl<'a> State<'a> { self.ann.nested(self, Nested::Body(body)); } hir::ItemKind::Macro(ref macro_def) => { - self.print_mac_def(macro_def, &item.ident, &item.span, |state| { + self.print_mac_def(macro_def, &item.ident, item.span, |state| { state.print_visibility(&item.vis) }); } diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs index d563a6ca478..870c3f80682 100644 --- a/compiler/rustc_incremental/src/persist/load.rs +++ b/compiler/rustc_incremental/src/persist/load.rs @@ -158,14 +158,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { // Decode the list of work_products let mut work_product_decoder = Decoder::new(&work_products_data[..], start_pos); let work_products: Vec<SerializedWorkProduct> = - Decodable::decode(&mut work_product_decoder).unwrap_or_else(|e| { - let msg = format!( - "Error decoding `work-products` from incremental \ - compilation session directory: {}", - e - ); - sess.fatal(&msg) - }); + Decodable::decode(&mut work_product_decoder); for swp in work_products { let mut all_files_exist = true; @@ -203,8 +196,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { LoadResult::Error { message } => LoadResult::Error { message }, LoadResult::Ok { data: (bytes, start_pos) } => { let mut decoder = Decoder::new(&bytes, start_pos); - let prev_commandline_args_hash = u64::decode(&mut decoder) - .expect("Error reading commandline arg hash from cached dep-graph"); + let prev_commandline_args_hash = u64::decode(&mut decoder); if prev_commandline_args_hash != expected_hash { if report_incremental_info { @@ -220,8 +212,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { return LoadResult::DataOutOfDate; } - let dep_graph = SerializedDepGraph::decode(&mut decoder) - .expect("Error reading cached dep-graph"); + let dep_graph = SerializedDepGraph::decode(&mut decoder); LoadResult::Ok { data: (dep_graph, prev_work_products) } } diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs index e3c6528b218..8b61530577d 100644 --- a/compiler/rustc_index/src/vec.rs +++ b/compiler/rustc_index/src/vec.rs @@ -395,8 +395,8 @@ macro_rules! 
newtype_index { (@serializable $type:ident) => ( impl<D: ::rustc_serialize::Decoder> ::rustc_serialize::Decodable<D> for $type { - fn decode(d: &mut D) -> Result<Self, D::Error> { - d.read_u32().map(Self::from_u32) + fn decode(d: &mut D) -> Self { + Self::from_u32(d.read_u32()) } } impl<E: ::rustc_serialize::Encoder> ::rustc_serialize::Encodable<E> for $type { @@ -527,8 +527,8 @@ impl<S: Encoder, I: Idx, T: Encodable<S>> Encodable<S> for &IndexVec<I, T> { } impl<D: Decoder, I: Idx, T: Decodable<D>> Decodable<D> for IndexVec<I, T> { - fn decode(d: &mut D) -> Result<Self, D::Error> { - Decodable::decode(d).map(|v| IndexVec { raw: v, _marker: PhantomData }) + fn decode(d: &mut D) -> Self { + IndexVec { raw: Decodable::decode(d), _marker: PhantomData } } } diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs index 14ab635a2ae..1eb8190bd7d 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/mod.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs @@ -2650,7 +2650,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { infer::LateBoundRegion(_, br, infer::AssocTypeProjection(def_id)) => format!( " for lifetime parameter {}in trait containing associated type `{}`", br_string(br), - self.tcx.associated_item(def_id).ident + self.tcx.associated_item(def_id).name ), infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{}`", name), infer::UpvarRegion(ref upvar_id, _) => { diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs index 412a077959d..0a9f59fbc97 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs @@ -70,7 +70,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { .map(|s| format!("`{}`", s)) .unwrap_or_else(|| "`fn` parameter".to_string()), lifetime, - ctxt.assoc_item.ident, + ctxt.assoc_item.name, ); err.span_label(param.param_ty_span, &format!("this data with {}...", lifetime)); err.span_label( @@ -231,7 +231,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { // Handle case of `impl Foo for dyn Bar { fn qux(&self) {} }` introducing a // `'static` lifetime when called as a method on a binding: `bar.qux()`. if self.find_impl_on_dyn_trait(&mut err, param.param_ty, &ctxt) { - override_error_code = Some(ctxt.assoc_item.ident); + override_error_code = Some(ctxt.assoc_item.name); } } } @@ -252,7 +252,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { self.get_impl_ident_and_self_ty_from_trait(*item_def_id, &v.0) { if self.suggest_constrain_dyn_trait_in_impl(&mut err, &v.0, ident, self_ty) { - override_error_code = Some(ident); + override_error_code = Some(ident.name); } } } diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs index 266eec08ceb..d1b24b332bd 100644 --- a/compiler/rustc_infer/src/infer/mod.rs +++ b/compiler/rustc_infer/src/infer/mod.rs @@ -195,7 +195,7 @@ pub struct InferCtxtInner<'tcx> { // Opaque types found in explicit return types and their // associated fresh inference variable. Writeback resolves these // variables to get the concrete type, which can be used to - // 'de-opaque' OpaqueTypeDecl, after typeck is done with all functions. + // 'de-opaque' OpaqueTypeDecl outside of type inference. 
pub opaque_types: OpaqueTypeMap<'tcx>, /// A map from inference variables created from opaque diff --git a/compiler/rustc_infer/src/infer/opaque_types.rs b/compiler/rustc_infer/src/infer/opaque_types.rs index 4851e637d3a..e7dca94806c 100644 --- a/compiler/rustc_infer/src/infer/opaque_types.rs +++ b/compiler/rustc_infer/src/infer/opaque_types.rs @@ -569,13 +569,15 @@ impl<'a, 'tcx> Instantiator<'a, 'tcx> { let predicate = predicate.fold_with(&mut BottomUpFolder { tcx, ty_op: |ty| match ty.kind() { - ty::Projection(projection_ty) => infcx.infer_projection( - self.param_env, - *projection_ty, - traits::ObligationCause::misc(self.value_span, self.body_id), - 0, - &mut self.obligations, - ), + ty::Projection(projection_ty) if !projection_ty.has_escaping_bound_vars() => { + infcx.infer_projection( + self.param_env, + *projection_ty, + traits::ObligationCause::misc(self.value_span, self.body_id), + 0, + &mut self.obligations, + ) + } _ => ty, }, lt_op: |lt| lt, diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs index 3804e100307..26343561959 100644 --- a/compiler/rustc_interface/src/interface.rs +++ b/compiler/rustc_interface/src/interface.rs @@ -124,7 +124,16 @@ pub fn parse_cfgspecs(cfgspecs: Vec<String>) -> FxHashSet<(String, Option<String Err(errs) => errs.into_iter().for_each(|mut err| err.cancel()), } - error!(r#"expected `key` or `key="value"`"#); + // If the user tried to use a key="value" flag, but is missing the quotes, provide + // a hint about how to resolve this. + if s.contains("=") && !s.contains("=\"") && !s.ends_with("\"") { + error!(concat!( + r#"expected `key` or `key="value"`, ensure escaping is appropriate"#, + r#" for your shell, try 'key="value"' or key=\"value\""# + )); + } else { + error!(r#"expected `key` or `key="value"`"#); + } }) .collect::<CrateConfig>(); cfg.into_iter().map(|(a, b)| (a.to_string(), b.map(|b| b.to_string()))).collect() diff --git a/compiler/rustc_interface/src/lib.rs b/compiler/rustc_interface/src/lib.rs index 2fc3759968f..b911b108a73 100644 --- a/compiler/rustc_interface/src/lib.rs +++ b/compiler/rustc_interface/src/lib.rs @@ -1,5 +1,6 @@ #![feature(bool_to_option)] #![feature(box_patterns)] +#![feature(let_else)] #![feature(internal_output_capture)] #![feature(thread_spawn_unchecked)] #![feature(nll)] diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index 33bf670f570..be31eb89f1b 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -3,7 +3,7 @@ use crate::proc_macro_decls; use crate::util; use rustc_ast::mut_visit::MutVisitor; -use rustc_ast::{self as ast, visit, DUMMY_NODE_ID}; +use rustc_ast::{self as ast, visit}; use rustc_borrowck as mir_borrowck; use rustc_codegen_ssa::back::link::emit_metadata; use rustc_codegen_ssa::traits::CodegenBackend; @@ -11,16 +11,16 @@ use rustc_data_structures::parallel; use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal}; use rustc_data_structures::temp_dir::MaybeTempDir; use rustc_errors::{Applicability, ErrorReported, PResult}; -use rustc_expand::base::ExtCtxt; +use rustc_expand::base::{ExtCtxt, LintStoreExpand, ResolverExpand}; use rustc_hir::def_id::{StableCrateId, LOCAL_CRATE}; use rustc_hir::Crate; -use rustc_lint::LintStore; +use rustc_lint::{EarlyCheckNode, LintStore}; use rustc_metadata::creader::CStore; use rustc_metadata::{encode_metadata, EncodedMetadata}; use rustc_middle::arena::Arena; use rustc_middle::dep_graph::DepGraph; use 
rustc_middle::ty::query::{ExternProviders, Providers}; -use rustc_middle::ty::{self, GlobalCtxt, ResolverOutputs, TyCtxt}; +use rustc_middle::ty::{self, GlobalCtxt, RegisteredTools, ResolverOutputs, TyCtxt}; use rustc_mir_build as mir_build; use rustc_parse::{parse_crate_from_file, parse_crate_from_source_str, validate_attr}; use rustc_passes::{self, hir_stats, layout_test}; @@ -34,7 +34,7 @@ use rustc_session::lint; use rustc_session::output::{filename_for_input, filename_for_metadata}; use rustc_session::search_paths::PathKind; use rustc_session::{Limit, Session}; -use rustc_span::symbol::{sym, Ident, Symbol}; +use rustc_span::symbol::{sym, Symbol}; use rustc_span::{FileName, MultiSpan}; use rustc_trait_selection::traits; use rustc_typeck as typeck; @@ -233,26 +233,43 @@ pub fn register_plugins<'a>( Ok((krate, lint_store)) } -fn pre_expansion_lint( +fn pre_expansion_lint<'a>( sess: &Session, lint_store: &LintStore, - krate: &ast::Crate, - crate_attrs: &[ast::Attribute], - crate_name: &str, + registered_tools: &RegisteredTools, + check_node: impl EarlyCheckNode<'a>, + node_name: &str, ) { - sess.prof.generic_activity_with_arg("pre_AST_expansion_lint_checks", crate_name).run(|| { - rustc_lint::check_ast_crate( + sess.prof.generic_activity_with_arg("pre_AST_expansion_lint_checks", node_name).run(|| { + rustc_lint::check_ast_node( sess, - lint_store, - krate, - crate_attrs, true, + lint_store, + registered_tools, None, rustc_lint::BuiltinCombinedPreExpansionLintPass::new(), + check_node, ); }); } +// Cannot implement directly for `LintStore` due to trait coherence. +struct LintStoreExpandImpl<'a>(&'a LintStore); + +impl LintStoreExpand for LintStoreExpandImpl<'_> { + fn pre_expansion_lint( + &self, + sess: &Session, + registered_tools: &RegisteredTools, + node_id: ast::NodeId, + attrs: &[ast::Attribute], + items: &[rustc_ast::ptr::P<ast::Item>], + name: &str, + ) { + pre_expansion_lint(sess, self.0, registered_tools, (node_id, attrs, items), name); + } +} + /// Runs the "early phases" of the compiler: initial `cfg` processing, loading compiler plugins, /// syntax expansion, secondary `cfg` expansion, synthesis of a test /// harness if one is to be provided, injection of a dependency on the @@ -265,7 +282,7 @@ pub fn configure_and_expand( resolver: &mut Resolver<'_>, ) -> Result<ast::Crate> { tracing::trace!("configure_and_expand"); - pre_expansion_lint(sess, lint_store, &krate, &krate.attrs, crate_name); + pre_expansion_lint(sess, lint_store, resolver.registered_tools(), &krate, crate_name); rustc_builtin_macros::register_builtin_macros(resolver); krate = sess.time("crate_injection", || { @@ -321,13 +338,8 @@ pub fn configure_and_expand( ..rustc_expand::expand::ExpansionConfig::default(crate_name.to_string()) }; - let crate_attrs = krate.attrs.clone(); - let extern_mod_loaded = |ident: Ident, attrs, items, span| { - let krate = ast::Crate { attrs, items, span, id: DUMMY_NODE_ID, is_placeholder: false }; - pre_expansion_lint(sess, lint_store, &krate, &crate_attrs, ident.name.as_str()); - (krate.attrs, krate.items) - }; - let mut ecx = ExtCtxt::new(sess, cfg, resolver, Some(&extern_mod_loaded)); + let lint_store = LintStoreExpandImpl(lint_store); + let mut ecx = ExtCtxt::new(sess, cfg, resolver, Some(&lint_store)); // Expand macros now! 
let krate = sess.time("expand_crate", || ecx.monotonic_expander().expand_crate(krate)); @@ -499,14 +511,15 @@ pub fn lower_to_hir<'res, 'tcx>( ); sess.time("early_lint_checks", || { - rustc_lint::check_ast_crate( + let lint_buffer = Some(std::mem::take(resolver.lint_buffer())); + rustc_lint::check_ast_node( sess, - lint_store, - &krate, - &krate.attrs, false, - Some(std::mem::take(resolver.lint_buffer())), + lint_store, + resolver.registered_tools(), + lint_buffer, rustc_lint::BuiltinCombinedEarlyLintPass::new(), + &*krate, ) }); diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs index 3921187baa5..6d9183eda9d 100644 --- a/compiler/rustc_interface/src/util.rs +++ b/compiler/rustc_interface/src/util.rs @@ -717,57 +717,57 @@ impl<'a, 'b> ReplaceBodyWithLoop<'a, 'b> { } fn should_ignore_fn(ret_ty: &ast::FnRetTy) -> bool { - if let ast::FnRetTy::Ty(ref ty) = ret_ty { - fn involves_impl_trait(ty: &ast::Ty) -> bool { - match ty.kind { - ast::TyKind::ImplTrait(..) => true, - ast::TyKind::Slice(ref subty) - | ast::TyKind::Array(ref subty, _) - | ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) - | ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. }) - | ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), - ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), - ast::TyKind::Path(_, ref path) => { - path.segments.iter().any(|seg| match seg.args.as_deref() { - None => false, - Some(&ast::GenericArgs::AngleBracketed(ref data)) => { - data.args.iter().any(|arg| match arg { - ast::AngleBracketedArg::Arg(arg) => match arg { - ast::GenericArg::Type(ty) => involves_impl_trait(ty), - ast::GenericArg::Lifetime(_) - | ast::GenericArg::Const(_) => false, - }, - ast::AngleBracketedArg::Constraint(c) => match c.kind { - ast::AssocConstraintKind::Bound { .. } => true, - ast::AssocConstraintKind::Equality { ref term } => { - match term { - Term::Ty(ty) => involves_impl_trait(ty), - // FIXME(...): This should check if the constant - // involves a trait impl, but for now ignore. - Term::Const(_) => false, - } + let ast::FnRetTy::Ty(ref ty) = ret_ty else { + return false; + }; + fn involves_impl_trait(ty: &ast::Ty) -> bool { + match ty.kind { + ast::TyKind::ImplTrait(..) => true, + ast::TyKind::Slice(ref subty) + | ast::TyKind::Array(ref subty, _) + | ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) + | ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. }) + | ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), + ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), + ast::TyKind::Path(_, ref path) => { + path.segments.iter().any(|seg| match seg.args.as_deref() { + None => false, + Some(&ast::GenericArgs::AngleBracketed(ref data)) => { + data.args.iter().any(|arg| match arg { + ast::AngleBracketedArg::Arg(arg) => match arg { + ast::GenericArg::Type(ty) => involves_impl_trait(ty), + ast::GenericArg::Lifetime(_) | ast::GenericArg::Const(_) => { + false + } + }, + ast::AngleBracketedArg::Constraint(c) => match c.kind { + ast::AssocConstraintKind::Bound { .. } => true, + ast::AssocConstraintKind::Equality { ref term } => { + match term { + Term::Ty(ty) => involves_impl_trait(ty), + // FIXME(...): This should check if the constant + // involves a trait impl, but for now ignore. 
+ Term::Const(_) => false, } - }, - }) - } - Some(&ast::GenericArgs::Parenthesized(ref data)) => { - any_involves_impl_trait(data.inputs.iter()) - || ReplaceBodyWithLoop::should_ignore_fn(&data.output) - } - }) - } - _ => false, + } + }, + }) + } + Some(&ast::GenericArgs::Parenthesized(ref data)) => { + any_involves_impl_trait(data.inputs.iter()) + || ReplaceBodyWithLoop::should_ignore_fn(&data.output) + } + }) } + _ => false, } + } - fn any_involves_impl_trait<'a, I: Iterator<Item = &'a P<ast::Ty>>>(mut it: I) -> bool { - it.any(|subty| involves_impl_trait(subty)) - } - - involves_impl_trait(ty) - } else { - false + fn any_involves_impl_trait<'a, I: Iterator<Item = &'a P<ast::Ty>>>(mut it: I) -> bool { + it.any(|subty| involves_impl_trait(subty)) } + + involves_impl_trait(ty) } fn is_sig_const(sig: &ast::FnSig) -> bool { diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index 65385d4c7a1..9b24f43f7fd 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -912,7 +912,7 @@ declare_lint_pass!( impl EarlyLintPass for AnonymousParameters { fn check_trait_item(&mut self, cx: &EarlyContext<'_>, it: &ast::AssocItem) { - if cx.sess.edition() != Edition::Edition2015 { + if cx.sess().edition() != Edition::Edition2015 { // This is a hard error in future editions; avoid linting and erroring return; } @@ -921,7 +921,7 @@ impl EarlyLintPass for AnonymousParameters { if let ast::PatKind::Ident(_, ident, None) = arg.pat.kind { if ident.name == kw::Empty { cx.struct_span_lint(ANONYMOUS_PARAMETERS, arg.pat.span, |lint| { - let ty_snip = cx.sess.source_map().span_to_snippet(arg.ty.span); + let ty_snip = cx.sess().source_map().span_to_snippet(arg.ty.span); let (ty_snip, appl) = if let Ok(ref snip) = ty_snip { (snip.as_str(), Applicability::MachineApplicable) @@ -1775,7 +1775,7 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns { }; if join.edition() >= Edition::Edition2021 { let mut err = - rustc_errors::struct_span_err!(cx.sess, pat.span, E0783, "{}", msg,); + rustc_errors::struct_span_err!(cx.sess(), pat.span, E0783, "{}", msg,); err.span_suggestion( pat.span, suggestion, @@ -1799,7 +1799,7 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns { let replace = "..=".to_owned(); if join.edition() >= Edition::Edition2021 { let mut err = - rustc_errors::struct_span_err!(cx.sess, pat.span, E0783, "{}", msg,); + rustc_errors::struct_span_err!(cx.sess(), pat.span, E0783, "{}", msg,); err.span_suggestion_short( join, suggestion, @@ -1983,7 +1983,7 @@ impl KeywordIdents { UnderMacro(under_macro): UnderMacro, ident: Ident, ) { - let next_edition = match cx.sess.edition() { + let next_edition = match cx.sess().edition() { Edition::Edition2015 => { match ident.name { kw::Async | kw::Await | kw::Try => Edition::Edition2018, @@ -2011,7 +2011,7 @@ impl KeywordIdents { }; // Don't lint `r#foo`. - if cx.sess.parse_sess.raw_identifier_spans.borrow().contains(&ident.span) { + if cx.sess().parse_sess.raw_identifier_spans.borrow().contains(&ident.span) { return; } @@ -2379,7 +2379,7 @@ declare_lint_pass!( impl EarlyLintPass for IncompleteFeatures { fn check_crate(&mut self, cx: &EarlyContext<'_>, _: &ast::Crate) { - let features = cx.sess.features_untracked(); + let features = cx.sess().features_untracked(); features .declared_lang_features .iter() @@ -3158,7 +3158,10 @@ declare_lint! { /// of this, GNU assembler [local labels] *must* be used instead of labels /// with a name. Using named labels might cause assembler or linker errors. 
/// + /// See the explanation in [Rust By Example] for more details. + /// /// [local labels]: https://sourceware.org/binutils/docs/as/Symbol-Names.html#Local-Labels + /// [Rust By Example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels pub NAMED_ASM_LABELS, Deny, "named labels in inline assembly", diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs index 69c376c6169..cb08e952586 100644 --- a/compiler/rustc_lint/src/context.rs +++ b/compiler/rustc_lint/src/context.rs @@ -16,10 +16,9 @@ use self::TargetLint::*; -use crate::levels::{is_known_lint_tool, LintLevelsBuilder}; +use crate::levels::LintLevelsBuilder; use crate::passes::{EarlyLintPassObject, LateLintPassObject}; -use ast::util::unicode::TEXT_FLOW_CONTROL_CHARS; -use rustc_ast as ast; +use rustc_ast::util::unicode::TEXT_FLOW_CONTROL_CHARS; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync; use rustc_errors::{struct_span_err, Applicability, SuggestionStyle}; @@ -32,13 +31,14 @@ use rustc_middle::middle::privacy::AccessLevels; use rustc_middle::middle::stability; use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout}; use rustc_middle::ty::print::with_no_trimmed_paths; -use rustc_middle::ty::{self, print::Printer, subst::GenericArg, Ty, TyCtxt}; +use rustc_middle::ty::{self, print::Printer, subst::GenericArg, RegisteredTools, Ty, TyCtxt}; use rustc_serialize::json::Json; use rustc_session::lint::{BuiltinLintDiagnostics, ExternDepSpec}; use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintId}; use rustc_session::Session; use rustc_span::lev_distance::find_best_match_for_name; -use rustc_span::{symbol::Symbol, BytePos, MultiSpan, Span, DUMMY_SP}; +use rustc_span::symbol::{sym, Ident, Symbol}; +use rustc_span::{BytePos, MultiSpan, Span, DUMMY_SP}; use rustc_target::abi; use tracing::debug; @@ -313,7 +313,7 @@ impl LintStore { sess: &Session, lint_name: &str, level: Level, - crate_attrs: &[ast::Attribute], + registered_tools: &RegisteredTools, ) { let (tool_name, lint_name_only) = parse_lint_and_tool_name(lint_name); if lint_name_only == crate::WARNINGS.name_lower() && level == Level::ForceWarn { @@ -326,7 +326,7 @@ impl LintStore { ) .emit(); } - let db = match self.check_lint_name(sess, lint_name_only, tool_name, crate_attrs) { + let db = match self.check_lint_name(lint_name_only, tool_name, registered_tools) { CheckLintNameResult::Ok(_) => None, CheckLintNameResult::Warning(ref msg, _) => Some(sess.struct_warn(msg)), CheckLintNameResult::NoLint(suggestion) => { @@ -397,13 +397,16 @@ impl LintStore { /// printing duplicate warnings. pub fn check_lint_name( &self, - sess: &Session, lint_name: &str, tool_name: Option<Symbol>, - crate_attrs: &[ast::Attribute], + registered_tools: &RegisteredTools, ) -> CheckLintNameResult<'_> { if let Some(tool_name) = tool_name { - if !is_known_lint_tool(tool_name, sess, crate_attrs) { + // FIXME: rustc and rustdoc are considered tools for lints, but not for attributes. + if tool_name != sym::rustc + && tool_name != sym::rustdoc + && !registered_tools.contains(&Ident::with_dummy_span(tool_name)) + { return CheckLintNameResult::NoTool; } } @@ -521,7 +524,7 @@ impl LintStore { } } -/// Context for lint checking after type checking. +/// Context for lint checking outside of type inference. pub struct LateContext<'tcx> { /// Type context we're checking in. 
pub tcx: TyCtxt<'tcx>, @@ -553,20 +556,9 @@ pub struct LateContext<'tcx> { pub only_module: bool, } -/// Context for lint checking of the AST, after expansion, before lowering to -/// HIR. +/// Context for lint checking of the AST, after expansion, before lowering to HIR. pub struct EarlyContext<'a> { - /// Type context we're checking in. - pub sess: &'a Session, - - /// The crate being checked. - pub krate: &'a ast::Crate, - pub builder: LintLevelsBuilder<'a>, - - /// The store of registered lints and the lint levels. - pub lint_store: &'a LintStore, - pub buffered: LintBuffer, } @@ -770,6 +762,7 @@ pub trait LintContext: Sized { } BuiltinLintDiagnostics::NamedAsmLabel(help) => { db.help(&help); + db.note("see the asm section of Rust By Example <https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels> for more information"); } } // Rewrap `db`, and pass control to the user. @@ -801,19 +794,20 @@ pub trait LintContext: Sized { } impl<'a> EarlyContext<'a> { - pub fn new( + pub(crate) fn new( sess: &'a Session, + warn_about_weird_lints: bool, lint_store: &'a LintStore, - krate: &'a ast::Crate, - crate_attrs: &'a [ast::Attribute], + registered_tools: &'a RegisteredTools, buffered: LintBuffer, - warn_about_weird_lints: bool, ) -> EarlyContext<'a> { EarlyContext { - sess, - krate, - lint_store, - builder: LintLevelsBuilder::new(sess, warn_about_weird_lints, lint_store, crate_attrs), + builder: LintLevelsBuilder::new( + sess, + warn_about_weird_lints, + lint_store, + registered_tools, + ), buffered, } } @@ -851,11 +845,11 @@ impl LintContext for EarlyContext<'_> { /// Gets the overall compiler `Session` object. fn sess(&self) -> &Session { - &self.sess + &self.builder.sess() } fn lints(&self) -> &LintStore { - &*self.lint_store + self.builder.lint_store() } fn lookup<S: Into<MultiSpan>>( diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs index 0bba66d3838..1b2c88867d4 100644 --- a/compiler/rustc_lint/src/early.rs +++ b/compiler/rustc_lint/src/early.rs @@ -16,9 +16,11 @@ use crate::context::{EarlyContext, LintContext, LintStore}; use crate::passes::{EarlyLintPass, EarlyLintPassObject}; -use rustc_ast as ast; -use rustc_ast::visit as ast_visit; +use rustc_ast::ptr::P; +use rustc_ast::visit::{self as ast_visit, Visitor}; use rustc_ast::AstLike; +use rustc_ast::{self as ast, walk_list}; +use rustc_middle::ty::RegisteredTools; use rustc_session::lint::{BufferedEarlyLint, LintBuffer, LintPass}; use rustc_session::Session; use rustc_span::symbol::Ident; @@ -31,7 +33,7 @@ macro_rules! run_early_pass { ($cx:expr, $f:ident, $($args:expr),*) => ({ $cx.pass.$f(&$cx.context, $($args),*); }) } -struct EarlyContextAndPass<'a, T: EarlyLintPass> { +pub struct EarlyContextAndPass<'a, T: EarlyLintPass> { context: EarlyContext<'a>, pass: T, } @@ -57,7 +59,7 @@ impl<'a, T: EarlyLintPass> EarlyContextAndPass<'a, T> { F: FnOnce(&mut Self), { let is_crate_node = id == ast::CRATE_NODE_ID; - let push = self.context.builder.push(attrs, &self.context.lint_store, is_crate_node); + let push = self.context.builder.push(attrs, is_crate_node); self.check_id(id); self.enter_attrs(attrs); f(self); @@ -325,48 +327,89 @@ macro_rules! early_lint_pass_impl { crate::early_lint_methods!(early_lint_pass_impl, []); -fn early_lint_crate<T: EarlyLintPass>( +/// Early lints work on different nodes - either on the crate root, or on freshly loaded modules. +/// This trait generalizes over those nodes. 
+pub trait EarlyCheckNode<'a>: Copy { + fn id(self) -> ast::NodeId; + fn attrs<'b>(self) -> &'b [ast::Attribute] + where + 'a: 'b; + fn check<'b>(self, cx: &mut EarlyContextAndPass<'b, impl EarlyLintPass>) + where + 'a: 'b; +} + +impl<'a> EarlyCheckNode<'a> for &'a ast::Crate { + fn id(self) -> ast::NodeId { + ast::CRATE_NODE_ID + } + fn attrs<'b>(self) -> &'b [ast::Attribute] + where + 'a: 'b, + { + &self.attrs + } + fn check<'b>(self, cx: &mut EarlyContextAndPass<'b, impl EarlyLintPass>) + where + 'a: 'b, + { + run_early_pass!(cx, check_crate, self); + ast_visit::walk_crate(cx, self); + run_early_pass!(cx, check_crate_post, self); + } +} + +impl<'a> EarlyCheckNode<'a> for (ast::NodeId, &'a [ast::Attribute], &'a [P<ast::Item>]) { + fn id(self) -> ast::NodeId { + self.0 + } + fn attrs<'b>(self) -> &'b [ast::Attribute] + where + 'a: 'b, + { + self.1 + } + fn check<'b>(self, cx: &mut EarlyContextAndPass<'b, impl EarlyLintPass>) + where + 'a: 'b, + { + walk_list!(cx, visit_attribute, self.1); + walk_list!(cx, visit_item, self.2); + } +} + +fn early_lint_node<'a>( sess: &Session, + warn_about_weird_lints: bool, lint_store: &LintStore, - krate: &ast::Crate, - crate_attrs: &[ast::Attribute], - pass: T, + registered_tools: &RegisteredTools, buffered: LintBuffer, - warn_about_weird_lints: bool, + pass: impl EarlyLintPass, + check_node: impl EarlyCheckNode<'a>, ) -> LintBuffer { let mut cx = EarlyContextAndPass { context: EarlyContext::new( sess, + warn_about_weird_lints, lint_store, - krate, - crate_attrs, + registered_tools, buffered, - warn_about_weird_lints, ), pass, }; - // Visit the whole crate. - cx.with_lint_attrs(ast::CRATE_NODE_ID, &krate.attrs, |cx| { - // since the root module isn't visited as an item (because it isn't an - // item), warn for it here. - run_early_pass!(cx, check_crate, krate); - - ast_visit::walk_crate(cx, krate); - - run_early_pass!(cx, check_crate_post, krate); - }); + cx.with_lint_attrs(check_node.id(), check_node.attrs(), |cx| check_node.check(cx)); cx.context.buffered } -pub fn check_ast_crate<T: EarlyLintPass>( +pub fn check_ast_node<'a>( sess: &Session, - lint_store: &LintStore, - krate: &ast::Crate, - crate_attrs: &[ast::Attribute], pre_expansion: bool, + lint_store: &LintStore, + registered_tools: &RegisteredTools, lint_buffer: Option<LintBuffer>, - builtin_lints: T, + builtin_lints: impl EarlyLintPass, + check_node: impl EarlyCheckNode<'a>, ) { let passes = if pre_expansion { &lint_store.pre_expansion_passes } else { &lint_store.early_passes }; @@ -374,39 +417,39 @@ pub fn check_ast_crate<T: EarlyLintPass>( let mut buffered = lint_buffer.unwrap_or_default(); if !sess.opts.debugging_opts.no_interleave_lints { - buffered = early_lint_crate( + buffered = early_lint_node( sess, + pre_expansion, lint_store, - krate, - crate_attrs, - builtin_lints, + registered_tools, buffered, - pre_expansion, + builtin_lints, + check_node, ); if !passes.is_empty() { - buffered = early_lint_crate( + buffered = early_lint_node( sess, + false, lint_store, - krate, - crate_attrs, - EarlyLintPassObjects { lints: &mut passes[..] }, + registered_tools, buffered, - false, + EarlyLintPassObjects { lints: &mut passes[..] 
}, + check_node, ); } } else { for (i, pass) in passes.iter_mut().enumerate() { buffered = sess.prof.extra_verbose_generic_activity("run_lint", pass.name()).run(|| { - early_lint_crate( + early_lint_node( sess, + pre_expansion && i == 0, lint_store, - krate, - crate_attrs, - EarlyLintPassObjects { lints: slice::from_mut(pass) }, + registered_tools, buffered, - pre_expansion && i == 0, + EarlyLintPassObjects { lints: slice::from_mut(pass) }, + check_node, ) }); } diff --git a/compiler/rustc_lint/src/internal.rs b/compiler/rustc_lint/src/internal.rs index 7353cd6b876..d8e1162890c 100644 --- a/compiler/rustc_lint/src/internal.rs +++ b/compiler/rustc_lint/src/internal.rs @@ -108,7 +108,7 @@ impl<'tcx> LateLintPass<'tcx> for TyTyKind { lint.build(&format!("usage of qualified `ty::{}`", t)) .span_suggestion( path.span, - "try using it unqualified", + "try importing it and using it unqualified", t, // The import probably needs to be changed Applicability::MaybeIncorrect, diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs index 6e95708b17f..8afbd462c14 100644 --- a/compiler/rustc_lint/src/levels.rs +++ b/compiler/rustc_lint/src/levels.rs @@ -5,7 +5,7 @@ use rustc_ast_pretty::pprust; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder}; use rustc_hir as hir; -use rustc_hir::{intravisit, HirId, CRATE_HIR_ID}; +use rustc_hir::{intravisit, HirId}; use rustc_middle::hir::nested_filter; use rustc_middle::lint::LevelAndSource; use rustc_middle::lint::LintDiagnosticBuilder; @@ -14,7 +14,7 @@ use rustc_middle::lint::{ COMMAND_LINE, }; use rustc_middle::ty::query::Providers; -use rustc_middle::ty::TyCtxt; +use rustc_middle::ty::{RegisteredTools, TyCtxt}; use rustc_session::lint::{ builtin::{self, FORBIDDEN_LINT_GROUPS}, Level, Lint, LintId, @@ -27,14 +27,14 @@ use tracing::debug; fn lint_levels(tcx: TyCtxt<'_>, (): ()) -> LintLevelMap { let store = unerased_lint_store(tcx); - let crate_attrs = tcx.hir().attrs(CRATE_HIR_ID); - let levels = LintLevelsBuilder::new(tcx.sess, false, &store, crate_attrs); - let mut builder = LintLevelMapBuilder { levels, tcx, store }; + let levels = + LintLevelsBuilder::new(tcx.sess, false, &store, &tcx.resolutions(()).registered_tools); + let mut builder = LintLevelMapBuilder { levels, tcx }; let krate = tcx.hir().krate(); builder.levels.id_to_set.reserve(krate.owners.len() + 1); - let push = builder.levels.push(tcx.hir().attrs(hir::CRATE_HIR_ID), &store, true); + let push = builder.levels.push(tcx.hir().attrs(hir::CRATE_HIR_ID), true); builder.levels.register_id(hir::CRATE_HIR_ID); tcx.hir().walk_toplevel_module(&mut builder); builder.levels.pop(push); @@ -49,7 +49,7 @@ pub struct LintLevelsBuilder<'s> { cur: LintStackIndex, warn_about_weird_lints: bool, store: &'s LintStore, - crate_attrs: &'s [ast::Attribute], + registered_tools: &'s RegisteredTools, } pub struct BuilderPush { @@ -62,7 +62,7 @@ impl<'s> LintLevelsBuilder<'s> { sess: &'s Session, warn_about_weird_lints: bool, store: &'s LintStore, - crate_attrs: &'s [ast::Attribute], + registered_tools: &'s RegisteredTools, ) -> Self { let mut builder = LintLevelsBuilder { sess, @@ -71,19 +71,27 @@ impl<'s> LintLevelsBuilder<'s> { id_to_set: Default::default(), warn_about_weird_lints, store, - crate_attrs, + registered_tools, }; builder.process_command_line(sess, store); assert_eq!(builder.sets.list.len(), 1); builder } + pub(crate) fn sess(&self) -> &Session { + self.sess + } + + pub(crate) fn lint_store(&self) -> &LintStore { + 
self.store + } + fn process_command_line(&mut self, sess: &Session, store: &LintStore) { let mut specs = FxHashMap::default(); self.sets.lint_cap = sess.opts.lint_cap.unwrap_or(Level::Forbid); for &(ref lint_name, level) in &sess.opts.lint_opts { - store.check_lint_name_cmdline(sess, &lint_name, level, self.crate_attrs); + store.check_lint_name_cmdline(sess, &lint_name, level, self.registered_tools); let orig_level = level; let lint_flag_val = Symbol::intern(lint_name); @@ -217,12 +225,7 @@ impl<'s> LintLevelsBuilder<'s> { /// `#[allow]` /// /// Don't forget to call `pop`! - pub(crate) fn push( - &mut self, - attrs: &[ast::Attribute], - store: &LintStore, - is_crate_node: bool, - ) -> BuilderPush { + pub(crate) fn push(&mut self, attrs: &[ast::Attribute], is_crate_node: bool) -> BuilderPush { let mut specs = FxHashMap::default(); let sess = self.sess; let bad_attr = |span| struct_span_err!(sess, span, E0452, "malformed lint attribute input"); @@ -310,7 +313,8 @@ impl<'s> LintLevelsBuilder<'s> { }; let tool_name = tool_ident.map(|ident| ident.name); let name = pprust::path_to_string(&meta_item.path); - let lint_result = store.check_lint_name(sess, &name, tool_name, self.crate_attrs); + let lint_result = + self.store.check_lint_name(&name, tool_name, self.registered_tools); match &lint_result { CheckLintNameResult::Ok(ids) => { let src = LintLevelSource::Node( @@ -459,7 +463,7 @@ impl<'s> LintLevelsBuilder<'s> { // Ignore any errors or warnings that happen because the new name is inaccurate // NOTE: `new_name` already includes the tool name, so we don't have to add it again. if let CheckLintNameResult::Ok(ids) = - store.check_lint_name(sess, &new_name, None, self.crate_attrs) + self.store.check_lint_name(&new_name, None, self.registered_tools) { let src = LintLevelSource::Node(Symbol::intern(&new_name), sp, reason); for &id in ids { @@ -562,34 +566,19 @@ impl<'s> LintLevelsBuilder<'s> { } } -pub fn is_known_lint_tool(m_item: Symbol, sess: &Session, attrs: &[ast::Attribute]) -> bool { - if [sym::clippy, sym::rustc, sym::rustdoc].contains(&m_item) { - return true; - } - // Look for registered tools - // NOTE: does no error handling; error handling is done by rustc_resolve. 
- sess.filter_by_name(attrs, sym::register_tool) - .filter_map(|attr| attr.meta_item_list()) - .flatten() - .filter_map(|nested_meta| nested_meta.ident()) - .map(|ident| ident.name) - .any(|name| name == m_item) -} - -struct LintLevelMapBuilder<'a, 'tcx> { +struct LintLevelMapBuilder<'tcx> { levels: LintLevelsBuilder<'tcx>, tcx: TyCtxt<'tcx>, - store: &'a LintStore, } -impl LintLevelMapBuilder<'_, '_> { +impl LintLevelMapBuilder<'_> { fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F) where F: FnOnce(&mut Self), { let is_crate_hir = id == hir::CRATE_HIR_ID; let attrs = self.tcx.hir().attrs(id); - let push = self.levels.push(attrs, self.store, is_crate_hir); + let push = self.levels.push(attrs, is_crate_hir); if push.changed { self.levels.register_id(id); } @@ -598,7 +587,7 @@ impl LintLevelMapBuilder<'_, '_> { } } -impl<'tcx> intravisit::Visitor<'tcx> for LintLevelMapBuilder<'_, 'tcx> { +impl<'tcx> intravisit::Visitor<'tcx> for LintLevelMapBuilder<'tcx> { type NestedFilter = nested_filter::All; fn nested_visit_map(&mut self) -> Self::Map { diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index 4aa8505c940..a87f2b2768d 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -96,7 +96,7 @@ use unused::*; pub use builtin::SoftLints; pub use context::{CheckLintNameResult, FindLintError, LintStore}; pub use context::{EarlyContext, LateContext, LintContext}; -pub use early::check_ast_crate; +pub use early::{check_ast_node, EarlyCheckNode}; pub use late::check_crate; pub use passes::{EarlyLintPass, LateLintPass}; pub use rustc_session::lint::Level::{self, *}; @@ -481,6 +481,11 @@ fn register_builtins(store: &mut LintStore, no_interleave_lints: bool) { <https://github.com/rust-lang/rust/issues/59014> for more information", ); store.register_removed("plugin_as_library", "plugins have been deprecated and retired"); + store.register_removed( + "unsupported_naked_functions", + "converted into hard error, see RFC 2972 \ + <https://github.com/rust-lang/rfcs/blob/master/text/2972-constrained-naked.md> for more information", + ); } fn register_internals(store: &mut LintStore) { diff --git a/compiler/rustc_lint/src/non_ascii_idents.rs b/compiler/rustc_lint/src/non_ascii_idents.rs index a570206f1ee..2dd6dbd67a8 100644 --- a/compiler/rustc_lint/src/non_ascii_idents.rs +++ b/compiler/rustc_lint/src/non_ascii_idents.rs @@ -166,7 +166,7 @@ impl EarlyLintPass for NonAsciiIdents { } let mut has_non_ascii_idents = false; - let symbols = cx.sess.parse_sess.symbol_gallery.symbols.lock(); + let symbols = cx.sess().parse_sess.symbol_gallery.symbols.lock(); // Sort by `Span` so that error messages make sense with respect to the // order of identifier locations in the code. diff --git a/compiler/rustc_lint/src/non_fmt_panic.rs b/compiler/rustc_lint/src/non_fmt_panic.rs index a919b3c82aa..6bf25732f60 100644 --- a/compiler/rustc_lint/src/non_fmt_panic.rs +++ b/compiler/rustc_lint/src/non_fmt_panic.rs @@ -336,5 +336,5 @@ fn is_arg_inside_call(arg: Span, call: Span) -> bool { // panic call in the source file, to avoid invalid suggestions when macros are involved. // We specifically check for the spans to not be identical, as that happens sometimes when // proc_macros lie about spans and apply the same span to all the tokens they produce. 
- call.contains(arg) && !call.source_equal(&arg) + call.contains(arg) && !call.source_equal(arg) } diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index be7756b0f28..f73388c675e 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -164,7 +164,7 @@ impl EarlyLintPass for NonCamelCaseTypes { let has_repr_c = it .attrs .iter() - .any(|attr| attr::find_repr_attrs(&cx.sess, attr).contains(&attr::ReprC)); + .any(|attr| attr::find_repr_attrs(cx.sess(), attr).contains(&attr::ReprC)); if has_repr_c { return; diff --git a/compiler/rustc_lint/src/pass_by_value.rs b/compiler/rustc_lint/src/pass_by_value.rs index 26d0560bf89..2caf929788f 100644 --- a/compiler/rustc_lint/src/pass_by_value.rs +++ b/compiler/rustc_lint/src/pass_by_value.rs @@ -76,10 +76,10 @@ fn gen_args(cx: &LateContext<'_>, segment: &PathSegment<'_>) -> String { .map(|arg| match arg { GenericArg::Lifetime(lt) => lt.name.ident().to_string(), GenericArg::Type(ty) => { - cx.tcx.sess.source_map().span_to_snippet(ty.span).unwrap_or_default() + cx.tcx.sess.source_map().span_to_snippet(ty.span).unwrap_or_else(|_| "_".into()) } GenericArg::Const(c) => { - cx.tcx.sess.source_map().span_to_snippet(c.span).unwrap_or_default() + cx.tcx.sess.source_map().span_to_snippet(c.span).unwrap_or_else(|_| "_".into()) } GenericArg::Infer(_) => String::from("_"), }) diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs index dafff640b36..4c7f3482776 100644 --- a/compiler/rustc_lint/src/traits.rs +++ b/compiler/rustc_lint/src/traits.rs @@ -86,7 +86,6 @@ declare_lint_pass!( impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) { - use rustc_middle::ty; use rustc_middle::ty::PredicateKind::*; let predicates = cx.tcx.explicit_predicates_of(item.def_id); @@ -94,7 +93,7 @@ impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints { let Trait(trait_predicate) = predicate.kind().skip_binder() else { continue }; - if trait_predicate.constness == ty::BoundConstness::ConstIfConst { + if trait_predicate.is_const_if_const() { // `~const Drop` definitely have meanings so avoid linting here. continue; } diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index c2f6118227a..4af68233f0d 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -2456,6 +2456,10 @@ declare_lint! { /// register size, to alert you of possibly using the incorrect width. To /// fix this, add the suggested modifier to the template, or cast the /// value to the correct size. + /// + /// See [register template modifiers] in the reference for more details. + /// + /// [register template modifiers]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html#template-modifiers pub ASM_SUB_REGISTER, Warn, "using only a subset of a register for inline asm inputs", @@ -2760,52 +2764,6 @@ declare_lint! { } declare_lint! { - /// The `unsupported_naked_functions` lint detects naked function - /// definitions that are unsupported but were previously accepted. - /// - /// ### Example - /// - /// ```rust - /// #![feature(naked_functions)] - /// - /// #[naked] - /// pub extern "C" fn f() -> u32 { - /// 42 - /// } - /// ``` - /// - /// {{produces}} - /// - /// ### Explanation - /// - /// The naked functions must be defined using a single inline assembly - /// block. 
- /// - /// The execution must never fall through past the end of the assembly - /// code so the block must use `noreturn` option. The asm block can also - /// use `att_syntax` option, but other options are not allowed. - /// - /// The asm block must not contain any operands other than `const` and - /// `sym`. Additionally, naked function should specify a non-Rust ABI. - /// - /// Naked functions cannot be inlined. All forms of the `inline` attribute - /// are prohibited. - /// - /// While other definitions of naked functions were previously accepted, - /// they are unsupported and might not work reliably. This is a - /// [future-incompatible] lint that will transition into hard error in - /// the future. - /// - /// [future-incompatible]: ../index.md#future-incompatible-lints - pub UNSUPPORTED_NAKED_FUNCTIONS, - Warn, - "unsupported naked function definitions", - @future_incompatible = FutureIncompatibleInfo { - reference: "issue #32408 <https://github.com/rust-lang/rust/issues/32408>", - }; -} - -declare_lint! { /// The `ineffective_unstable_trait_impl` lint detects `#[unstable]` attributes which are not used. /// /// ### Example @@ -3070,7 +3028,6 @@ declare_lint_pass! { UNINHABITED_STATIC, FUNCTION_ITEM_REFERENCES, USELESS_DEPRECATED, - UNSUPPORTED_NAKED_FUNCTIONS, MISSING_ABI, INVALID_DOC_ATTRIBUTES, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS, diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs index 97f6df51f88..3b5d636124d 100644 --- a/compiler/rustc_lint_defs/src/lib.rs +++ b/compiler/rustc_lint_defs/src/lib.rs @@ -282,7 +282,7 @@ pub enum ExternDepSpec { // This could be a closure, but then implementing derive trait // becomes hacky (and it gets allocated). -#[derive(PartialEq, Debug)] +#[derive(Debug)] pub enum BuiltinLintDiagnostics { Normal, AbsPathWithModule(Span), @@ -309,7 +309,6 @@ pub enum BuiltinLintDiagnostics { /// Lints that are buffered up early on in the `Session` before the /// `LintLevels` is calculated. -#[derive(PartialEq)] pub struct BufferedEarlyLint { /// The span of code that we are linting on. pub span: MultiSpan, @@ -336,9 +335,7 @@ pub struct LintBuffer { impl LintBuffer { pub fn add_early_lint(&mut self, early_lint: BufferedEarlyLint) { let arr = self.map.entry(early_lint.node_id).or_default(); - if !arr.contains(&early_lint) { - arr.push(early_lint); - } + arr.push(early_lint); } pub fn add_lint( diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index c21e4acbefe..dcd6327c92f 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -722,9 +722,12 @@ extern "C" bool LLVMRustIsRustLLVM() { #endif } -extern "C" void LLVMRustAddModuleFlag(LLVMModuleRef M, const char *Name, - uint32_t Value) { - unwrap(M)->addModuleFlag(Module::Warning, Name, Value); +extern "C" void LLVMRustAddModuleFlag( + LLVMModuleRef M, + Module::ModFlagBehavior MergeBehavior, + const char *Name, + uint32_t Value) { + unwrap(M)->addModuleFlag(MergeBehavior, Name, Value); } extern "C" LLVMValueRef LLVMRustMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) { diff --git a/compiler/rustc_macros/src/serialize.rs b/compiler/rustc_macros/src/serialize.rs index 3351564299c..6c5461505fa 100644 --- a/compiler/rustc_macros/src/serialize.rs +++ b/compiler/rustc_macros/src/serialize.rs @@ -47,7 +47,7 @@ fn decodable_body( quote! 
{ ::rustc_serialize::Decoder::read_struct( __decoder, - |__decoder| { ::std::result::Result::Ok(#construct) }, + |__decoder| { #construct }, ) } } @@ -57,7 +57,7 @@ fn decodable_body( .enumerate() .map(|(idx, vi)| { let construct = vi.construct(|field, index| decode_field(field, index, false)); - quote! { #idx => { ::std::result::Result::Ok(#construct) } } + quote! { #idx => { #construct } } }) .collect(); let names: TokenStream = variants @@ -82,8 +82,7 @@ fn decodable_body( |__decoder, __variant_idx| { match __variant_idx { #match_inner - _ => return ::std::result::Result::Err( - ::rustc_serialize::Decoder::error(__decoder, #message)), + _ => panic!(#message), } }) } @@ -95,9 +94,7 @@ fn decodable_body( s.bound_impl( quote!(::rustc_serialize::Decodable<#decoder_ty>), quote! { - fn decode( - __decoder: &mut #decoder_ty, - ) -> ::std::result::Result<Self, <#decoder_ty as ::rustc_serialize::Decoder>::Error> { + fn decode(__decoder: &mut #decoder_ty) -> Self { #decode_body } }, @@ -127,12 +124,7 @@ fn decode_field(field: &syn::Field, index: usize, is_struct: bool) -> proc_macro #__decoder, #opt_field_name #decode_inner_method) }; - quote! { - match #decode_call { - ::std::result::Result::Ok(__res) => __res, - ::std::result::Result::Err(__err) => return ::std::result::Result::Err(__err), - } - } + quote! { #decode_call } } pub fn type_encodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream { diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index 220bc9c5f75..fb1c71fb8cd 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -263,7 +263,7 @@ impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Lazy<T> { fn decode<M: Metadata<'a, 'tcx>>(self, metadata: M) -> T { let mut dcx = metadata.decoder(self.position.get()); dcx.lazy_state = LazyState::NodeStart(self.position); - T::decode(&mut dcx).unwrap() + T::decode(&mut dcx) } } @@ -274,7 +274,7 @@ impl<'a: 'x, 'tcx: 'x, 'x, T: Decodable<DecodeContext<'a, 'tcx>>> Lazy<[T]> { ) -> impl ExactSizeIterator<Item = T> + Captures<'a> + Captures<'tcx> + 'x { let mut dcx = metadata.decoder(self.position.get()); dcx.lazy_state = LazyState::NodeStart(self.position); - (0..self.meta).map(move |_| T::decode(&mut dcx).unwrap()) + (0..self.meta).map(move |_| T::decode(&mut dcx)) } } @@ -300,11 +300,8 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> { if cnum == LOCAL_CRATE { self.cdata().cnum } else { self.cdata().cnum_map[cnum] } } - fn read_lazy_with_meta<T: ?Sized + LazyMeta>( - &mut self, - meta: T::Meta, - ) -> Result<Lazy<T>, <Self as Decoder>::Error> { - let distance = self.read_usize()?; + fn read_lazy_with_meta<T: ?Sized + LazyMeta>(&mut self, meta: T::Meta) -> Lazy<T> { + let distance = self.read_usize(); let position = match self.lazy_state { LazyState::NoNode => bug!("read_lazy_with_meta: outside of a metadata node"), LazyState::NodeStart(start) => { @@ -315,7 +312,7 @@ impl<'a, 'tcx> DecodeContext<'a, 'tcx> { LazyState::Previous(last_pos) => last_pos.get() + distance, }; self.lazy_state = LazyState::Previous(NonZeroUsize::new(position).unwrap()); - Ok(Lazy::from_position_and_meta(NonZeroUsize::new(position).unwrap(), meta)) + Lazy::from_position_and_meta(NonZeroUsize::new(position).unwrap(), meta) } #[inline] @@ -342,25 +339,21 @@ impl<'a, 'tcx> TyDecoder<'tcx> for DecodeContext<'a, 'tcx> { self.opaque.position() } - fn cached_ty_for_shorthand<F>( - &mut self, - shorthand: usize, - or_insert_with: F, - ) -> 
Result<Ty<'tcx>, Self::Error> + fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx> where - F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>, + F: FnOnce(&mut Self) -> Ty<'tcx>, { let tcx = self.tcx(); let key = ty::CReaderCacheKey { cnum: Some(self.cdata().cnum), pos: shorthand }; if let Some(&ty) = tcx.ty_rcache.borrow().get(&key) { - return Ok(ty); + return ty; } - let ty = or_insert_with(self)?; + let ty = or_insert_with(self); tcx.ty_rcache.borrow_mut().insert(key, ty); - Ok(ty) + ty } fn with_position<F, R>(&mut self, pos: usize, f: F) -> R @@ -376,7 +369,7 @@ impl<'a, 'tcx> TyDecoder<'tcx> for DecodeContext<'a, 'tcx> { r } - fn decode_alloc_id(&mut self) -> Result<rustc_middle::mir::interpret::AllocId, Self::Error> { + fn decode_alloc_id(&mut self) -> rustc_middle::mir::interpret::AllocId { if let Some(alloc_decoding_session) = self.alloc_decoding_session { alloc_decoding_session.decode_alloc_id(self) } else { @@ -386,48 +379,48 @@ impl<'a, 'tcx> TyDecoder<'tcx> for DecodeContext<'a, 'tcx> { } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum { - fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<CrateNum, String> { - let cnum = CrateNum::from_u32(d.read_u32()?); - Ok(d.map_encoded_cnum_to_current(cnum)) + fn decode(d: &mut DecodeContext<'a, 'tcx>) -> CrateNum { + let cnum = CrateNum::from_u32(d.read_u32()); + d.map_encoded_cnum_to_current(cnum) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex { - fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<DefIndex, String> { - Ok(DefIndex::from_u32(d.read_u32()?)) + fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefIndex { + DefIndex::from_u32(d.read_u32()) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex { - fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<ExpnIndex, String> { - Ok(ExpnIndex::from_u32(d.read_u32()?)) + fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ExpnIndex { + ExpnIndex::from_u32(d.read_u32()) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SyntaxContext { - fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<SyntaxContext, String> { + fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> SyntaxContext { let cdata = decoder.cdata(); let sess = decoder.sess.unwrap(); let cname = cdata.root.name; rustc_span::hygiene::decode_syntax_context(decoder, &cdata.hygiene_context, |_, id| { debug!("SpecializedDecoder<SyntaxContext>: decoding {}", id); - Ok(cdata + cdata .root .syntax_contexts .get(cdata, id) .unwrap_or_else(|| panic!("Missing SyntaxContext {:?} for crate {:?}", id, cname)) - .decode((cdata, sess))) + .decode((cdata, sess)) }) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId { - fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<ExpnId, String> { + fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> ExpnId { let local_cdata = decoder.cdata(); let sess = decoder.sess.unwrap(); - let cnum = CrateNum::decode(decoder)?; - let index = u32::decode(decoder)?; + let cnum = CrateNum::decode(decoder); + let index = u32::decode(decoder); let expn_id = rustc_span::hygiene::decode_expn_id(cnum, index, |expn_id| { let ExpnId { krate: cnum, local_id: index } = expn_id; @@ -453,23 +446,23 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId { .decode((crate_data, sess)); (expn_data, expn_hash) }); - Ok(expn_id) + expn_id } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { - fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Span, String> { - let ctxt = 
SyntaxContext::decode(decoder)?; - let tag = u8::decode(decoder)?; + fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Span { + let ctxt = SyntaxContext::decode(decoder); + let tag = u8::decode(decoder); if tag == TAG_PARTIAL_SPAN { - return Ok(DUMMY_SP.with_ctxt(ctxt)); + return DUMMY_SP.with_ctxt(ctxt); } debug_assert!(tag == TAG_VALID_SPAN_LOCAL || tag == TAG_VALID_SPAN_FOREIGN); - let lo = BytePos::decode(decoder)?; - let len = BytePos::decode(decoder)?; + let lo = BytePos::decode(decoder); + let len = BytePos::decode(decoder); let hi = lo + len; let Some(sess) = decoder.sess else { @@ -512,7 +505,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { if decoder.cdata().root.is_proc_macro_crate() { // Decode `CrateNum` as u32 - using `CrateNum::decode` will ICE // since we don't have `cnum_map` populated. - let cnum = u32::decode(decoder)?; + let cnum = u32::decode(decoder); panic!( "Decoding of crate {:?} tried to access proc-macro dep {:?}", decoder.cdata().root.name, @@ -520,7 +513,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { ); } // tag is TAG_VALID_SPAN_FOREIGN, checked by `debug_assert` above - let cnum = CrateNum::decode(decoder)?; + let cnum = CrateNum::decode(decoder); debug!( "SpecializedDecoder<Span>::specialized_decode: loading source files from cnum {:?}", cnum @@ -582,18 +575,18 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span { (hi + source_file.translated_source_file.start_pos) - source_file.original_start_pos; // Do not try to decode parent for foreign spans. - Ok(Span::new(lo, hi, ctxt, None)) + Span::new(lo, hi, ctxt, None) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [thir::abstract_const::Node<'tcx>] { - fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Self { ty::codec::RefDecodable::decode(d) } } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] { - fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Self { ty::codec::RefDecodable::decode(d) } } @@ -601,7 +594,7 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeContext<'a, 'tcx>> for Lazy<T> { - fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> { + fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { decoder.read_lazy_with_meta(()) } } @@ -609,9 +602,9 @@ impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeContext<'a impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeContext<'a, 'tcx>> for Lazy<[T]> { - fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> { - let len = decoder.read_usize()?; - if len == 0 { Ok(Lazy::empty()) } else { decoder.read_lazy_with_meta(len) } + fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { + let len = decoder.read_usize(); + if len == 0 { Lazy::empty() } else { decoder.read_lazy_with_meta(len) } } } @@ -620,8 +613,8 @@ impl<'a, 'tcx, I: Idx, T: Decodable<DecodeContext<'a, 'tcx>>> Decodable<DecodeCo where Option<T>: FixedSizeEncoding, { - fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Result<Self, String> { - let len = decoder.read_usize()?; + fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self { + let len = decoder.read_usize(); decoder.read_lazy_with_meta(len) } } @@ -1286,7 +1279,7 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { }; ty::AssocItem { - 
ident, + name: ident.name, kind, vis: self.get_visibility(id), defaultness: container.defaultness(), @@ -1380,11 +1373,15 @@ impl<'a, 'tcx> CrateMetadataRef<'a> { self.root.traits.decode(self).map(move |index| self.local_def_id(index)) } - fn get_trait_impls(self) -> impl Iterator<Item = (DefId, Option<SimplifiedType>)> + 'a { - self.cdata.trait_impls.values().flat_map(move |impls| { - impls - .decode(self) - .map(move |(idx, simplified_self_ty)| (self.local_def_id(idx), simplified_self_ty)) + fn get_trait_impls(self) -> impl Iterator<Item = (DefId, DefId, Option<SimplifiedType>)> + 'a { + self.cdata.trait_impls.iter().flat_map(move |((trait_cnum_raw, trait_index), impls)| { + let trait_def_id = DefId { + krate: self.cnum_map[CrateNum::from_u32(*trait_cnum_raw)], + index: *trait_index, + }; + impls.decode(self).map(move |(impl_index, simplified_self_ty)| { + (trait_def_id, self.local_def_id(impl_index), simplified_self_ty) + }) }) } diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs index 9b59c14c2fe..2f8e35648ec 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs @@ -493,7 +493,7 @@ impl CStore { pub fn trait_impls_in_crate_untracked( &self, cnum: CrateNum, - ) -> impl Iterator<Item = (DefId, Option<SimplifiedType>)> + '_ { + ) -> impl Iterator<Item = (DefId, DefId, Option<SimplifiedType>)> + '_ { self.get_crate_data(cnum).get_trait_impls() } } diff --git a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs index 054431169a2..d66f2b031a8 100644 --- a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs +++ b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs @@ -39,11 +39,11 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for DefPathHashMapRef<'tcx> { } impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefPathHashMapRef<'static> { - fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Result<DefPathHashMapRef<'static>, String> { + fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefPathHashMapRef<'static> { // Import TyDecoder so we can access the DecodeContext::position() method use crate::rustc_middle::ty::codec::TyDecoder; - let len = d.read_usize()?; + let len = d.read_usize(); let pos = d.position(); let o = OwningRef::new(d.blob().clone()).map(|x| &x[pos..pos + len]); @@ -52,7 +52,9 @@ impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefPathHashMapRef<'static> // the method. We use read_raw_bytes() for that. 
let _ = d.read_raw_bytes(len); - let inner = odht::HashTable::from_raw_bytes(o).map_err(|e| format!("{}", e))?; - Ok(DefPathHashMapRef::OwnedFromMetadata(inner)) + let inner = odht::HashTable::from_raw_bytes(o).unwrap_or_else(|e| { + panic!("decode error: {}", e); + }); + DefPathHashMapRef::OwnedFromMetadata(inner) } } diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index ebb78adf343..3fae652ee2e 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -1291,7 +1291,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { record!(self.tables.kind[def_id] <- EntryKind::AssocType(container)); } } - self.encode_ident_span(def_id, impl_item.ident); + self.encode_ident_span(def_id, impl_item.ident(self.tcx)); self.encode_item_type(def_id); if let Some(trait_item_def_id) = impl_item.trait_item_def_id { record!(self.tables.trait_item_def_id[def_id] <- trait_item_def_id); @@ -1314,7 +1314,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { return; } - let mut keys_and_jobs = self + let keys_and_jobs = self .tcx .mir_keys(()) .iter() @@ -1327,8 +1327,6 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { } }) .collect::<Vec<_>>(); - // Sort everything to ensure a stable order for diagnotics. - keys_and_jobs.sort_by_key(|&(def_id, _, _)| def_id.index()); for (def_id, encode_const, encode_opt) in keys_and_jobs.into_iter() { debug_assert!(encode_const || encode_opt); diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs index 82ea7ff6aab..1885df6ac5d 100644 --- a/compiler/rustc_middle/src/hir/map/mod.rs +++ b/compiler/rustc_middle/src/hir/map/mod.rs @@ -204,8 +204,11 @@ impl<'hir> Map<'hir> { if hir_id.local_id == ItemLocalId::new(0) { Some(hir_id.owner) } else { - // FIXME(#85914) is this access safe for incr. comp.? - self.tcx.untracked_resolutions.definitions.opt_hir_id_to_local_def_id(hir_id) + self.tcx + .hir_owner_nodes(hir_id.owner)? + .local_id_to_def_id + .get(&hir_id.local_id) + .copied() } } diff --git a/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs b/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs index 5f028975bd0..e2f3d6e078f 100644 --- a/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs +++ b/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs @@ -45,8 +45,9 @@ impl<S: serialize::Encoder> serialize::Encodable<S> for GraphIsCyclicCache { impl<D: serialize::Decoder> serialize::Decodable<D> for GraphIsCyclicCache { #[inline] - fn decode(d: &mut D) -> Result<Self, D::Error> { - serialize::Decodable::decode(d).map(|_v: ()| Self::new()) + fn decode(d: &mut D) -> Self { + let () = serialize::Decodable::decode(d); + Self::new() } } diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index b762a10da84..66f2c6e78a2 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -273,20 +273,20 @@ pub struct AllocDecodingSession<'s> { impl<'s> AllocDecodingSession<'s> { /// Decodes an `AllocId` in a thread-safe way. - pub fn decode_alloc_id<'tcx, D>(&self, decoder: &mut D) -> Result<AllocId, D::Error> + pub fn decode_alloc_id<'tcx, D>(&self, decoder: &mut D) -> AllocId where D: TyDecoder<'tcx>, { // Read the index of the allocation. 
- let idx = usize::try_from(decoder.read_u32()?).unwrap(); + let idx = usize::try_from(decoder.read_u32()).unwrap(); let pos = usize::try_from(self.state.data_offsets[idx]).unwrap(); // Decode the `AllocDiscriminant` now so that we know if we have to reserve an // `AllocId`. let (alloc_kind, pos) = decoder.with_position(pos, |decoder| { - let alloc_kind = AllocDiscriminant::decode(decoder)?; - Ok((alloc_kind, decoder.position())) - })?; + let alloc_kind = AllocDiscriminant::decode(decoder); + (alloc_kind, decoder.position()) + }); // Check the decoding state to see if it's already decoded or if we should // decode it here. @@ -295,7 +295,7 @@ impl<'s> AllocDecodingSession<'s> { match *entry { State::Done(alloc_id) => { - return Ok(alloc_id); + return alloc_id; } ref mut entry @ State::Empty => { // We are allowed to decode. @@ -329,7 +329,7 @@ impl<'s> AllocDecodingSession<'s> { State::InProgress(ref mut sessions, alloc_id) => { if sessions.contains(&self.session_id) { // Don't recurse. - return Ok(alloc_id); + return alloc_id; } else { // Start decoding concurrently. sessions.insert(self.session_id); @@ -343,37 +343,37 @@ impl<'s> AllocDecodingSession<'s> { let alloc_id = decoder.with_position(pos, |decoder| { match alloc_kind { AllocDiscriminant::Alloc => { - let alloc = <&'tcx Allocation as Decodable<_>>::decode(decoder)?; + let alloc = <&'tcx Allocation as Decodable<_>>::decode(decoder); // We already have a reserved `AllocId`. let alloc_id = alloc_id.unwrap(); trace!("decoded alloc {:?}: {:#?}", alloc_id, alloc); decoder.tcx().set_alloc_id_same_memory(alloc_id, alloc); - Ok(alloc_id) + alloc_id } AllocDiscriminant::Fn => { assert!(alloc_id.is_none()); trace!("creating fn alloc ID"); - let instance = ty::Instance::decode(decoder)?; + let instance = ty::Instance::decode(decoder); trace!("decoded fn alloc instance: {:?}", instance); let alloc_id = decoder.tcx().create_fn_alloc(instance); - Ok(alloc_id) + alloc_id } AllocDiscriminant::Static => { assert!(alloc_id.is_none()); trace!("creating extern static alloc ID"); - let did = <DefId as Decodable<D>>::decode(decoder)?; + let did = <DefId as Decodable<D>>::decode(decoder); trace!("decoded static def-ID: {:?}", did); let alloc_id = decoder.tcx().create_static_alloc(did); - Ok(alloc_id) + alloc_id } } - })?; + }); self.state.decoding_state[idx].with_lock(|entry| { *entry = State::Done(alloc_id); }); - Ok(alloc_id) + alloc_id } } diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index 48f39b26152..b1ab0f5b533 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -619,20 +619,20 @@ impl<'tcx, E: TyEncoder<'tcx>, T: Encodable<E>> Encodable<E> for ClearCrossCrate } impl<'tcx, D: TyDecoder<'tcx>, T: Decodable<D>> Decodable<D> for ClearCrossCrate<T> { #[inline] - fn decode(d: &mut D) -> Result<ClearCrossCrate<T>, D::Error> { + fn decode(d: &mut D) -> ClearCrossCrate<T> { if D::CLEAR_CROSS_CRATE { - return Ok(ClearCrossCrate::Clear); + return ClearCrossCrate::Clear; } - let discr = u8::decode(d)?; + let discr = u8::decode(d); match discr { - TAG_CLEAR_CROSS_CRATE_CLEAR => Ok(ClearCrossCrate::Clear), + TAG_CLEAR_CROSS_CRATE_CLEAR => ClearCrossCrate::Clear, TAG_CLEAR_CROSS_CRATE_SET => { - let val = T::decode(d)?; - Ok(ClearCrossCrate::Set(val)) + let val = T::decode(d); + ClearCrossCrate::Set(val) } - tag => Err(d.error(&format!("Invalid tag for ClearCrossCrate: {:?}", tag))), + tag => panic!("Invalid tag for ClearCrossCrate: {:?}", tag), } } } @@ -894,7 +894,7 @@ pub 
struct LocalDecl<'tcx> { /// across a suspension point against the type components of the generator /// which type checking knows are live across a suspension point. We need to /// flag drop flags to avoid triggering this check as they are introduced - /// after typeck. + /// outside of type inference. /// /// This should be sound because the drop flags are fully algebraic, and /// therefore don't affect the auto-trait or outlives properties of the diff --git a/compiler/rustc_middle/src/mir/predecessors.rs b/compiler/rustc_middle/src/mir/predecessors.rs index fd6bb76dc43..2562baac911 100644 --- a/compiler/rustc_middle/src/mir/predecessors.rs +++ b/compiler/rustc_middle/src/mir/predecessors.rs @@ -57,14 +57,15 @@ impl PredecessorCache { impl<S: serialize::Encoder> serialize::Encodable<S> for PredecessorCache { #[inline] fn encode(&self, s: &mut S) -> Result<(), S::Error> { - serialize::Encodable::encode(&(), s) + s.emit_unit() } } impl<D: serialize::Decoder> serialize::Decodable<D> for PredecessorCache { #[inline] - fn decode(d: &mut D) -> Result<Self, D::Error> { - serialize::Decodable::decode(d).map(|_v: ()| Self::new()) + fn decode(d: &mut D) -> Self { + let () = d.read_unit(); + Self::new() } } diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs index 06cbc3383ef..965d30a7b92 100644 --- a/compiler/rustc_middle/src/mir/spanview.rs +++ b/compiler/rustc_middle/src/mir/spanview.rs @@ -230,7 +230,7 @@ where } /// Format a string showing the start line and column, and end line and column within a file. -pub fn source_range_no_file<'tcx>(tcx: TyCtxt<'tcx>, span: &Span) -> String { +pub fn source_range_no_file<'tcx>(tcx: TyCtxt<'tcx>, span: Span) -> String { let source_map = tcx.sess.source_map(); let start = source_map.lookup_char_pos(span.lo()); let end = source_map.lookup_char_pos(span.hi()); @@ -629,7 +629,7 @@ fn tooltip<'tcx>( let mut text = Vec::new(); text.push(format!("{}: {}:", spanview_id, &source_map.span_to_embeddable_string(span))); for statement in statements { - let source_range = source_range_no_file(tcx, &statement.source_info.span); + let source_range = source_range_no_file(tcx, statement.source_info.span); text.push(format!( "\n{}{}: {}: {:?}", TOOLTIP_INDENT, @@ -639,7 +639,7 @@ fn tooltip<'tcx>( )); } if let Some(term) = terminator { - let source_range = source_range_no_file(tcx, &term.source_info.span); + let source_range = source_range_no_file(tcx, term.source_info.span); text.push(format!( "\n{}{}: {}: {:?}", TOOLTIP_INDENT, diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 5dc7b219642..54391bd649e 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -252,7 +252,7 @@ rustc_queries! { /// Set of all the `DefId`s in this crate that have MIR associated with /// them. This includes all the body owners, but also things like struct /// constructors. - query mir_keys(_: ()) -> FxHashSet<LocalDefId> { + query mir_keys(_: ()) -> rustc_data_structures::fx::FxIndexSet<LocalDefId> { storage(ArenaCacheSelector<'tcx>) desc { "getting a list of all mir_keys" } } @@ -784,24 +784,11 @@ rustc_queries! 
{ desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) } cache_on_disk_if { true } load_cached(tcx, id) { - #[cfg(bootstrap)] - { - match match tcx.on_disk_cache().as_ref() { - Some(c) => c.try_load_query_result(*tcx, id), - None => None, - } { - Some(x) => Some(&*tcx.arena.alloc(x)), - None => None, - } - } - #[cfg(not(bootstrap))] - { - let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx - .on_disk_cache().as_ref() - .and_then(|c| c.try_load_query_result(*tcx, id)); + let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx + .on_disk_cache().as_ref() + .and_then(|c| c.try_load_query_result(*tcx, id)); - typeck_results.map(|x| &*tcx.arena.alloc(x)) - } + typeck_results.map(|x| &*tcx.arena.alloc(x)) } } diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs index de5beffb5c5..1123cab8076 100644 --- a/compiler/rustc_middle/src/traits/mod.rs +++ b/compiler/rustc_middle/src/traits/mod.rs @@ -402,7 +402,7 @@ impl ObligationCauseCode<'_> { // `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger. #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(ObligationCauseCode<'_>, 40); +static_assert_size!(ObligationCauseCode<'_>, 48); #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum StatementAsExpression { @@ -440,11 +440,11 @@ pub struct IfExpressionCause { #[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)] pub struct DerivedObligationCause<'tcx> { - /// The trait reference of the parent obligation that led to the + /// The trait predicate of the parent obligation that led to the /// current obligation. Note that only trait obligations lead to - /// derived obligations, so we just store the trait reference here + /// derived obligations, so we just store the trait predicate here /// directly. - pub parent_trait_ref: ty::PolyTraitRef<'tcx>, + pub parent_trait_pred: ty::PolyTraitPredicate<'tcx>, /// The parent trait had this cause. pub parent_code: Lrc<ObligationCauseCode<'tcx>>, @@ -566,7 +566,7 @@ pub enum ImplSource<'tcx, N> { TraitAlias(ImplSourceTraitAliasData<'tcx, N>), /// ImplSource for a `const Drop` implementation. 
- ConstDrop(ImplSourceConstDropData), + ConstDrop(ImplSourceConstDropData<N>), } impl<'tcx, N> ImplSource<'tcx, N> { @@ -581,10 +581,10 @@ impl<'tcx, N> ImplSource<'tcx, N> { ImplSource::Object(d) => d.nested, ImplSource::FnPointer(d) => d.nested, ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) - | ImplSource::Pointee(ImplSourcePointeeData) - | ImplSource::ConstDrop(ImplSourceConstDropData) => Vec::new(), + | ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(), ImplSource::TraitAlias(d) => d.nested, ImplSource::TraitUpcasting(d) => d.nested, + ImplSource::ConstDrop(i) => i.nested, } } @@ -599,10 +599,10 @@ impl<'tcx, N> ImplSource<'tcx, N> { ImplSource::Object(d) => &d.nested, ImplSource::FnPointer(d) => &d.nested, ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) - | ImplSource::Pointee(ImplSourcePointeeData) - | ImplSource::ConstDrop(ImplSourceConstDropData) => &[], + | ImplSource::Pointee(ImplSourcePointeeData) => &[], ImplSource::TraitAlias(d) => &d.nested, ImplSource::TraitUpcasting(d) => &d.nested, + ImplSource::ConstDrop(i) => &i.nested, } } @@ -661,9 +661,9 @@ impl<'tcx, N> ImplSource<'tcx, N> { nested: d.nested.into_iter().map(f).collect(), }) } - ImplSource::ConstDrop(ImplSourceConstDropData) => { - ImplSource::ConstDrop(ImplSourceConstDropData) - } + ImplSource::ConstDrop(i) => ImplSource::ConstDrop(ImplSourceConstDropData { + nested: i.nested.into_iter().map(f).collect(), + }), } } } @@ -755,8 +755,10 @@ pub struct ImplSourceDiscriminantKindData; #[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)] pub struct ImplSourcePointeeData; -#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)] -pub struct ImplSourceConstDropData; +#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)] +pub struct ImplSourceConstDropData<N> { + pub nested: Vec<N>, +} #[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, TypeFoldable, Lift)] pub struct ImplSourceTraitAliasData<'tcx, N> { diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs index 71ee00c602a..e18f04d92ee 100644 --- a/compiler/rustc_middle/src/traits/select.rs +++ b/compiler/rustc_middle/src/traits/select.rs @@ -146,8 +146,8 @@ pub enum SelectionCandidate<'tcx> { BuiltinUnsizeCandidate, - /// Implementation of `const Drop`. - ConstDropCandidate, + /// Implementation of `const Drop`, optionally from a custom `impl const Drop`. + ConstDropCandidate(Option<DefId>), } /// The result of trait evaluation. The order is important diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs index aa2f37bd81a..6ce9f5eea34 100644 --- a/compiler/rustc_middle/src/traits/structural_impls.rs +++ b/compiler/rustc_middle/src/traits/structural_impls.rs @@ -120,6 +120,12 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitAliasData<'tcx, } } +impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceConstDropData<N> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ImplSourceConstDropData(nested={:?})", self.nested) + } +} + /////////////////////////////////////////////////////////////////////////// // Lift implementations @@ -127,5 +133,4 @@ TrivialTypeFoldableAndLiftImpls! 
{ super::IfExpressionCause, super::ImplSourceDiscriminantKindData, super::ImplSourcePointeeData, - super::ImplSourceConstDropData, } diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs index 2776370ba6f..c23d4eae1a4 100644 --- a/compiler/rustc_middle/src/ty/assoc.rs +++ b/compiler/rustc_middle/src/ty/assoc.rs @@ -44,8 +44,7 @@ impl AssocItemContainer { #[derive(Copy, Clone, Debug, PartialEq, HashStable, Eq, Hash)] pub struct AssocItem { pub def_id: DefId, - #[stable_hasher(project(name))] - pub ident: Ident, + pub name: Symbol, pub kind: AssocKind, pub vis: Visibility, pub defaultness: hir::Defaultness, @@ -61,6 +60,10 @@ pub struct AssocItem { } impl AssocItem { + pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident { + Ident::new(self.name, tcx.def_ident_span(self.def_id).unwrap()) + } + pub fn signature(&self, tcx: TyCtxt<'_>) -> String { match self.kind { ty::AssocKind::Fn => { @@ -70,9 +73,9 @@ impl AssocItem { // regions just fine, showing `fn(&MyType)`. tcx.fn_sig(self.def_id).skip_binder().to_string() } - ty::AssocKind::Type => format!("type {};", self.ident), + ty::AssocKind::Type => format!("type {};", self.name), ty::AssocKind::Const => { - format!("const {}: {:?};", self.ident, tcx.type_of(self.def_id)) + format!("const {}: {:?};", self.name, tcx.type_of(self.def_id)) } } } @@ -115,7 +118,7 @@ pub struct AssocItems<'tcx> { impl<'tcx> AssocItems<'tcx> { /// Constructs an `AssociatedItems` map from a series of `ty::AssocItem`s in definition order. pub fn new(items_in_def_order: impl IntoIterator<Item = &'tcx ty::AssocItem>) -> Self { - let items = items_in_def_order.into_iter().map(|item| (item.ident.name, item)).collect(); + let items = items_in_def_order.into_iter().map(|item| (item.name, item)).collect(); AssocItems { items } } @@ -149,7 +152,7 @@ impl<'tcx> AssocItems<'tcx> { ) -> Option<&ty::AssocItem> { self.filter_by_name_unhygienic(ident.name) .filter(|item| item.kind == kind) - .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id)) + .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id)) } /// Returns the associated item with the given name and any of `AssocKind`, if one exists. @@ -162,7 +165,7 @@ impl<'tcx> AssocItems<'tcx> { ) -> Option<&ty::AssocItem> { self.filter_by_name_unhygienic(ident.name) .filter(|item| kinds.contains(&item.kind)) - .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id)) + .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id)) } /// Returns the associated item with the given name in the given `Namespace`, if one exists. 
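An illustrative sketch (not from the patch) of how call sites adapt to the new `AssocItem` shape shown above: hygiene-insensitive code reads the interned `name` directly, while code that needs a span rebuilds the `Ident` on demand via `ident(tcx)`. The helper `describe_assoc_item` is invented for the example and only compiles inside the compiler tree against `rustc_middle`/`rustc_span`.

```rust
use rustc_middle::ty::{AssocItem, TyCtxt};
use rustc_span::symbol::{Ident, Symbol};

// Hypothetical helper, not part of the patch.
fn describe_assoc_item(tcx: TyCtxt<'_>, item: &AssocItem) -> String {
    // Cheap path: just the interned name, no hygiene or span information needed.
    let name: Symbol = item.name;
    // Span-carrying path: reconstruct the `Ident` from the definition's ident span,
    // as `AssocItem::ident` now does internally.
    let ident: Ident = item.ident(tcx);
    format!("{} (defined at {:?})", name, ident.span)
}
```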
@@ -175,6 +178,6 @@ impl<'tcx> AssocItems<'tcx> { ) -> Option<&ty::AssocItem> { self.filter_by_name_unhygienic(ident.name) .filter(|item| item.kind.namespace() == ns) - .find(|item| tcx.hygienic_eq(ident, item.ident, parent_def_id)) + .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id)) } } diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index db37d98614e..65b91eedf8a 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -14,7 +14,7 @@ use crate::mir::{ }; use crate::thir; use crate::ty::subst::SubstsRef; -use crate::ty::{self, List, Ty, TyCtxt}; +use crate::ty::{self, Ty, TyCtxt}; use rustc_data_structures::fx::FxHashMap; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use rustc_span::Span; @@ -71,7 +71,7 @@ pub trait TyEncoder<'tcx>: Encoder { /// `Decodable` can still be implemented in cases where `Decodable` is required /// by a trait bound. pub trait RefDecodable<'tcx, D: TyDecoder<'tcx>> { - fn decode(d: &mut D) -> Result<&'tcx Self, D::Error>; + fn decode(d: &mut D) -> &'tcx Self; } /// Encode the given value or a previously cached shorthand. @@ -172,13 +172,9 @@ pub trait TyDecoder<'tcx>: Decoder { fn position(&self) -> usize; - fn cached_ty_for_shorthand<F>( - &mut self, - shorthand: usize, - or_insert_with: F, - ) -> Result<Ty<'tcx>, Self::Error> + fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx> where - F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>; + F: FnOnce(&mut Self) -> Ty<'tcx>; fn with_position<F, R>(&mut self, pos: usize, f: F) -> R where @@ -188,35 +184,35 @@ pub trait TyDecoder<'tcx>: Decoder { (self.peek_byte() & (SHORTHAND_OFFSET as u8)) != 0 } - fn decode_alloc_id(&mut self) -> Result<AllocId, Self::Error>; + fn decode_alloc_id(&mut self) -> AllocId; } #[inline] fn decode_arena_allocable<'tcx, D, T: ArenaAllocatable<'tcx> + Decodable<D>>( decoder: &mut D, -) -> Result<&'tcx T, D::Error> +) -> &'tcx T where D: TyDecoder<'tcx>, { - Ok(decoder.tcx().arena.alloc(Decodable::decode(decoder)?)) + decoder.tcx().arena.alloc(Decodable::decode(decoder)) } #[inline] fn decode_arena_allocable_slice<'tcx, D, T: ArenaAllocatable<'tcx> + Decodable<D>>( decoder: &mut D, -) -> Result<&'tcx [T], D::Error> +) -> &'tcx [T] where D: TyDecoder<'tcx>, { - Ok(decoder.tcx().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder)?)) + decoder.tcx().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder)) } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for Ty<'tcx> { #[allow(rustc::usage_of_ty_tykind)] - fn decode(decoder: &mut D) -> Result<Ty<'tcx>, D::Error> { + fn decode(decoder: &mut D) -> Ty<'tcx> { // Handle shorthands first, if we have a usize > 0x80. 
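A toy model (my own sketch, not rustc's actual wire format) of the shorthand scheme referred to by the comment above: a type is encoded either inline, or as a back-reference to the position where the same kind was first encoded, offset by `SHORTHAND_OFFSET` so the two cases can be distinguished from the leading byte.

```rust
const SHORTHAND_OFFSET: usize = 0x80;

enum TyEncoding {
    /// The kind is written out in full at the current position.
    Inline,
    /// `pos + SHORTHAND_OFFSET` was written instead; decoding jumps back to `pos`
    /// (and a cache keyed on `pos` ensures each target is decoded only once).
    Shorthand { pos: usize },
}

// Classify a decoded leading `usize`: values below the offset are ordinary
// variant discriminants, values at or above it are back-references.
fn classify(lead: usize) -> TyEncoding {
    if lead >= SHORTHAND_OFFSET {
        TyEncoding::Shorthand { pos: lead - SHORTHAND_OFFSET }
    } else {
        TyEncoding::Inline
    }
}
```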
if decoder.positioned_at_shorthand() { - let pos = decoder.read_usize()?; + let pos = decoder.read_usize(); assert!(pos >= SHORTHAND_OFFSET); let shorthand = pos - SHORTHAND_OFFSET; @@ -225,87 +221,89 @@ impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for Ty<'tcx> { }) } else { let tcx = decoder.tcx(); - Ok(tcx.mk_ty(ty::TyKind::decode(decoder)?)) + tcx.mk_ty(ty::TyKind::decode(decoder)) } } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Binder<'tcx, ty::PredicateKind<'tcx>> { - fn decode(decoder: &mut D) -> Result<ty::Binder<'tcx, ty::PredicateKind<'tcx>>, D::Error> { - let bound_vars = Decodable::decode(decoder)?; + fn decode(decoder: &mut D) -> ty::Binder<'tcx, ty::PredicateKind<'tcx>> { + let bound_vars = Decodable::decode(decoder); // Handle shorthands first, if we have a usize > 0x80. - Ok(ty::Binder::bind_with_vars( + ty::Binder::bind_with_vars( if decoder.positioned_at_shorthand() { - let pos = decoder.read_usize()?; + let pos = decoder.read_usize(); assert!(pos >= SHORTHAND_OFFSET); let shorthand = pos - SHORTHAND_OFFSET; - decoder.with_position(shorthand, ty::PredicateKind::decode)? + decoder.with_position(shorthand, ty::PredicateKind::decode) } else { - ty::PredicateKind::decode(decoder)? + ty::PredicateKind::decode(decoder) }, bound_vars, - )) + ) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Predicate<'tcx> { - fn decode(decoder: &mut D) -> Result<ty::Predicate<'tcx>, D::Error> { - let predicate_kind = Decodable::decode(decoder)?; - let predicate = decoder.tcx().mk_predicate(predicate_kind); - Ok(predicate) + fn decode(decoder: &mut D) -> ty::Predicate<'tcx> { + let predicate_kind = Decodable::decode(decoder); + decoder.tcx().mk_predicate(predicate_kind) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for SubstsRef<'tcx> { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { - let len = decoder.read_usize()?; + fn decode(decoder: &mut D) -> Self { + let len = decoder.read_usize(); let tcx = decoder.tcx(); - tcx.mk_substs((0..len).map(|_| Decodable::decode(decoder))) + tcx.mk_substs( + (0..len).map::<ty::subst::GenericArg<'tcx>, _>(|_| Decodable::decode(decoder)), + ) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for mir::Place<'tcx> { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { - let local: mir::Local = Decodable::decode(decoder)?; - let len = decoder.read_usize()?; - let projection: &'tcx List<mir::PlaceElem<'tcx>> = - decoder.tcx().mk_place_elems((0..len).map(|_| Decodable::decode(decoder)))?; - Ok(mir::Place { local, projection }) + fn decode(decoder: &mut D) -> Self { + let local: mir::Local = Decodable::decode(decoder); + let len = decoder.read_usize(); + let projection = decoder.tcx().mk_place_elems( + (0..len).map::<mir::PlaceElem<'tcx>, _>(|_| Decodable::decode(decoder)), + ); + mir::Place { local, projection } } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Region<'tcx> { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { - Ok(decoder.tcx().mk_region(Decodable::decode(decoder)?)) + fn decode(decoder: &mut D) -> Self { + decoder.tcx().mk_region(Decodable::decode(decoder)) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for CanonicalVarInfos<'tcx> { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { - let len = decoder.read_usize()?; - let interned: Result<Vec<CanonicalVarInfo<'tcx>>, _> = + fn decode(decoder: &mut D) -> Self { + let len = decoder.read_usize(); + let interned: Vec<CanonicalVarInfo<'tcx>> = (0..len).map(|_| Decodable::decode(decoder)).collect(); - 
Ok(decoder.tcx().intern_canonical_var_infos(interned?.as_slice())) + decoder.tcx().intern_canonical_var_infos(interned.as_slice()) } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for AllocId { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { + fn decode(decoder: &mut D) -> Self { decoder.decode_alloc_id() } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::SymbolName<'tcx> { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { - Ok(ty::SymbolName::new(decoder.tcx(), &decoder.read_str()?)) + fn decode(decoder: &mut D) -> Self { + ty::SymbolName::new(decoder.tcx(), &decoder.read_str()) } } macro_rules! impl_decodable_via_ref { ($($t:ty),+) => { $(impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for $t { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { + fn decode(decoder: &mut D) -> Self { RefDecodable::decode(decoder) } })* @@ -313,77 +311,73 @@ macro_rules! impl_decodable_via_ref { } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<Ty<'tcx>> { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - let len = decoder.read_usize()?; - decoder.tcx().mk_type_list((0..len).map(|_| Decodable::decode(decoder))) + fn decode(decoder: &mut D) -> &'tcx Self { + let len = decoder.read_usize(); + decoder.tcx().mk_type_list((0..len).map::<Ty<'tcx>, _>(|_| Decodable::decode(decoder))) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>> { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - let len = decoder.read_usize()?; - decoder.tcx().mk_poly_existential_predicates((0..len).map(|_| Decodable::decode(decoder))) + fn decode(decoder: &mut D) -> &'tcx Self { + let len = decoder.read_usize(); + decoder.tcx().mk_poly_existential_predicates( + (0..len).map::<ty::Binder<'tcx, _>, _>(|_| Decodable::decode(decoder)), + ) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::Const<'tcx> { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - Ok(decoder.tcx().mk_const(Decodable::decode(decoder)?)) + fn decode(decoder: &mut D) -> &'tcx Self { + decoder.tcx().mk_const(Decodable::decode(decoder)) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [ty::ValTree<'tcx>] { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - Ok(decoder.tcx().arena.alloc_from_iter( - (0..decoder.read_usize()?) - .map(|_| Decodable::decode(decoder)) - .collect::<Result<Vec<_>, _>>()?, - )) + fn decode(decoder: &mut D) -> &'tcx Self { + decoder.tcx().arena.alloc_from_iter( + (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(), + ) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for Allocation { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - Ok(decoder.tcx().intern_const_alloc(Decodable::decode(decoder)?)) + fn decode(decoder: &mut D) -> &'tcx Self { + decoder.tcx().intern_const_alloc(Decodable::decode(decoder)) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [(ty::Predicate<'tcx>, Span)] { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - Ok(decoder.tcx().arena.alloc_from_iter( - (0..decoder.read_usize()?) 
- .map(|_| Decodable::decode(decoder)) - .collect::<Result<Vec<_>, _>>()?, - )) + fn decode(decoder: &mut D) -> &'tcx Self { + decoder.tcx().arena.alloc_from_iter( + (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(), + ) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [thir::abstract_const::Node<'tcx>] { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - Ok(decoder.tcx().arena.alloc_from_iter( - (0..decoder.read_usize()?) - .map(|_| Decodable::decode(decoder)) - .collect::<Result<Vec<_>, _>>()?, - )) + fn decode(decoder: &mut D) -> &'tcx Self { + decoder.tcx().arena.alloc_from_iter( + (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(), + ) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [thir::abstract_const::NodeId] { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - Ok(decoder.tcx().arena.alloc_from_iter( - (0..decoder.read_usize()?) - .map(|_| Decodable::decode(decoder)) - .collect::<Result<Vec<_>, _>>()?, - )) + fn decode(decoder: &mut D) -> &'tcx Self { + decoder.tcx().arena.alloc_from_iter( + (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(), + ) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for ty::List<ty::BoundVariableKind> { - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { - let len = decoder.read_usize()?; - decoder.tcx().mk_bound_variable_kinds((0..len).map(|_| Decodable::decode(decoder))) + fn decode(decoder: &mut D) -> &'tcx Self { + let len = decoder.read_usize(); + decoder.tcx().mk_bound_variable_kinds( + (0..len).map::<ty::BoundVariableKind, _>(|_| Decodable::decode(decoder)), + ) } } @@ -405,7 +399,7 @@ macro_rules! __impl_decoder_methods { ($($name:ident -> $ty:ty;)*) => { $( #[inline] - fn $name(&mut self) -> Result<$ty, Self::Error> { + fn $name(&mut self) -> $ty { self.opaque.$name() } )* @@ -418,14 +412,14 @@ macro_rules! impl_arena_allocatable_decoder { [$name:ident: $ty:ty]) => { impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for $ty { #[inline] - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { + fn decode(decoder: &mut D) -> &'tcx Self { decode_arena_allocable(decoder) } } impl<'tcx, D: TyDecoder<'tcx>> RefDecodable<'tcx, D> for [$ty] { #[inline] - fn decode(decoder: &mut D) -> Result<&'tcx Self, D::Error> { + fn decode(decoder: &mut D) -> &'tcx Self { decode_arena_allocable_slice(decoder) } } @@ -456,10 +450,8 @@ macro_rules! implement_ty_decoder { use super::$DecoderName; impl<$($typaram ),*> Decoder for $DecoderName<$($typaram),*> { - type Error = String; - $crate::__impl_decoder_methods! { - read_nil -> (); + read_unit -> (); read_u128 -> u128; read_u64 -> u64; @@ -483,13 +475,9 @@ macro_rules! implement_ty_decoder { } #[inline] - fn read_raw_bytes_into(&mut self, bytes: &mut [u8]) -> Result<(), Self::Error> { + fn read_raw_bytes_into(&mut self, bytes: &mut [u8]) { self.opaque.read_raw_bytes_into(bytes) } - - fn error(&mut self, err: &str) -> Self::Error { - self.opaque.error(err) - } } } } @@ -505,9 +493,9 @@ macro_rules! 
impl_binder_encode_decode { } } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for ty::Binder<'tcx, $t> { - fn decode(decoder: &mut D) -> Result<Self, D::Error> { - let bound_vars = Decodable::decode(decoder)?; - Ok(ty::Binder::bind_with_vars(Decodable::decode(decoder)?, bound_vars)) + fn decode(decoder: &mut D) -> Self { + let bound_vars = Decodable::decode(decoder); + ty::Binder::bind_with_vars(Decodable::decode(decoder), bound_vars) } } )* diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index 1f4ebd03676..de45e1bb851 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -147,8 +147,8 @@ impl<S: Encoder> Encodable<S> for ScalarInt { } impl<D: Decoder> Decodable<D> for ScalarInt { - fn decode(d: &mut D) -> Result<ScalarInt, D::Error> { - Ok(ScalarInt { data: d.read_u128()?, size: d.read_u8()? }) + fn decode(d: &mut D) -> ScalarInt { + ScalarInt { data: d.read_u128(), size: d.read_u8() } } } diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index a7d7ee5efc8..d063494f2bc 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -113,6 +113,12 @@ pub struct CtxtInterners<'tcx> { bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>, layout: InternedSet<'tcx, Layout>, adt_def: InternedSet<'tcx, AdtDef>, + + /// `#[stable]` and `#[unstable]` attributes + stability: InternedSet<'tcx, attr::Stability>, + + /// `#[rustc_const_stable]` and `#[rustc_const_unstable]` attributes + const_stability: InternedSet<'tcx, attr::ConstStability>, } impl<'tcx> CtxtInterners<'tcx> { @@ -134,6 +140,8 @@ impl<'tcx> CtxtInterners<'tcx> { bound_variable_kinds: Default::default(), layout: Default::default(), adt_def: Default::default(), + stability: Default::default(), + const_stability: Default::default(), } } @@ -212,7 +220,7 @@ pub struct CommonLifetimes<'tcx> { /// `ReStatic` pub re_static: Region<'tcx>, - /// Erased region, used after type-checking + /// Erased region, used outside of type inference. pub re_erased: Region<'tcx>, } @@ -352,7 +360,7 @@ pub struct TypeckResults<'tcx> { field_indices: ItemLocalMap<usize>, /// Stores the types for various nodes in the AST. Note that this table - /// is not guaranteed to be populated until after typeck. See + /// is not guaranteed to be populated outside inference. See /// typeck::check::fn_ctxt for details. node_types: ItemLocalMap<Ty<'tcx>>, @@ -1035,12 +1043,6 @@ pub struct GlobalCtxt<'tcx> { /// Data layout specification for the current target. pub data_layout: TargetDataLayout, - /// `#[stable]` and `#[unstable]` attributes - stability_interner: ShardedHashMap<&'tcx attr::Stability, ()>, - - /// `#[rustc_const_stable]` and `#[rustc_const_unstable]` attributes - const_stability_interner: ShardedHashMap<&'tcx attr::ConstStability, ()>, - /// Stores memory for globals (statics/consts). pub(crate) alloc_map: Lock<interpret::AllocMap<'tcx>>, @@ -1092,16 +1094,6 @@ impl<'tcx> TyCtxt<'tcx> { self.create_memory_alloc(alloc) } - // FIXME(eddyb) move to `direct_interners!`. - pub fn intern_stability(self, stab: attr::Stability) -> &'tcx attr::Stability { - self.stability_interner.intern(stab, |stab| self.arena.alloc(stab)) - } - - // FIXME(eddyb) move to `direct_interners!`. 
- pub fn intern_const_stability(self, stab: attr::ConstStability) -> &'tcx attr::ConstStability { - self.const_stability_interner.intern(stab, |stab| self.arena.alloc(stab)) - } - /// Returns a range of the start/end indices specified with the /// `rustc_layout_scalar_valid_range` attribute. // FIXME(eddyb) this is an awkward spot for this method, maybe move it? @@ -1185,8 +1177,6 @@ impl<'tcx> TyCtxt<'tcx> { evaluation_cache: Default::default(), crate_name: Symbol::intern(crate_name), data_layout, - stability_interner: Default::default(), - const_stability_interner: Default::default(), alloc_map: Lock::new(interpret::AllocMap::new()), output_filenames: Arc::new(output_filenames), } @@ -1952,11 +1942,11 @@ impl<'tcx> TyCtxt<'tcx> { writeln!(fmt, "InternalSubsts interner: #{}", self.0.interners.substs.len())?; writeln!(fmt, "Region interner: #{}", self.0.interners.region.len())?; - writeln!(fmt, "Stability interner: #{}", self.0.stability_interner.len())?; + writeln!(fmt, "Stability interner: #{}", self.0.interners.stability.len())?; writeln!( fmt, "Const Stability interner: #{}", - self.0.const_stability_interner.len() + self.0.interners.const_stability.len() )?; writeln!( fmt, @@ -1973,7 +1963,10 @@ impl<'tcx> TyCtxt<'tcx> { } } -/// An entry in an interner. +// This type holds a `T` in the interner. The `T` is stored in the arena and +// this type just holds a pointer to it, but it still effectively owns it. It +// impls `Borrow` so that it can be looked up using the original +// (non-arena-memory-owning) types. struct Interned<'tcx, T: ?Sized>(&'tcx T); impl<'tcx, T: 'tcx + ?Sized> Clone for Interned<'tcx, T> { @@ -1981,6 +1974,7 @@ impl<'tcx, T: 'tcx + ?Sized> Clone for Interned<'tcx, T> { Interned(self.0) } } + impl<'tcx, T: 'tcx + ?Sized> Copy for Interned<'tcx, T> {} impl<'tcx, T: 'tcx + ?Sized> IntoPointer for Interned<'tcx, T> { @@ -1988,9 +1982,18 @@ impl<'tcx, T: 'tcx + ?Sized> IntoPointer for Interned<'tcx, T> { self.0 as *const _ as *const () } } -// N.B., an `Interned<Ty>` compares and hashes as a `TyKind`. + +#[allow(rustc::usage_of_ty_tykind)] +impl<'tcx> Borrow<TyKind<'tcx>> for Interned<'tcx, TyS<'tcx>> { + fn borrow<'a>(&'a self) -> &'a TyKind<'tcx> { + &self.0.kind() + } +} + impl<'tcx> PartialEq for Interned<'tcx, TyS<'tcx>> { fn eq(&self, other: &Interned<'tcx, TyS<'tcx>>) -> bool { + // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals + // `x == y`. self.0.kind() == other.0.kind() } } @@ -1999,19 +2002,21 @@ impl<'tcx> Eq for Interned<'tcx, TyS<'tcx>> {} impl<'tcx> Hash for Interned<'tcx, TyS<'tcx>> { fn hash<H: Hasher>(&self, s: &mut H) { + // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`. self.0.kind().hash(s) } } -#[allow(rustc::usage_of_ty_tykind)] -impl<'tcx> Borrow<TyKind<'tcx>> for Interned<'tcx, TyS<'tcx>> { - fn borrow<'a>(&'a self) -> &'a TyKind<'tcx> { - &self.0.kind() +impl<'tcx> Borrow<Binder<'tcx, PredicateKind<'tcx>>> for Interned<'tcx, PredicateInner<'tcx>> { + fn borrow<'a>(&'a self) -> &'a Binder<'tcx, PredicateKind<'tcx>> { + &self.0.kind } } -// N.B., an `Interned<PredicateInner>` compares and hashes as a `PredicateKind`. + impl<'tcx> PartialEq for Interned<'tcx, PredicateInner<'tcx>> { fn eq(&self, other: &Interned<'tcx, PredicateInner<'tcx>>) -> bool { + // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals + // `x == y`. 
self.0.kind == other.0.kind } } @@ -2020,19 +2025,21 @@ impl<'tcx> Eq for Interned<'tcx, PredicateInner<'tcx>> {} impl<'tcx> Hash for Interned<'tcx, PredicateInner<'tcx>> { fn hash<H: Hasher>(&self, s: &mut H) { + // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`. self.0.kind.hash(s) } } -impl<'tcx> Borrow<Binder<'tcx, PredicateKind<'tcx>>> for Interned<'tcx, PredicateInner<'tcx>> { - fn borrow<'a>(&'a self) -> &'a Binder<'tcx, PredicateKind<'tcx>> { - &self.0.kind +impl<'tcx, T> Borrow<[T]> for Interned<'tcx, List<T>> { + fn borrow<'a>(&'a self) -> &'a [T] { + &self.0[..] } } -// N.B., an `Interned<List<T>>` compares and hashes as its elements. impl<'tcx, T: PartialEq> PartialEq for Interned<'tcx, List<T>> { fn eq(&self, other: &Interned<'tcx, List<T>>) -> bool { + // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals + // `x == y`. self.0[..] == other.0[..] } } @@ -2041,20 +2048,23 @@ impl<'tcx, T: Eq> Eq for Interned<'tcx, List<T>> {} impl<'tcx, T: Hash> Hash for Interned<'tcx, List<T>> { fn hash<H: Hasher>(&self, s: &mut H) { + // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`. self.0[..].hash(s) } } -impl<'tcx, T> Borrow<[T]> for Interned<'tcx, List<T>> { - fn borrow<'a>(&'a self) -> &'a [T] { - &self.0[..] - } -} - macro_rules! direct_interners { ($($name:ident: $method:ident($ty:ty),)+) => { - $(impl<'tcx> PartialEq for Interned<'tcx, $ty> { + $(impl<'tcx> Borrow<$ty> for Interned<'tcx, $ty> { + fn borrow<'a>(&'a self) -> &'a $ty { + &self.0 + } + } + + impl<'tcx> PartialEq for Interned<'tcx, $ty> { fn eq(&self, other: &Self) -> bool { + // The `Borrow` trait requires that `x.borrow() == y.borrow()` + // equals `x == y`. self.0 == other.0 } } @@ -2063,16 +2073,12 @@ macro_rules! direct_interners { impl<'tcx> Hash for Interned<'tcx, $ty> { fn hash<H: Hasher>(&self, s: &mut H) { + // The `Borrow` trait requires that `x.borrow().hash(s) == + // x.hash(s)`. self.0.hash(s) } } - impl<'tcx> Borrow<$ty> for Interned<'tcx, $ty> { - fn borrow<'a>(&'a self) -> &'a $ty { - &self.0 - } - } - impl<'tcx> TyCtxt<'tcx> { pub fn $method(self, v: $ty) -> &'tcx $ty { self.interners.$name.intern(v, |v| { @@ -2089,6 +2095,8 @@ direct_interners! { const_allocation: intern_const_alloc(Allocation), layout: intern_layout(Layout), adt_def: intern_adt_def(AdtDef), + stability: intern_stability(attr::Stability), + const_stability: intern_const_stability(attr::ConstStability), } macro_rules! slice_interners { @@ -2786,8 +2794,33 @@ pub trait InternIteratorElement<T, R>: Sized { impl<T, R> InternIteratorElement<T, R> for T { type Output = R; - fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output { - f(&iter.collect::<SmallVec<[_; 8]>>()) + fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>( + mut iter: I, + f: F, + ) -> Self::Output { + // This code is hot enough that it's worth specializing for the most + // common length lists, to avoid the overhead of `SmallVec` creation. + // Lengths 0, 1, and 2 typically account for ~95% of cases. If + // `size_hint` is incorrect a panic will occur via an `unwrap` or an + // `assert`. 
+ match iter.size_hint() { + (0, Some(0)) => { + assert!(iter.next().is_none()); + f(&[]) + } + (1, Some(1)) => { + let t0 = iter.next().unwrap(); + assert!(iter.next().is_none()); + f(&[t0]) + } + (2, Some(2)) => { + let t0 = iter.next().unwrap(); + let t1 = iter.next().unwrap(); + assert!(iter.next().is_none()); + f(&[t0, t1]) + } + _ => f(&iter.collect::<SmallVec<[_; 8]>>()), + } } } @@ -2797,6 +2830,7 @@ where { type Output = R; fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output { + // This code isn't hot. f(&iter.cloned().collect::<SmallVec<[_; 8]>>()) } } @@ -2809,10 +2843,15 @@ impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> { ) -> Self::Output { // This code is hot enough that it's worth specializing for the most // common length lists, to avoid the overhead of `SmallVec` creation. - // The match arms are in order of frequency. The 1, 2, and 0 cases are - // typically hit in ~95% of cases. We assume that if the upper and - // lower bounds from `size_hint` agree they are correct. + // Lengths 0, 1, and 2 typically account for ~95% of cases. If + // `size_hint` is incorrect a panic will occur via an `unwrap` or an + // `assert`, unless a failure happens first, in which case the result + // will be an error anyway. Ok(match iter.size_hint() { + (0, Some(0)) => { + assert!(iter.next().is_none()); + f(&[]) + } (1, Some(1)) => { let t0 = iter.next().unwrap()?; assert!(iter.next().is_none()); @@ -2824,10 +2863,6 @@ impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> { assert!(iter.next().is_none()); f(&[t0, t1]) } - (0, Some(0)) => { - assert!(iter.next().is_none()); - f(&[]) - } _ => f(&iter.collect::<Result<SmallVec<[_; 8]>, _>>()?), }) } diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 5bb687512f3..5c4a4cdde25 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -972,10 +972,10 @@ fn foo(&self) -> Self::T { String::new() } let (span, sugg) = if has_params { let pos = span.hi() - BytePos(1); let span = Span::new(pos, pos, span.ctxt(), span.parent()); - (span, format!(", {} = {}", assoc.ident, ty)) + (span, format!(", {} = {}", assoc.ident(self), ty)) } else { let item_args = self.format_generic_args(assoc_substs); - (span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident, item_args, ty)) + (span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident(self), item_args, ty)) }; db.span_suggestion_verbose(span, msg, sugg, MaybeIncorrect); return true; diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 4bc3e23f4a5..e7a8e71ce71 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -119,6 +119,8 @@ mod sty; // Data types +pub type RegisteredTools = FxHashSet<Ident>; + #[derive(Debug)] pub struct ResolverOutputs { pub definitions: rustc_hir::definitions::Definitions, @@ -141,6 +143,7 @@ pub struct ResolverOutputs { /// Mapping from ident span to path span for paths that don't exist as written, but that /// exist under `std`. For example, wrote `str::from_utf8` instead of `std::str::from_utf8`. pub confused_type_with_std_module: FxHashMap<Span, Span>, + pub registered_tools: RegisteredTools, } #[derive(Clone, Copy, Debug)] @@ -376,15 +379,28 @@ pub struct CReaderCacheKey { pub pos: usize, } +/// Represents a type. +/// +/// IMPORTANT: Every `TyS` is *required* to have unique contents. The type's +/// correctness relies on this, *but it does not enforce it*. 
Therefore, any +/// code that creates a `TyS` must ensure uniqueness itself. In practice this +/// is achieved by interning. #[allow(rustc::usage_of_ty_tykind)] pub struct TyS<'tcx> { /// This field shouldn't be used directly and may be removed in the future. /// Use `TyS::kind()` instead. kind: TyKind<'tcx>, + + /// This field provides fast access to information that is also contained + /// in `kind`. + /// /// This field shouldn't be used directly and may be removed in the future. /// Use `TyS::flags()` instead. flags: TypeFlags, + /// This field provides fast access to information that is also contained + /// in `kind`. + /// /// This is a kind of confusing thing: it stores the smallest /// binder such that /// @@ -436,6 +452,8 @@ impl<'tcx> PartialOrd for TyS<'tcx> { impl<'tcx> PartialEq for TyS<'tcx> { #[inline] fn eq(&self, other: &TyS<'tcx>) -> bool { + // Pointer equality implies equality (due to the unique contents + // assumption). ptr::eq(self, other) } } @@ -443,6 +461,8 @@ impl<'tcx> Eq for TyS<'tcx> {} impl<'tcx> Hash for TyS<'tcx> { fn hash<H: Hasher>(&self, s: &mut H) { + // Pointer hashing is sufficient (due to the unique contents + // assumption). (self as *const TyS<'_>).hash(s) } } @@ -746,6 +766,17 @@ impl<'tcx> TraitPredicate<'tcx> { *param_env = param_env.with_constness(self.constness.and(param_env.constness())) } } + + /// Remap the constness of this predicate before emitting it for diagnostics. + pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) { + // this is different from `remap_constness` in that callers want to print this predicate + // in case of selection errors. `T: ~const Drop` bounds cannot end up here when the + // param_env is not const because it is always satisfied in non-const contexts. + if let hir::Constness::NotConst = param_env.constness() { + self.constness = ty::BoundConstness::NotConst; + } + } + pub fn def_id(self) -> DefId { self.trait_ref.def_id } @@ -753,6 +784,11 @@ impl<'tcx> TraitPredicate<'tcx> { pub fn self_ty(self) -> Ty<'tcx> { self.trait_ref.self_ty() } + + #[inline] + pub fn is_const_if_const(self) -> bool { + self.constness == BoundConstness::ConstIfConst + } } impl<'tcx> PolyTraitPredicate<'tcx> { @@ -764,6 +800,19 @@ impl<'tcx> PolyTraitPredicate<'tcx> { pub fn self_ty(self) -> ty::Binder<'tcx, Ty<'tcx>> { self.map_bound(|trait_ref| trait_ref.self_ty()) } + + /// Remap the constness of this predicate before emitting it for diagnostics. + pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) { + *self = self.map_bound(|mut p| { + p.remap_constness_diag(param_env); + p + }); + } + + #[inline] + pub fn is_const_if_const(self) -> bool { + self.skip_binder().is_const_if_const() + } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] @@ -1349,6 +1398,11 @@ impl<'tcx> ParamEnv<'tcx> { self.packed.tag().constness } + + #[inline] + pub fn is_const(self) -> bool { + self.packed.tag().constness == hir::Constness::Const + } + /// Construct a trait environment with no where-clauses in scope /// where the values of all `impl Trait` and other hidden types /// are revealed.
This is suitable for monomorphized, post-typeck @@ -1464,6 +1518,7 @@ impl<'tcx> PolyTraitRef<'tcx> { polarity: ty::ImplPolarity::Positive, }) } + #[inline] pub fn without_const(self) -> PolyTraitPredicate<'tcx> { self.with_constness(BoundConstness::NotConst) diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs index 84ab42a760b..b3b2bb4459f 100644 --- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs +++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs @@ -34,8 +34,8 @@ impl<'tcx> TyCtxt<'tcx> { /// Erase the regions in `value` and then fully normalize all the /// types found within. The result will also have regions erased. /// - /// This is appropriate to use only after type-check: it assumes - /// that normalization will succeed, for example. + /// This should only be used outside of type inference. For example, + /// it assumes that normalization will succeed. pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T where T: TypeFoldable<'tcx>, diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index bbdaf248a9e..ddcc8680d83 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -908,7 +908,7 @@ pub trait PrettyPrinter<'tcx>: if !first { p!(", "); } - p!(write("{} = ", self.tcx().associated_item(assoc_item_def_id).ident)); + p!(write("{} = ", self.tcx().associated_item(assoc_item_def_id).name)); match term.skip_binder() { Term::Ty(ty) => { @@ -2413,6 +2413,29 @@ impl<'tcx> ty::Binder<'tcx, ty::TraitRef<'tcx>> { } } +#[derive(Copy, Clone, TypeFoldable, Lift)] +pub struct TraitPredPrintModifiersAndPath<'tcx>(ty::TraitPredicate<'tcx>); + +impl<'tcx> fmt::Debug for TraitPredPrintModifiersAndPath<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl<'tcx> ty::TraitPredicate<'tcx> { + pub fn print_modifiers_and_trait_path(self) -> TraitPredPrintModifiersAndPath<'tcx> { + TraitPredPrintModifiersAndPath(self) + } +} + +impl<'tcx> ty::PolyTraitPredicate<'tcx> { + pub fn print_modifiers_and_trait_path( + self, + ) -> ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>> { + self.map_bound(TraitPredPrintModifiersAndPath) + } +} + forward_display_to_print! { Ty<'tcx>, &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>, @@ -2427,6 +2450,7 @@ forward_display_to_print! { ty::Binder<'tcx, TraitRefPrintOnlyTraitName<'tcx>>, ty::Binder<'tcx, ty::FnSig<'tcx>>, ty::Binder<'tcx, ty::TraitPredicate<'tcx>>, + ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>>, ty::Binder<'tcx, ty::SubtypePredicate<'tcx>>, ty::Binder<'tcx, ty::ProjectionPredicate<'tcx>>, ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>, @@ -2455,7 +2479,7 @@ define_print_and_forward_display! { } ty::ExistentialProjection<'tcx> { - let name = cx.tcx().associated_item(self.item_def_id).ident; + let name = cx.tcx().associated_item(self.item_def_id).name; p!(write("{} = ", name), print(self.term)) } @@ -2491,6 +2515,18 @@ define_print_and_forward_display! 
{ p!(print_def_path(self.0.def_id, &[])); } + TraitPredPrintModifiersAndPath<'tcx> { + if let ty::BoundConstness::ConstIfConst = self.0.constness { + p!("~const ") + } + + if let ty::ImplPolarity::Negative = self.0.polarity { + p!("!") + } + + p!(print(self.0.trait_ref.print_only_trait_path())); + } + ty::ParamTy { p!(write("{}", self.name)) } @@ -2508,8 +2544,11 @@ define_print_and_forward_display! { } ty::TraitPredicate<'tcx> { - p!(print(self.trait_ref.self_ty()), ": ", - print(self.trait_ref.print_only_trait_path())) + p!(print(self.trait_ref.self_ty()), ": "); + if let ty::BoundConstness::ConstIfConst = self.constness { + p!("~const "); + } + p!(print(self.trait_ref.print_only_trait_path())) } ty::ProjectionPredicate<'tcx> { diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 20db25f7899..7d4af6cfa40 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -1464,11 +1464,11 @@ pub enum RegionKind { /// Static data that has an "infinite" lifetime. Top in the region lattice. ReStatic, - /// A region variable. Should not exist after typeck. + /// A region variable. Should not exist outside of type inference. ReVar(RegionVid), /// A placeholder region -- basically, the higher-ranked version of `ReFree`. - /// Should not exist after typeck. + /// Should not exist outside of type inference. RePlaceholder(ty::PlaceholderRegion), /// Empty lifetime is for data that is never accessed. We tag the @@ -1810,7 +1810,7 @@ impl<'tcx> TyS<'tcx> { pub fn sequence_element_type(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> { match self.kind() { Array(ty, _) | Slice(ty) => ty, - Str => tcx.mk_mach_uint(ty::UintTy::U8), + Str => tcx.types.u8, _ => bug!("`sequence_element_type` called on non-sequence value: {}", self), } } diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs index 63e9b58584c..cf97344f18e 100644 --- a/compiler/rustc_middle/src/ty/subst.rs +++ b/compiler/rustc_middle/src/ty/subst.rs @@ -180,8 +180,8 @@ impl<'tcx, E: TyEncoder<'tcx>> Encodable<E> for GenericArg<'tcx> { } impl<'tcx, D: TyDecoder<'tcx>> Decodable<D> for GenericArg<'tcx> { - fn decode(d: &mut D) -> Result<GenericArg<'tcx>, D::Error> { - Ok(GenericArgKind::decode(d)?.pack()) + fn decode(d: &mut D) -> GenericArg<'tcx> { + GenericArgKind::decode(d).pack() } } diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 8793264a47f..96c27d649e4 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -1041,6 +1041,42 @@ pub fn needs_drop_components<'tcx>( } } +pub fn is_trivially_const_drop<'tcx>(ty: Ty<'tcx>) -> bool { + match *ty.kind() { + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Infer(ty::IntVar(_)) + | ty::Infer(ty::FloatVar(_)) + | ty::Str + | ty::RawPtr(_) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::Never + | ty::Foreign(_) => true, + + ty::Opaque(..) + | ty::Dynamic(..) + | ty::Error(_) + | ty::Bound(..) + | ty::Param(_) + | ty::Placeholder(_) + | ty::Projection(_) + | ty::Infer(_) => false, + + // Not trivial because they have components, and instead of looking inside, + // we'll just perform trait selection. + ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(_) | ty::Adt(..) 
=> false, + + ty::Array(ty, _) | ty::Slice(ty) => is_trivially_const_drop(ty), + + ty::Tuple(tys) => tys.iter().all(|ty| is_trivially_const_drop(ty.expect_ty())), + } +} + // Does the equivalent of // ``` // let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>(); diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs index 85950d82419..3294f2cf641 100644 --- a/compiler/rustc_mir_build/src/build/matches/mod.rs +++ b/compiler/rustc_mir_build/src/build/matches/mod.rs @@ -1347,23 +1347,22 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let mut otherwise = None; for match_pair in match_pairs { - if let PatKind::Or { ref pats } = *match_pair.pattern.kind { - let or_span = match_pair.pattern.span; - let place = match_pair.place; - - first_candidate.visit_leaves(|leaf_candidate| { - self.test_or_pattern( - leaf_candidate, - &mut otherwise, - pats, - or_span, - place.clone(), - fake_borrows, - ); - }); - } else { + let PatKind::Or { ref pats } = &*match_pair.pattern.kind else { bug!("Or-patterns should have been sorted to the end"); - } + }; + let or_span = match_pair.pattern.span; + let place = match_pair.place; + + first_candidate.visit_leaves(|leaf_candidate| { + self.test_or_pattern( + leaf_candidate, + &mut otherwise, + pats, + or_span, + place.clone(), + fake_borrows, + ); + }); } let remainder_start = otherwise.unwrap_or_else(|| self.cfg.start_new_block()); diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs index 7ed5d1d67ab..f4bf28bfa5c 100644 --- a/compiler/rustc_mir_build/src/build/matches/test.rs +++ b/compiler/rustc_mir_build/src/build/matches/test.rs @@ -88,11 +88,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { switch_ty: Ty<'tcx>, options: &mut FxIndexMap<&'tcx ty::Const<'tcx>, u128>, ) -> bool { - let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) { - Some(match_pair) => match_pair, - _ => { - return false; - } + let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else { + return false; }; match *match_pair.pattern.kind { diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs index d348aaa899e..b21ca6028a2 100644 --- a/compiler/rustc_mir_build/src/lints.rs +++ b/compiler/rustc_mir_build/src/lints.rs @@ -33,6 +33,9 @@ crate fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { if let Some(NonRecursive) = TriColorDepthFirstSearch::new(&body).run_from_start(&mut vis) { return; } + if vis.reachable_recursive_calls.is_empty() { + return; + } vis.reachable_recursive_calls.sort(); @@ -148,13 +151,14 @@ impl<'mir, 'tcx> TriColorVisitor<&'mir Body<'tcx>> for Search<'mir, 'tcx> { } fn ignore_edge(&mut self, bb: BasicBlock, target: BasicBlock) -> bool { + let terminator = self.body[bb].terminator(); + if terminator.unwind() == Some(&Some(target)) && terminator.successors().count() > 1 { + return true; + } // Don't traverse successors of recursive calls or false CFG edges. match self.body[bb].terminator().kind { TerminatorKind::Call { ref func, .. } => self.is_recursive_call(func), - - TerminatorKind::FalseUnwind { unwind: Some(imaginary_target), .. } - | TerminatorKind::FalseEdge { imaginary_target, .. } => imaginary_target == target, - + TerminatorKind::FalseEdge { imaginary_target, .. 
} => imaginary_target == target, _ => false, } } diff --git a/compiler/rustc_mir_transform/src/coverage/debug.rs b/compiler/rustc_mir_transform/src/coverage/debug.rs index c61ee6f7e6c..ce8b187a744 100644 --- a/compiler/rustc_mir_transform/src/coverage/debug.rs +++ b/compiler/rustc_mir_transform/src/coverage/debug.rs @@ -111,6 +111,7 @@ use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph}; use super::spans::CoverageSpan; +use itertools::Itertools; use rustc_middle::mir::create_dump_file; use rustc_middle::mir::generic_graphviz::GraphvizWriter; use rustc_middle::mir::spanview::{self, SpanViewable}; @@ -739,7 +740,6 @@ pub(super) fn dump_coverage_graphviz<'tcx>( ) } }) - .collect::<Vec<_>>() .join("\n ") )); } @@ -768,7 +768,6 @@ fn bcb_to_string_sections<'tcx>( .map(|expression| { format!("Intermediate {}", debug_counters.format_counter(expression)) }) - .collect::<Vec<_>>() .join("\n"), ); } @@ -783,7 +782,6 @@ fn bcb_to_string_sections<'tcx>( covspan.format(tcx, mir_body) ) }) - .collect::<Vec<_>>() .join("\n"), ); } @@ -793,7 +791,6 @@ fn bcb_to_string_sections<'tcx>( dependency_counters .iter() .map(|counter| debug_counters.format_counter(counter)) - .collect::<Vec<_>>() .join(" \n"), )); } diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs index a25402a1ff9..57862b6628d 100644 --- a/compiler/rustc_mir_transform/src/coverage/graph.rs +++ b/compiler/rustc_mir_transform/src/coverage/graph.rs @@ -1,5 +1,6 @@ use super::Error; +use itertools::Itertools; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::graph::dominators::{self, Dominators}; use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode}; @@ -418,18 +419,11 @@ impl BasicCoverageBlockData { pub fn take_edge_counters( &mut self, ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> { - self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter())) + self.edge_from_bcbs.take().map(|m| m.into_iter()) } pub fn id(&self) -> String { - format!( - "@{}", - self.basic_blocks - .iter() - .map(|bb| bb.index().to_string()) - .collect::<Vec<_>>() - .join(ID_SEPARATOR) - ) + format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR)) } } diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index a9161580bc6..d1cb2826ded 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -1,6 +1,7 @@ use super::debug::term_type; use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB}; +use itertools::Itertools; use rustc_data_structures::graph::WithNumNodes; use rustc_middle::mir::spanview::source_range_no_file; use rustc_middle::mir::{ @@ -27,7 +28,7 @@ impl CoverageStatement { let stmt = &mir_body[bb].statements[stmt_index]; format!( "{}: @{}[{}]: {:?}", - source_range_no_file(tcx, &span), + source_range_no_file(tcx, span), bb.index(), stmt_index, stmt @@ -37,7 +38,7 @@ impl CoverageStatement { let term = mir_body[bb].terminator(); format!( "{}: @{}.{}: {:?}", - source_range_no_file(tcx, &span), + source_range_no_file(tcx, span), bb.index(), term_type(&term.kind), term.kind @@ -154,7 +155,7 @@ impl CoverageSpan { pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String { format!( "{}\n {}", - source_range_no_file(tcx, &self.span), + source_range_no_file(tcx, self.span), 
self.format_coverage_statements(tcx, mir_body).replace('\n', "\n "), ) } @@ -169,11 +170,7 @@ impl CoverageSpan { CoverageStatement::Statement(bb, _, index) => (bb, index), CoverageStatement::Terminator(bb, _) => (bb, usize::MAX), }); - sorted_coverage_statements - .iter() - .map(|covstmt| covstmt.format(tcx, mir_body)) - .collect::<Vec<_>>() - .join("\n") + sorted_coverage_statements.iter().map(|covstmt| covstmt.format(tcx, mir_body)).join("\n") } /// If the span is part of a macro, returns the macro name symbol. diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs index b9c79d4cf2d..62ea2538ff0 100644 --- a/compiler/rustc_mir_transform/src/coverage/tests.rs +++ b/compiler/rustc_mir_transform/src/coverage/tests.rs @@ -31,6 +31,7 @@ use super::spans; use coverage_test_macros::let_bcb; +use itertools::Itertools; use rustc_data_structures::graph::WithNumNodes; use rustc_data_structures::graph::WithSuccessors; use rustc_index::vec::{Idx, IndexVec}; @@ -232,11 +233,9 @@ fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) { mir_body .successors(bb) .map(|successor| { format!(" {:?} -> {:?};", bb, successor) }) - .collect::<Vec<_>>() .join("\n") ) }) - .collect::<Vec<_>>() .join("\n") ); } @@ -262,11 +261,9 @@ fn print_coverage_graphviz( basic_coverage_blocks .successors(bcb) .map(|successor| { format!(" {:?} -> {:?};", bcb, successor) }) - .collect::<Vec<_>>() .join("\n") ) }) - .collect::<Vec<_>>() .join("\n") ); } diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs index ac88060f0d3..9a6b6532ce8 100644 --- a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs +++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs @@ -1,12 +1,12 @@ use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::*; -use rustc_middle::ty::{Ty, TyCtxt}; +use rustc_middle::ty::{self, Ty, TyCtxt}; use std::fmt::Debug; use super::simplify::simplify_cfg; /// This pass optimizes something like -/// ```text +/// ```ignore (syntax-highlighting-only) /// let x: Option<()>; /// let y: Option<()>; /// match (x,y) { @@ -15,144 +15,201 @@ use super::simplify::simplify_cfg; /// } /// ``` /// into something like -/// ```text +/// ```ignore (syntax-highlighting-only) /// let x: Option<()>; /// let y: Option<()>; -/// let discriminant_x = // get discriminant of x -/// let discriminant_y = // get discriminant of y -/// if discriminant_x != discriminant_y || discriminant_x == None {1} else {0} +/// let discriminant_x = std::mem::discriminant(x); +/// let discriminant_y = std::mem::discriminant(y); +/// if discriminant_x == discriminant_y { +/// match x { +/// Some(_) => 0, +/// _ => 1, // <---- +/// } // | Actually the same bb +/// } else { // | +/// 1 // <-------------- +/// } +/// ``` +/// +/// Specifically, it looks for instances of control flow like this: +/// ```text +/// +/// ================= +/// | BB1 | +/// |---------------| ============================ +/// | ... | /------> | BBC | +/// |---------------| | |--------------------------| +/// | switchInt(Q) | | | _cl = discriminant(P) | +/// | c | --------/ |--------------------------| +/// | d | -------\ | switchInt(_cl) | +/// | ... 
| | | c | ---> BBC.2 +/// | otherwise | --\ | /--- | otherwise | +/// ================= | | | ============================ +/// | | | +/// ================= | | | +/// | BBU | <-| | | ============================ +/// |---------------| | \-------> | BBD | +/// |---------------| | | |--------------------------| +/// | unreachable | | | | _dl = discriminant(P) | +/// ================= | | |--------------------------| +/// | | | switchInt(_dl) | +/// ================= | | | d | ---> BBD.2 +/// | BB9 | <--------------- | otherwise | +/// |---------------| ============================ +/// | ... | +/// ================= /// ``` +/// Where the `otherwise` branch on `BB1` is permitted to either go to `BBU` or to `BB9`. In the +/// code: +/// - `BB1` is `parent` and `BBC, BBD` are children +/// - `P` is `child_place` +/// - `child_ty` is the type of `_cl`. +/// - `Q` is `parent_op`. +/// - `parent_ty` is the type of `Q`. +/// - `BB9` is `destination` +/// All this is then transformed into: +/// ```text +/// +/// ======================= +/// | BB1 | +/// |---------------------| ============================ +/// | ... | /------> | BBEq | +/// | _s = discriminant(P)| | |--------------------------| +/// | _t = Ne(Q, _s) | | |--------------------------| +/// |---------------------| | | switchInt(Q) | +/// | switchInt(_t) | | | c | ---> BBC.2 +/// | false | --------/ | d | ---> BBD.2 +/// | otherwise | ---------------- | otherwise | +/// ======================= | ============================ +/// | +/// ================= | +/// | BB9 | <-----------/ +/// |---------------| +/// | ... | +/// ================= +/// ``` +/// +/// This is only correct for some `P`, since `P` is now computed outside the original `switchInt`. +/// The filter on which `P` are allowed (together with discussion of its correctness) is found in +/// `may_hoist`. 
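For concreteness, a source-level function (my own sketch, mirroring the `Option` example in the comment above) whose MIR contains the parent/child `SwitchInt` shape this pass rewrites:

```rust
// MIR for this match first switches on discriminant(x) (the parent switch on `Q`),
// and the interesting arm then switches on discriminant(y) (the child switch on `P`).
// After the transformation, the two discriminants are compared once up front and the
// fallback arm is taken directly whenever they differ.
fn both_some(x: Option<u32>, y: Option<u32>) -> u8 {
    match (x, y) {
        (Some(_), Some(_)) => 0,
        _ => 1,
    }
}
```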
pub struct EarlyOtherwiseBranch; impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - // FIXME(#78496) - sess.opts.debugging_opts.unsound_mir_opts && sess.mir_opt_level() >= 3 + sess.mir_opt_level() >= 2 } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { trace!("running EarlyOtherwiseBranch on {:?}", body.source); - // we are only interested in this bb if the terminator is a switchInt - let bbs_with_switch = - body.basic_blocks().iter_enumerated().filter(|(_, bb)| is_switch(bb.terminator())); + let mut should_cleanup = false; - let opts_to_apply: Vec<OptimizationToApply<'tcx>> = bbs_with_switch - .flat_map(|(bb_idx, bb)| { - let switch = bb.terminator(); - let helper = Helper { body, tcx }; - let infos = helper.go(bb, switch)?; - Some(OptimizationToApply { infos, basic_block_first_switch: bb_idx }) - }) - .collect(); - - let should_cleanup = !opts_to_apply.is_empty(); + // Also consider newly generated bbs in the same pass + for i in 0..body.basic_blocks().len() { + let bbs = body.basic_blocks(); + let parent = BasicBlock::from_usize(i); + let Some(opt_data) = evaluate_candidate(tcx, body, parent) else { + continue + }; - for opt_to_apply in opts_to_apply { - if !tcx.consider_optimizing(|| format!("EarlyOtherwiseBranch {:?}", &opt_to_apply)) { + if !tcx.consider_optimizing(|| format!("EarlyOtherwiseBranch {:?}", &opt_data)) { break; } - trace!("SUCCESS: found optimization possibility to apply: {:?}", &opt_to_apply); + trace!("SUCCESS: found optimization possibility to apply: {:?}", &opt_data); - let statements_before = - body.basic_blocks()[opt_to_apply.basic_block_first_switch].statements.len(); - let end_of_block_location = Location { - block: opt_to_apply.basic_block_first_switch, - statement_index: statements_before, + should_cleanup = true; + + let TerminatorKind::SwitchInt { + discr: parent_op, + switch_ty: parent_ty, + targets: parent_targets + } = &bbs[parent].terminator().kind else { + unreachable!() + }; + // Always correct since we can only switch on `Copy` types + let parent_op = match parent_op { + Operand::Move(x) => Operand::Copy(*x), + Operand::Copy(x) => Operand::Copy(*x), + Operand::Constant(x) => Operand::Constant(x.clone()), }; + let statements_before = bbs[parent].statements.len(); + let parent_end = Location { block: parent, statement_index: statements_before }; let mut patch = MirPatch::new(body); - // create temp to store second discriminant in - let discr_type = opt_to_apply.infos[0].second_switch_info.discr_ty; - let discr_span = opt_to_apply.infos[0].second_switch_info.discr_source_info.span; - let second_discriminant_temp = patch.new_temp(discr_type, discr_span); + // create temp to store second discriminant in, `_s` in example above + let second_discriminant_temp = + patch.new_temp(opt_data.child_ty, opt_data.child_source.span); - patch.add_statement( - end_of_block_location, - StatementKind::StorageLive(second_discriminant_temp), - ); + patch.add_statement(parent_end, StatementKind::StorageLive(second_discriminant_temp)); // create assignment of discriminant - let place_of_adt_to_get_discriminant_of = - opt_to_apply.infos[0].second_switch_info.place_of_adt_discr_read; patch.add_assign( - end_of_block_location, + parent_end, Place::from(second_discriminant_temp), - Rvalue::Discriminant(place_of_adt_to_get_discriminant_of), + Rvalue::Discriminant(opt_data.child_place), ); - // create temp to store NotEqual comparison between the two discriminants - let not_equal = BinOp::Ne; - let 
not_equal_res_type = not_equal.ty(tcx, discr_type, discr_type); - let not_equal_temp = patch.new_temp(not_equal_res_type, discr_span); - patch.add_statement(end_of_block_location, StatementKind::StorageLive(not_equal_temp)); - - // create NotEqual comparison between the two discriminants - let first_descriminant_place = - opt_to_apply.infos[0].first_switch_info.discr_used_in_switch; - let not_equal_rvalue = Rvalue::BinaryOp( - not_equal, - Box::new(( - Operand::Copy(Place::from(second_discriminant_temp)), - Operand::Copy(first_descriminant_place), - )), + // create temp to store inequality comparison between the two discriminants, `_t` in + // example above + let nequal = BinOp::Ne; + let comp_res_type = nequal.ty(tcx, parent_ty, opt_data.child_ty); + let comp_temp = patch.new_temp(comp_res_type, opt_data.child_source.span); + patch.add_statement(parent_end, StatementKind::StorageLive(comp_temp)); + + // create inequality comparison between the two discriminants + let comp_rvalue = Rvalue::BinaryOp( + nequal, + Box::new((parent_op.clone(), Operand::Move(Place::from(second_discriminant_temp)))), ); patch.add_statement( - end_of_block_location, - StatementKind::Assign(Box::new((Place::from(not_equal_temp), not_equal_rvalue))), + parent_end, + StatementKind::Assign(Box::new((Place::from(comp_temp), comp_rvalue))), ); - let new_targets = opt_to_apply - .infos - .iter() - .flat_map(|x| x.second_switch_info.targets_with_values.iter()) - .cloned(); - - let targets = SwitchTargets::new( - new_targets, - opt_to_apply.infos[0].first_switch_info.otherwise_bb, - ); - - // new block that jumps to the correct discriminant case. This block is switched to if the discriminants are equal - let new_switch_data = BasicBlockData::new(Some(Terminator { - source_info: opt_to_apply.infos[0].second_switch_info.discr_source_info, + let eq_new_targets = parent_targets.iter().map(|(value, child)| { + let TerminatorKind::SwitchInt{ targets, .. } = &bbs[child].terminator().kind else { + unreachable!() + }; + (value, targets.target_for_value(value)) + }); + let eq_targets = SwitchTargets::new(eq_new_targets, opt_data.destination); + + // Create `bbEq` in example above + let eq_switch = BasicBlockData::new(Some(Terminator { + source_info: bbs[parent].terminator().source_info, kind: TerminatorKind::SwitchInt { - // the first and second discriminants are equal, so just pick one - discr: Operand::Copy(first_descriminant_place), - switch_ty: discr_type, - targets, + // switch on the first discriminant, so we can mark the second one as dead + discr: parent_op, + switch_ty: opt_data.child_ty, + targets: eq_targets, }, })); - let new_switch_bb = patch.new_block(new_switch_data); + let eq_bb = patch.new_block(eq_switch); - // switch on the NotEqual. If true, then jump to the `otherwise` case. 
- // If false, then jump to a basic block that then jumps to the correct disciminant case - let true_case = opt_to_apply.infos[0].first_switch_info.otherwise_bb; - let false_case = new_switch_bb; + // Jump to it on the basis of the inequality comparison + let true_case = opt_data.destination; + let false_case = eq_bb; patch.patch_terminator( - opt_to_apply.basic_block_first_switch, + parent, TerminatorKind::if_( tcx, - Operand::Move(Place::from(not_equal_temp)), + Operand::Move(Place::from(comp_temp)), true_case, false_case, ), ); // generate StorageDead for the second_discriminant_temp not in use anymore - patch.add_statement( - end_of_block_location, - StatementKind::StorageDead(second_discriminant_temp), - ); + patch.add_statement(parent_end, StatementKind::StorageDead(second_discriminant_temp)); - // Generate a StorageDead for not_equal_temp in each of the targets, since we moved it into the switch + // Generate a StorageDead for comp_temp in each of the targets, since we moved it into + // the switch for bb in [false_case, true_case].iter() { patch.add_statement( Location { block: *bb, statement_index: 0 }, - StatementKind::StorageDead(not_equal_temp), + StatementKind::StorageDead(comp_temp), ); } @@ -167,201 +224,177 @@ impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch { } } -fn is_switch(terminator: &Terminator<'_>) -> bool { - matches!(terminator.kind, TerminatorKind::SwitchInt { .. }) -} - -struct Helper<'a, 'tcx> { - body: &'a Body<'tcx>, - tcx: TyCtxt<'tcx>, -} - -#[derive(Debug, Clone)] -struct SwitchDiscriminantInfo<'tcx> { - /// Type of the discriminant being switched on - discr_ty: Ty<'tcx>, - /// The basic block that the otherwise branch points to - otherwise_bb: BasicBlock, - /// Target along with the value being branched from. Otherwise is not included - targets_with_values: Vec<(u128, BasicBlock)>, - discr_source_info: SourceInfo, - /// The place of the discriminant used in the switch - discr_used_in_switch: Place<'tcx>, - /// The place of the adt that has its discriminant read - place_of_adt_discr_read: Place<'tcx>, - /// The type of the adt that has its discriminant read - type_adt_matched_on: Ty<'tcx>, -} - -#[derive(Debug)] -struct OptimizationToApply<'tcx> { - infos: Vec<OptimizationInfo<'tcx>>, - /// Basic block of the original first switch - basic_block_first_switch: BasicBlock, +/// Returns true if computing the discriminant of `place` may be hoisted out of the branch +fn may_hoist<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, place: Place<'tcx>) -> bool { + for (place, proj) in place.iter_projections() { + match proj { + // Dereferencing in the computation of `place` might cause issues from one of two + // cateogires. First, the referrent might be invalid. We protect against this by + // dereferencing references only (not pointers). Second, the use of a reference may + // invalidate other references that are used later (for aliasing reasons). Consider + // where such an invalidated reference may appear: + // - In `Q`: Not possible since `Q` is used as the operand of a `SwitchInt` and so + // cannot contain referenced data. + // - In `BBU`: Not possible since that block contains only the `unreachable` terminator + // - In `BBC.2, BBD.2`: Not possible, since `discriminant(P)` was computed prior to + // reaching that block in the input to our transformation, and so any data + // invalidated by that computation could not have been used there. 
+ // - In `BB9`: Not possible since control flow might have reached `BB9` via the + // `otherwise` branch in `BBC, BBD` in the input to our transformation, which would + // have invalidated the data when computing `discriminant(P)` + // So dereferencing here is correct. + ProjectionElem::Deref => match place.ty(body.local_decls(), tcx).ty.kind() { + ty::Ref(..) => {} + _ => return false, + }, + // Field projections are always valid + ProjectionElem::Field(..) => {} + // We cannot allow + // downcasts either, since the correctness of the downcast may depend on the parent + // branch being taken. An easy example of this is + // ``` + // Q = discriminant(_3) + // P = (_3 as Variant) + // ``` + // However, checking if the child and parent place are the same and only erroring then + // is not sufficient either, since the `discriminant(_3) == 1` (or whatever) check may + // be replaced by another optimization pass with any other condition that can be proven + // equivalent. + ProjectionElem::Downcast(..) => { + return false; + } + // We cannot allow indexing since the index may be out of bounds. + _ => { + return false; + } + } + } + true } #[derive(Debug)] -struct OptimizationInfo<'tcx> { - /// Info about the first switch and discriminant - first_switch_info: SwitchDiscriminantInfo<'tcx>, - /// Info about the second switch and discriminant - second_switch_info: SwitchDiscriminantInfo<'tcx>, +struct OptimizationData<'tcx> { + destination: BasicBlock, + child_place: Place<'tcx>, + child_ty: Ty<'tcx>, + child_source: SourceInfo, } -impl<'tcx> Helper<'_, 'tcx> { - pub fn go( - &self, - bb: &BasicBlockData<'tcx>, - switch: &Terminator<'tcx>, - ) -> Option<Vec<OptimizationInfo<'tcx>>> { - // try to find the statement that defines the discriminant that is used for the switch - let discr = self.find_switch_discriminant_info(bb, switch)?; - - // go through each target, finding a discriminant read, and a switch - let results = discr - .targets_with_values - .iter() - .map(|(value, target)| self.find_discriminant_switch_pairing(&discr, *target, *value)); - - // if the optimization did not apply for one of the targets, then abort - if results.clone().any(|x| x.is_none()) || results.len() == 0 { - trace!("NO: not all of the targets matched the pattern for optimization"); - return None; +fn evaluate_candidate<'tcx>( + tcx: TyCtxt<'tcx>, + body: &Body<'tcx>, + parent: BasicBlock, +) -> Option<OptimizationData<'tcx>> { + let bbs = body.basic_blocks(); + let TerminatorKind::SwitchInt { + targets, + switch_ty: parent_ty, + .. + } = &bbs[parent].terminator().kind else { + return None + }; + let parent_dest = { + let poss = targets.otherwise(); + // If the fallthrough on the parent is trivially unreachable, we can let the + // children choose the destination + if bbs[poss].statements.len() == 0 + && bbs[poss].terminator().kind == TerminatorKind::Unreachable + { + None + } else { + Some(poss) } - - Some(results.flatten().collect()) + }; + let Some((_, child)) = targets.iter().next() else { + return None + }; + let child_terminator = &bbs[child].terminator(); + let TerminatorKind::SwitchInt { + switch_ty: child_ty, + targets: child_targets, + .. 
+ } = &child_terminator.kind else { + return None + }; + if child_ty != parent_ty { + return None; + } + let Some(StatementKind::Assign(boxed)) + = &bbs[child].statements.first().map(|x| &x.kind) else { + return None; + }; + let (_, Rvalue::Discriminant(child_place)) = &**boxed else { + return None; + }; + let destination = parent_dest.unwrap_or(child_targets.otherwise()); + + // Verify that the optimization is legal in general + // We can hoist evaluating the child discriminant out of the branch + if !may_hoist(tcx, body, *child_place) { + return None; } - fn find_discriminant_switch_pairing( - &self, - discr_info: &SwitchDiscriminantInfo<'tcx>, - target: BasicBlock, - value: u128, - ) -> Option<OptimizationInfo<'tcx>> { - let bb = &self.body.basic_blocks()[target]; - // find switch - let terminator = bb.terminator(); - if is_switch(terminator) { - let this_bb_discr_info = self.find_switch_discriminant_info(bb, terminator)?; - - // the types of the two adts matched on have to be equalfor this optimization to apply - if discr_info.type_adt_matched_on != this_bb_discr_info.type_adt_matched_on { - trace!( - "NO: types do not match. LHS: {:?}, RHS: {:?}", - discr_info.type_adt_matched_on, - this_bb_discr_info.type_adt_matched_on - ); - return None; - } - - // the otherwise branch of the two switches have to point to the same bb - if discr_info.otherwise_bb != this_bb_discr_info.otherwise_bb { - trace!("NO: otherwise target is not the same"); - return None; - } - - // check that the value being matched on is the same. The - if !this_bb_discr_info.targets_with_values.iter().any(|x| x.0 == value) { - trace!("NO: values being matched on are not the same"); - return None; - } - - // only allow optimization if the left and right of the tuple being matched are the same variants. - // so the following should not optimize - // ```rust - // let x: Option<()>; - // let y: Option<()>; - // match (x,y) { - // (Some(_), None) => {}, - // _ => {} - // } - // ``` - // We check this by seeing that the value of the first discriminant is the only other discriminant value being used as a target in the second switch - if !(this_bb_discr_info.targets_with_values.len() == 1 - && this_bb_discr_info.targets_with_values[0].0 == value) - { - trace!( - "NO: The second switch did not have only 1 target (besides otherwise) that had the same value as the value from the first switch that got us here" - ); - return None; - } - - // when the second place is a projection of the first one, it's not safe to calculate their discriminant values sequentially. - // for example, this should not be optimized: - // - // ```rust - // enum E<'a> { Empty, Some(&'a E<'a>), } - // let Some(Some(_)) = e; - // ``` - // - // ```mir - // bb0: { - // _2 = discriminant(*_1) - // switchInt(_2) -> [...] - // } - // bb1: { - // _3 = discriminant(*(((*_1) as Some).0: &E)) - // switchInt(_3) -> [...] 
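As a hedged, source-level counterpart (not part of this patch) to the projection hazard that `may_hoist` rejects and that the removed MIR comment above illustrates:

```rust
// Editorial sketch; the enum mirrors the example in the removed comment above.
enum E<'a> {
    Empty,
    Some(&'a E<'a>),
}

fn two_deep(e: &E<'_>) -> bool {
    // The inner discriminant read is roughly `discriminant(*((*e as Some).0))`:
    // it projects through a downcast of the outer value, so it is only meaningful
    // on the `Some` branch of the outer switch and `may_hoist` refuses to move it
    // above that switch.
    matches!(e, E::Some(E::Some(_)))
}
```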
- // } - // ``` - let discr_place = discr_info.place_of_adt_discr_read; - let this_discr_place = this_bb_discr_info.place_of_adt_discr_read; - if discr_place.local == this_discr_place.local - && this_discr_place.projection.starts_with(discr_place.projection) - { - trace!("NO: one target is the projection of another"); - return None; - } - - // if we reach this point, the optimization applies, and we should be able to optimize this case - // store the info that is needed to apply the optimization - - Some(OptimizationInfo { - first_switch_info: discr_info.clone(), - second_switch_info: this_bb_discr_info, - }) - } else { - None + // Verify that the optimization is legal for each branch + for (value, child) in targets.iter() { + if !verify_candidate_branch(&bbs[child], value, *child_place, destination) { + return None; } } + Some(OptimizationData { + destination, + child_place: *child_place, + child_ty, + child_source: child_terminator.source_info, + }) +} - fn find_switch_discriminant_info( - &self, - bb: &BasicBlockData<'tcx>, - switch: &Terminator<'tcx>, - ) -> Option<SwitchDiscriminantInfo<'tcx>> { - match &switch.kind { - TerminatorKind::SwitchInt { discr, targets, .. } => { - let discr_local = discr.place()?.as_local()?; - // the declaration of the discriminant read. Place of this read is being used in the switch - let discr_decl = &self.body.local_decls()[discr_local]; - let discr_ty = discr_decl.ty; - // the otherwise target lies as the last element - let otherwise_bb = targets.otherwise(); - let targets_with_values = targets.iter().collect(); - - // find the place of the adt where the discriminant is being read from - // assume this is the last statement of the block - let place_of_adt_discr_read = match bb.statements.last()?.kind { - StatementKind::Assign(box (_, Rvalue::Discriminant(adt_place))) => { - Some(adt_place) - } - _ => None, - }?; - - let type_adt_matched_on = place_of_adt_discr_read.ty(self.body, self.tcx).ty; - - Some(SwitchDiscriminantInfo { - discr_used_in_switch: discr.place()?, - discr_ty, - otherwise_bb, - targets_with_values, - discr_source_info: discr_decl.source_info, - place_of_adt_discr_read, - type_adt_matched_on, - }) - } - _ => unreachable!("must only be passed terminator that is a switch"), - } +fn verify_candidate_branch<'tcx>( + branch: &BasicBlockData<'tcx>, + value: u128, + place: Place<'tcx>, + destination: BasicBlock, +) -> bool { + // In order for the optimization to be correct, the branch must... + // ...have exactly one statement + if branch.statements.len() != 1 { + return false; + } + // ...assign the descriminant of `place` in that statement + let StatementKind::Assign(boxed) = &branch.statements[0].kind else { + return false + }; + let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else { + return false + }; + if *from_place != place { + return false; + } + // ...make that assignment to a local + if discr_place.projection.len() != 0 { + return false; + } + // ...terminate on a `SwitchInt` that invalidates that local + let TerminatorKind::SwitchInt{ discr: switch_op, targets, .. 
} = &branch.terminator().kind else { + return false + }; + if *switch_op != Operand::Move(*discr_place) { + return false; + } + // ...fall through to `destination` if the switch misses + if destination != targets.otherwise() { + return false; + } + // ...have a branch for value `value` + let mut iter = targets.iter(); + let Some((target_value, _)) = iter.next() else { + return false; + }; + if target_value != value { + return false; + } + // ...and have no more branches + if let Some(_) = iter.next() { + return false; } + return true; } diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs index f364a332a78..1b9fddec2be 100644 --- a/compiler/rustc_mir_transform/src/function_item_references.rs +++ b/compiler/rustc_mir_transform/src/function_item_references.rs @@ -1,3 +1,4 @@ +use itertools::Itertools; use rustc_errors::Applicability; use rustc_hir::def_id::DefId; use rustc_middle::mir::visit::Visitor; @@ -42,54 +43,28 @@ impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> { } = &terminator.kind { let source_info = *self.body.source_info(location); - // Only handle function calls outside macros - if !source_info.span.from_expansion() { - let func_ty = func.ty(self.body, self.tcx); - if let ty::FnDef(def_id, substs_ref) = *func_ty.kind() { - // Handle calls to `transmute` - if self.tcx.is_diagnostic_item(sym::transmute, def_id) { - let arg_ty = args[0].ty(self.body, self.tcx); - for generic_inner_ty in arg_ty.walk() { - if let GenericArgKind::Type(inner_ty) = generic_inner_ty.unpack() { - if let Some((fn_id, fn_substs)) = - FunctionItemRefChecker::is_fn_ref(inner_ty) - { - let span = self.nth_arg_span(&args, 0); - self.emit_lint(fn_id, fn_substs, source_info, span); - } + let func_ty = func.ty(self.body, self.tcx); + if let ty::FnDef(def_id, substs_ref) = *func_ty.kind() { + // Handle calls to `transmute` + if self.tcx.is_diagnostic_item(sym::transmute, def_id) { + let arg_ty = args[0].ty(self.body, self.tcx); + for generic_inner_ty in arg_ty.walk() { + if let GenericArgKind::Type(inner_ty) = generic_inner_ty.unpack() { + if let Some((fn_id, fn_substs)) = + FunctionItemRefChecker::is_fn_ref(inner_ty) + { + let span = self.nth_arg_span(&args, 0); + self.emit_lint(fn_id, fn_substs, source_info, span); } } - } else { - self.check_bound_args(def_id, substs_ref, &args, source_info); } + } else { + self.check_bound_args(def_id, substs_ref, &args, source_info); } } } self.super_terminator(terminator, location); } - - /// Emits a lint for function references formatted with `fmt::Pointer::fmt` by macros. These - /// cases are handled as operands instead of call terminators to avoid any dependence on - /// unstable, internal formatting details like whether `fmt` is called directly or not. - fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) { - let source_info = *self.body.source_info(location); - if source_info.span.from_expansion() { - let op_ty = operand.ty(self.body, self.tcx); - if let ty::FnDef(def_id, substs_ref) = *op_ty.kind() { - if self.tcx.is_diagnostic_item(sym::pointer_trait_fmt, def_id) { - let param_ty = substs_ref.type_at(0); - if let Some((fn_id, fn_substs)) = FunctionItemRefChecker::is_fn_ref(param_ty) { - // The operand's ctxt wouldn't display the lint since it's inside a macro so - // we have to use the callsite's ctxt. 
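For context on the `function_item_references` hunk above, here is a hedged example (not from the patch) of the kind of code the lint targets; the change moves the macro-callsite span fixup from the removed `visit_operand` into `check_bound_args`.

```rust
// Hypothetical trigger: `foo` is a zero-sized function item, so `&foo` is a
// reference to a ZST rather than a function pointer.
fn foo() {}

fn main() {
    // Lints: this prints the address of a zero-sized value, not the function's address.
    println!("{:p}", &foo);
    // The suggested fix is to cast to a function pointer first.
    println!("{:p}", foo as fn());
}
```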
- let callsite_ctxt = source_info.span.source_callsite().ctxt(); - let span = source_info.span.with_ctxt(callsite_ctxt); - self.emit_lint(fn_id, fn_substs, source_info, span); - } - } - } - } - self.super_operand(operand, location); - } } impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { @@ -119,7 +94,13 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { if let Some((fn_id, fn_substs)) = FunctionItemRefChecker::is_fn_ref(subst_ty) { - let span = self.nth_arg_span(args, arg_num); + let mut span = self.nth_arg_span(args, arg_num); + if span.from_expansion() { + // The operand's ctxt wouldn't display the lint since it's inside a macro so + // we have to use the callsite's ctxt. + let callsite_ctxt = span.source_callsite().ctxt(); + span = span.with_ctxt(callsite_ctxt); + } self.emit_lint(fn_id, fn_substs, source_info, span); } } @@ -197,7 +178,7 @@ impl<'tcx> FunctionItemRefChecker<'_, 'tcx> { let ident = self.tcx.item_name(fn_id).to_ident_string(); let ty_params = fn_substs.types().map(|ty| format!("{}", ty)); let const_params = fn_substs.consts().map(|c| format!("{}", c)); - let params = ty_params.chain(const_params).collect::<Vec<String>>().join(", "); + let params = ty_params.chain(const_params).join(", "); let num_args = fn_sig.inputs().map_bound(|inputs| inputs.len()).skip_binder(); let variadic = if fn_sig.c_variadic() { ", ..." } else { "" }; let ret = if fn_sig.output().skip_binder().is_unit() { "" } else { " -> _" }; diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index bf6f13fa67b..8e1601fb719 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -18,7 +18,7 @@ extern crate rustc_middle; use required_consts::RequiredConstsVisitor; use rustc_const_eval::util; -use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::fx::FxIndexSet; use rustc_data_structures::steal::Steal; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; @@ -136,8 +136,8 @@ fn is_mir_available(tcx: TyCtxt<'_>, def_id: DefId) -> bool { /// Finds the full set of `DefId`s within the current crate that have /// MIR associated with them. -fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxHashSet<LocalDefId> { - let mut set = FxHashSet::default(); +fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> { + let mut set = FxIndexSet::default(); // All body-owners have MIR associated with them. set.extend(tcx.hir().body_owners()); @@ -146,7 +146,7 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxHashSet<LocalDefId> { // they don't have a BodyId, so we need to build them separately. struct GatherCtors<'a, 'tcx> { tcx: TyCtxt<'tcx>, - set: &'a mut FxHashSet<LocalDefId>, + set: &'a mut FxIndexSet<LocalDefId>, } impl<'tcx> Visitor<'tcx> for GatherCtors<'_, 'tcx> { fn visit_variant_data( diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index 7e7f6938706..7f13da5d38f 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -807,10 +807,18 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { self.output.push(create_fn_mono_item(tcx, instance, source)); } } + mir::TerminatorKind::Abort { .. } => { + let instance = Instance::mono( + tcx, + tcx.require_lang_item(LangItem::PanicNoUnwind, Some(source)), + ); + if should_codegen_locally(tcx, &instance) { + self.output.push(create_fn_mono_item(tcx, instance, source)); + } + } mir::TerminatorKind::Goto { .. } | mir::TerminatorKind::SwitchInt { .. 
} | mir::TerminatorKind::Resume - | mir::TerminatorKind::Abort | mir::TerminatorKind::Return | mir::TerminatorKind::Unreachable => {} mir::TerminatorKind::GeneratorDrop diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index c41f2d3299b..17bac362ec8 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -731,28 +731,22 @@ impl<'a> Parser<'a> { match x { Ok((_, _, false)) => { if self.eat(&token::Gt) { - let turbo_err = e.span_suggestion_verbose( + e.span_suggestion_verbose( binop.span.shrink_to_lo(), TURBOFISH_SUGGESTION_STR, "::".to_string(), Applicability::MaybeIncorrect, - ); - if self.check(&TokenKind::Semi) { - turbo_err.emit(); - *expr = self.mk_expr_err(expr.span); - return Ok(()); - } else { - match self.parse_expr() { - Ok(_) => { - turbo_err.emit(); - *expr = self - .mk_expr_err(expr.span.to(self.prev_token.span)); - return Ok(()); - } - Err(mut err) => { - turbo_err.cancel(); - err.cancel(); - } + ) + .emit(); + match self.parse_expr() { + Ok(_) => { + *expr = + self.mk_expr_err(expr.span.to(self.prev_token.span)); + return Ok(()); + } + Err(mut err) => { + *expr = self.mk_expr_err(expr.span); + err.cancel(); } } } diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 192e87b4c01..693dd0051da 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -1458,7 +1458,7 @@ impl<'a> Parser<'a> { self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs) } else if !ate_colon && (self.check(&TokenKind::Comma) || self.check(&TokenKind::Gt)) { // We're probably inside of a `Path<'a>` that needs a turbofish, so suppress the - // "must be followed by a colon" error. + // "must be followed by a colon" error, and the "expected one of" error. self.diagnostic().delay_span_bug(lo, "this label wasn't parsed correctly"); consume_colon = false; Ok(self.mk_expr_err(lo)) @@ -2383,6 +2383,17 @@ impl<'a> Parser<'a> { } pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> { + fn check_let_expr(expr: &Expr) -> (bool, bool) { + match expr.kind { + ExprKind::Binary(_, ref lhs, ref rhs) => { + let lhs_rslt = check_let_expr(lhs); + let rhs_rslt = check_let_expr(rhs); + (lhs_rslt.0 || rhs_rslt.0, false) + } + ExprKind::Let(..) => (true, true), + _ => (false, true), + } + } let attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let lo = this.token.span; @@ -2390,9 +2401,12 @@ impl<'a> Parser<'a> { let guard = if this.eat_keyword(kw::If) { let if_span = this.prev_token.span; let cond = this.parse_expr()?; - if let ExprKind::Let(..) = cond.kind { - // Remove the last feature gating of a `let` expression since it's stable. - this.sess.gated_spans.ungate_last(sym::let_chains, cond.span); + let (has_let_expr, does_not_have_bin_op) = check_let_expr(&cond); + if has_let_expr { + if does_not_have_bin_op { + // Remove the last feature gating of a `let` expression since it's stable. 
+ this.sess.gated_spans.ungate_last(sym::let_chains, cond.span); + } let span = if_span.to(cond.span); this.sess.gated_spans.gate(sym::if_let_guard, span); } diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs index ade441b0e7d..06849b31256 100644 --- a/compiler/rustc_parse/src/parser/item.rs +++ b/compiler/rustc_parse/src/parser/item.rs @@ -423,7 +423,7 @@ impl<'a> Parser<'a> { // Maybe the user misspelled `macro_rules` (issue #91227) if self.token.is_ident() && path.segments.len() == 1 - && lev_distance("macro_rules", &path.segments[0].ident.to_string()) <= 3 + && lev_distance("macro_rules", &path.segments[0].ident.to_string(), 3).is_some() { err.span_suggestion( path.span, diff --git a/compiler/rustc_passes/src/naked_functions.rs b/compiler/rustc_passes/src/naked_functions.rs index 0228196d1a1..00a93ccc9aa 100644 --- a/compiler/rustc_passes/src/naked_functions.rs +++ b/compiler/rustc_passes/src/naked_functions.rs @@ -1,6 +1,7 @@ //! Checks validity of naked functions. use rustc_ast::{Attribute, InlineAsmOptions}; +use rustc_errors::struct_span_err; use rustc_hir as hir; use rustc_hir::def_id::LocalDefId; use rustc_hir::intravisit::{FnKind, Visitor}; @@ -8,7 +9,6 @@ use rustc_hir::{ExprKind, HirId, InlineAsmOperand, StmtKind}; use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_session::lint::builtin::UNDEFINED_NAKED_FUNCTION_ABI; -use rustc_session::lint::builtin::UNSUPPORTED_NAKED_FUNCTIONS; use rustc_span::symbol::sym; use rustc_span::Span; use rustc_target::spec::abi::Abi; @@ -64,18 +64,16 @@ impl<'tcx> Visitor<'tcx> for CheckNakedFunctions<'tcx> { check_abi(self.tcx, hir_id, fn_header.abi, ident_span); check_no_patterns(self.tcx, body.params); check_no_parameters_use(self.tcx, body); - check_asm(self.tcx, hir_id, body, span); - check_inline(self.tcx, hir_id, attrs); + check_asm(self.tcx, body, span); + check_inline(self.tcx, attrs); } } } /// Check that the function isn't inlined. -fn check_inline(tcx: TyCtxt<'_>, hir_id: HirId, attrs: &[Attribute]) { +fn check_inline(tcx: TyCtxt<'_>, attrs: &[Attribute]) { for attr in attrs.iter().filter(|attr| attr.has_name(sym::inline)) { - tcx.struct_span_lint_hir(UNSUPPORTED_NAKED_FUNCTIONS, hir_id, attr.span, |lint| { - lint.build("naked functions cannot be inlined").emit(); - }); + tcx.sess.struct_span_err(attr.span, "naked functions cannot be inlined").emit(); } } @@ -146,31 +144,31 @@ impl<'tcx> Visitor<'tcx> for CheckParameters<'tcx> { } /// Checks that function body contains a single inline assembly block. -fn check_asm<'tcx>(tcx: TyCtxt<'tcx>, hir_id: HirId, body: &'tcx hir::Body<'tcx>, fn_span: Span) { +fn check_asm<'tcx>(tcx: TyCtxt<'tcx>, body: &'tcx hir::Body<'tcx>, fn_span: Span) { let mut this = CheckInlineAssembly { tcx, items: Vec::new() }; this.visit_body(body); if let [(ItemKind::Asm, _)] = this.items[..] { // Ok. 
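Returning to the `parse_arm` change above, a hedged, nightly-only illustration (not from the patch) of the two kinds of guards that `check_let_expr` distinguishes; the feature names are the ones gated in that hunk, as of the nightly this patch targets.

```rust
#![feature(if_let_guard)]
#![feature(let_chains)]

fn classify(x: Option<i32>, limit: Option<i32>) -> u8 {
    match x {
        // A bare `let` in the guard: the `let_chains` gate recorded while parsing
        // the expression is removed and the whole guard is gated as `if_let_guard`.
        Some(_) if let Some(_) = limit => 1,
        // A guard mixing `let` with another operand: `check_let_expr` reports the
        // binary operator, so the `let_chains` gate is kept in addition.
        Some(n) if n > 0 && let Some(_) = limit => 2,
        _ => 0,
    }
}
```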
} else { - tcx.struct_span_lint_hir(UNSUPPORTED_NAKED_FUNCTIONS, hir_id, fn_span, |lint| { - let mut diag = lint.build("naked functions must contain a single asm block"); - let mut has_asm = false; - for &(kind, span) in &this.items { - match kind { - ItemKind::Asm if has_asm => { - diag.span_label( - span, - "multiple asm blocks are unsupported in naked functions", - ); - } - ItemKind::Asm => has_asm = true, - ItemKind::NonAsm => { - diag.span_label(span, "non-asm is unsupported in naked functions"); - } + let mut diag = struct_span_err!( + tcx.sess, + fn_span, + E0787, + "naked functions must contain a single asm block" + ); + let mut has_asm = false; + for &(kind, span) in &this.items { + match kind { + ItemKind::Asm if has_asm => { + diag.span_label(span, "multiple asm blocks are unsupported in naked functions"); + } + ItemKind::Asm => has_asm = true, + ItemKind::NonAsm => { + diag.span_label(span, "non-asm is unsupported in naked functions"); } } - diag.emit(); - }); + } + diag.emit(); } } @@ -221,7 +219,7 @@ impl<'tcx> CheckInlineAssembly<'tcx> { ExprKind::InlineAsm(ref asm) => { self.items.push((ItemKind::Asm, span)); - self.check_inline_asm(expr.hir_id, asm, span); + self.check_inline_asm(asm, span); } ExprKind::DropTemps(..) | ExprKind::Block(..) | ExprKind::Err => { @@ -230,7 +228,7 @@ impl<'tcx> CheckInlineAssembly<'tcx> { } } - fn check_inline_asm(&self, hir_id: HirId, asm: &'tcx hir::InlineAsm<'tcx>, span: Span) { + fn check_inline_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>, span: Span) { let unsupported_operands: Vec<Span> = asm .operands .iter() @@ -243,18 +241,17 @@ impl<'tcx> CheckInlineAssembly<'tcx> { }) .collect(); if !unsupported_operands.is_empty() { - self.tcx.struct_span_lint_hir( - UNSUPPORTED_NAKED_FUNCTIONS, - hir_id, + struct_span_err!( + self.tcx.sess, unsupported_operands, - |lint| { - lint.build("only `const` and `sym` operands are supported in naked functions") - .emit(); - }, - ); + E0787, + "only `const` and `sym` operands are supported in naked functions", + ) + .emit(); } let unsupported_options: Vec<&'static str> = [ + (InlineAsmOptions::MAY_UNWIND, "`may_unwind`"), (InlineAsmOptions::NOMEM, "`nomem`"), (InlineAsmOptions::NOSTACK, "`nostack`"), (InlineAsmOptions::PRESERVES_FLAGS, "`preserves_flags`"), @@ -266,19 +263,24 @@ impl<'tcx> CheckInlineAssembly<'tcx> { .collect(); if !unsupported_options.is_empty() { - self.tcx.struct_span_lint_hir(UNSUPPORTED_NAKED_FUNCTIONS, hir_id, span, |lint| { - lint.build(&format!( - "asm options unsupported in naked functions: {}", - unsupported_options.join(", ") - )) - .emit(); - }); + struct_span_err!( + self.tcx.sess, + span, + E0787, + "asm options unsupported in naked functions: {}", + unsupported_options.join(", ") + ) + .emit(); } if !asm.options.contains(InlineAsmOptions::NORETURN) { - self.tcx.struct_span_lint_hir(UNSUPPORTED_NAKED_FUNCTIONS, hir_id, span, |lint| { - lint.build("asm in naked functions must use `noreturn` option").emit(); - }); + struct_span_err!( + self.tcx.sess, + span, + E0787, + "asm in naked functions must use `noreturn` option" + ) + .emit(); } } } diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs index 5f6d9b050b2..06e276ab42b 100644 --- a/compiler/rustc_query_impl/src/on_disk_cache.rs +++ b/compiler/rustc_query_impl/src/on_disk_cache.rs @@ -163,15 +163,12 @@ impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> { // Decode the *position* of the footer, which can be found in the // last 8 bytes of the file. 
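For the naked-function checks above, which this patch turns from `unsupported_naked_functions` lints into hard errors (E0787), a hedged sketch (not from the patch) of a function that satisfies them on the nightly this change targets: a single `asm!` block, `options(noreturn)`, only `const`/`sym` operands (none here), and no `#[inline]`.

```rust
#![feature(naked_functions)]
use std::arch::asm;

// Hypothetical x86_64-only example.
#[naked]
pub extern "C" fn add_one(x: u64) -> u64 {
    // The asm block is the entire body and never falls through.
    unsafe { asm!("lea rax, [rdi + 1]", "ret", options(noreturn)) }
}
```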
decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE); - let footer_pos = IntEncodedWithFixedSize::decode(&mut decoder) - .expect("error while trying to decode footer position") - .0 as usize; + let footer_pos = IntEncodedWithFixedSize::decode(&mut decoder).0 as usize; // Decode the file footer, which contains all the lookup tables, etc. decoder.set_position(footer_pos); decode_tagged(&mut decoder, TAG_FILE_FOOTER) - .expect("error while trying to decode footer position") }; Self { @@ -372,7 +369,7 @@ impl<'sess> OnDiskCache<'sess> { dep_node_index: SerializedDepNodeIndex, ) -> QuerySideEffects { let side_effects: Option<QuerySideEffects> = - self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index, "side_effects"); + self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index); side_effects.unwrap_or_default() } @@ -398,7 +395,7 @@ impl<'sess> OnDiskCache<'sess> { where T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, { - self.load_indexed(tcx, dep_node_index, &self.query_result_index, "query result") + self.load_indexed(tcx, dep_node_index, &self.query_result_index) } /// Stores side effect emitted during computation of an anonymous query. @@ -423,17 +420,13 @@ impl<'sess> OnDiskCache<'sess> { tcx: TyCtxt<'tcx>, dep_node_index: SerializedDepNodeIndex, index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - debug_tag: &'static str, ) -> Option<T> where T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, { let pos = index.get(&dep_node_index).cloned()?; - self.with_decoder(tcx, pos, |decoder| match decode_tagged(decoder, dep_node_index) { - Ok(v) => Some(v), - Err(e) => bug!("could not decode cached {}: {}", debug_tag, e), - }) + self.with_decoder(tcx, pos, |decoder| Some(decode_tagged(decoder, dep_node_index))) } fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>( @@ -535,7 +528,7 @@ impl<'a, 'tcx> DecoderWithPosition for CacheDecoder<'a, 'tcx> { // Decodes something that was encoded with `encode_tagged()` and verify that the // tag matches and the correct amount of bytes was read. 
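The `on_disk_cache` hunks above (and the `rustc_serialize` and dep-graph hunks later in this diff) track a serialization rework that makes decoding infallible: `Decodable::decode` returns the value directly and corrupt data is reported by panicking inside the decoder. A hedged sketch (not from the patch), using a hypothetical `Wrapper` type:

```rust
use rustc_serialize::{Decodable, Decoder};

struct Wrapper(u32);

// Before the change an impl threaded `Result` through every read:
//     fn decode(d: &mut D) -> Result<Wrapper, D::Error> { Ok(Wrapper(d.read_u32()?)) }
// After it, the same impl reads the value directly and the `?`/`Ok` plumbing disappears.
impl<D: Decoder> Decodable<D> for Wrapper {
    fn decode(d: &mut D) -> Self {
        Wrapper(d.read_u32())
    }
}
```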
-fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> Result<V, D::Error> +fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> V where T: Decodable<D> + Eq + std::fmt::Debug, V: Decodable<D>, @@ -543,15 +536,15 @@ where { let start_pos = decoder.position(); - let actual_tag = T::decode(decoder)?; + let actual_tag = T::decode(decoder); assert_eq!(actual_tag, expected_tag); - let value = V::decode(decoder)?; + let value = V::decode(decoder); let end_pos = decoder.position(); - let expected_len: u64 = Decodable::decode(decoder)?; + let expected_len: u64 = Decodable::decode(decoder); assert_eq!((end_pos - start_pos) as u64, expected_len); - Ok(value) + value } impl<'a, 'tcx> TyDecoder<'tcx> for CacheDecoder<'a, 'tcx> { @@ -572,26 +565,22 @@ impl<'a, 'tcx> TyDecoder<'tcx> for CacheDecoder<'a, 'tcx> { self.opaque.data[self.opaque.position()] } - fn cached_ty_for_shorthand<F>( - &mut self, - shorthand: usize, - or_insert_with: F, - ) -> Result<Ty<'tcx>, Self::Error> + fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx> where - F: FnOnce(&mut Self) -> Result<Ty<'tcx>, Self::Error>, + F: FnOnce(&mut Self) -> Ty<'tcx>, { let tcx = self.tcx(); let cache_key = ty::CReaderCacheKey { cnum: None, pos: shorthand }; if let Some(&ty) = tcx.ty_rcache.borrow().get(&cache_key) { - return Ok(ty); + return ty; } - let ty = or_insert_with(self)?; + let ty = or_insert_with(self); // This may overwrite the entry, but it should overwrite with the same value. tcx.ty_rcache.borrow_mut().insert_same(cache_key, ty); - Ok(ty) + ty } fn with_position<F, R>(&mut self, pos: usize, f: F) -> R @@ -607,7 +596,7 @@ impl<'a, 'tcx> TyDecoder<'tcx> for CacheDecoder<'a, 'tcx> { r } - fn decode_alloc_id(&mut self) -> Result<interpret::AllocId, Self::Error> { + fn decode_alloc_id(&mut self) -> interpret::AllocId { let alloc_decoding_session = self.alloc_decoding_session; alloc_decoding_session.decode_alloc_id(self) } @@ -619,35 +608,35 @@ rustc_middle::implement_ty_decoder!(CacheDecoder<'a, 'tcx>); // when a `CacheDecoder` is passed to `Decodable::decode`. Unfortunately, we have to manually opt // into specializations this way, given how `CacheDecoder` and the decoding traits currently work. impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Vec<u8> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { Decodable::decode(&mut d.opaque) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for SyntaxContext { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { let syntax_contexts = decoder.syntax_contexts; rustc_span::hygiene::decode_syntax_context(decoder, decoder.hygiene_context, |this, id| { // This closure is invoked if we haven't already decoded the data for the `SyntaxContext` we are deserializing. // We look up the position of the associated `SyntaxData` and decode it. 
let pos = syntax_contexts.get(&id).unwrap(); this.with_position(pos.to_usize(), |decoder| { - let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT)?; - Ok(data) + let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT); + data }) }) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { - let hash = ExpnHash::decode(decoder)?; + fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { + let hash = ExpnHash::decode(decoder); if hash.is_root() { - return Ok(ExpnId::root()); + return ExpnId::root(); } if let Some(expn_id) = ExpnId::from_hash(hash) { - return Ok(expn_id); + return expn_id; } let krate = decoder.tcx.stable_crate_id_to_crate_num(hash.stable_crate_id()); @@ -660,7 +649,7 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId { .unwrap_or_else(|| panic!("Bad hash {:?} (map {:?})", hash, decoder.expn_data)); let data: ExpnData = decoder - .with_position(pos.to_usize(), |decoder| decode_tagged(decoder, TAG_EXPN_DATA))?; + .with_position(pos.to_usize(), |decoder| decode_tagged(decoder, TAG_EXPN_DATA)); let expn_id = rustc_span::hygiene::register_local_expn_id(data, hash); #[cfg(debug_assertions)] @@ -687,21 +676,21 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId { }; debug_assert_eq!(expn_id.krate, krate); - Ok(expn_id) + expn_id } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { - let ctxt = SyntaxContext::decode(decoder)?; - let parent = Option::<LocalDefId>::decode(decoder)?; - let tag: u8 = Decodable::decode(decoder)?; + fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { + let ctxt = SyntaxContext::decode(decoder); + let parent = Option::<LocalDefId>::decode(decoder); + let tag: u8 = Decodable::decode(decoder); if tag == TAG_PARTIAL_SPAN { - return Ok(Span::new(BytePos(0), BytePos(0), ctxt, parent)); + return Span::new(BytePos(0), BytePos(0), ctxt, parent); } else if tag == TAG_RELATIVE_SPAN { - let dlo = u32::decode(decoder)?; - let dto = u32::decode(decoder)?; + let dlo = u32::decode(decoder); + let dto = u32::decode(decoder); let enclosing = decoder.tcx.definitions_untracked().def_span(parent.unwrap()).data_untracked(); @@ -712,29 +701,29 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span { parent, ); - return Ok(span); + return span; } else { debug_assert_eq!(tag, TAG_FULL_SPAN); } - let file_lo_index = SourceFileIndex::decode(decoder)?; - let line_lo = usize::decode(decoder)?; - let col_lo = BytePos::decode(decoder)?; - let len = BytePos::decode(decoder)?; + let file_lo_index = SourceFileIndex::decode(decoder); + let line_lo = usize::decode(decoder); + let col_lo = BytePos::decode(decoder); + let len = BytePos::decode(decoder); let file_lo = decoder.file_index_to_file(file_lo_index); let lo = file_lo.lines[line_lo - 1] + col_lo; let hi = lo + len; - Ok(Span::new(lo, hi, ctxt, parent)) + Span::new(lo, hi, ctxt, parent) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { - let stable_id = StableCrateId::decode(d)?; + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { + let stable_id = StableCrateId::decode(d); let cnum = d.tcx.stable_crate_id_to_crate_num(stable_id); - Ok(cnum) + cnum } } @@ -743,8 +732,8 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum { // because we would not know how to transform the `DefIndex` to the 
current // context. impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<DefIndex, String> { - Err(d.error("trying to decode `DefIndex` outside the context of a `DefId`")) + fn decode(_d: &mut CacheDecoder<'a, 'tcx>) -> DefIndex { + panic!("trying to decode `DefIndex` outside the context of a `DefId`") } } @@ -752,23 +741,23 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex { // compilation sessions. We use the `DefPathHash`, which is stable across // sessions, to map the old `DefId` to the new one. impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { // Load the `DefPathHash` which is was we encoded the `DefId` as. - let def_path_hash = DefPathHash::decode(d)?; + let def_path_hash = DefPathHash::decode(d); // Using the `DefPathHash`, we can lookup the new `DefId`. // Subtle: We only encode a `DefId` as part of a query result. // If we get to this point, then all of the query inputs were green, // which means that the definition with this hash is guaranteed to // still exist in the current compilation session. - Ok(d.tcx().def_path_hash_to_def_id(def_path_hash, &mut || { + d.tcx().def_path_hash_to_def_id(def_path_hash, &mut || { panic!("Failed to convert DefPathHash {:?}", def_path_hash) - })) + }) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashSet<LocalDefId> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { RefDecodable::decode(d) } } @@ -776,31 +765,31 @@ impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashSet<LocalDefId> impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { RefDecodable::decode(d) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [thir::abstract_const::Node<'tcx>] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { RefDecodable::decode(d) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { RefDecodable::decode(d) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [rustc_ast::InlineAsmTemplatePiece] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { RefDecodable::decode(d) } } impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [Span] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Result<Self, String> { + fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { RefDecodable::decode(d) } } diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 47197a1e492..283eda7c85e 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -100,7 +100,7 @@ impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder< for SerializedDepGraph<K> { #[instrument(level = "debug", skip(d))] - fn decode(d: &mut opaque::Decoder<'a>) -> Result<SerializedDepGraph<K>, String> { + fn decode(d: &mut 
opaque::Decoder<'a>) -> SerializedDepGraph<K> { let start_position = d.position(); // The last 16 bytes are the node count and edge count. @@ -108,8 +108,8 @@ impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder< d.set_position(d.data.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE); debug!("position: {:?}", d.position()); - let node_count = IntEncodedWithFixedSize::decode(d)?.0 as usize; - let edge_count = IntEncodedWithFixedSize::decode(d)?.0 as usize; + let node_count = IntEncodedWithFixedSize::decode(d).0 as usize; + let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize; debug!(?node_count, ?edge_count); debug!("position: {:?}", d.position()); @@ -123,12 +123,12 @@ impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder< for _index in 0..node_count { d.read_struct(|d| { - let dep_node: DepNode<K> = d.read_struct_field("node", Decodable::decode)?; + let dep_node: DepNode<K> = d.read_struct_field("node", Decodable::decode); let _i: SerializedDepNodeIndex = nodes.push(dep_node); debug_assert_eq!(_i.index(), _index); let fingerprint: Fingerprint = - d.read_struct_field("fingerprint", Decodable::decode)?; + d.read_struct_field("fingerprint", Decodable::decode); let _i: SerializedDepNodeIndex = fingerprints.push(fingerprint); debug_assert_eq!(_i.index(), _index); @@ -136,22 +136,21 @@ impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder< d.read_seq(|d, len| { let start = edge_list_data.len().try_into().unwrap(); for _ in 0..len { - let edge = d.read_seq_elt(Decodable::decode)?; + let edge = d.read_seq_elt(Decodable::decode); edge_list_data.push(edge); } let end = edge_list_data.len().try_into().unwrap(); let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end)); debug_assert_eq!(_i.index(), _index); - Ok(()) }) }) - })?; + }); } let index: FxHashMap<_, _> = nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect(); - Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }) + SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index } } } diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs index 631b8fef668..e4d8b7d5283 100644 --- a/compiler/rustc_resolve/src/build_reduced_graph.rs +++ b/compiler/rustc_resolve/src/build_reduced_graph.rs @@ -108,7 +108,7 @@ impl<'a> Resolver<'a> { /// Reachable macros with block module parents exist due to `#[macro_export] macro_rules!`, /// but they cannot use def-site hygiene, so the assumption holds /// (<https://github.com/rust-lang/rust/pull/77984#issuecomment-712445508>). 
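The doc comment above mentions "reachable macros with block module parents"; as a hedged reminder (not from the patch) of how such a macro arises at the source level:

```rust
// Hypothetical example: the macro is defined inside a function body (a block
// module parent) but `#[macro_export]` still exports it at the crate root,
// so it is reachable from outside.
fn private_scope() {
    #[macro_export]
    macro_rules! exported_from_a_block {
        () => {
            42
        };
    }
}
```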
- crate fn get_nearest_non_block_module(&mut self, mut def_id: DefId) -> Module<'a> { + pub fn get_nearest_non_block_module(&mut self, mut def_id: DefId) -> Module<'a> { loop { match self.get_module(def_id) { Some(module) => return module, diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 7b4fe6f0e07..7e1e5c78805 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -1171,9 +1171,7 @@ impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> { ident: Symbol, kind: &AssocItemKind, ) -> Option<Symbol> { - let module = if let Some((module, _)) = self.current_trait_ref { - module - } else { + let Some((module, _)) = &self.current_trait_ref else { return None; }; if ident == kw::Underscore { diff --git a/compiler/rustc_resolve/src/late/lifetimes.rs b/compiler/rustc_resolve/src/late/lifetimes.rs index b077a5c9144..4c7bdb33fb8 100644 --- a/compiler/rustc_resolve/src/late/lifetimes.rs +++ b/compiler/rustc_resolve/src/late/lifetimes.rs @@ -1000,46 +1000,45 @@ impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> { // `fn foo<'a>() -> MyAnonTy<'a> { ... }` // ^ ^this gets resolved in the current scope for lifetime in lifetimes { - if let hir::GenericArg::Lifetime(lifetime) = lifetime { - self.visit_lifetime(lifetime); + let hir::GenericArg::Lifetime(lifetime) = lifetime else { + continue + }; + self.visit_lifetime(lifetime); + + // Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>` + // and ban them. Type variables instantiated inside binders aren't + // well-supported at the moment, so this doesn't work. + // In the future, this should be fixed and this error should be removed. + let def = self.map.defs.get(&lifetime.hir_id).cloned(); + let Some(Region::LateBound(_, _, def_id, _)) = def else { + continue + }; + let Some(def_id) = def_id.as_local() else { + continue + }; + let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id); + // Ensure that the parent of the def is an item, not HRTB + let parent_id = self.tcx.hir().get_parent_node(hir_id); + // FIXME(cjgillot) Can this check be replaced by + // `let parent_is_item = parent_id.is_owner();`? + let parent_is_item = if let Some(parent_def_id) = parent_id.as_owner() { + matches!(self.tcx.hir().krate().owners.get(parent_def_id), Some(Some(_)),) + } else { + false + }; - // Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>` - // and ban them. Type variables instantiated inside binders aren't - // well-supported at the moment, so this doesn't work. - // In the future, this should be fixed and this error should be removed. - let def = self.map.defs.get(&lifetime.hir_id).cloned(); - if let Some(Region::LateBound(_, _, def_id, _)) = def { - if let Some(def_id) = def_id.as_local() { - let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id); - // Ensure that the parent of the def is an item, not HRTB - let parent_id = self.tcx.hir().get_parent_node(hir_id); - // FIXME(cjgillot) Can this check be replaced by - // `let parent_is_item = parent_id.is_owner();`? 
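The `lifetimes.rs` hunk above is mostly a `let`-`else` restructuring; the E0657 check it preserves rejects `impl Trait` that tries to capture a lifetime bound by a higher-ranked binder. A hedged example (not from the patch), following the shape described in the comment above:

```rust
// Hypothetical code hitting the preserved error path.
trait Id<T> {}
trait Lt<'a> {}

impl<'a> Lt<'a> for () {}
impl<T> Id<T> for T {}

fn capture_hrtb_in_impl_trait() -> Box<dyn for<'a> Id<impl Lt<'a>>> {
    // error[E0657]: `impl Trait` can only capture lifetimes bound at the fn or impl level
    Box::new(())
}
```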
- let parent_is_item = - if let Some(parent_def_id) = parent_id.as_owner() { - matches!( - self.tcx.hir().krate().owners.get(parent_def_id), - Some(Some(_)), - ) - } else { - false - }; - - if !parent_is_item { - if !self.trait_definition_only { - struct_span_err!( - self.tcx.sess, - lifetime.span, - E0657, - "`impl Trait` can only capture lifetimes \ - bound at the fn or impl level" - ) - .emit(); - } - self.uninsert_lifetime_on_error(lifetime, def.unwrap()); - } - } + if !parent_is_item { + if !self.trait_definition_only { + struct_span_err!( + self.tcx.sess, + lifetime.span, + E0657, + "`impl Trait` can only capture lifetimes \ + bound at the fn or impl level" + ) + .emit(); } + self.uninsert_lifetime_on_error(lifetime, def.unwrap()); } } diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index f5b2ba8fd72..45cc64ea194 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -53,7 +53,7 @@ use rustc_middle::metadata::ModChild; use rustc_middle::middle::privacy::AccessLevels; use rustc_middle::span_bug; use rustc_middle::ty::query::Providers; -use rustc_middle::ty::{self, DefIdTree, MainDefinition, ResolverOutputs}; +use rustc_middle::ty::{self, DefIdTree, MainDefinition, RegisteredTools, ResolverOutputs}; use rustc_query_system::ich::StableHashingContext; use rustc_session::cstore::{CrateStore, MetadataLoaderDyn}; use rustc_session::lint; @@ -614,7 +614,8 @@ impl<'a> ModuleData<'a> { } } - fn def_id(&self) -> DefId { + // Public for rustdoc. + pub fn def_id(&self) -> DefId { self.opt_def_id().expect("`ModuleData::def_id` is called on a block module") } @@ -989,7 +990,7 @@ pub struct Resolver<'a> { macro_names: FxHashSet<Ident>, builtin_macros: FxHashMap<Symbol, BuiltinMacroState>, registered_attrs: FxHashSet<Ident>, - registered_tools: FxHashSet<Ident>, + registered_tools: RegisteredTools, macro_use_prelude: FxHashMap<Symbol, &'a NameBinding<'a>>, all_macros: FxHashMap<Symbol, Res>, macro_map: FxHashMap<DefId, Lrc<SyntaxExtension>>, @@ -1487,6 +1488,7 @@ impl<'a> Resolver<'a> { trait_impls: self.trait_impls, proc_macros, confused_type_with_std_module, + registered_tools: self.registered_tools, } } @@ -1511,6 +1513,7 @@ impl<'a> Resolver<'a> { trait_impls: self.trait_impls.clone(), proc_macros, confused_type_with_std_module: self.confused_type_with_std_module.clone(), + registered_tools: self.registered_tools.clone(), } } @@ -3405,6 +3408,16 @@ impl<'a> Resolver<'a> { &self.all_macros } + /// For rustdoc. + /// For local modules returns only reexports, for external modules returns all children. + pub fn module_children_or_reexports(&self, def_id: DefId) -> Vec<ModChild> { + if let Some(def_id) = def_id.as_local() { + self.reexport_map.get(&def_id).cloned().unwrap_or_default() + } else { + self.cstore().module_children_untracked(def_id, self.session) + } + } + /// Retrieves the span of the given `DefId` if `DefId` is in the local crate. 
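The resolver changes above start threading `RegisteredTools` through `ResolverOutputs` (see also the `macros.rs` hunk below). As a hedged reminder (not from the patch) of what a registered tool is at the source level, with a hypothetical tool name:

```rust
// `register_tool` is a nightly feature; `my_analysis` is made up for illustration.
#![feature(register_tool)]
#![register_tool(my_analysis)]

// The path `my_analysis::skip` resolves only because `my_analysis` is registered.
#[my_analysis::skip]
fn handled_by_an_external_tool() {}

fn main() {}
```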
#[inline] pub fn opt_span(&self, def_id: DefId) -> Option<Span> { diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index 52685ec697c..82807e2d0a2 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -23,7 +23,7 @@ use rustc_hir::def::{self, DefKind, NonMacroAttrKind}; use rustc_hir::def_id::{CrateNum, LocalDefId}; use rustc_hir::PrimTy; use rustc_middle::middle::stability; -use rustc_middle::ty; +use rustc_middle::ty::{self, RegisteredTools}; use rustc_session::lint::builtin::{LEGACY_DERIVE_HELPERS, PROC_MACRO_DERIVE_RESOLUTION_FALLBACK}; use rustc_session::lint::builtin::{SOFT_UNSTABLE, UNUSED_MACROS}; use rustc_session::lint::BuiltinLintDiagnostics; @@ -447,6 +447,10 @@ impl<'a> ResolverExpand for Resolver<'a> { fn declare_proc_macro(&mut self, id: NodeId) { self.proc_macros.push(id) } + + fn registered_tools(&self) -> &RegisteredTools { + &self.registered_tools + } } impl<'a> Resolver<'a> { diff --git a/compiler/rustc_save_analysis/src/lib.rs b/compiler/rustc_save_analysis/src/lib.rs index b95fe1b0549..570fa873a23 100644 --- a/compiler/rustc_save_analysis/src/lib.rs +++ b/compiler/rustc_save_analysis/src/lib.rs @@ -984,7 +984,7 @@ pub fn process_crate<'l, 'tcx, H: SaveHandler>( tcx.dep_graph.with_ignore(|| { info!("Dumping crate {}", cratename); - // Privacy checking requires and is done after type checking; use a + // Privacy checking must be done outside of type inference; use a // fallback in case the access levels couldn't have been correctly computed. let access_levels = match tcx.sess.compile_status() { Ok(..) => tcx.privacy_access_levels(()), diff --git a/compiler/rustc_serialize/Cargo.toml b/compiler/rustc_serialize/Cargo.toml index 49778f82253..f6b9e17e58e 100644 --- a/compiler/rustc_serialize/Cargo.toml +++ b/compiler/rustc_serialize/Cargo.toml @@ -4,7 +4,7 @@ version = "0.0.0" edition = "2021" [dependencies] -indexmap = "1" +indexmap = "1.8.0" smallvec = { version = "1.6.1", features = ["union", "may_dangle"] } [dev-dependencies] diff --git a/compiler/rustc_serialize/src/collection_impls.rs b/compiler/rustc_serialize/src/collection_impls.rs index 80a7f650188..02b28f7c626 100644 --- a/compiler/rustc_serialize/src/collection_impls.rs +++ b/compiler/rustc_serialize/src/collection_impls.rs @@ -17,15 +17,8 @@ impl<S: Encoder, A: Array<Item: Encodable<S>>> Encodable<S> for SmallVec<A> { } impl<D: Decoder, A: Array<Item: Decodable<D>>> Decodable<D> for SmallVec<A> { - fn decode(d: &mut D) -> Result<SmallVec<A>, D::Error> { - d.read_seq(|d, len| { - let mut vec = SmallVec::with_capacity(len); - // FIXME(#48994) - could just be collected into a Result<SmallVec, D::Error> - for _ in 0..len { - vec.push(d.read_seq_elt(|d| Decodable::decode(d))?); - } - Ok(vec) - }) + fn decode(d: &mut D) -> SmallVec<A> { + d.read_seq(|d, len| (0..len).map(|_| d.read_seq_elt(|d| Decodable::decode(d))).collect()) } } @@ -41,14 +34,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for LinkedList<T> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for LinkedList<T> { - fn decode(d: &mut D) -> Result<LinkedList<T>, D::Error> { - d.read_seq(|d, len| { - let mut list = LinkedList::new(); - for _ in 0..len { - list.push_back(d.read_seq_elt(|d| Decodable::decode(d))?); - } - Ok(list) - }) + fn decode(d: &mut D) -> LinkedList<T> { + d.read_seq(|d, len| (0..len).map(|_| d.read_seq_elt(|d| Decodable::decode(d))).collect()) } } @@ -64,14 +51,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for VecDeque<T> { } impl<D: Decoder, 
T: Decodable<D>> Decodable<D> for VecDeque<T> { - fn decode(d: &mut D) -> Result<VecDeque<T>, D::Error> { - d.read_seq(|d, len| { - let mut deque: VecDeque<T> = VecDeque::with_capacity(len); - for _ in 0..len { - deque.push_back(d.read_seq_elt(|d| Decodable::decode(d))?); - } - Ok(deque) - }) + fn decode(d: &mut D) -> VecDeque<T> { + d.read_seq(|d, len| (0..len).map(|_| d.read_seq_elt(|d| Decodable::decode(d))).collect()) } } @@ -96,15 +77,15 @@ where K: Decodable<D> + PartialEq + Ord, V: Decodable<D>, { - fn decode(d: &mut D) -> Result<BTreeMap<K, V>, D::Error> { + fn decode(d: &mut D) -> BTreeMap<K, V> { d.read_map(|d, len| { let mut map = BTreeMap::new(); for _ in 0..len { - let key = d.read_map_elt_key(|d| Decodable::decode(d))?; - let val = d.read_map_elt_val(|d| Decodable::decode(d))?; + let key = d.read_map_elt_key(|d| Decodable::decode(d)); + let val = d.read_map_elt_val(|d| Decodable::decode(d)); map.insert(key, val); } - Ok(map) + map }) } } @@ -127,13 +108,13 @@ impl<D: Decoder, T> Decodable<D> for BTreeSet<T> where T: Decodable<D> + PartialEq + Ord, { - fn decode(d: &mut D) -> Result<BTreeSet<T>, D::Error> { + fn decode(d: &mut D) -> BTreeSet<T> { d.read_seq(|d, len| { let mut set = BTreeSet::new(); for _ in 0..len { - set.insert(d.read_seq_elt(|d| Decodable::decode(d))?); + set.insert(d.read_seq_elt(|d| Decodable::decode(d))); } - Ok(set) + set }) } } @@ -161,16 +142,16 @@ where V: Decodable<D>, S: BuildHasher + Default, { - fn decode(d: &mut D) -> Result<HashMap<K, V, S>, D::Error> { + fn decode(d: &mut D) -> HashMap<K, V, S> { d.read_map(|d, len| { let state = Default::default(); let mut map = HashMap::with_capacity_and_hasher(len, state); for _ in 0..len { - let key = d.read_map_elt_key(|d| Decodable::decode(d))?; - let val = d.read_map_elt_val(|d| Decodable::decode(d))?; + let key = d.read_map_elt_key(|d| Decodable::decode(d)); + let val = d.read_map_elt_val(|d| Decodable::decode(d)); map.insert(key, val); } - Ok(map) + map }) } } @@ -205,14 +186,14 @@ where T: Decodable<D> + Hash + Eq, S: BuildHasher + Default, { - fn decode(d: &mut D) -> Result<HashSet<T, S>, D::Error> { + fn decode(d: &mut D) -> HashSet<T, S> { d.read_seq(|d, len| { let state = Default::default(); let mut set = HashSet::with_capacity_and_hasher(len, state); for _ in 0..len { - set.insert(d.read_seq_elt(|d| Decodable::decode(d))?); + set.insert(d.read_seq_elt(|d| Decodable::decode(d))); } - Ok(set) + set }) } } @@ -240,16 +221,16 @@ where V: Decodable<D>, S: BuildHasher + Default, { - fn decode(d: &mut D) -> Result<indexmap::IndexMap<K, V, S>, D::Error> { + fn decode(d: &mut D) -> indexmap::IndexMap<K, V, S> { d.read_map(|d, len| { let state = Default::default(); let mut map = indexmap::IndexMap::with_capacity_and_hasher(len, state); for _ in 0..len { - let key = d.read_map_elt_key(|d| Decodable::decode(d))?; - let val = d.read_map_elt_val(|d| Decodable::decode(d))?; + let key = d.read_map_elt_key(|d| Decodable::decode(d)); + let val = d.read_map_elt_val(|d| Decodable::decode(d)); map.insert(key, val); } - Ok(map) + map }) } } @@ -274,14 +255,14 @@ where T: Decodable<D> + Hash + Eq, S: BuildHasher + Default, { - fn decode(d: &mut D) -> Result<indexmap::IndexSet<T, S>, D::Error> { + fn decode(d: &mut D) -> indexmap::IndexSet<T, S> { d.read_seq(|d, len| { let state = Default::default(); let mut set = indexmap::IndexSet::with_capacity_and_hasher(len, state); for _ in 0..len { - set.insert(d.read_seq_elt(|d| Decodable::decode(d))?); + set.insert(d.read_seq_elt(|d| Decodable::decode(d))); } - Ok(set) + set }) 
} } @@ -294,9 +275,9 @@ impl<E: Encoder, T: Encodable<E>> Encodable<E> for Rc<[T]> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Rc<[T]> { - fn decode(d: &mut D) -> Result<Rc<[T]>, D::Error> { - let vec: Vec<T> = Decodable::decode(d)?; - Ok(vec.into()) + fn decode(d: &mut D) -> Rc<[T]> { + let vec: Vec<T> = Decodable::decode(d); + vec.into() } } @@ -308,8 +289,8 @@ impl<E: Encoder, T: Encodable<E>> Encodable<E> for Arc<[T]> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Arc<[T]> { - fn decode(d: &mut D) -> Result<Arc<[T]>, D::Error> { - let vec: Vec<T> = Decodable::decode(d)?; - Ok(vec.into()) + fn decode(d: &mut D) -> Arc<[T]> { + let vec: Vec<T> = Decodable::decode(d); + vec.into() } } diff --git a/compiler/rustc_serialize/src/json.rs b/compiler/rustc_serialize/src/json.rs index cb9df3c3389..044de8e4e24 100644 --- a/compiler/rustc_serialize/src/json.rs +++ b/compiler/rustc_serialize/src/json.rs @@ -89,7 +89,7 @@ //! let encoded = json::encode(&object).unwrap(); //! //! // Deserialize using `json::decode` -//! let decoded: TestStruct = json::decode(&encoded[..]).unwrap(); +//! let decoded: TestStruct = json::decode(&encoded[..]); //! ``` //! //! ## Using the `ToJson` trait @@ -173,7 +173,7 @@ //! let json_str: String = json_obj.to_string(); //! //! // Deserialize like before -//! let decoded: TestStruct = json::decode(&json_str).unwrap(); +//! let decoded: TestStruct = json::decode(&json_str); //! ``` use self::DecoderError::*; @@ -265,6 +265,12 @@ pub enum DecoderError { ApplicationError(string::String), } +macro_rules! bad { + ($e:expr) => {{ + panic!("json decode error: {:?}", $e); + }}; +} + #[derive(Copy, Clone, Debug)] pub enum EncoderError { FmtError(fmt::Error), @@ -295,10 +301,10 @@ pub fn error_str(error: ErrorCode) -> &'static str { } /// Shortcut function to decode a JSON `&str` into an object -pub fn decode<T: crate::Decodable<Decoder>>(s: &str) -> DecodeResult<T> { +pub fn decode<T: crate::Decodable<Decoder>>(s: &str) -> T { let json = match from_str(s) { Ok(x) => x, - Err(e) => return Err(ParseError(e)), + Err(e) => bad!(ParseError(e)), }; let mut decoder = Decoder::new(json); @@ -334,15 +340,6 @@ impl fmt::Display for ParserError { } } -impl fmt::Display for DecoderError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // FIXME this should be a nicer error - fmt::Debug::fmt(self, f) - } -} - -impl std::error::Error for DecoderError {} - impl fmt::Display for EncoderError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME this should be a nicer error @@ -2206,41 +2203,39 @@ impl Decoder { macro_rules! expect { ($e:expr, Null) => {{ match $e { - Json::Null => Ok(()), - other => Err(ExpectedError("Null".to_owned(), other.to_string())), + Json::Null => (), + other => bad!(ExpectedError("Null".to_owned(), other.to_string())), } }}; ($e:expr, $t:ident) => {{ match $e { - Json::$t(v) => Ok(v), - other => Err(ExpectedError(stringify!($t).to_owned(), other.to_string())), + Json::$t(v) => v, + other => bad!(ExpectedError(stringify!($t).to_owned(), other.to_string())), } }}; } macro_rules! read_primitive { ($name:ident, $ty:ty) => { - fn $name(&mut self) -> DecodeResult<$ty> { + fn $name(&mut self) -> $ty { match self.pop() { - Json::I64(f) => Ok(f as $ty), - Json::U64(f) => Ok(f as $ty), - Json::F64(f) => Err(ExpectedError("Integer".to_owned(), f.to_string())), + Json::I64(f) => f as $ty, + Json::U64(f) => f as $ty, + Json::F64(f) => bad!(ExpectedError("Integer".to_owned(), f.to_string())), // re: #12967.. 
a type w/ numeric keys (ie HashMap<usize, V> etc) // is going to have a string here, as per JSON spec. Json::String(s) => match s.parse().ok() { - Some(f) => Ok(f), - None => Err(ExpectedError("Number".to_owned(), s)), + Some(f) => f, + None => bad!(ExpectedError("Number".to_owned(), s)), }, - value => Err(ExpectedError("Number".to_owned(), value.to_string())), + value => bad!(ExpectedError("Number".to_owned(), value.to_string())), } } }; } impl crate::Decoder for Decoder { - type Error = DecoderError; - - fn read_nil(&mut self) -> DecodeResult<()> { + fn read_unit(&mut self) -> () { expect!(self.pop(), Null) } @@ -2257,156 +2252,150 @@ impl crate::Decoder for Decoder { read_primitive! { read_i64, i64 } read_primitive! { read_i128, i128 } - fn read_f32(&mut self) -> DecodeResult<f32> { - self.read_f64().map(|x| x as f32) + fn read_f32(&mut self) -> f32 { + self.read_f64() as f32 } - fn read_f64(&mut self) -> DecodeResult<f64> { + fn read_f64(&mut self) -> f64 { match self.pop() { - Json::I64(f) => Ok(f as f64), - Json::U64(f) => Ok(f as f64), - Json::F64(f) => Ok(f), + Json::I64(f) => f as f64, + Json::U64(f) => f as f64, + Json::F64(f) => f, Json::String(s) => { // re: #12967.. a type w/ numeric keys (ie HashMap<usize, V> etc) // is going to have a string here, as per JSON spec. match s.parse().ok() { - Some(f) => Ok(f), - None => Err(ExpectedError("Number".to_owned(), s)), + Some(f) => f, + None => bad!(ExpectedError("Number".to_owned(), s)), } } - Json::Null => Ok(f64::NAN), - value => Err(ExpectedError("Number".to_owned(), value.to_string())), + Json::Null => f64::NAN, + value => bad!(ExpectedError("Number".to_owned(), value.to_string())), } } - fn read_bool(&mut self) -> DecodeResult<bool> { + fn read_bool(&mut self) -> bool { expect!(self.pop(), Boolean) } - fn read_char(&mut self) -> DecodeResult<char> { - let s = self.read_str()?; - { - let mut it = s.chars(); - if let (Some(c), None) = (it.next(), it.next()) { - // exactly one character - return Ok(c); - } + fn read_char(&mut self) -> char { + let s = self.read_str(); + let mut it = s.chars(); + if let (Some(c), None) = (it.next(), it.next()) { + // exactly one character + return c; } - Err(ExpectedError("single character string".to_owned(), s.to_string())) + bad!(ExpectedError("single character string".to_owned(), s.to_string())); } - fn read_str(&mut self) -> DecodeResult<Cow<'_, str>> { - expect!(self.pop(), String).map(Cow::Owned) + fn read_str(&mut self) -> Cow<'_, str> { + Cow::Owned(expect!(self.pop(), String)) } - fn read_raw_bytes_into(&mut self, s: &mut [u8]) -> Result<(), Self::Error> { + fn read_raw_bytes_into(&mut self, s: &mut [u8]) { for c in s.iter_mut() { - *c = self.read_u8()?; + *c = self.read_u8(); } - Ok(()) } - fn read_enum<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_enum<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { f(self) } - fn read_enum_variant<T, F>(&mut self, names: &[&str], mut f: F) -> DecodeResult<T> + fn read_enum_variant<T, F>(&mut self, names: &[&str], mut f: F) -> T where - F: FnMut(&mut Decoder, usize) -> DecodeResult<T>, + F: FnMut(&mut Decoder, usize) -> T, { let name = match self.pop() { Json::String(s) => s, Json::Object(mut o) => { let n = match o.remove("variant") { Some(Json::String(s)) => s, - Some(val) => return Err(ExpectedError("String".to_owned(), val.to_string())), - None => return Err(MissingFieldError("variant".to_owned())), + Some(val) => bad!(ExpectedError("String".to_owned(), val.to_string())), + None 
=> bad!(MissingFieldError("variant".to_owned())), }; match o.remove("fields") { Some(Json::Array(l)) => { self.stack.extend(l.into_iter().rev()); } - Some(val) => return Err(ExpectedError("Array".to_owned(), val.to_string())), - None => return Err(MissingFieldError("fields".to_owned())), + Some(val) => bad!(ExpectedError("Array".to_owned(), val.to_string())), + None => bad!(MissingFieldError("fields".to_owned())), } n } - json => return Err(ExpectedError("String or Object".to_owned(), json.to_string())), + json => bad!(ExpectedError("String or Object".to_owned(), json.to_string())), }; let idx = match names.iter().position(|n| *n == &name[..]) { Some(idx) => idx, - None => return Err(UnknownVariantError(name)), + None => bad!(UnknownVariantError(name)), }; f(self, idx) } - fn read_enum_variant_arg<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_enum_variant_arg<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { f(self) } - fn read_struct<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_struct<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { - let value = f(self)?; + let value = f(self); self.pop(); - Ok(value) + value } - fn read_struct_field<T, F>(&mut self, name: &str, f: F) -> DecodeResult<T> + fn read_struct_field<T, F>(&mut self, name: &str, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { - let mut obj = expect!(self.pop(), Object)?; + let mut obj = expect!(self.pop(), Object); let value = match obj.remove(name) { None => { // Add a Null and try to parse it as an Option<_> // to get None as a default value. self.stack.push(Json::Null); - match f(self) { - Ok(x) => x, - Err(_) => return Err(MissingFieldError(name.to_string())), - } + f(self) } Some(json) => { self.stack.push(json); - f(self)? 
+ f(self) } }; self.stack.push(Json::Object(obj)); - Ok(value) + value } - fn read_tuple<T, F>(&mut self, tuple_len: usize, f: F) -> DecodeResult<T> + fn read_tuple<T, F>(&mut self, tuple_len: usize, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { self.read_seq(move |d, len| { if len == tuple_len { f(d) } else { - Err(ExpectedError(format!("Tuple{}", tuple_len), format!("Tuple{}", len))) + bad!(ExpectedError(format!("Tuple{}", tuple_len), format!("Tuple{}", len))); } }) } - fn read_tuple_arg<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_tuple_arg<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { self.read_seq_elt(f) } - fn read_option<T, F>(&mut self, mut f: F) -> DecodeResult<T> + fn read_option<T, F>(&mut self, mut f: F) -> T where - F: FnMut(&mut Decoder, bool) -> DecodeResult<T>, + F: FnMut(&mut Decoder, bool) -> T, { match self.pop() { Json::Null => f(self, false), @@ -2417,28 +2406,28 @@ impl crate::Decoder for Decoder { } } - fn read_seq<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_seq<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder, usize) -> DecodeResult<T>, + F: FnOnce(&mut Decoder, usize) -> T, { - let array = expect!(self.pop(), Array)?; + let array = expect!(self.pop(), Array); let len = array.len(); self.stack.extend(array.into_iter().rev()); f(self, len) } - fn read_seq_elt<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_seq_elt<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { f(self) } - fn read_map<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_map<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder, usize) -> DecodeResult<T>, + F: FnOnce(&mut Decoder, usize) -> T, { - let obj = expect!(self.pop(), Object)?; + let obj = expect!(self.pop(), Object); let len = obj.len(); for (key, value) in obj { self.stack.push(value); @@ -2447,23 +2436,19 @@ impl crate::Decoder for Decoder { f(self, len) } - fn read_map_elt_key<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_map_elt_key<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { f(self) } - fn read_map_elt_val<T, F>(&mut self, f: F) -> DecodeResult<T> + fn read_map_elt_val<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Decoder) -> DecodeResult<T>, + F: FnOnce(&mut Decoder) -> T, { f(self) } - - fn error(&mut self, err: &str) -> DecoderError { - ApplicationError(err.to_string()) - } } /// A trait for converting values to JSON diff --git a/compiler/rustc_serialize/src/opaque.rs b/compiler/rustc_serialize/src/opaque.rs index 078237801be..c272c687a7e 100644 --- a/compiler/rustc_serialize/src/opaque.rs +++ b/compiler/rustc_serialize/src/opaque.rs @@ -560,134 +560,126 @@ impl<'a> Decoder<'a> { } macro_rules! 
read_leb128 { - ($dec:expr, $fun:ident) => {{ Ok(leb128::$fun($dec.data, &mut $dec.position)) }}; + ($dec:expr, $fun:ident) => {{ leb128::$fun($dec.data, &mut $dec.position) }}; } impl<'a> serialize::Decoder for Decoder<'a> { - type Error = String; - #[inline] - fn read_nil(&mut self) -> Result<(), Self::Error> { - Ok(()) + fn read_unit(&mut self) -> () { + () } #[inline] - fn read_u128(&mut self) -> Result<u128, Self::Error> { + fn read_u128(&mut self) -> u128 { read_leb128!(self, read_u128_leb128) } #[inline] - fn read_u64(&mut self) -> Result<u64, Self::Error> { + fn read_u64(&mut self) -> u64 { read_leb128!(self, read_u64_leb128) } #[inline] - fn read_u32(&mut self) -> Result<u32, Self::Error> { + fn read_u32(&mut self) -> u32 { read_leb128!(self, read_u32_leb128) } #[inline] - fn read_u16(&mut self) -> Result<u16, Self::Error> { + fn read_u16(&mut self) -> u16 { let bytes = [self.data[self.position], self.data[self.position + 1]]; let value = u16::from_le_bytes(bytes); self.position += 2; - Ok(value) + value } #[inline] - fn read_u8(&mut self) -> Result<u8, Self::Error> { + fn read_u8(&mut self) -> u8 { let value = self.data[self.position]; self.position += 1; - Ok(value) + value } #[inline] - fn read_usize(&mut self) -> Result<usize, Self::Error> { + fn read_usize(&mut self) -> usize { read_leb128!(self, read_usize_leb128) } #[inline] - fn read_i128(&mut self) -> Result<i128, Self::Error> { + fn read_i128(&mut self) -> i128 { read_leb128!(self, read_i128_leb128) } #[inline] - fn read_i64(&mut self) -> Result<i64, Self::Error> { + fn read_i64(&mut self) -> i64 { read_leb128!(self, read_i64_leb128) } #[inline] - fn read_i32(&mut self) -> Result<i32, Self::Error> { + fn read_i32(&mut self) -> i32 { read_leb128!(self, read_i32_leb128) } #[inline] - fn read_i16(&mut self) -> Result<i16, Self::Error> { + fn read_i16(&mut self) -> i16 { let bytes = [self.data[self.position], self.data[self.position + 1]]; let value = i16::from_le_bytes(bytes); self.position += 2; - Ok(value) + value } #[inline] - fn read_i8(&mut self) -> Result<i8, Self::Error> { + fn read_i8(&mut self) -> i8 { let as_u8 = self.data[self.position]; self.position += 1; - unsafe { Ok(::std::mem::transmute(as_u8)) } + unsafe { ::std::mem::transmute(as_u8) } } #[inline] - fn read_isize(&mut self) -> Result<isize, Self::Error> { + fn read_isize(&mut self) -> isize { read_leb128!(self, read_isize_leb128) } #[inline] - fn read_bool(&mut self) -> Result<bool, Self::Error> { - let value = self.read_u8()?; - Ok(value != 0) + fn read_bool(&mut self) -> bool { + let value = self.read_u8(); + value != 0 } #[inline] - fn read_f64(&mut self) -> Result<f64, Self::Error> { - let bits = self.read_u64()?; - Ok(f64::from_bits(bits)) + fn read_f64(&mut self) -> f64 { + let bits = self.read_u64(); + f64::from_bits(bits) } #[inline] - fn read_f32(&mut self) -> Result<f32, Self::Error> { - let bits = self.read_u32()?; - Ok(f32::from_bits(bits)) + fn read_f32(&mut self) -> f32 { + let bits = self.read_u32(); + f32::from_bits(bits) } #[inline] - fn read_char(&mut self) -> Result<char, Self::Error> { - let bits = self.read_u32()?; - Ok(std::char::from_u32(bits).unwrap()) + fn read_char(&mut self) -> char { + let bits = self.read_u32(); + std::char::from_u32(bits).unwrap() } #[inline] - fn read_str(&mut self) -> Result<Cow<'_, str>, Self::Error> { - let len = self.read_usize()?; + fn read_str(&mut self) -> Cow<'_, str> { + let len = self.read_usize(); let sentinel = self.data[self.position + len]; assert!(sentinel == STR_SENTINEL); let s = unsafe { 
std::str::from_utf8_unchecked(&self.data[self.position..self.position + len]) }; self.position += len + 1; - Ok(Cow::Borrowed(s)) - } - - #[inline] - fn error(&mut self, err: &str) -> Self::Error { - err.to_string() + Cow::Borrowed(s) } #[inline] - fn read_raw_bytes_into(&mut self, s: &mut [u8]) -> Result<(), String> { + fn read_raw_bytes_into(&mut self, s: &mut [u8]) { let start = self.position; self.position += s.len(); s.copy_from_slice(&self.data[start..self.position]); - Ok(()) } } @@ -715,9 +707,9 @@ impl serialize::Encodable<FileEncoder> for [u8] { // Specialize decoding `Vec<u8>`. This specialization also applies to decoding `Box<[u8]>`s, etc., // since the default implementations call `decode` to produce a `Vec<u8>` internally. impl<'a> serialize::Decodable<Decoder<'a>> for Vec<u8> { - fn decode(d: &mut Decoder<'a>) -> Result<Self, String> { - let len = serialize::Decoder::read_usize(d)?; - Ok(d.read_raw_bytes(len).to_owned()) + fn decode(d: &mut Decoder<'a>) -> Self { + let len = serialize::Decoder::read_usize(d); + d.read_raw_bytes(len).to_owned() } } @@ -752,13 +744,13 @@ impl serialize::Encodable<FileEncoder> for IntEncodedWithFixedSize { impl<'a> serialize::Decodable<Decoder<'a>> for IntEncodedWithFixedSize { #[inline] - fn decode(decoder: &mut Decoder<'a>) -> Result<IntEncodedWithFixedSize, String> { + fn decode(decoder: &mut Decoder<'a>) -> IntEncodedWithFixedSize { let _start_pos = decoder.position(); let bytes = decoder.read_raw_bytes(IntEncodedWithFixedSize::ENCODED_SIZE); let _end_pos = decoder.position(); debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE); let value = u64::from_le_bytes(bytes.try_into().unwrap()); - Ok(IntEncodedWithFixedSize(value)) + IntEncodedWithFixedSize(value) } } diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs index 96a2231b590..a6172403fd6 100644 --- a/compiler/rustc_serialize/src/serialize.rs +++ b/compiler/rustc_serialize/src/serialize.rs @@ -173,144 +173,145 @@ pub trait Encoder { } } +// Note: all the methods in this trait are infallible, which may be surprising. +// They used to be fallible (i.e. return a `Result`) but many of the impls just +// panicked when something went wrong, and for the cases that didn't the +// top-level invocation would also just panic on failure. Switching to +// infallibility made things faster and lots of code a little simpler and more +// concise. 
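[Editor's aside, not part of the patch: to make the note above concrete, here is a minimal hand-written sketch of how a `Decodable` impl reads under the old fallible API versus the new infallible one, using the `read_struct`/`read_struct_field` methods declared just below. `Point` and its fields are invented for illustration; the sketch assumes the in-tree `rustc_serialize` crate as modified by this patch, and mirrors the `DefId` impl changed later in this diff. With the new API, a corrupt input panics inside the decoder (see the `bad!` macro in the JSON decoder above) instead of bubbling up an `Err`.]

use rustc_serialize::{Decodable, Decoder};

struct Point {
    x: u32,
    y: u32,
}

// Old style: every read returned a `Result` that had to be threaded through with `?`.
//
// impl<D: Decoder> Decodable<D> for Point {
//     fn decode(d: &mut D) -> Result<Point, D::Error> {
//         d.read_struct(|d| {
//             Ok(Point {
//                 x: d.read_struct_field("x", Decodable::decode)?,
//                 y: d.read_struct_field("y", Decodable::decode)?,
//             })
//         })
//     }
// }

// New style: the reads are infallible, so the impl is a plain expression with no
// error plumbing; decoding failures panic inside the decoder itself.
impl<D: Decoder> Decodable<D> for Point {
    fn decode(d: &mut D) -> Point {
        d.read_struct(|d| Point {
            x: d.read_struct_field("x", Decodable::decode),
            y: d.read_struct_field("y", Decodable::decode),
        })
    }
}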
pub trait Decoder { - type Error; - // Primitive types: - fn read_nil(&mut self) -> Result<(), Self::Error>; - fn read_usize(&mut self) -> Result<usize, Self::Error>; - fn read_u128(&mut self) -> Result<u128, Self::Error>; - fn read_u64(&mut self) -> Result<u64, Self::Error>; - fn read_u32(&mut self) -> Result<u32, Self::Error>; - fn read_u16(&mut self) -> Result<u16, Self::Error>; - fn read_u8(&mut self) -> Result<u8, Self::Error>; - fn read_isize(&mut self) -> Result<isize, Self::Error>; - fn read_i128(&mut self) -> Result<i128, Self::Error>; - fn read_i64(&mut self) -> Result<i64, Self::Error>; - fn read_i32(&mut self) -> Result<i32, Self::Error>; - fn read_i16(&mut self) -> Result<i16, Self::Error>; - fn read_i8(&mut self) -> Result<i8, Self::Error>; - fn read_bool(&mut self) -> Result<bool, Self::Error>; - fn read_f64(&mut self) -> Result<f64, Self::Error>; - fn read_f32(&mut self) -> Result<f32, Self::Error>; - fn read_char(&mut self) -> Result<char, Self::Error>; - fn read_str(&mut self) -> Result<Cow<'_, str>, Self::Error>; - fn read_raw_bytes_into(&mut self, s: &mut [u8]) -> Result<(), Self::Error>; + fn read_unit(&mut self) -> (); + fn read_usize(&mut self) -> usize; + fn read_u128(&mut self) -> u128; + fn read_u64(&mut self) -> u64; + fn read_u32(&mut self) -> u32; + fn read_u16(&mut self) -> u16; + fn read_u8(&mut self) -> u8; + fn read_isize(&mut self) -> isize; + fn read_i128(&mut self) -> i128; + fn read_i64(&mut self) -> i64; + fn read_i32(&mut self) -> i32; + fn read_i16(&mut self) -> i16; + fn read_i8(&mut self) -> i8; + fn read_bool(&mut self) -> bool; + fn read_f64(&mut self) -> f64; + fn read_f32(&mut self) -> f32; + fn read_char(&mut self) -> char; + fn read_str(&mut self) -> Cow<'_, str>; + fn read_raw_bytes_into(&mut self, s: &mut [u8]); // Compound types: #[inline] - fn read_enum<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_enum<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } #[inline] - fn read_enum_variant<T, F>(&mut self, _names: &[&str], mut f: F) -> Result<T, Self::Error> + fn read_enum_variant<T, F>(&mut self, _names: &[&str], mut f: F) -> T where - F: FnMut(&mut Self, usize) -> Result<T, Self::Error>, + F: FnMut(&mut Self, usize) -> T, { - let disr = self.read_usize()?; + let disr = self.read_usize(); f(self, disr) } #[inline] - fn read_enum_variant_arg<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_enum_variant_arg<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } #[inline] - fn read_struct<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_struct<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } #[inline] - fn read_struct_field<T, F>(&mut self, _f_name: &str, f: F) -> Result<T, Self::Error> + fn read_struct_field<T, F>(&mut self, _f_name: &str, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } #[inline] - fn read_tuple<T, F>(&mut self, _len: usize, f: F) -> Result<T, Self::Error> + fn read_tuple<T, F>(&mut self, _len: usize, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } #[inline] - fn read_tuple_arg<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_tuple_arg<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { 
f(self) } // Specialized types: - fn read_option<T, F>(&mut self, mut f: F) -> Result<T, Self::Error> + fn read_option<T, F>(&mut self, mut f: F) -> T where - F: FnMut(&mut Self, bool) -> Result<T, Self::Error>, + F: FnMut(&mut Self, bool) -> T, { self.read_enum(move |this| { this.read_enum_variant(&["None", "Some"], move |this, idx| match idx { 0 => f(this, false), 1 => f(this, true), - _ => Err(this.error("read_option: expected 0 for None or 1 for Some")), + _ => panic!("read_option: expected 0 for None or 1 for Some"), }) }) } - fn read_seq<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_seq<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self, usize) -> Result<T, Self::Error>, + F: FnOnce(&mut Self, usize) -> T, { - let len = self.read_usize()?; + let len = self.read_usize(); f(self, len) } #[inline] - fn read_seq_elt<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_seq_elt<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } - fn read_map<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_map<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self, usize) -> Result<T, Self::Error>, + F: FnOnce(&mut Self, usize) -> T, { - let len = self.read_usize()?; + let len = self.read_usize(); f(self, len) } #[inline] - fn read_map_elt_key<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_map_elt_key<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } #[inline] - fn read_map_elt_val<T, F>(&mut self, f: F) -> Result<T, Self::Error> + fn read_map_elt_val<T, F>(&mut self, f: F) -> T where - F: FnOnce(&mut Self) -> Result<T, Self::Error>, + F: FnOnce(&mut Self) -> T, { f(self) } - - // Failure - fn error(&mut self, err: &str) -> Self::Error; } /// Trait for types that can be serialized @@ -340,7 +341,7 @@ pub trait Encodable<S: Encoder> { /// * `TyDecodable` should be used for types that are only serialized in crate /// metadata or the incremental cache. This is most types in `rustc_middle`. pub trait Decodable<D: Decoder>: Sized { - fn decode(d: &mut D) -> Result<Self, D::Error>; + fn decode(d: &mut D) -> Self; } macro_rules! direct_serialize_impls { @@ -353,7 +354,7 @@ macro_rules! direct_serialize_impls { } impl<D: Decoder> Decodable<D> for $ty { - fn decode(d: &mut D) -> Result<$ty, D::Error> { + fn decode(d: &mut D) -> $ty { d.$read_method() } } @@ -387,7 +388,7 @@ impl<S: Encoder> Encodable<S> for ! { } impl<D: Decoder> Decodable<D> for ! { - fn decode(_d: &mut D) -> Result<!, D::Error> { + fn decode(_d: &mut D) -> ! 
{ unreachable!() } } @@ -399,8 +400,8 @@ impl<S: Encoder> Encodable<S> for ::std::num::NonZeroU32 { } impl<D: Decoder> Decodable<D> for ::std::num::NonZeroU32 { - fn decode(d: &mut D) -> Result<Self, D::Error> { - d.read_u32().map(|d| ::std::num::NonZeroU32::new(d).unwrap()) + fn decode(d: &mut D) -> Self { + ::std::num::NonZeroU32::new(d.read_u32()).unwrap() } } @@ -423,8 +424,8 @@ impl<S: Encoder> Encodable<S> for String { } impl<D: Decoder> Decodable<D> for String { - fn decode(d: &mut D) -> Result<String, D::Error> { - Ok(d.read_str()?.into_owned()) + fn decode(d: &mut D) -> String { + d.read_str().into_owned() } } @@ -435,8 +436,8 @@ impl<S: Encoder> Encodable<S> for () { } impl<D: Decoder> Decodable<D> for () { - fn decode(d: &mut D) -> Result<(), D::Error> { - d.read_nil() + fn decode(d: &mut D) -> () { + d.read_unit() } } @@ -447,16 +448,16 @@ impl<S: Encoder, T> Encodable<S> for PhantomData<T> { } impl<D: Decoder, T> Decodable<D> for PhantomData<T> { - fn decode(d: &mut D) -> Result<PhantomData<T>, D::Error> { - d.read_nil()?; - Ok(PhantomData) + fn decode(d: &mut D) -> PhantomData<T> { + d.read_unit(); + PhantomData } } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Box<[T]> { - fn decode(d: &mut D) -> Result<Box<[T]>, D::Error> { - let v: Vec<T> = Decodable::decode(d)?; - Ok(v.into_boxed_slice()) + fn decode(d: &mut D) -> Box<[T]> { + let v: Vec<T> = Decodable::decode(d); + v.into_boxed_slice() } } @@ -467,8 +468,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for Rc<T> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Rc<T> { - fn decode(d: &mut D) -> Result<Rc<T>, D::Error> { - Ok(Rc::new(Decodable::decode(d)?)) + fn decode(d: &mut D) -> Rc<T> { + Rc::new(Decodable::decode(d)) } } @@ -491,13 +492,22 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for Vec<T> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> { - default fn decode(d: &mut D) -> Result<Vec<T>, D::Error> { + default fn decode(d: &mut D) -> Vec<T> { d.read_seq(|d, len| { - let mut v = Vec::with_capacity(len); - for _ in 0..len { - v.push(d.read_seq_elt(|d| Decodable::decode(d))?); + // SAFETY: we set the capacity in advance, only write elements, and + // only set the length at the end once the writing has succeeded. 
+ let mut vec = Vec::with_capacity(len); + unsafe { + let ptr: *mut T = vec.as_mut_ptr(); + for i in 0..len { + std::ptr::write( + ptr.offset(i as isize), + d.read_seq_elt(|d| Decodable::decode(d)), + ); + } + vec.set_len(len); } - Ok(v) + vec }) } } @@ -510,14 +520,14 @@ impl<S: Encoder, T: Encodable<S>, const N: usize> Encodable<S> for [T; N] { } impl<D: Decoder, const N: usize> Decodable<D> for [u8; N] { - fn decode(d: &mut D) -> Result<[u8; N], D::Error> { + fn decode(d: &mut D) -> [u8; N] { d.read_seq(|d, len| { assert!(len == N); let mut v = [0u8; N]; for i in 0..len { - v[i] = d.read_seq_elt(|d| Decodable::decode(d))?; + v[i] = d.read_seq_elt(|d| Decodable::decode(d)); } - Ok(v) + v }) } } @@ -536,9 +546,9 @@ impl<D: Decoder, T: Decodable<D> + ToOwned> Decodable<D> for Cow<'static, [T]> where [T]: ToOwned<Owned = Vec<T>>, { - fn decode(d: &mut D) -> Result<Cow<'static, [T]>, D::Error> { - let v: Vec<T> = Decodable::decode(d)?; - Ok(Cow::Owned(v)) + fn decode(d: &mut D) -> Cow<'static, [T]> { + let v: Vec<T> = Decodable::decode(d); + Cow::Owned(v) } } @@ -552,8 +562,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for Option<T> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Option<T> { - fn decode(d: &mut D) -> Result<Option<T>, D::Error> { - d.read_option(|d, b| if b { Ok(Some(Decodable::decode(d)?)) } else { Ok(None) }) + fn decode(d: &mut D) -> Option<T> { + d.read_option(|d, b| if b { Some(Decodable::decode(d)) } else { None }) } } @@ -571,17 +581,12 @@ impl<S: Encoder, T1: Encodable<S>, T2: Encodable<S>> Encodable<S> for Result<T1, } impl<D: Decoder, T1: Decodable<D>, T2: Decodable<D>> Decodable<D> for Result<T1, T2> { - fn decode(d: &mut D) -> Result<Result<T1, T2>, D::Error> { + fn decode(d: &mut D) -> Result<T1, T2> { d.read_enum(|d| { d.read_enum_variant(&["Ok", "Err"], |d, disr| match disr { - 0 => Ok(Ok(d.read_enum_variant_arg(|d| T1::decode(d))?)), - 1 => Ok(Err(d.read_enum_variant_arg(|d| T2::decode(d))?)), - _ => { - panic!( - "Encountered invalid discriminant while \ - decoding `Result`." - ); - } + 0 => Ok(d.read_enum_variant_arg(|d| T1::decode(d))), + 1 => Err(d.read_enum_variant_arg(|d| T2::decode(d))), + _ => panic!("Encountered invalid discriminant while decoding `Result`."), }) }) } @@ -609,13 +614,13 @@ macro_rules! 
tuple { ( $($name:ident,)+ ) => ( impl<D: Decoder, $($name: Decodable<D>),+> Decodable<D> for ($($name,)+) { #[allow(non_snake_case)] - fn decode(d: &mut D) -> Result<($($name,)+), D::Error> { + fn decode(d: &mut D) -> ($($name,)+) { let len: usize = count!($($name)+); d.read_tuple(len, |d| { - let ret = ($(d.read_tuple_arg(|d| -> Result<$name, D::Error> { + let ret = ($(d.read_tuple_arg(|d| -> $name { Decodable::decode(d) - })?,)+); - Ok(ret) + }),)+); + ret }) } } @@ -651,9 +656,9 @@ impl<S: Encoder> Encodable<S> for path::PathBuf { } impl<D: Decoder> Decodable<D> for path::PathBuf { - fn decode(d: &mut D) -> Result<path::PathBuf, D::Error> { - let bytes: String = Decodable::decode(d)?; - Ok(path::PathBuf::from(bytes)) + fn decode(d: &mut D) -> path::PathBuf { + let bytes: String = Decodable::decode(d); + path::PathBuf::from(bytes) } } @@ -664,8 +669,8 @@ impl<S: Encoder, T: Encodable<S> + Copy> Encodable<S> for Cell<T> { } impl<D: Decoder, T: Decodable<D> + Copy> Decodable<D> for Cell<T> { - fn decode(d: &mut D) -> Result<Cell<T>, D::Error> { - Ok(Cell::new(Decodable::decode(d)?)) + fn decode(d: &mut D) -> Cell<T> { + Cell::new(Decodable::decode(d)) } } @@ -681,8 +686,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for RefCell<T> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for RefCell<T> { - fn decode(d: &mut D) -> Result<RefCell<T>, D::Error> { - Ok(RefCell::new(Decodable::decode(d)?)) + fn decode(d: &mut D) -> RefCell<T> { + RefCell::new(Decodable::decode(d)) } } @@ -693,8 +698,8 @@ impl<S: Encoder, T: Encodable<S>> Encodable<S> for Arc<T> { } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Arc<T> { - fn decode(d: &mut D) -> Result<Arc<T>, D::Error> { - Ok(Arc::new(Decodable::decode(d)?)) + fn decode(d: &mut D) -> Arc<T> { + Arc::new(Decodable::decode(d)) } } @@ -704,7 +709,7 @@ impl<S: Encoder, T: ?Sized + Encodable<S>> Encodable<S> for Box<T> { } } impl<D: Decoder, T: Decodable<D>> Decodable<D> for Box<T> { - fn decode(d: &mut D) -> Result<Box<T>, D::Error> { - Ok(Box::new(Decodable::decode(d)?)) + fn decode(d: &mut D) -> Box<T> { + Box::new(Decodable::decode(d)) } } diff --git a/compiler/rustc_serialize/tests/json.rs b/compiler/rustc_serialize/tests/json.rs index a759fa1bf1a..ede912bdfb6 100644 --- a/compiler/rustc_serialize/tests/json.rs +++ b/compiler/rustc_serialize/tests/json.rs @@ -1,14 +1,10 @@ #![allow(rustc::internal)] -use json::DecoderError::*; use json::ErrorCode::*; use json::Json::*; use json::JsonEvent::*; use json::ParserError::*; -use json::{ - from_str, DecodeResult, Decoder, DecoderError, Encoder, EncoderError, Json, JsonEvent, Parser, - StackElement, -}; +use json::{from_str, Decoder, Encoder, EncoderError, Json, JsonEvent, Parser, StackElement}; use rustc_macros::{Decodable, Encodable}; use rustc_serialize::json; use rustc_serialize::{Decodable, Encodable}; @@ -26,27 +22,27 @@ struct OptionData { #[test] fn test_decode_option_none() { let s = "{}"; - let obj: OptionData = json::decode(s).unwrap(); + let obj: OptionData = json::decode(s); assert_eq!(obj, OptionData { opt: None }); } #[test] fn test_decode_option_some() { let s = "{ \"opt\": 10 }"; - let obj: OptionData = json::decode(s).unwrap(); + let obj: OptionData = json::decode(s); assert_eq!(obj, OptionData { opt: Some(10) }); } #[test] -fn test_decode_option_malformed() { - check_err::<OptionData>( - "{ \"opt\": [] }", - ExpectedError("Number".to_string(), "[]".to_string()), - ); - check_err::<OptionData>( - "{ \"opt\": false }", - ExpectedError("Number".to_string(), "false".to_string()), - 
); +#[should_panic(expected = r#"ExpectedError("Number", "[]")"#)] +fn test_decode_option_malformed1() { + check_err::<OptionData>(r#"{ "opt": [] }"#); +} + +#[test] +#[should_panic(expected = r#"ExpectedError("Number", "false")"#)] +fn test_decode_option_malformed2() { + check_err::<OptionData>(r#"{ "opt": false }"#); } #[derive(PartialEq, Encodable, Decodable, Debug)] @@ -329,13 +325,13 @@ fn test_read_identifiers() { #[test] fn test_decode_identifiers() { - let v: () = json::decode("null").unwrap(); + let v: () = json::decode("null"); assert_eq!(v, ()); - let v: bool = json::decode("true").unwrap(); + let v: bool = json::decode("true"); assert_eq!(v, true); - let v: bool = json::decode("false").unwrap(); + let v: bool = json::decode("false"); assert_eq!(v, false); } @@ -368,42 +364,42 @@ fn test_read_number() { } #[test] +#[should_panic(expected = r#"ExpectedError("Integer", "765.25")"#)] fn test_decode_numbers() { - let v: f64 = json::decode("3").unwrap(); + let v: f64 = json::decode("3"); assert_eq!(v, 3.0); - let v: f64 = json::decode("3.1").unwrap(); + let v: f64 = json::decode("3.1"); assert_eq!(v, 3.1); - let v: f64 = json::decode("-1.2").unwrap(); + let v: f64 = json::decode("-1.2"); assert_eq!(v, -1.2); - let v: f64 = json::decode("0.4").unwrap(); + let v: f64 = json::decode("0.4"); assert_eq!(v, 0.4); - let v: f64 = json::decode("0.4e5").unwrap(); + let v: f64 = json::decode("0.4e5"); assert_eq!(v, 0.4e5); - let v: f64 = json::decode("0.4e15").unwrap(); + let v: f64 = json::decode("0.4e15"); assert_eq!(v, 0.4e15); - let v: f64 = json::decode("0.4e-01").unwrap(); + let v: f64 = json::decode("0.4e-01"); assert_eq!(v, 0.4e-01); - let v: u64 = json::decode("0").unwrap(); + let v: u64 = json::decode("0"); assert_eq!(v, 0); - let v: u64 = json::decode("18446744073709551615").unwrap(); + let v: u64 = json::decode("18446744073709551615"); assert_eq!(v, u64::MAX); - let v: i64 = json::decode("-9223372036854775808").unwrap(); + let v: i64 = json::decode("-9223372036854775808"); assert_eq!(v, i64::MIN); - let v: i64 = json::decode("9223372036854775807").unwrap(); + let v: i64 = json::decode("9223372036854775807"); assert_eq!(v, i64::MAX); - let res: DecodeResult<i64> = json::decode("765.25"); - assert_eq!(res, Err(ExpectedError("Integer".to_string(), "765.25".to_string()))); + json::decode::<i64>("765.25"); } #[test] @@ -438,7 +434,7 @@ fn test_decode_str() { ]; for (i, o) in s { - let v: string::String = json::decode(i).unwrap(); + let v: string::String = json::decode(i); assert_eq!(v, o); } } @@ -463,39 +459,41 @@ fn test_read_array() { #[test] fn test_decode_array() { - let v: Vec<()> = json::decode("[]").unwrap(); + let v: Vec<()> = json::decode("[]"); assert_eq!(v, []); - let v: Vec<()> = json::decode("[null]").unwrap(); + let v: Vec<()> = json::decode("[null]"); assert_eq!(v, [()]); - let v: Vec<bool> = json::decode("[true]").unwrap(); + let v: Vec<bool> = json::decode("[true]"); assert_eq!(v, [true]); - let v: Vec<isize> = json::decode("[3, 1]").unwrap(); + let v: Vec<isize> = json::decode("[3, 1]"); assert_eq!(v, [3, 1]); - let v: Vec<Vec<usize>> = json::decode("[[3], [1, 2]]").unwrap(); + let v: Vec<Vec<usize>> = json::decode("[[3], [1, 2]]"); assert_eq!(v, [vec![3], vec![1, 2]]); } #[test] fn test_decode_tuple() { - let t: (usize, usize, usize) = json::decode("[1, 2, 3]").unwrap(); + let t: (usize, usize, usize) = json::decode("[1, 2, 3]"); assert_eq!(t, (1, 2, 3)); - let t: (usize, string::String) = json::decode("[1, \"two\"]").unwrap(); + let t: (usize, string::String) = 
json::decode("[1, \"two\"]"); assert_eq!(t, (1, "two".to_string())); } #[test] +#[should_panic] fn test_decode_tuple_malformed_types() { - assert!(json::decode::<(usize, string::String)>("[1, 2]").is_err()); + json::decode::<(usize, string::String)>("[1, 2]"); } #[test] +#[should_panic] fn test_decode_tuple_malformed_length() { - assert!(json::decode::<(usize, usize)>("[1, 2, 3]").is_err()); + json::decode::<(usize, usize)>("[1, 2, 3]"); } #[test] @@ -562,7 +560,7 @@ fn test_decode_struct() { ] }"; - let v: Outer = json::decode(s).unwrap(); + let v: Outer = json::decode(s); assert_eq!( v, Outer { inner: vec![Inner { a: (), b: 2, c: vec!["abc".to_string(), "xyz".to_string()] }] } @@ -577,7 +575,7 @@ struct FloatStruct { #[test] fn test_decode_struct_with_nan() { let s = "{\"f\":null,\"a\":[null,123]}"; - let obj: FloatStruct = json::decode(s).unwrap(); + let obj: FloatStruct = json::decode(s); assert!(obj.f.is_nan()); assert!(obj.a[0].is_nan()); assert_eq!(obj.a[1], 123f64); @@ -585,20 +583,20 @@ fn test_decode_struct_with_nan() { #[test] fn test_decode_option() { - let value: Option<string::String> = json::decode("null").unwrap(); + let value: Option<string::String> = json::decode("null"); assert_eq!(value, None); - let value: Option<string::String> = json::decode("\"jodhpurs\"").unwrap(); + let value: Option<string::String> = json::decode("\"jodhpurs\""); assert_eq!(value, Some("jodhpurs".to_string())); } #[test] fn test_decode_enum() { - let value: Animal = json::decode("\"Dog\"").unwrap(); + let value: Animal = json::decode("\"Dog\""); assert_eq!(value, Dog); let s = "{\"variant\":\"Frog\",\"fields\":[\"Henry\",349]}"; - let value: Animal = json::decode(s).unwrap(); + let value: Animal = json::decode(s); assert_eq!(value, Frog("Henry".to_string(), 349)); } @@ -606,7 +604,7 @@ fn test_decode_enum() { fn test_decode_map() { let s = "{\"a\": \"Dog\", \"b\": {\"variant\":\"Frog\",\ \"fields\":[\"Henry\", 349]}}"; - let mut map: BTreeMap<string::String, Animal> = json::decode(s).unwrap(); + let mut map: BTreeMap<string::String, Animal> = json::decode(s); assert_eq!(map.remove(&"a".to_string()), Some(Dog)); assert_eq!(map.remove(&"b".to_string()), Some(Frog("Henry".to_string(), 349))); @@ -630,59 +628,65 @@ enum DecodeEnum { A(f64), B(string::String), } -fn check_err<T: Decodable<Decoder>>(to_parse: &'static str, expected: DecoderError) { - let res: DecodeResult<T> = match from_str(to_parse) { - Err(e) => Err(ParseError(e)), - Ok(json) => Decodable::decode(&mut Decoder::new(json)), - }; - match res { - Ok(_) => panic!("`{:?}` parsed & decoded ok, expecting error `{:?}`", to_parse, expected), - Err(ParseError(e)) => panic!("`{:?}` is not valid json: {:?}", to_parse, e), - Err(e) => { - assert_eq!(e, expected); - } - } +fn check_err<T: Decodable<Decoder>>(to_parse: &str) { + let json = from_str(to_parse).unwrap(); + let _: T = Decodable::decode(&mut Decoder::new(json)); } #[test] -fn test_decode_errors_struct() { - check_err::<DecodeStruct>("[]", ExpectedError("Object".to_string(), "[]".to_string())); - check_err::<DecodeStruct>( - "{\"x\": true, \"y\": true, \"z\": \"\", \"w\": []}", - ExpectedError("Number".to_string(), "true".to_string()), - ); - check_err::<DecodeStruct>( - "{\"x\": 1, \"y\": [], \"z\": \"\", \"w\": []}", - ExpectedError("Boolean".to_string(), "[]".to_string()), - ); - check_err::<DecodeStruct>( - "{\"x\": 1, \"y\": true, \"z\": {}, \"w\": []}", - ExpectedError("String".to_string(), "{}".to_string()), - ); - check_err::<DecodeStruct>( - "{\"x\": 1, \"y\": true, \"z\": \"\", 
\"w\": null}", - ExpectedError("Array".to_string(), "null".to_string()), - ); - check_err::<DecodeStruct>( - "{\"x\": 1, \"y\": true, \"z\": \"\"}", - MissingFieldError("w".to_string()), - ); +#[should_panic(expected = r#"ExpectedError("Object", "[]")"#)] +fn test_decode_errors_struct1() { + check_err::<DecodeStruct>("[]"); } #[test] -fn test_decode_errors_enum() { - check_err::<DecodeEnum>("{}", MissingFieldError("variant".to_string())); - check_err::<DecodeEnum>( - "{\"variant\": 1}", - ExpectedError("String".to_string(), "1".to_string()), - ); - check_err::<DecodeEnum>("{\"variant\": \"A\"}", MissingFieldError("fields".to_string())); - check_err::<DecodeEnum>( - "{\"variant\": \"A\", \"fields\": null}", - ExpectedError("Array".to_string(), "null".to_string()), - ); - check_err::<DecodeEnum>( - "{\"variant\": \"C\", \"fields\": []}", - UnknownVariantError("C".to_string()), - ); +#[should_panic(expected = r#"ExpectedError("Number", "true")"#)] +fn test_decode_errors_struct2() { + check_err::<DecodeStruct>(r#"{"x": true, "y": true, "z": "", "w": []}"#); +} +#[test] +#[should_panic(expected = r#"ExpectedError("Boolean", "[]")"#)] +fn test_decode_errors_struct3() { + check_err::<DecodeStruct>(r#"{"x": 1, "y": [], "z": "", "w": []}"#); +} +#[test] +#[should_panic(expected = r#"ExpectedError("String", "{}")"#)] +fn test_decode_errors_struct4() { + check_err::<DecodeStruct>(r#"{"x": 1, "y": true, "z": {}, "w": []}"#); +} +#[test] +#[should_panic(expected = r#"ExpectedError("Array", "null")"#)] +fn test_decode_errors_struct5() { + check_err::<DecodeStruct>(r#"{"x": 1, "y": true, "z": "", "w": null}"#); +} +#[test] +#[should_panic(expected = r#"ExpectedError("Array", "null")"#)] +fn test_decode_errors_struct6() { + check_err::<DecodeStruct>(r#"{"x": 1, "y": true, "z": ""}"#); +} + +#[test] +#[should_panic(expected = r#"MissingFieldError("variant")"#)] +fn test_decode_errors_enum1() { + check_err::<DecodeEnum>(r#"{}"#); +} +#[test] +#[should_panic(expected = r#"ExpectedError("String", "1")"#)] +fn test_decode_errors_enum2() { + check_err::<DecodeEnum>(r#"{"variant": 1}"#); +} +#[test] +#[should_panic(expected = r#"MissingFieldError("fields")"#)] +fn test_decode_errors_enum3() { + check_err::<DecodeEnum>(r#"{"variant": "A"}"#); +} +#[test] +#[should_panic(expected = r#"ExpectedError("Array", "null")"#)] +fn test_decode_errors_enum4() { + check_err::<DecodeEnum>(r#"{"variant": "A", "fields": null}"#); +} +#[test] +#[should_panic(expected = r#"UnknownVariantError("C")"#)] +fn test_decode_errors_enum5() { + check_err::<DecodeEnum>(r#"{"variant": "C", "fields": []}"#); } #[test] @@ -944,7 +948,7 @@ fn test_hashmap_with_enum_key() { map.insert(Enum::Foo, 0); let result = json::encode(&map).unwrap(); assert_eq!(&result[..], r#"{"Foo":0}"#); - let decoded: HashMap<Enum, _> = json::decode(&result).unwrap(); + let decoded: HashMap<Enum, _> = json::decode(&result); assert_eq!(map, decoded); } @@ -957,10 +961,11 @@ fn test_hashmap_with_numeric_key_can_handle_double_quote_delimited_key() { Ok(o) => o, }; let mut decoder = Decoder::new(json_obj); - let _hm: HashMap<usize, bool> = Decodable::decode(&mut decoder).unwrap(); + let _hm: HashMap<usize, bool> = Decodable::decode(&mut decoder); } #[test] +#[should_panic(expected = r#"ExpectedError("Number", "a")"#)] fn test_hashmap_with_numeric_key_will_error_with_string_keys() { use std::collections::HashMap; let json_str = "{\"a\":true}"; @@ -969,8 +974,7 @@ fn test_hashmap_with_numeric_key_will_error_with_string_keys() { Ok(o) => o, }; let mut decoder = 
Decoder::new(json_obj); - let result: Result<HashMap<usize, bool>, DecoderError> = Decodable::decode(&mut decoder); - assert_eq!(result, Err(ExpectedError("Number".to_string(), "a".to_string()))); + let _: HashMap<usize, bool> = Decodable::decode(&mut decoder); } fn assert_stream_equal(src: &str, expected: Vec<(JsonEvent, Vec<StackElement<'_>>)>) { diff --git a/compiler/rustc_serialize/tests/opaque.rs b/compiler/rustc_serialize/tests/opaque.rs index 13b3676a56c..298eb115111 100644 --- a/compiler/rustc_serialize/tests/opaque.rs +++ b/compiler/rustc_serialize/tests/opaque.rs @@ -41,7 +41,7 @@ fn check_round_trip<T: Encodable<Encoder> + for<'a> Decodable<Decoder<'a>> + Par let mut decoder = Decoder::new(&data[..], 0); for value in values { - let decoded = Decodable::decode(&mut decoder).unwrap(); + let decoded = Decodable::decode(&mut decoder); assert_eq!(value, decoded); } } diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 92ad0723f48..a756de4c0fc 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -12,7 +12,7 @@ use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::impl_stable_hash_via_hash; use rustc_target::abi::{Align, TargetDataLayout}; -use rustc_target::spec::{SplitDebuginfo, Target, TargetTriple, TargetWarnings}; +use rustc_target::spec::{LinkerFlavor, SplitDebuginfo, Target, TargetTriple, TargetWarnings}; use rustc_serialize::json; @@ -2237,6 +2237,16 @@ pub fn build_session_options(matches: &getopts::Matches) -> Options { } } + if cg.linker_flavor == Some(LinkerFlavor::L4Bender) + && !nightly_options::is_unstable_enabled(matches) + { + early_error( + error_format, + "`l4-bender` linker flavor is unstable, `-Z unstable-options` \ + flag must also be passed to explicitly use it", + ); + } + let prints = collect_print_requests(&mut cg, &mut debugging_opts, matches, error_format); let cg = cg; diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 730e79a5647..9bcdd7f3da6 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -476,10 +476,6 @@ impl Session { &self.parse_sess.span_diagnostic } - pub fn with_disabled_diagnostic<T, F: FnOnce() -> T>(&self, f: F) -> T { - self.parse_sess.span_diagnostic.with_disabled_diagnostic(f) - } - /// Analogous to calling methods on the given `DiagnosticBuilder`, but /// deduplicates on lint ID, span (if any), and message for this `Session` fn diag_once<'a, 'b>( diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs index 5390eed89fa..147c1f9e043 100644 --- a/compiler/rustc_span/src/def_id.rs +++ b/compiler/rustc_span/src/def_id.rs @@ -47,8 +47,8 @@ impl<E: Encoder> Encodable<E> for CrateNum { } impl<D: Decoder> Decodable<D> for CrateNum { - default fn decode(d: &mut D) -> Result<CrateNum, D::Error> { - Ok(CrateNum::from_u32(d.read_u32()?)) + default fn decode(d: &mut D) -> CrateNum { + CrateNum::from_u32(d.read_u32()) } } @@ -209,7 +209,7 @@ impl<E: Encoder> Encodable<E> for DefIndex { } impl<D: Decoder> Decodable<D> for DefIndex { - default fn decode(_: &mut D) -> Result<DefIndex, D::Error> { + default fn decode(_: &mut D) -> DefIndex { panic!("cannot decode `DefIndex` with `{}`", std::any::type_name::<D>()); } } @@ -298,12 +298,10 @@ impl<E: Encoder> Encodable<E> for DefId { } impl<D: Decoder> Decodable<D> for DefId { - default fn decode(d: &mut D) -> Result<DefId, D::Error> { - d.read_struct(|d| { - Ok(DefId { - krate: 
d.read_struct_field("krate", Decodable::decode)?, - index: d.read_struct_field("index", Decodable::decode)?, - }) + default fn decode(d: &mut D) -> DefId { + d.read_struct(|d| DefId { + krate: d.read_struct_field("krate", Decodable::decode), + index: d.read_struct_field("index", Decodable::decode), }) } } @@ -378,8 +376,8 @@ impl<E: Encoder> Encodable<E> for LocalDefId { } impl<D: Decoder> Decodable<D> for LocalDefId { - fn decode(d: &mut D) -> Result<LocalDefId, D::Error> { - DefId::decode(d).map(|d| d.expect_local()) + fn decode(d: &mut D) -> LocalDefId { + DefId::decode(d).expect_local() } } diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs index 7b70c20d307..e0d6bd8cb7b 100644 --- a/compiler/rustc_span/src/hygiene.rs +++ b/compiler/rustc_span/src/hygiene.rs @@ -1314,19 +1314,16 @@ pub fn decode_expn_id( // to track which `SyntaxContext`s we have already decoded. // The provided closure will be invoked to deserialize a `SyntaxContextData` // if we haven't already seen the id of the `SyntaxContext` we are deserializing. -pub fn decode_syntax_context< - D: Decoder, - F: FnOnce(&mut D, u32) -> Result<SyntaxContextData, D::Error>, ->( +pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContextData>( d: &mut D, context: &HygieneDecodeContext, decode_data: F, -) -> Result<SyntaxContext, D::Error> { - let raw_id: u32 = Decodable::decode(d)?; +) -> SyntaxContext { + let raw_id: u32 = Decodable::decode(d); if raw_id == 0 { debug!("decode_syntax_context: deserialized root"); // The root is special - return Ok(SyntaxContext::root()); + return SyntaxContext::root(); } let outer_ctxts = &context.remapped_ctxts; @@ -1334,7 +1331,7 @@ pub fn decode_syntax_context< // Ensure that the lock() temporary is dropped early { if let Some(ctxt) = outer_ctxts.lock().get(raw_id as usize).copied().flatten() { - return Ok(ctxt); + return ctxt; } } @@ -1364,7 +1361,7 @@ pub fn decode_syntax_context< // Don't try to decode data while holding the lock, since we need to // be able to recursively decode a SyntaxContext - let mut ctxt_data = decode_data(d, raw_id)?; + let mut ctxt_data = decode_data(d, raw_id); // Reset `dollar_crate_name` so that it will be updated by `update_dollar_crate_names` // We don't care what the encoding crate set this to - we want to resolve it // from the perspective of the current compilation session @@ -1380,7 +1377,7 @@ pub fn decode_syntax_context< assert_eq!(dummy.dollar_crate_name, kw::Empty); }); - Ok(new_ctxt) + new_ctxt } fn for_all_ctxts_in<E, F: FnMut(u32, SyntaxContext, &SyntaxContextData) -> Result<(), E>>( @@ -1422,13 +1419,13 @@ impl<E: Encoder> Encodable<E> for ExpnId { } impl<D: Decoder> Decodable<D> for LocalExpnId { - fn decode(d: &mut D) -> Result<Self, D::Error> { - ExpnId::decode(d).map(ExpnId::expect_local) + fn decode(d: &mut D) -> Self { + ExpnId::expect_local(ExpnId::decode(d)) } } impl<D: Decoder> Decodable<D> for ExpnId { - default fn decode(_: &mut D) -> Result<Self, D::Error> { + default fn decode(_: &mut D) -> Self { panic!("cannot decode `ExpnId` with `{}`", std::any::type_name::<D>()); } } @@ -1451,7 +1448,7 @@ impl<E: Encoder> Encodable<E> for SyntaxContext { } impl<D: Decoder> Decodable<D> for SyntaxContext { - default fn decode(_: &mut D) -> Result<Self, D::Error> { + default fn decode(_: &mut D) -> Self { panic!("cannot decode `SyntaxContext` with `{}`", std::any::type_name::<D>()); } } diff --git a/compiler/rustc_span/src/lev_distance.rs b/compiler/rustc_span/src/lev_distance.rs index 
aed699e4839..93cf965f105 100644 --- a/compiler/rustc_span/src/lev_distance.rs +++ b/compiler/rustc_span/src/lev_distance.rs @@ -11,16 +11,21 @@ use std::cmp; mod tests; /// Finds the Levenshtein distance between two strings. -pub fn lev_distance(a: &str, b: &str) -> usize { - // cases which don't require further computation - if a.is_empty() { - return b.chars().count(); - } else if b.is_empty() { - return a.chars().count(); +/// +/// Returns None if the distance exceeds the limit. +pub fn lev_distance(a: &str, b: &str, limit: usize) -> Option<usize> { + let n = a.chars().count(); + let m = b.chars().count(); + let min_dist = if n < m { m - n } else { n - m }; + + if min_dist > limit { + return None; + } + if n == 0 || m == 0 { + return (min_dist <= limit).then_some(min_dist); } - let mut dcol: Vec<_> = (0..=b.len()).collect(); - let mut t_last = 0; + let mut dcol: Vec<_> = (0..=m).collect(); for (i, sc) in a.chars().enumerate() { let mut current = i; @@ -35,10 +40,10 @@ pub fn lev_distance(a: &str, b: &str) -> usize { dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1; } current = next; - t_last = j; } } - dcol[t_last + 1] + + (dcol[m] <= limit).then_some(dcol[m]) } /// Finds the best match for a given word in the given iterator. @@ -51,39 +56,38 @@ pub fn lev_distance(a: &str, b: &str) -> usize { /// on an edge case with a lower(upper)case letters mismatch. #[cold] pub fn find_best_match_for_name( - name_vec: &[Symbol], + candidates: &[Symbol], lookup: Symbol, dist: Option<usize>, ) -> Option<Symbol> { let lookup = lookup.as_str(); - let max_dist = dist.unwrap_or_else(|| cmp::max(lookup.len(), 3) / 3); + let lookup_uppercase = lookup.to_uppercase(); // Priority of matches: // 1. Exact case insensitive match // 2. Levenshtein distance match // 3. Sorted word match - if let Some(case_insensitive_match) = - name_vec.iter().find(|candidate| candidate.as_str().to_uppercase() == lookup.to_uppercase()) - { - return Some(*case_insensitive_match); + if let Some(c) = candidates.iter().find(|c| c.as_str().to_uppercase() == lookup_uppercase) { + return Some(*c); } - let levenshtein_match = name_vec - .iter() - .filter_map(|&name| { - let dist = lev_distance(lookup, name.as_str()); - if dist <= max_dist { Some((name, dist)) } else { None } - }) - // Here we are collecting the next structure: - // (levenshtein_match, levenshtein_distance) - .fold(None, |result, (candidate, dist)| match result { - None => Some((candidate, dist)), - Some((c, d)) => Some(if dist < d { (candidate, dist) } else { (c, d) }), - }); - if levenshtein_match.is_some() { - levenshtein_match.map(|(candidate, _)| candidate) - } else { - find_match_by_sorted_words(name_vec, lookup) + + let mut dist = dist.unwrap_or_else(|| cmp::max(lookup.len(), 3) / 3); + let mut best = None; + for c in candidates { + match lev_distance(lookup, c.as_str(), dist) { + Some(0) => return Some(*c), + Some(d) => { + dist = d - 1; + best = Some(*c); + } + None => {} + } } + if best.is_some() { + return best; + } + + find_match_by_sorted_words(candidates, lookup) } fn find_match_by_sorted_words(iter_names: &[Symbol], lookup: &str) -> Option<Symbol> { diff --git a/compiler/rustc_span/src/lev_distance/tests.rs b/compiler/rustc_span/src/lev_distance/tests.rs index b32f8d32c13..4e34219248d 100644 --- a/compiler/rustc_span/src/lev_distance/tests.rs +++ b/compiler/rustc_span/src/lev_distance/tests.rs @@ -5,18 +5,26 @@ fn test_lev_distance() { use std::char::{from_u32, MAX}; // Test bytelength agnosticity for c in (0..MAX as u32).filter_map(from_u32).map(|i| 
i.to_string()) { - assert_eq!(lev_distance(&c[..], &c[..]), 0); + assert_eq!(lev_distance(&c[..], &c[..], usize::MAX), Some(0)); } let a = "\nMäry häd ä little lämb\n\nLittle lämb\n"; let b = "\nMary häd ä little lämb\n\nLittle lämb\n"; let c = "Mary häd ä little lämb\n\nLittle lämb\n"; - assert_eq!(lev_distance(a, b), 1); - assert_eq!(lev_distance(b, a), 1); - assert_eq!(lev_distance(a, c), 2); - assert_eq!(lev_distance(c, a), 2); - assert_eq!(lev_distance(b, c), 1); - assert_eq!(lev_distance(c, b), 1); + assert_eq!(lev_distance(a, b, usize::MAX), Some(1)); + assert_eq!(lev_distance(b, a, usize::MAX), Some(1)); + assert_eq!(lev_distance(a, c, usize::MAX), Some(2)); + assert_eq!(lev_distance(c, a, usize::MAX), Some(2)); + assert_eq!(lev_distance(b, c, usize::MAX), Some(1)); + assert_eq!(lev_distance(c, b, usize::MAX), Some(1)); +} + +#[test] +fn test_lev_distance_limit() { + assert_eq!(lev_distance("abc", "abcd", 1), Some(1)); + assert_eq!(lev_distance("abc", "abcd", 0), None); + assert_eq!(lev_distance("abc", "xyz", 3), Some(3)); + assert_eq!(lev_distance("abc", "xyz", 2), None); } #[test] diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index 9602bc5d0b7..2c3db35bb66 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -15,6 +15,7 @@ #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![feature(array_windows)] +#![feature(bool_to_option)] #![feature(crate_visibility_modifier)] #![feature(if_let_guard)] #![feature(negative_impls)] @@ -611,7 +612,7 @@ impl Span { #[inline] /// Returns `true` if `hi == lo`. - pub fn is_empty(&self) -> bool { + pub fn is_empty(self) -> bool { let span = self.data_untracked(); span.hi == span.lo } @@ -639,7 +640,7 @@ impl Span { /// /// Use this instead of `==` when either span could be generated code, /// and you only care that they point to the same bytes of source text. - pub fn source_equal(&self, other: &Span) -> bool { + pub fn source_equal(self, other: Span) -> bool { let span = self.data(); let other = other.data(); span.lo == other.lo && span.hi == other.hi @@ -680,17 +681,17 @@ impl Span { } #[inline] - pub fn rust_2015(&self) -> bool { + pub fn rust_2015(self) -> bool { self.edition() == edition::Edition::Edition2015 } #[inline] - pub fn rust_2018(&self) -> bool { + pub fn rust_2018(self) -> bool { self.edition() >= edition::Edition::Edition2018 } #[inline] - pub fn rust_2021(&self) -> bool { + pub fn rust_2021(self) -> bool { self.edition() >= edition::Edition::Edition2021 } @@ -711,7 +712,7 @@ impl Span { /// Checks if a span is "internal" to a macro in which `#[unstable]` /// items can be used (that is, a macro marked with /// `#[allow_internal_unstable]`). - pub fn allows_unstable(&self, feature: Symbol) -> bool { + pub fn allows_unstable(self, feature: Symbol) -> bool { self.ctxt() .outer_expn_data() .allow_internal_unstable @@ -719,7 +720,7 @@ impl Span { } /// Checks if this span arises from a compiler desugaring of kind `kind`. - pub fn is_desugaring(&self, kind: DesugaringKind) -> bool { + pub fn is_desugaring(self, kind: DesugaringKind) -> bool { match self.ctxt().outer_expn_data().kind { ExpnKind::Desugaring(k) => k == kind, _ => false, @@ -728,7 +729,7 @@ impl Span { /// Returns the compiler desugaring that created this span, or `None` /// if this span is not from a desugaring. 
- pub fn desugaring_kind(&self) -> Option<DesugaringKind> { + pub fn desugaring_kind(self) -> Option<DesugaringKind> { match self.ctxt().outer_expn_data().kind { ExpnKind::Desugaring(k) => Some(k), _ => None, @@ -738,7 +739,7 @@ impl Span { /// Checks if a span is "internal" to a macro in which `unsafe` /// can be used without triggering the `unsafe_code` lint. // (that is, a macro marked with `#[allow_internal_unsafe]`). - pub fn allows_unsafe(&self) -> bool { + pub fn allows_unsafe(self) -> bool { self.ctxt().outer_expn_data().allow_internal_unsafe } @@ -751,7 +752,7 @@ impl Span { return None; } - let is_recursive = expn_data.call_site.source_equal(&prev_span); + let is_recursive = expn_data.call_site.source_equal(prev_span); prev_span = self; self = expn_data.call_site; @@ -865,13 +866,13 @@ impl Span { /// Equivalent of `Span::call_site` from the proc macro API, /// except that the location is taken from the `self` span. - pub fn with_call_site_ctxt(&self, expn_id: ExpnId) -> Span { + pub fn with_call_site_ctxt(self, expn_id: ExpnId) -> Span { self.with_ctxt_from_mark(expn_id, Transparency::Transparent) } /// Equivalent of `Span::mixed_site` from the proc macro API, /// except that the location is taken from the `self` span. - pub fn with_mixed_site_ctxt(&self, expn_id: ExpnId) -> Span { + pub fn with_mixed_site_ctxt(self, expn_id: ExpnId) -> Span { self.with_ctxt_from_mark(expn_id, Transparency::SemiTransparent) } @@ -975,12 +976,12 @@ impl<E: Encoder> Encodable<E> for Span { } } impl<D: Decoder> Decodable<D> for Span { - default fn decode(s: &mut D) -> Result<Span, D::Error> { + default fn decode(s: &mut D) -> Span { s.read_struct(|d| { - let lo = d.read_struct_field("lo", Decodable::decode)?; - let hi = d.read_struct_field("hi", Decodable::decode)?; + let lo = d.read_struct_field("lo", Decodable::decode); + let hi = d.read_struct_field("hi", Decodable::decode); - Ok(Span::new(lo, hi, SyntaxContext::root(), None)) + Span::new(lo, hi, SyntaxContext::root(), None) }) } } @@ -1448,30 +1449,30 @@ impl<S: Encoder> Encodable<S> for SourceFile { } impl<D: Decoder> Decodable<D> for SourceFile { - fn decode(d: &mut D) -> Result<SourceFile, D::Error> { + fn decode(d: &mut D) -> SourceFile { d.read_struct(|d| { - let name: FileName = d.read_struct_field("name", |d| Decodable::decode(d))?; + let name: FileName = d.read_struct_field("name", |d| Decodable::decode(d)); let src_hash: SourceFileHash = - d.read_struct_field("src_hash", |d| Decodable::decode(d))?; - let start_pos: BytePos = d.read_struct_field("start_pos", |d| Decodable::decode(d))?; - let end_pos: BytePos = d.read_struct_field("end_pos", |d| Decodable::decode(d))?; + d.read_struct_field("src_hash", |d| Decodable::decode(d)); + let start_pos: BytePos = d.read_struct_field("start_pos", |d| Decodable::decode(d)); + let end_pos: BytePos = d.read_struct_field("end_pos", |d| Decodable::decode(d)); let lines: Vec<BytePos> = d.read_struct_field("lines", |d| { - let num_lines: u32 = Decodable::decode(d)?; + let num_lines: u32 = Decodable::decode(d); let mut lines = Vec::with_capacity(num_lines as usize); if num_lines > 0 { // Read the number of bytes used per diff. - let bytes_per_diff: u8 = Decodable::decode(d)?; + let bytes_per_diff: u8 = Decodable::decode(d); // Read the first element. - let mut line_start: BytePos = Decodable::decode(d)?; + let mut line_start: BytePos = Decodable::decode(d); lines.push(line_start); for _ in 1..num_lines { let diff = match bytes_per_diff { - 1 => d.read_u8()? as u32, - 2 => d.read_u16()? 
as u32, - 4 => d.read_u32()?, + 1 => d.read_u8() as u32, + 2 => d.read_u16() as u32, + 4 => d.read_u32(), _ => unreachable!(), }; @@ -1481,17 +1482,17 @@ impl<D: Decoder> Decodable<D> for SourceFile { } } - Ok(lines) - })?; + lines + }); let multibyte_chars: Vec<MultiByteChar> = - d.read_struct_field("multibyte_chars", |d| Decodable::decode(d))?; + d.read_struct_field("multibyte_chars", |d| Decodable::decode(d)); let non_narrow_chars: Vec<NonNarrowChar> = - d.read_struct_field("non_narrow_chars", |d| Decodable::decode(d))?; - let name_hash: u128 = d.read_struct_field("name_hash", |d| Decodable::decode(d))?; + d.read_struct_field("non_narrow_chars", |d| Decodable::decode(d)); + let name_hash: u128 = d.read_struct_field("name_hash", |d| Decodable::decode(d)); let normalized_pos: Vec<NormalizedPos> = - d.read_struct_field("normalized_pos", |d| Decodable::decode(d))?; - let cnum: CrateNum = d.read_struct_field("cnum", |d| Decodable::decode(d))?; - Ok(SourceFile { + d.read_struct_field("normalized_pos", |d| Decodable::decode(d)); + let cnum: CrateNum = d.read_struct_field("cnum", |d| Decodable::decode(d)); + SourceFile { name, start_pos, end_pos, @@ -1506,7 +1507,7 @@ impl<D: Decoder> Decodable<D> for SourceFile { normalized_pos, name_hash, cnum, - }) + } }) } } @@ -1949,8 +1950,8 @@ impl<S: rustc_serialize::Encoder> Encodable<S> for BytePos { } impl<D: rustc_serialize::Decoder> Decodable<D> for BytePos { - fn decode(d: &mut D) -> Result<BytePos, D::Error> { - Ok(BytePos(d.read_u32()?)) + fn decode(d: &mut D) -> BytePos { + BytePos(d.read_u32()) } } diff --git a/compiler/rustc_span/src/span_encoding.rs b/compiler/rustc_span/src/span_encoding.rs index e9120b98aab..61e4074a7c8 100644 --- a/compiler/rustc_span/src/span_encoding.rs +++ b/compiler/rustc_span/src/span_encoding.rs @@ -61,6 +61,15 @@ use rustc_data_structures::fx::FxIndexSet; /// using the callback `SPAN_TRACK` to access the query engine. /// #[derive(Clone, Copy, Eq, PartialEq, Hash)] +// FIXME(@lcnr): Enable this attribute once the bootstrap +// compiler knows of `rustc_pass_by_value`. +// +// Right now, this lint would only trigger when compiling the +// stage 2 compiler, which is fairly annoying as there are +// a lot of places using `&Span` right now. After the next bootstrap bump, +// the lint will already trigger when using stage 1, which is a lot less annoying. +// +// #[cfg_attr(not(bootstrap), rustc_pass_by_value)] pub struct Span { base_or_index: u32, len_or_tag: u16, diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 702e3594660..757c430e799 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -272,7 +272,6 @@ symbols! { __H, __S, __try_var, - _args, _d, _e, _task_context, @@ -321,8 +320,10 @@ symbols! { and, and_then, any, + append_const_msg, arbitrary_enum_discriminant, arbitrary_self_types, + args, arith_offset, arm, arm_target_feature, @@ -460,6 +461,7 @@ symbols! { const_async_blocks, const_compare_raw_pointers, const_constructor, + const_deallocate, const_eval_limit, const_eval_select, const_eval_select_ct, @@ -990,6 +992,7 @@ symbols! { panic_implementation, panic_info, panic_location, + panic_no_unwind, panic_runtime, panic_str, panic_unwind, @@ -1203,6 +1206,7 @@ symbols! 
{ rustc_trivial_field_reads, rustc_unsafe_specialization_marker, rustc_variance, + rustc_with_negative_coherence, rustdoc, rustdoc_internals, rustfmt, @@ -1755,8 +1759,8 @@ impl<S: Encoder> Encodable<S> for Symbol { impl<D: Decoder> Decodable<D> for Symbol { #[inline] - fn decode(d: &mut D) -> Result<Symbol, D::Error> { - Ok(Symbol::intern(&d.read_str()?)) + fn decode(d: &mut D) -> Symbol { + Symbol::intern(&d.read_str()) } } diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 0d51f7779e1..f8e8e15e78c 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -556,7 +556,7 @@ impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> { cx = cx.print_def_path(trait_ref.def_id, trait_ref.substs)?; } ty::ExistentialPredicate::Projection(projection) => { - let name = cx.tcx.associated_item(projection.item_def_id).ident; + let name = cx.tcx.associated_item(projection.item_def_id).name; cx.push("p"); cx.push_ident(name.as_str()); cx = match projection.term { diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs index 6b82bb337e6..a84410d0f3c 100644 --- a/compiler/rustc_target/src/asm/mod.rs +++ b/compiler/rustc_target/src/asm/mod.rs @@ -152,6 +152,7 @@ mod avr; mod bpf; mod hexagon; mod mips; +mod msp430; mod nvptx; mod powerpc; mod riscv; @@ -166,6 +167,7 @@ pub use avr::{AvrInlineAsmReg, AvrInlineAsmRegClass}; pub use bpf::{BpfInlineAsmReg, BpfInlineAsmRegClass}; pub use hexagon::{HexagonInlineAsmReg, HexagonInlineAsmRegClass}; pub use mips::{MipsInlineAsmReg, MipsInlineAsmRegClass}; +pub use msp430::{Msp430InlineAsmReg, Msp430InlineAsmRegClass}; pub use nvptx::{NvptxInlineAsmReg, NvptxInlineAsmRegClass}; pub use powerpc::{PowerPCInlineAsmReg, PowerPCInlineAsmRegClass}; pub use riscv::{RiscVInlineAsmReg, RiscVInlineAsmRegClass}; @@ -194,6 +196,7 @@ pub enum InlineAsmArch { Wasm64, Bpf, Avr, + Msp430, } impl FromStr for InlineAsmArch { @@ -219,6 +222,7 @@ impl FromStr for InlineAsmArch { "wasm64" => Ok(Self::Wasm64), "bpf" => Ok(Self::Bpf), "avr" => Ok(Self::Avr), + "msp430" => Ok(Self::Msp430), _ => Err(()), } } @@ -250,6 +254,7 @@ pub enum InlineAsmReg { Wasm(WasmInlineAsmReg), Bpf(BpfInlineAsmReg), Avr(AvrInlineAsmReg), + Msp430(Msp430InlineAsmReg), // Placeholder for invalid register constraints for the current target Err, } @@ -267,6 +272,7 @@ impl InlineAsmReg { Self::S390x(r) => r.name(), Self::Bpf(r) => r.name(), Self::Avr(r) => r.name(), + Self::Msp430(r) => r.name(), Self::Err => "<reg>", } } @@ -283,6 +289,7 @@ impl InlineAsmReg { Self::S390x(r) => InlineAsmRegClass::S390x(r.reg_class()), Self::Bpf(r) => InlineAsmRegClass::Bpf(r.reg_class()), Self::Avr(r) => InlineAsmRegClass::Avr(r.reg_class()), + Self::Msp430(r) => InlineAsmRegClass::Msp430(r.reg_class()), Self::Err => InlineAsmRegClass::Err, } } @@ -336,6 +343,9 @@ impl InlineAsmReg { InlineAsmArch::Avr => { Self::Avr(AvrInlineAsmReg::parse(arch, target_features, target, name)?) } + InlineAsmArch::Msp430 => { + Self::Msp430(Msp430InlineAsmReg::parse(arch, target_features, target, name)?) 
+ } }) } @@ -358,6 +368,7 @@ impl InlineAsmReg { Self::S390x(r) => r.emit(out, arch, modifier), Self::Bpf(r) => r.emit(out, arch, modifier), Self::Avr(r) => r.emit(out, arch, modifier), + Self::Msp430(r) => r.emit(out, arch, modifier), Self::Err => unreachable!("Use of InlineAsmReg::Err"), } } @@ -374,6 +385,7 @@ impl InlineAsmReg { Self::S390x(_) => cb(self), Self::Bpf(r) => r.overlapping_regs(|r| cb(Self::Bpf(r))), Self::Avr(r) => r.overlapping_regs(|r| cb(Self::Avr(r))), + Self::Msp430(_) => cb(self), Self::Err => unreachable!("Use of InlineAsmReg::Err"), } } @@ -405,6 +417,7 @@ pub enum InlineAsmRegClass { Wasm(WasmInlineAsmRegClass), Bpf(BpfInlineAsmRegClass), Avr(AvrInlineAsmRegClass), + Msp430(Msp430InlineAsmRegClass), // Placeholder for invalid register constraints for the current target Err, } @@ -425,12 +438,13 @@ impl InlineAsmRegClass { Self::Wasm(r) => r.name(), Self::Bpf(r) => r.name(), Self::Avr(r) => r.name(), + Self::Msp430(r) => r.name(), Self::Err => rustc_span::symbol::sym::reg, } } /// Returns a suggested register class to use for this type. This is called - /// after type checking via `supported_types` fails to give a better error + /// when `supported_types` fails to give a better error /// message to the user. pub fn suggest_class(self, arch: InlineAsmArch, ty: InlineAsmType) -> Option<Self> { match self { @@ -447,6 +461,7 @@ impl InlineAsmRegClass { Self::Wasm(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Wasm), Self::Bpf(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Bpf), Self::Avr(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Avr), + Self::Msp430(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Msp430), Self::Err => unreachable!("Use of InlineAsmRegClass::Err"), } } @@ -476,6 +491,7 @@ impl InlineAsmRegClass { Self::Wasm(r) => r.suggest_modifier(arch, ty), Self::Bpf(r) => r.suggest_modifier(arch, ty), Self::Avr(r) => r.suggest_modifier(arch, ty), + Self::Msp430(r) => r.suggest_modifier(arch, ty), Self::Err => unreachable!("Use of InlineAsmRegClass::Err"), } } @@ -501,6 +517,7 @@ impl InlineAsmRegClass { Self::Wasm(r) => r.default_modifier(arch), Self::Bpf(r) => r.default_modifier(arch), Self::Avr(r) => r.default_modifier(arch), + Self::Msp430(r) => r.default_modifier(arch), Self::Err => unreachable!("Use of InlineAsmRegClass::Err"), } } @@ -525,6 +542,7 @@ impl InlineAsmRegClass { Self::Wasm(r) => r.supported_types(arch), Self::Bpf(r) => r.supported_types(arch), Self::Avr(r) => r.supported_types(arch), + Self::Msp430(r) => r.supported_types(arch), Self::Err => unreachable!("Use of InlineAsmRegClass::Err"), } } @@ -554,6 +572,7 @@ impl InlineAsmRegClass { } InlineAsmArch::Bpf => Self::Bpf(BpfInlineAsmRegClass::parse(arch, name)?), InlineAsmArch::Avr => Self::Avr(AvrInlineAsmRegClass::parse(arch, name)?), + InlineAsmArch::Msp430 => Self::Msp430(Msp430InlineAsmRegClass::parse(arch, name)?), }) } @@ -574,6 +593,7 @@ impl InlineAsmRegClass { Self::Wasm(r) => r.valid_modifiers(arch), Self::Bpf(r) => r.valid_modifiers(arch), Self::Avr(r) => r.valid_modifiers(arch), + Self::Msp430(r) => r.valid_modifiers(arch), Self::Err => unreachable!("Use of InlineAsmRegClass::Err"), } } @@ -764,6 +784,11 @@ pub fn allocatable_registers( avr::fill_reg_map(arch, target_features, target, &mut map); map } + InlineAsmArch::Msp430 => { + let mut map = msp430::regclass_map(); + msp430::fill_reg_map(arch, target_features, target, &mut map); + map + } } } diff --git a/compiler/rustc_target/src/asm/msp430.rs b/compiler/rustc_target/src/asm/msp430.rs new file 
mode 100644 index 00000000000..a27d6390a72 --- /dev/null +++ b/compiler/rustc_target/src/asm/msp430.rs @@ -0,0 +1,81 @@ +use super::{InlineAsmArch, InlineAsmType}; +use rustc_macros::HashStable_Generic; +use rustc_span::Symbol; +use std::fmt; + +def_reg_class! { + Msp430 Msp430InlineAsmRegClass { + reg, + } +} + +impl Msp430InlineAsmRegClass { + pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] { + &[] + } + + pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> { + None + } + + pub fn suggest_modifier( + self, + _arch: InlineAsmArch, + _ty: InlineAsmType, + ) -> Option<(char, &'static str)> { + None + } + + pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> { + None + } + + pub fn supported_types( + self, + arch: InlineAsmArch, + ) -> &'static [(InlineAsmType, Option<Symbol>)] { + match (self, arch) { + (Self::reg, _) => types! { _: I8, I16; }, + } + } +} + +// The reserved registers are taken from: +// https://github.com/llvm/llvm-project/blob/36cb29cbbe1b22dcd298ad65e1fabe899b7d7249/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp#L73. +def_regs! { + Msp430 Msp430InlineAsmReg Msp430InlineAsmRegClass { + r5: reg = ["r5"], + r6: reg = ["r6"], + r7: reg = ["r7"], + r8: reg = ["r8"], + r9: reg = ["r9"], + r10: reg = ["r10"], + r11: reg = ["r11"], + r12: reg = ["r12"], + r13: reg = ["r13"], + r14: reg = ["r14"], + r15: reg = ["r15"], + + #error = ["r0", "pc"] => + "the program counter cannot be used as an operand for inline asm", + #error = ["r1", "sp"] => + "the stack pointer cannot be used as an operand for inline asm", + #error = ["r2", "sr"] => + "the status register cannot be used as an operand for inline asm", + #error = ["r3", "cg"] => + "the constant generator cannot be used as an operand for inline asm", + #error = ["r4", "fp"] => + "the frame pointer cannot be used as an operand for inline asm", + } +} + +impl Msp430InlineAsmReg { + pub fn emit( + self, + out: &mut dyn fmt::Write, + _arch: InlineAsmArch, + _modifier: Option<char>, + ) -> fmt::Result { + out.write_str(self.name()) + } +} diff --git a/compiler/rustc_target/src/spec/l4re_base.rs b/compiler/rustc_target/src/spec/l4re_base.rs index f6e3102f617..9e7973f63a9 100644 --- a/compiler/rustc_target/src/spec/l4re_base.rs +++ b/compiler/rustc_target/src/spec/l4re_base.rs @@ -1,25 +1,14 @@ use crate::spec::{LinkerFlavor, PanicStrategy, TargetOptions}; -//use std::process::Command; - -// Use GCC to locate code for crt* libraries from the host, not from L4Re. Note -// that a few files also come from L4Re, for these, the function shouldn't be -// used. This uses GCC for the location of the file, but GCC is required for L4Re anyway. 
-//fn get_path_or(filename: &str) -> String { -// let child = Command::new("gcc") -// .arg(format!("-print-file-name={}", filename)).output() -// .expect("Failed to execute GCC"); -// String::from_utf8(child.stdout) -// .expect("Couldn't read path from GCC").trim().into() -//} +use std::default::Default; pub fn opts() -> TargetOptions { TargetOptions { os: "l4re".to_string(), env: "uclibc".to_string(), - linker_flavor: LinkerFlavor::Ld, + linker_flavor: LinkerFlavor::L4Bender, executables: true, panic_strategy: PanicStrategy::Abort, - linker: Some("ld".to_string()), + linker: Some("l4-bender".to_string()), linker_is_gnu: false, families: vec!["unix".to_string()], ..Default::default() diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 2c149318730..4effb8bacf6 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -90,6 +90,7 @@ mod windows_uwp_msvc_base; pub enum LinkerFlavor { Em, Gcc, + L4Bender, Ld, Msvc, Lld(LldFlavor), @@ -160,6 +161,7 @@ macro_rules! flavor_mappings { flavor_mappings! { ((LinkerFlavor::Em), "em"), ((LinkerFlavor::Gcc), "gcc"), + ((LinkerFlavor::L4Bender), "l4-bender"), ((LinkerFlavor::Ld), "ld"), ((LinkerFlavor::Msvc), "msvc"), ((LinkerFlavor::PtxLinker), "ptx-linker"), diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs index 1fbd0bb4cec..64c7c1c5f6f 100644 --- a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs +++ b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs @@ -1,9 +1,12 @@ -use crate::spec::Target; +use crate::spec::{PanicStrategy, Target}; pub fn target() -> Target { let mut base = super::l4re_base::opts(); base.cpu = "x86-64".to_string(); base.max_atomic_width = Some(64); + base.crt_static_allows_dylibs = false; + base.dynamic_linking = false; + base.panic_strategy = PanicStrategy::Abort; Target { llvm_target: "x86_64-unknown-l4re-uclibc".to_string(), diff --git a/compiler/rustc_trait_selection/src/traits/codegen.rs b/compiler/rustc_trait_selection/src/traits/codegen.rs index 848aba7c912..759bc696981 100644 --- a/compiler/rustc_trait_selection/src/traits/codegen.rs +++ b/compiler/rustc_trait_selection/src/traits/codegen.rs @@ -18,7 +18,6 @@ use rustc_middle::ty::{self, TyCtxt}; /// that type check should guarantee to us that all nested /// obligations *could be* resolved if we wanted to. /// -/// Assumes that this is run after the entire crate has been successfully type-checked. /// This also expects that `trait_ref` is fully normalized. pub fn codegen_fulfill_obligation<'tcx>( tcx: TyCtxt<'tcx>, @@ -101,7 +100,7 @@ pub fn codegen_fulfill_obligation<'tcx>( /// Finishes processes any obligations that remain in the /// fulfillment context, and then returns the result with all type /// variables removed and regions erased. Because this is intended -/// for use after type-check has completed, if any errors occur, +/// for use outside of type inference, if any errors occur, /// it will panic. 
It is used during normalization and other cases /// where processing the obligations in `fulfill_cx` may cause /// type inference variables that appear in `result` to be @@ -124,7 +123,10 @@ where if !errors.is_empty() { infcx.tcx.sess.delay_span_bug( rustc_span::DUMMY_SP, - &format!("Encountered errors `{:?}` resolving bounds after type-checking", errors), + &format!( + "Encountered errors `{:?}` resolving bounds outside of type inference", + errors + ), ); } diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index af3540386f9..ab1dc8fcbfe 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -7,10 +7,13 @@ use crate::infer::{CombinedSnapshot, InferOk, TyCtxtInferExt}; use crate::traits::query::evaluate_obligation::InferCtxtExt; use crate::traits::select::IntercrateAmbiguityCause; +use crate::traits::util::impl_trait_ref_and_oblig; use crate::traits::SkipLeakCheck; use crate::traits::{ - self, Normalized, Obligation, ObligationCause, PredicateObligation, SelectionContext, + self, FulfillmentContext, Normalized, Obligation, ObligationCause, PredicateObligation, + PredicateObligations, SelectionContext, }; +use rustc_ast::Attribute; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::ty::fast_reject::{self, SimplifyParams, StripReferences}; use rustc_middle::ty::fold::TypeFoldable; @@ -135,45 +138,89 @@ fn with_fresh_ty_vars<'cx, 'tcx>( header } +/// What kind of overlap check are we doing -- this exists just for testing and feature-gating +/// purposes. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +enum OverlapMode { + /// The 1.0 rules (either types fail to unify, or where clauses are not implemented for crate-local types) + Stable, + /// Feature-gated test: Stable, *or* there is an explicit negative impl that rules out one of the where-clauses. + WithNegative, + /// Just check for negative impls, not for "where clause not implemented": used for testing. + Strict, +} + +impl OverlapMode { + fn use_negative_impl(&self) -> bool { + *self == OverlapMode::Strict || *self == OverlapMode::WithNegative + } + + fn use_implicit_negative(&self) -> bool { + *self == OverlapMode::Stable || *self == OverlapMode::WithNegative + } +} + +fn overlap_mode<'tcx>(tcx: TyCtxt<'tcx>, impl1_def_id: DefId, impl2_def_id: DefId) -> OverlapMode { + // Find the possible coherence mode override opt-in attributes for each `DefId` + let find_coherence_attr = |attr: &Attribute| { + let name = attr.name_or_empty(); + match name { + sym::rustc_with_negative_coherence | sym::rustc_strict_coherence => Some(name), + _ => None, + } + }; + let impl1_coherence_mode = tcx.get_attrs(impl1_def_id).iter().find_map(find_coherence_attr); + let impl2_coherence_mode = tcx.get_attrs(impl2_def_id).iter().find_map(find_coherence_attr); + + // If there are any (that currently happens in tests), they need to match. Otherwise, the + // default 1.0 rules are used. 
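// A minimal sketch, not part of the patch, of how these test-only attributes are
// expected to be used: both impls opt in to the same mode (here `WithNegative`),
// and an explicit negative impl is what rules out the `T: Remote` where-clause for
// the local type. Assumes `#![feature(rustc_attrs)]` and `#![feature(negative_impls)]`;
// the trait and type names are illustrative only.
//
//     trait Query {}
//     trait Remote {}
//     struct LocalType;
//
//     #[rustc_with_negative_coherence]
//     impl Query for LocalType {}
//
//     #[rustc_with_negative_coherence]
//     impl<T: Remote> Query for T {}
//
//     // The explicit negative impl is what lets `WithNegative` mode treat the
//     // two `Query` impls above as disjoint.
//     impl !Remote for LocalType {}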
+ match (impl1_coherence_mode, impl2_coherence_mode) { + (None, None) => OverlapMode::Stable, + (Some(sym::rustc_with_negative_coherence), Some(sym::rustc_with_negative_coherence)) => { + OverlapMode::WithNegative + } + (Some(sym::rustc_strict_coherence), Some(sym::rustc_strict_coherence)) => { + OverlapMode::Strict + } + (Some(mode), _) | (_, Some(mode)) => { + bug!("Use the same coherence mode on both impls: {}", mode) + } + } +} + /// Can both impl `a` and impl `b` be satisfied by a common type (including /// where-clauses)? If so, returns an `ImplHeader` that unifies the two impls. fn overlap<'cx, 'tcx>( selcx: &mut SelectionContext<'cx, 'tcx>, skip_leak_check: SkipLeakCheck, - a_def_id: DefId, - b_def_id: DefId, + impl1_def_id: DefId, + impl2_def_id: DefId, ) -> Option<OverlapResult<'tcx>> { - debug!("overlap(a_def_id={:?}, b_def_id={:?})", a_def_id, b_def_id); + debug!("overlap(impl1_def_id={:?}, impl2_def_id={:?})", impl1_def_id, impl2_def_id); selcx.infcx().probe_maybe_skip_leak_check(skip_leak_check.is_yes(), |snapshot| { - overlap_within_probe(selcx, skip_leak_check, a_def_id, b_def_id, snapshot) + overlap_within_probe(selcx, skip_leak_check, impl1_def_id, impl2_def_id, snapshot) }) } fn overlap_within_probe<'cx, 'tcx>( selcx: &mut SelectionContext<'cx, 'tcx>, skip_leak_check: SkipLeakCheck, - a_def_id: DefId, - b_def_id: DefId, + impl1_def_id: DefId, + impl2_def_id: DefId, snapshot: &CombinedSnapshot<'_, 'tcx>, ) -> Option<OverlapResult<'tcx>> { - fn loose_check<'cx, 'tcx>( - selcx: &mut SelectionContext<'cx, 'tcx>, - o: &PredicateObligation<'tcx>, - ) -> bool { - !selcx.predicate_may_hold_fatal(o) - } + let infcx = selcx.infcx(); + let tcx = infcx.tcx; + + let overlap_mode = overlap_mode(tcx, impl1_def_id, impl2_def_id); - fn strict_check<'cx, 'tcx>( - selcx: &SelectionContext<'cx, 'tcx>, - o: &PredicateObligation<'tcx>, - ) -> bool { - let infcx = selcx.infcx(); - let tcx = infcx.tcx; - o.flip_polarity(tcx) - .as_ref() - .map(|o| selcx.infcx().predicate_must_hold_modulo_regions(o)) - .unwrap_or(false) + if overlap_mode.use_negative_impl() { + if negative_impl(selcx, impl1_def_id, impl2_def_id) + || negative_impl(selcx, impl2_def_id, impl1_def_id) + { + return None; + } } // For the purposes of this check, we don't bring any placeholder @@ -182,26 +229,61 @@ fn overlap_within_probe<'cx, 'tcx>( // empty environment. let param_env = ty::ParamEnv::empty(); - let a_impl_header = with_fresh_ty_vars(selcx, param_env, a_def_id); - let b_impl_header = with_fresh_ty_vars(selcx, param_env, b_def_id); + let impl1_header = with_fresh_ty_vars(selcx, param_env, impl1_def_id); + let impl2_header = with_fresh_ty_vars(selcx, param_env, impl2_def_id); - debug!("overlap: a_impl_header={:?}", a_impl_header); - debug!("overlap: b_impl_header={:?}", b_impl_header); + debug!("overlap: impl1_header={:?}", impl1_header); + debug!("overlap: impl2_header={:?}", impl2_header); - // Do `a` and `b` unify? If not, no overlap. 
- let obligations = match selcx - .infcx() - .at(&ObligationCause::dummy(), param_env) - .eq_impl_headers(&a_impl_header, &b_impl_header) - { - Ok(InferOk { obligations, value: () }) => obligations, - Err(_) => { + let obligations = equate_impl_headers(selcx, &impl1_header, &impl2_header)?; + debug!("overlap: unification check succeeded"); + + if overlap_mode.use_implicit_negative() { + if implicit_negative(selcx, param_env, &impl1_header, impl2_header, obligations) { return None; } - }; + } - debug!("overlap: unification check succeeded"); + if !skip_leak_check.is_yes() { + if infcx.leak_check(true, snapshot).is_err() { + debug!("overlap: leak check failed"); + return None; + } + } + + let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes(); + debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes); + + let involves_placeholder = + matches!(selcx.infcx().region_constraints_added_in_snapshot(snapshot), Some(true)); + + let impl_header = selcx.infcx().resolve_vars_if_possible(impl1_header); + Some(OverlapResult { impl_header, intercrate_ambiguity_causes, involves_placeholder }) +} + +fn equate_impl_headers<'cx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'tcx>, + impl1_header: &ty::ImplHeader<'tcx>, + impl2_header: &ty::ImplHeader<'tcx>, +) -> Option<PredicateObligations<'tcx>> { + // Do `a` and `b` unify? If not, no overlap. + selcx + .infcx() + .at(&ObligationCause::dummy(), ty::ParamEnv::empty()) + .eq_impl_headers(impl1_header, impl2_header) + .map(|infer_ok| infer_ok.obligations) + .ok() +} +/// Given impl1 and impl2, check if both impls can be satisfied by a common type (including +/// where-clauses). If so, return false; otherwise return true (they are disjoint). +fn implicit_negative<'cx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + impl1_header: &ty::ImplHeader<'tcx>, + impl2_header: ty::ImplHeader<'tcx>, + obligations: PredicateObligations<'tcx>, +) -> bool { // There's no overlap if obligations are unsatisfiable or if the obligation negated is // satisfied. // @@ -225,11 +307,11 @@ fn overlap_within_probe<'cx, 'tcx>( // at some point an impl for `&'?a str: Error` could be added. let infcx = selcx.infcx(); let tcx = infcx.tcx; - let opt_failing_obligation = a_impl_header .predicates .iter() .copied() - .chain(b_impl_header.predicates) + .chain(impl2_header.predicates) .map(|p| infcx.resolve_vars_if_possible(p)) .map(|p| Obligation { cause: ObligationCause::dummy(), @@ -239,15 +321,7 @@ fn overlap_within_probe<'cx, 'tcx>( }) .chain(obligations) .find(|o| { - // if both impl headers are set to strict coherence it means that this will be accepted - // only if it's stated that T: !Trait. So only prove that the negated obligation holds.
- if tcx.has_attr(a_def_id, sym::rustc_strict_coherence) - && tcx.has_attr(b_def_id, sym::rustc_strict_coherence) - { - strict_check(selcx, o) - } else { - loose_check(selcx, o) || tcx.features().negative_impls && strict_check(selcx, o) - } + loose_check(selcx, o) || tcx.features().negative_impls && negative_impl_exists(selcx, o) }); // FIXME: the call to `selcx.predicate_may_hold_fatal` above should be ported // to the canonical trait query form, `infcx.predicate_may_hold`, once @@ -255,24 +329,97 @@ fn overlap_within_probe<'cx, 'tcx>( if let Some(failing_obligation) = opt_failing_obligation { debug!("overlap: obligation unsatisfiable {:?}", failing_obligation); - return None; + true + } else { + false } +} - if !skip_leak_check.is_yes() { - if infcx.leak_check(true, snapshot).is_err() { - debug!("overlap: leak check failed"); - return None; - } - } +/// Given impl1 and impl2, check if both impls can never be satisfied by a common type (including +/// where-clauses). If so, return true (they are disjoint); otherwise return false. +fn negative_impl<'cx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'tcx>, + impl1_def_id: DefId, + impl2_def_id: DefId, +) -> bool { + let tcx = selcx.infcx().tcx; - let impl_header = selcx.infcx().resolve_vars_if_possible(a_impl_header); - let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes(); - debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes); + // create a parameter environment corresponding to a (placeholder) instantiation of impl1 + let impl1_env = tcx.param_env(impl1_def_id); + let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap(); - let involves_placeholder = - matches!(selcx.infcx().region_constraints_added_in_snapshot(snapshot), Some(true)); + // Create an infcx, taking the predicates of impl1 as assumptions: + tcx.infer_ctxt().enter(|infcx| { + // Normalize the trait reference. The WF rules ought to ensure + // that this always succeeds. + let impl1_trait_ref = match traits::fully_normalize( + &infcx, + FulfillmentContext::new(), + ObligationCause::dummy(), + impl1_env, + impl1_trait_ref, + ) { + Ok(impl1_trait_ref) => impl1_trait_ref, + Err(err) => { + bug!("failed to fully normalize {:?}: {:?}", impl1_trait_ref, err); + } + }; + + // Attempt to prove that impl2 applies, given all of the above. + let selcx = &mut SelectionContext::new(&infcx); + let impl2_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl2_def_id); + let (impl2_trait_ref, obligations) = + impl_trait_ref_and_oblig(selcx, impl1_env, impl2_def_id, impl2_substs); + + // do the impls unify? If not, not disjoint. + let more_obligations = match infcx + .at(&ObligationCause::dummy(), impl1_env) + .eq(impl1_trait_ref, impl2_trait_ref) + { + Ok(InferOk { obligations, ..
}) => obligations, + Err(_) => { + debug!( + "explicit_disjoint: {:?} does not unify with {:?}", + impl1_trait_ref, impl2_trait_ref + ); + return false; + } + }; - Some(OverlapResult { impl_header, intercrate_ambiguity_causes, involves_placeholder }) + let opt_failing_obligation = obligations + .into_iter() + .chain(more_obligations) + .find(|o| negative_impl_exists(selcx, o)); + + if let Some(failing_obligation) = opt_failing_obligation { + debug!("overlap: obligation unsatisfiable {:?}", failing_obligation); + true + } else { + false + } + }) +} + +fn loose_check<'cx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'tcx>, + o: &PredicateObligation<'tcx>, +) -> bool { + !selcx.predicate_may_hold_fatal(o) +} + +fn negative_impl_exists<'cx, 'tcx>( + selcx: &SelectionContext<'cx, 'tcx>, + o: &PredicateObligation<'tcx>, +) -> bool { + let infcx = selcx.infcx(); + let tcx = infcx.tcx; + o.flip_polarity(tcx) + .as_ref() + .map(|o| { + // FIXME This isn't quite correct, regions should be included + selcx.infcx().predicate_must_hold_modulo_regions(o) + }) + .unwrap_or(false) } pub fn trait_ref_is_knowable<'tcx>( diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs index 0760f626851..687bd16ba30 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs @@ -205,6 +205,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { self.note_obligation_cause_code( &mut err, &obligation.predicate, + obligation.param_env, obligation.cause.code(), &mut vec![], &mut Default::default(), @@ -288,7 +289,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { match bound_predicate.skip_binder() { ty::PredicateKind::Trait(trait_predicate) => { let trait_predicate = bound_predicate.rebind(trait_predicate); - let trait_predicate = self.resolve_vars_if_possible(trait_predicate); + let mut trait_predicate = self.resolve_vars_if_possible(trait_predicate); + + trait_predicate.remap_constness_diag(obligation.param_env); + let predicate_is_const = ty::BoundConstness::ConstIfConst + == trait_predicate.skip_binder().constness; if self.tcx.sess.has_errors() && trait_predicate.references_error() { return; @@ -305,13 +310,18 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { }) .unwrap_or_default(); - let OnUnimplementedNote { message, label, note, enclosing_scope } = - self.on_unimplemented_note(trait_ref, &obligation); + let OnUnimplementedNote { + message, + label, + note, + enclosing_scope, + append_const_msg, + } = self.on_unimplemented_note(trait_ref, &obligation); let have_alt_message = message.is_some() || label.is_some(); let is_try_conversion = self.is_try_conversion(span, trait_ref.def_id()); let is_unsize = { Some(trait_ref.def_id()) == self.tcx.lang_items().unsize_trait() }; - let (message, note) = if is_try_conversion { + let (message, note, append_const_msg) = if is_try_conversion { ( Some(format!( "`?` couldn't convert the error to `{}`", @@ -322,9 +332,10 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { conversion on the error value using the `From` trait" .to_owned(), ), + Some(None), ) } else { - (message, note) + (message, note, append_const_msg) }; let mut err = struct_span_err!( @@ -332,11 +343,27 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { span, E0277, "{}", - message.unwrap_or_else(|| format!( - "the trait bound `{}` is not satisfied{}", - 
trait_ref.without_const().to_predicate(tcx), - post_message, - )) + message + .and_then(|cannot_do_this| { + match (predicate_is_const, append_const_msg) { + // do nothing if predicate is not const + (false, _) => Some(cannot_do_this), + // suggested using default post message + (true, Some(None)) => { + Some(format!("{cannot_do_this} in const contexts")) + } + // overridden post message + (true, Some(Some(post_message))) => { + Some(format!("{cannot_do_this}{post_message}")) + } + // fallback to generic message + (true, None) => None, + } + }) + .unwrap_or_else(|| format!( + "the trait bound `{}` is not satisfied{}", + trait_predicate, post_message, + )) ); if is_try_conversion { @@ -384,7 +411,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { format!( "{}the trait `{}` is not implemented for `{}`", pre_message, - trait_ref.print_only_trait_path(), + trait_predicate.print_modifiers_and_trait_path(), trait_ref.skip_binder().self_ty(), ) }; @@ -392,7 +419,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { if self.suggest_add_reference_to_arg( &obligation, &mut err, - &trait_ref, + trait_predicate, have_alt_message, ) { self.note_obligation_cause(&mut err, &obligation); @@ -412,6 +439,28 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { } else { err.span_label(span, explanation); } + + if trait_predicate.is_const_if_const() && obligation.param_env.is_const() { + let non_const_predicate = trait_ref.without_const(); + let non_const_obligation = Obligation { + cause: obligation.cause.clone(), + param_env: obligation.param_env.without_const(), + predicate: non_const_predicate.to_predicate(tcx), + recursion_depth: obligation.recursion_depth, + }; + if self.predicate_may_hold(&non_const_obligation) { + err.span_note( + span, + &format!( + "the trait `{}` is implemented for `{}`, \ but that implementation is not `const`", + non_const_predicate.print_modifiers_and_trait_path(), + trait_ref.skip_binder().self_ty(), + ), + ); + } + } + if let Some((msg, span)) = type_def { err.span_label(span, &msg); } @@ -435,18 +484,28 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err.span_label(enclosing_scope_span, s.as_str()); } - self.suggest_dereferences(&obligation, &mut err, trait_ref); - self.suggest_fn_call(&obligation, &mut err, trait_ref); - self.suggest_remove_reference(&obligation, &mut err, trait_ref); - self.suggest_semicolon_removal(&obligation, &mut err, span, trait_ref); + self.suggest_dereferences(&obligation, &mut err, trait_predicate); + self.suggest_fn_call(&obligation, &mut err, trait_predicate); + self.suggest_remove_reference(&obligation, &mut err, trait_predicate); + self.suggest_semicolon_removal( + &obligation, + &mut err, + span, + trait_predicate, + ); self.note_version_mismatch(&mut err, &trait_ref); self.suggest_remove_await(&obligation, &mut err); if Some(trait_ref.def_id()) == tcx.lang_items().try_trait() { - self.suggest_await_before_try(&mut err, &obligation, trait_ref, span); + self.suggest_await_before_try( + &mut err, + &obligation, + trait_predicate, + span, + ); } - if self.suggest_impl_trait(&mut err, span, &obligation, trait_ref) { + if self.suggest_impl_trait(&mut err, span, &obligation, trait_predicate) { err.emit(); return; } @@ -494,7 +553,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // which is somewhat confusing.
self.suggest_restricting_param_bound( &mut err, - trait_ref, + trait_predicate, obligation.cause.body_id, ); } else if !have_alt_message { @@ -506,7 +565,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // Changing mutability doesn't make a difference to whether we have // an `Unsize` impl (Fixes ICE in #71036) if !is_unsize { - self.suggest_change_mut(&obligation, &mut err, trait_ref); + self.suggest_change_mut(&obligation, &mut err, trait_predicate); } // If this error is due to `!: Trait` not implemented but `(): Trait` is @@ -1121,7 +1180,7 @@ trait InferCtxtPrivExt<'hir, 'tcx> { fn mk_trait_obligation_with_new_self_ty( &self, param_env: ty::ParamEnv<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_ref: ty::PolyTraitPredicate<'tcx>, new_self_ty: Ty<'tcx>, ) -> PredicateObligation<'tcx>; @@ -1353,6 +1412,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> { .map(|id| (trait_assoc_item, id)) }) .and_then(|(trait_assoc_item, id)| { + let trait_assoc_ident = trait_assoc_item.ident(self.tcx); self.tcx.find_map_relevant_impl( id, proj.projection_ty.self_ty(), @@ -1360,7 +1420,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> { self.tcx .associated_items(did) .in_definition_order() - .filter(|assoc| assoc.ident == trait_assoc_item.ident) + .filter(|assoc| assoc.ident(self.tcx) == trait_assoc_ident) .next() }, ) @@ -1540,7 +1600,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> { ) -> Option<(String, Option<Span>)> { match code { ObligationCauseCode::BuiltinDerivedObligation(data) => { - let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred); match self.get_parent_trait_ref(&data.parent_code) { Some(t) => Some(t), None => { @@ -1593,21 +1653,20 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> { fn mk_trait_obligation_with_new_self_ty( &self, param_env: ty::ParamEnv<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_ref: ty::PolyTraitPredicate<'tcx>, new_self_ty: Ty<'tcx>, ) -> PredicateObligation<'tcx> { assert!(!new_self_ty.has_escaping_bound_vars()); - let trait_ref = trait_ref.map_bound_ref(|tr| ty::TraitRef { - substs: self.tcx.mk_substs_trait(new_self_ty, &tr.substs[1..]), + let trait_pred = trait_ref.map_bound_ref(|tr| ty::TraitPredicate { + trait_ref: ty::TraitRef { + substs: self.tcx.mk_substs_trait(new_self_ty, &tr.trait_ref.substs[1..]), + ..tr.trait_ref + }, ..*tr }); - Obligation::new( - ObligationCause::dummy(), - param_env, - trait_ref.without_const().to_predicate(self.tcx), - ) + Obligation::new(ObligationCause::dummy(), param_env, trait_pred.to_predicate(self.tcx)) } #[instrument(skip(self), level = "debug")] @@ -2008,6 +2067,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> { self.note_obligation_cause_code( err, &obligation.predicate, + obligation.param_env, obligation.cause.code(), &mut vec![], &mut Default::default(), @@ -2155,7 +2215,7 @@ impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> { cause_code: &ObligationCauseCode<'tcx>, ) -> bool { if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = cause_code { - let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred); let self_ty = parent_trait_ref.skip_binder().self_ty(); if obligated_types.iter().any(|ot| ot == &self_ty) { return true; diff --git 
a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index e94c600bb49..8c0dbe9b064 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -48,7 +48,7 @@ pub trait InferCtxtExt<'tcx> { fn suggest_restricting_param_bound( &self, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, body_id: hir::HirId, ); @@ -56,7 +56,7 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ); fn get_closure_name( @@ -70,14 +70,14 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ); fn suggest_add_reference_to_arg( &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: &ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, has_custom_message: bool, ) -> bool; @@ -85,7 +85,7 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ); fn suggest_remove_await( @@ -98,7 +98,7 @@ pub trait InferCtxtExt<'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ); fn suggest_semicolon_removal( @@ -106,7 +106,7 @@ pub trait InferCtxtExt<'tcx> { obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, span: Span, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ); fn return_type_span(&self, obligation: &PredicateObligation<'tcx>) -> Option<Span>; @@ -116,7 +116,7 @@ pub trait InferCtxtExt<'tcx> { err: &mut DiagnosticBuilder<'_>, span: Span, obligation: &PredicateObligation<'tcx>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) -> bool; fn point_at_returns_when_relevant( @@ -154,7 +154,7 @@ pub trait InferCtxtExt<'tcx> { interior_extra_info: Option<(Option<Span>, Span, Option<hir::HirId>, Option<Span>)>, inner_generator_body: Option<&hir::Body<'tcx>>, outer_generator: Option<DefId>, - trait_ref: ty::TraitRef<'tcx>, + trait_pred: ty::TraitPredicate<'tcx>, target_ty: Ty<'tcx>, typeck_results: Option<&ty::TypeckResults<'tcx>>, obligation: &PredicateObligation<'tcx>, @@ -165,6 +165,7 @@ pub trait InferCtxtExt<'tcx> { &self, err: &mut DiagnosticBuilder<'_>, predicate: &T, + param_env: ty::ParamEnv<'tcx>, cause_code: &ObligationCauseCode<'tcx>, obligated_types: &mut Vec<&ty::TyS<'tcx>>, seen_requirements: &mut FxHashSet<DefId>, @@ -178,7 +179,7 @@ pub trait InferCtxtExt<'tcx> { &self, err: &mut DiagnosticBuilder<'_>, obligation: &PredicateObligation<'tcx>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, span: Span, ); } @@ -204,7 +205,7 @@ fn suggest_restriction<'tcx>( err: &mut DiagnosticBuilder<'_>, fn_sig: Option<&hir::FnSig<'_>>, projection: Option<&ty::ProjectionTy<'_>>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, super_traits: 
Option<(&Ident, &hir::GenericBounds<'_>)>, ) { // When we are dealing with a trait, `super_traits` will be `Some`: @@ -257,9 +258,9 @@ fn suggest_restriction<'tcx>( // The type param `T: Trait` we will suggest to introduce. let type_param = format!("{}: {}", type_param_name, bound_str); - // FIXME: modify the `trait_ref` instead of string shenanigans. + // FIXME: modify the `trait_pred` instead of string shenanigans. // Turn `<impl Trait as Foo>::Bar: Qux` into `<T as Foo>::Bar: Qux`. - let pred = trait_ref.without_const().to_predicate(tcx).to_string(); + let pred = trait_pred.to_predicate(tcx).to_string(); let pred = pred.replace(&impl_trait_str, &type_param_name); let mut sugg = vec![ // Find the last of the generic parameters contained within the span of @@ -301,19 +302,19 @@ fn suggest_restriction<'tcx>( .find(|p| !matches!(p.kind, hir::GenericParamKind::Type { synthetic: true, .. })), super_traits, ) { - (_, None) => predicate_constraint( - generics, - trait_ref.without_const().to_predicate(tcx).to_string(), + (_, None) => predicate_constraint(generics, trait_pred.to_predicate(tcx).to_string()), + (None, Some((ident, []))) => ( + ident.span.shrink_to_hi(), + format!(": {}", trait_pred.print_modifiers_and_trait_path()), + ), + (_, Some((_, [.., bounds]))) => ( + bounds.span().shrink_to_hi(), + format!(" + {}", trait_pred.print_modifiers_and_trait_path()), + ), + (Some(_), Some((_, []))) => ( + generics.span.shrink_to_hi(), + format!(": {}", trait_pred.print_modifiers_and_trait_path()), ), - (None, Some((ident, []))) => { - (ident.span.shrink_to_hi(), format!(": {}", trait_ref.print_only_trait_path())) - } - (_, Some((_, [.., bounds]))) => { - (bounds.span().shrink_to_hi(), format!(" + {}", trait_ref.print_only_trait_path())) - } - (Some(_), Some((_, []))) => { - (generics.span.shrink_to_hi(), format!(": {}", trait_ref.print_only_trait_path())) - } }; err.span_suggestion_verbose( @@ -329,10 +330,10 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { fn suggest_restricting_param_bound( &self, mut err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, body_id: hir::HirId, ) { - let self_ty = trait_ref.skip_binder().self_ty(); + let self_ty = trait_pred.skip_binder().self_ty(); let (param_ty, projection) = match self_ty.kind() { ty::Param(_) => (true, None), ty::Projection(projection) => (false, Some(projection)), @@ -358,7 +359,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err, None, projection, - trait_ref, + trait_pred, Some((ident, bounds)), ); return; @@ -372,7 +373,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { assert!(param_ty); // Restricting `Self` for a single method. suggest_restriction( - self.tcx, &generics, "`Self`", err, None, projection, trait_ref, None, + self.tcx, &generics, "`Self`", err, None, projection, trait_pred, None, ); return; } @@ -398,7 +399,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err, Some(fn_sig), projection, - trait_ref, + trait_pred, None, ); return; @@ -417,7 +418,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err, None, projection, - trait_ref, + trait_pred, None, ); return; @@ -442,15 +443,16 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { { // Missing generic type parameter bound. 
let param_name = self_ty.to_string(); - let constraint = - with_no_trimmed_paths(|| trait_ref.print_only_trait_path().to_string()); + let constraint = with_no_trimmed_paths(|| { + trait_pred.print_modifiers_and_trait_path().to_string() + }); if suggest_constraining_type_param( self.tcx, generics, &mut err, ¶m_name, &constraint, - Some(trait_ref.def_id()), + Some(trait_pred.def_id()), ) { return; } @@ -471,7 +473,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { }) if !param_ty => { // Missing generic type parameter bound. let param_name = self_ty.to_string(); - let constraint = trait_ref.print_only_trait_path().to_string(); + let constraint = trait_pred.print_modifiers_and_trait_path().to_string(); if suggest_arbitrary_trait_bound(generics, &mut err, ¶m_name, &constraint) { return; } @@ -492,7 +494,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { // It only make sense when suggesting dereferences for arguments let code = if let ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } = @@ -505,13 +507,13 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let param_env = obligation.param_env; let body_id = obligation.cause.body_id; let span = obligation.cause.span; - let real_trait_ref = match &*code { + let real_trait_pred = match &*code { ObligationCauseCode::ImplDerivedObligation(cause) | ObligationCauseCode::DerivedObligation(cause) - | ObligationCauseCode::BuiltinDerivedObligation(cause) => cause.parent_trait_ref, - _ => trait_ref, + | ObligationCauseCode::BuiltinDerivedObligation(cause) => cause.parent_trait_pred, + _ => trait_pred, }; - let real_ty = match real_trait_ref.self_ty().no_bound_vars() { + let real_ty = match real_trait_pred.self_ty().no_bound_vars() { Some(ty) => ty, None => return, }; @@ -522,7 +524,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // Re-add the `&` let ty = self.tcx.mk_ref(region, TypeAndMut { ty, mutbl }); let obligation = - self.mk_trait_obligation_with_new_self_ty(param_env, real_trait_ref, ty); + self.mk_trait_obligation_with_new_self_ty(param_env, real_trait_pred, ty); Some(steps).filter(|_| self.predicate_may_hold(&obligation)) }) { if steps > 0 { @@ -589,9 +591,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { - let self_ty = match trait_ref.self_ty().no_bound_vars() { + let self_ty = match trait_pred.self_ty().no_bound_vars() { None => return, Some(ty) => ty, }; @@ -611,7 +613,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { }; let new_obligation = - self.mk_trait_obligation_with_new_self_ty(obligation.param_env, trait_ref, output_ty); + self.mk_trait_obligation_with_new_self_ty(obligation.param_env, trait_pred, output_ty); match self.evaluate_obligation(&new_obligation) { Ok( @@ -682,7 +684,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - poly_trait_ref: &ty::Binder<'tcx, ty::TraitRef<'tcx>>, + poly_trait_pred: ty::PolyTraitPredicate<'tcx>, has_custom_message: bool, ) -> bool { let span = obligation.cause.span; @@ -715,24 +717,18 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let param_env = 
obligation.param_env; // Try to apply the original trait binding obligation by borrowing. - let mut try_borrowing = |old_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + let mut try_borrowing = |old_pred: ty::PolyTraitPredicate<'tcx>, blacklist: &[DefId]| -> bool { - if blacklist.contains(&old_ref.def_id()) { + if blacklist.contains(&old_pred.def_id()) { return false; } - let orig_ty = old_ref.self_ty().skip_binder(); + let orig_ty = old_pred.self_ty().skip_binder(); let mk_result = |new_ty| { - let new_ref = old_ref.rebind(ty::TraitRef::new( - old_ref.def_id(), - self.tcx.mk_substs_trait(new_ty, &old_ref.skip_binder().substs[1..]), - )); - self.predicate_must_hold_modulo_regions(&Obligation::new( - ObligationCause::dummy(), - param_env, - new_ref.without_const().to_predicate(self.tcx), - )) + let obligation = + self.mk_trait_obligation_with_new_self_ty(param_env, old_pred, new_ty); + self.predicate_must_hold_modulo_regions(&obligation) }; let imm_result = mk_result(self.tcx.mk_imm_ref(self.tcx.lifetimes.re_static, orig_ty)); let mut_result = mk_result(self.tcx.mk_mut_ref(self.tcx.lifetimes.re_static, orig_ty)); @@ -748,7 +744,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let msg = format!( "the trait bound `{}: {}` is not satisfied", orig_ty, - old_ref.print_only_trait_path(), + old_pred.print_modifiers_and_trait_path(), ); if has_custom_message { err.note(&msg); @@ -764,7 +760,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { span, &format!( "expected an implementor of trait `{}`", - old_ref.print_only_trait_path(), + old_pred.print_modifiers_and_trait_path(), ), ); @@ -806,11 +802,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { }; if let ObligationCauseCode::ImplDerivedObligation(obligation) = code { - try_borrowing(obligation.parent_trait_ref, &[]) + try_borrowing(obligation.parent_trait_pred, &[]) } else if let ObligationCauseCode::BindingObligation(_, _) | ObligationCauseCode::ItemObligation(_) = code { - try_borrowing(*poly_trait_ref, &never_suggest_borrow) + try_borrowing(poly_trait_pred, &never_suggest_borrow) } else { false } @@ -822,7 +818,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { let span = obligation.cause.span; @@ -834,45 +830,44 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { return; } - let mut suggested_ty = match trait_ref.self_ty().no_bound_vars() { + let mut suggested_ty = match trait_pred.self_ty().no_bound_vars() { Some(ty) => ty, None => return, }; for refs_remaining in 0..refs_number { - if let ty::Ref(_, inner_ty, _) = suggested_ty.kind() { - suggested_ty = inner_ty; + let ty::Ref(_, inner_ty, _) = suggested_ty.kind() else { + break; + }; + suggested_ty = inner_ty; - let new_obligation = self.mk_trait_obligation_with_new_self_ty( - obligation.param_env, - trait_ref, - suggested_ty, - ); + let new_obligation = self.mk_trait_obligation_with_new_self_ty( + obligation.param_env, + trait_pred, + suggested_ty, + ); - if self.predicate_may_hold(&new_obligation) { - let sp = self - .tcx - .sess - .source_map() - .span_take_while(span, |c| c.is_whitespace() || *c == '&'); + if self.predicate_may_hold(&new_obligation) { + let sp = self + .tcx + .sess + .source_map() + .span_take_while(span, |c| c.is_whitespace() || *c == '&'); - let remove_refs = refs_remaining + 1; + let remove_refs = refs_remaining + 1; - let msg 
= if remove_refs == 1 { - "consider removing the leading `&`-reference".to_string() - } else { - format!("consider removing {} leading `&`-references", remove_refs) - }; + let msg = if remove_refs == 1 { + "consider removing the leading `&`-reference".to_string() + } else { + format!("consider removing {} leading `&`-references", remove_refs) + }; - err.span_suggestion_short( - sp, - &msg, - String::new(), - Applicability::MachineApplicable, - ); - break; - } - } else { + err.span_suggestion_short( + sp, + &msg, + String::new(), + Applicability::MachineApplicable, + ); break; } } @@ -942,7 +937,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { let points_at_arg = matches!( obligation.cause.code(), @@ -957,14 +952,15 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // Do not suggest removal of borrow from type arguments. return; } - let trait_ref = self.resolve_vars_if_possible(trait_ref); - if trait_ref.has_infer_types_or_consts() { + let trait_pred = self.resolve_vars_if_possible(trait_pred); + if trait_pred.has_infer_types_or_consts() { // Do not ICE while trying to find if a reborrow would succeed on a trait with // unresolved bindings. return; } - if let ty::Ref(region, t_type, mutability) = *trait_ref.skip_binder().self_ty().kind() { + if let ty::Ref(region, t_type, mutability) = *trait_pred.skip_binder().self_ty().kind() + { if region.is_late_bound() || t_type.has_escaping_bound_vars() { // Avoid debug assertion in `mk_obligation_for_def_id`. // @@ -981,7 +977,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let new_obligation = self.mk_trait_obligation_with_new_self_ty( obligation.param_env, - trait_ref, + trait_pred, suggested_ty, ); let suggested_ty_would_satisfy_obligation = self @@ -1003,9 +999,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { } else { err.note(&format!( "`{}` is implemented for `{:?}`, but not for `{:?}`", - trait_ref.print_only_trait_path(), + trait_pred.print_modifiers_and_trait_path(), suggested_ty, - trait_ref.skip_binder().self_ty(), + trait_pred.skip_binder().self_ty(), )); } } @@ -1018,7 +1014,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { obligation: &PredicateObligation<'tcx>, err: &mut DiagnosticBuilder<'_>, span: Span, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { let is_empty_tuple = |ty: ty::Binder<'tcx, Ty<'_>>| *ty.skip_binder().kind() == ty::Tuple(ty::List::empty()); @@ -1034,7 +1030,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { if let hir::ExprKind::Block(blk, _) = &body.value.kind { if sig.decl.output.span().overlaps(span) && blk.expr.is_none() - && is_empty_tuple(trait_ref.self_ty()) + && is_empty_tuple(trait_pred.self_ty()) { // FIXME(estebank): When encountering a method with a trait // bound not satisfied in the return type with a body that has @@ -1070,7 +1066,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err: &mut DiagnosticBuilder<'_>, span: Span, obligation: &PredicateObligation<'tcx>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) -> bool { match obligation.cause.code().peel_derives() { // Only suggest `impl Trait` if the return type is unsized because it is `dyn Trait`. 
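The reworked `suggest_remove_reference` above peels leading `&`s off the obligation's self type and re-checks the predicate after each step. A minimal sketch, not taken from the patch (trait and function names are illustrative), of the situation that suggestion targets:

trait Summary {
    fn summarize(&self) -> String;
}

impl Summary for i32 {
    fn summarize(&self) -> String {
        self.to_string()
    }
}

fn print_summary<T: Summary>(item: T) {
    println!("{}", item.summarize());
}

fn main() {
    // Deliberately does not compile: `&i32` does not satisfy `Summary`, but
    // peeling the leading `&` yields `i32`, which does, so the machinery above
    // can propose removing the `&`-reference from the argument.
    print_summary(&1);
}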
@@ -1089,8 +1085,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { return false; }; let body = hir.body(*body_id); - let trait_ref = self.resolve_vars_if_possible(trait_ref); - let ty = trait_ref.skip_binder().self_ty(); + let trait_pred = self.resolve_vars_if_possible(trait_pred); + let ty = trait_pred.skip_binder().self_ty(); let is_object_safe = match ty.kind() { ty::Dynamic(predicates, _) => { // If the `dyn Trait` is not object safe, do not suggest `Box<dyn Trait>`. @@ -1327,9 +1323,9 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { trait_ref.rebind(sig).to_string() } - let argument_kind = match expected_ref.skip_binder().substs.type_at(0) { - t if t.is_closure() => "closure", - t if t.is_generator() => "generator", + let argument_kind = match expected_ref.skip_binder().self_ty().kind() { + ty::Closure(..) => "closure", + ty::Generator(..) => "generator", _ => "function", }; let mut err = struct_span_err!( @@ -1368,7 +1364,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err.span_suggestion( span, "use the fully qualified path to an implementation", - format!("<Type as {}>::{}", self.tcx.def_path_str(trait_ref), assoc_item.ident), + format!("<Type as {}>::{}", self.tcx.def_path_str(trait_ref), assoc_item.name), Applicability::HasPlaceholders, ); } @@ -1456,7 +1452,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // bound was introduced. At least one generator should be present for this diagnostic to be // modified. let (mut trait_ref, mut target_ty) = match obligation.predicate.kind().skip_binder() { - ty::PredicateKind::Trait(p) => (Some(p.trait_ref), Some(p.self_ty())), + ty::PredicateKind::Trait(p) => (Some(p), Some(p.self_ty())), _ => (None, None), }; let mut generator = None; @@ -1474,11 +1470,11 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { ObligationCauseCode::DerivedObligation(derived_obligation) | ObligationCauseCode::BuiltinDerivedObligation(derived_obligation) | ObligationCauseCode::ImplDerivedObligation(derived_obligation) => { - let ty = derived_obligation.parent_trait_ref.skip_binder().self_ty(); + let ty = derived_obligation.parent_trait_pred.skip_binder().self_ty(); debug!( "maybe_note_obligation_cause_for_async_await: \ parent_trait_ref={:?} self_ty.kind={:?}", - derived_obligation.parent_trait_ref, + derived_obligation.parent_trait_pred, ty.kind() ); @@ -1496,7 +1492,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { seen_upvar_tys_infer_tuple = true; } _ if generator.is_none() => { - trait_ref = Some(derived_obligation.parent_trait_ref.skip_binder()); + trait_ref = Some(derived_obligation.parent_trait_pred.skip_binder()); target_ty = Some(ty); } _ => {} @@ -1652,7 +1648,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { interior_extra_info: Option<(Option<Span>, Span, Option<hir::HirId>, Option<Span>)>, inner_generator_body: Option<&hir::Body<'tcx>>, outer_generator: Option<DefId>, - trait_ref: ty::TraitRef<'tcx>, + trait_pred: ty::TraitPredicate<'tcx>, target_ty: Ty<'tcx>, typeck_results: Option<&ty::TypeckResults<'tcx>>, obligation: &PredicateObligation<'tcx>, @@ -1672,7 +1668,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { // not implemented. 
let hir = self.tcx.hir(); let trait_explanation = if let Some(name @ (sym::Send | sym::Sync)) = - self.tcx.get_diagnostic_name(trait_ref.def_id) + self.tcx.get_diagnostic_name(trait_pred.def_id()) { let (trait_name, trait_verb) = if name == sym::Send { ("`Send`", "sent") } else { ("`Sync`", "shared") }; @@ -1714,7 +1710,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { format!("is not {}", trait_name) } else { - format!("does not implement `{}`", trait_ref.print_only_trait_path()) + format!("does not implement `{}`", trait_pred.print_modifiers_and_trait_path()) }; let mut explain_yield = |interior_span: Span, @@ -1895,6 +1891,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { self.note_obligation_cause_code( err, &obligation.predicate, + obligation.param_env, next_code.unwrap(), &mut Vec::new(), &mut Default::default(), @@ -1905,6 +1902,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, err: &mut DiagnosticBuilder<'_>, predicate: &T, + param_env: ty::ParamEnv<'tcx>, cause_code: &ObligationCauseCode<'tcx>, obligated_types: &mut Vec<&ty::TyS<'tcx>>, seen_requirements: &mut FxHashSet<DefId>, @@ -2135,7 +2133,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { err.note("shared static variables must have a type that implements `Sync`"); } ObligationCauseCode::BuiltinDerivedObligation(ref data) => { - let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred); let ty = parent_trait_ref.skip_binder().self_ty(); if parent_trait_ref.references_error() { err.cancel(); @@ -2150,7 +2148,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = *data.parent_code { - let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); + let parent_trait_ref = + self.resolve_vars_if_possible(data.parent_trait_pred); let ty = parent_trait_ref.skip_binder().self_ty(); matches!(ty.kind(), ty::Generator(..)) || matches!(ty.kind(), ty::Closure(..)) @@ -2173,13 +2172,14 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { obligated_types.push(ty); - let parent_predicate = parent_trait_ref.without_const().to_predicate(tcx); + let parent_predicate = parent_trait_ref.to_predicate(tcx); if !self.is_recursive_obligation(obligated_types, &data.parent_code) { // #74711: avoid a stack overflow ensure_sufficient_stack(|| { self.note_obligation_cause_code( err, &parent_predicate, + param_env, &data.parent_code, obligated_types, seen_requirements, @@ -2190,6 +2190,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { self.note_obligation_cause_code( err, &parent_predicate, + param_env, &cause_code.peel_derives(), obligated_types, seen_requirements, @@ -2198,17 +2199,18 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { } } ObligationCauseCode::ImplDerivedObligation(ref data) => { - let mut parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); - let parent_def_id = parent_trait_ref.def_id(); + let mut parent_trait_pred = self.resolve_vars_if_possible(data.parent_trait_pred); + parent_trait_pred.remap_constness_diag(param_env); + let parent_def_id = parent_trait_pred.def_id(); let msg = format!( "required because of the requirements on the impl of `{}` for `{}`", - parent_trait_ref.print_only_trait_path(), - parent_trait_ref.skip_binder().self_ty() + parent_trait_pred.print_modifiers_and_trait_path(), + 
parent_trait_pred.skip_binder().self_ty() ); let mut candidates = vec![]; self.tcx.for_each_relevant_impl( parent_def_id, - parent_trait_ref.self_ty().skip_binder(), + parent_trait_pred.self_ty().skip_binder(), |impl_def_id| match self.tcx.hir().get_if_local(impl_def_id) { Some(Node::Item(hir::Item { kind: hir::ItemKind::Impl(hir::Impl { .. }), @@ -2237,21 +2239,21 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { _ => err.note(&msg), }; - let mut parent_predicate = parent_trait_ref.without_const().to_predicate(tcx); + let mut parent_predicate = parent_trait_pred.to_predicate(tcx); let mut data = data; let mut count = 0; seen_requirements.insert(parent_def_id); while let ObligationCauseCode::ImplDerivedObligation(child) = &*data.parent_code { // Skip redundant recursive obligation notes. See `ui/issue-20413.rs`. - let child_trait_ref = self.resolve_vars_if_possible(child.parent_trait_ref); - let child_def_id = child_trait_ref.def_id(); + let child_trait_pred = self.resolve_vars_if_possible(child.parent_trait_pred); + let child_def_id = child_trait_pred.def_id(); if seen_requirements.insert(child_def_id) { break; } count += 1; data = child; - parent_predicate = child_trait_ref.without_const().to_predicate(tcx); - parent_trait_ref = child_trait_ref; + parent_predicate = child_trait_pred.to_predicate(tcx); + parent_trait_pred = child_trait_pred; } if count > 0 { err.note(&format!( @@ -2261,8 +2263,8 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { )); err.note(&format!( "required because of the requirements on the impl of `{}` for `{}`", - parent_trait_ref.print_only_trait_path(), - parent_trait_ref.skip_binder().self_ty() + parent_trait_pred.print_modifiers_and_trait_path(), + parent_trait_pred.skip_binder().self_ty() )); } // #74711: avoid a stack overflow @@ -2270,6 +2272,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { self.note_obligation_cause_code( err, &parent_predicate, + param_env, &data.parent_code, obligated_types, seen_requirements, @@ -2277,13 +2280,14 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { }); } ObligationCauseCode::DerivedObligation(ref data) => { - let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_ref); - let parent_predicate = parent_trait_ref.without_const().to_predicate(tcx); + let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred); + let parent_predicate = parent_trait_ref.to_predicate(tcx); // #74711: avoid a stack overflow ensure_sufficient_stack(|| { self.note_obligation_cause_code( err, &parent_predicate, + param_env, &data.parent_code, obligated_types, seen_requirements, @@ -2337,6 +2341,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { self.note_obligation_cause_code( err, predicate, + param_env, &parent_code, obligated_types, seen_requirements, @@ -2427,15 +2432,15 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { &self, err: &mut DiagnosticBuilder<'_>, obligation: &PredicateObligation<'tcx>, - trait_ref: ty::Binder<'tcx, ty::TraitRef<'tcx>>, + trait_pred: ty::PolyTraitPredicate<'tcx>, span: Span, ) { debug!( - "suggest_await_before_try: obligation={:?}, span={:?}, trait_ref={:?}, trait_ref_self_ty={:?}", + "suggest_await_before_try: obligation={:?}, span={:?}, trait_pred={:?}, trait_pred_self_ty={:?}", obligation, span, - trait_ref, - trait_ref.self_ty() + trait_pred, + trait_pred.self_ty() ); let body_hir_id = obligation.cause.body_id; let item_id = self.tcx.hir().get_parent_node(body_hir_id); @@ -2445,7 +2450,7 @@ impl<'a, 
'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { if let Some(hir::GeneratorKind::Async(_)) = body.generator_kind { let future_trait = self.tcx.require_lang_item(LangItem::Future, None); - let self_ty = self.resolve_vars_if_possible(trait_ref.self_ty()); + let self_ty = self.resolve_vars_if_possible(trait_pred.self_ty()); // Do not check on infer_types to avoid panic in evaluate_obligation. if self_ty.has_infer_types() { @@ -2465,7 +2470,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { let projection_ty = ty::ProjectionTy { // `T` substs: self.tcx.mk_substs_trait( - trait_ref.self_ty().skip_binder(), + trait_pred.self_ty().skip_binder(), self.fresh_substs_for_item(span, item_def_id), ), // `Future::Output` @@ -2490,7 +2495,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { ); let try_obligation = self.mk_trait_obligation_with_new_self_ty( obligation.param_env, - trait_ref, + trait_pred, normalized_ty, ); debug!("suggest_await_before_try: try_trait_obligation {:?}", try_obligation); diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs index 23f534858b8..2927e64f705 100644 --- a/compiler/rustc_trait_selection/src/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/mod.rs @@ -291,7 +291,7 @@ pub fn normalize_param_env_or_error<'tcx>( // // In any case, in practice, typeck constructs all the // parameter environments once for every fn as it goes, - // and errors will get reported then; so after typeck we + // and errors will get reported then; so outside of type inference we // can be sure that no errors should occur. debug!( diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs index 7bfedecbdc7..7818053218d 100644 --- a/compiler/rustc_trait_selection/src/traits/object_safety.rs +++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs @@ -89,7 +89,7 @@ fn object_safety_violations_for_trait( .filter(|item| item.kind == ty::AssocKind::Fn) .filter_map(|item| { object_safety_violation_for_method(tcx, trait_def_id, &item) - .map(|(code, span)| ObjectSafetyViolation::Method(item.ident.name, code, span)) + .map(|(code, span)| ObjectSafetyViolation::Method(item.name, code, span)) }) .filter(|violation| { if let ObjectSafetyViolation::Method( @@ -125,7 +125,10 @@ fn object_safety_violations_for_trait( tcx.associated_items(trait_def_id) .in_definition_order() .filter(|item| item.kind == ty::AssocKind::Const) - .map(|item| ObjectSafetyViolation::AssocConst(item.ident.name, item.ident.span)), + .map(|item| { + let ident = item.ident(tcx); + ObjectSafetyViolation::AssocConst(ident.name, ident.span) + }), ); violations.extend( @@ -133,7 +136,10 @@ fn object_safety_violations_for_trait( .in_definition_order() .filter(|item| item.kind == ty::AssocKind::Type) .filter(|item| !tcx.generics_of(item.def_id).params.is_empty()) - .map(|item| ObjectSafetyViolation::GAT(item.ident.name, item.ident.span)), + .map(|item| { + let ident = item.ident(tcx); + ObjectSafetyViolation::GAT(ident.name, ident.span) + }), ); debug!( @@ -367,15 +373,15 @@ fn object_safety_violation_for_method( (MethodViolationCode::ReferencesSelfInput(arg), Some(node)) => node .fn_decl() .and_then(|decl| decl.inputs.get(arg + 1)) - .map_or(method.ident.span, |arg| arg.span), + .map_or(method.ident(tcx).span, |arg| arg.span), (MethodViolationCode::UndispatchableReceiver, Some(node)) => node .fn_decl() .and_then(|decl| decl.inputs.get(0)) - 
.map_or(method.ident.span, |arg| arg.span), + .map_or(method.ident(tcx).span, |arg| arg.span), (MethodViolationCode::ReferencesSelfOutput, Some(node)) => { - node.fn_decl().map_or(method.ident.span, |decl| decl.output.span()) + node.fn_decl().map_or(method.ident(tcx).span, |decl| decl.output.span()) } - _ => method.ident.span, + _ => method.ident(tcx).span, }; (v, span) }) @@ -404,10 +410,10 @@ fn virtual_call_violation_for_method<'tcx>( ); // Get the span pointing at where the `self` receiver should be. let sm = tcx.sess.source_map(); - let self_span = method.ident.span.to(tcx + let self_span = method.ident(tcx).span.to(tcx .hir() .span_if_local(method.def_id) - .unwrap_or_else(|| sm.next_point(method.ident.span)) + .unwrap_or_else(|| sm.next_point(method.ident(tcx).span)) .shrink_to_hi()); let self_span = sm.span_through_char(self_span, '(').shrink_to_hi(); return Some(MethodViolationCode::StaticMethod( diff --git a/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs index 4840995275a..6b20476b955 100644 --- a/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs +++ b/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs @@ -19,6 +19,7 @@ pub struct OnUnimplementedDirective { pub label: Option<OnUnimplementedFormatString>, pub note: Option<OnUnimplementedFormatString>, pub enclosing_scope: Option<OnUnimplementedFormatString>, + pub append_const_msg: Option<Option<Symbol>>, } #[derive(Default)] @@ -27,6 +28,11 @@ pub struct OnUnimplementedNote { pub label: Option<String>, pub note: Option<String>, pub enclosing_scope: Option<String>, + /// Append a message for `~const Trait` errors. `None` means not requested and + /// should fallback to a generic message, `Some(None)` suggests using the default + /// appended message, `Some(Some(s))` suggests use the `s` message instead of the + /// default one.. 
+ pub append_const_msg: Option<Option<Symbol>>, } fn parse_error( @@ -89,6 +95,7 @@ impl<'tcx> OnUnimplementedDirective { let mut note = None; let mut enclosing_scope = None; let mut subcommands = vec![]; + let mut append_const_msg = None; let parse_value = |value_str| { OnUnimplementedFormatString::try_parse(tcx, trait_def_id, value_str, span).map(Some) @@ -131,6 +138,14 @@ impl<'tcx> OnUnimplementedDirective { } continue; } + } else if item.has_name(sym::append_const_msg) && append_const_msg.is_none() { + if let Some(msg) = item.value_str() { + append_const_msg = Some(Some(msg)); + continue; + } else if item.is_word() { + append_const_msg = Some(None); + continue; + } } // nothing found @@ -153,6 +168,7 @@ impl<'tcx> OnUnimplementedDirective { label, note, enclosing_scope, + append_const_msg, }) } } @@ -183,6 +199,7 @@ impl<'tcx> OnUnimplementedDirective { )?), note: None, enclosing_scope: None, + append_const_msg: None, })) } else { return Err(ErrorReported); @@ -201,6 +218,7 @@ impl<'tcx> OnUnimplementedDirective { let mut label = None; let mut note = None; let mut enclosing_scope = None; + let mut append_const_msg = None; info!("evaluate({:?}, trait_ref={:?}, options={:?})", self, trait_ref, options); for command in self.subcommands.iter().chain(Some(self)).rev() { @@ -235,6 +253,8 @@ impl<'tcx> OnUnimplementedDirective { if let Some(ref enclosing_scope_) = command.enclosing_scope { enclosing_scope = Some(enclosing_scope_.clone()); } + + append_const_msg = command.append_const_msg.clone(); } let options: FxHashMap<Symbol, String> = @@ -244,6 +264,7 @@ impl<'tcx> OnUnimplementedDirective { message: message.map(|m| m.format(tcx, trait_ref, &options)), note: note.map(|n| n.format(tcx, trait_ref, &options)), enclosing_scope: enclosing_scope.map(|e_s| e_s.format(tcx, trait_ref, &options)), + append_const_msg, } } } diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index f49f53351aa..087fc6034d9 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -391,7 +391,7 @@ impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> { // severe performance implications for large opaque types with // late-bound regions. See `issue-88862` benchmark. ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => { - // Only normalize `impl Trait` after type-checking, usually in codegen. + // Only normalize `impl Trait` outside of type inference, usually in codegen. match self.param_env.reveal() { Reveal::UserFacing => ty.super_fold_with(self), @@ -1600,7 +1600,7 @@ fn confirm_generator_candidate<'cx, 'tcx>( gen_sig, ) .map_bound(|(trait_ref, yield_ty, return_ty)| { - let name = tcx.associated_item(obligation.predicate.item_def_id).ident.name; + let name = tcx.associated_item(obligation.predicate.item_def_id).name; let ty = if name == sym::Return { return_ty } else if name == sym::Yield { @@ -1842,7 +1842,7 @@ fn confirm_impl_candidate<'cx, 'tcx>( // just return Error. 
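The new `append_const_msg` option parsed above can appear either as a bare word (use the default appended message) or with a string value (use that string instead). A hedged sketch of how a trait might opt in, assuming the usual internal `#[rustc_on_unimplemented]` usage; the trait and messages below are illustrative and not part of this patch:

    #[rustc_on_unimplemented(
        message = "cannot add `{Rhs}` to `{Self}`",
        label = "no implementation for `{Self} + {Rhs}`",
        append_const_msg
    )]
    trait Add<Rhs = Self> {
        type Output;
        fn add(self, rhs: Rhs) -> Self::Output;
    }

    // Or, with an explicit replacement for the default appended message:
    // #[rustc_on_unimplemented(append_const_msg = "custom note for `~const` failures")]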
debug!( "confirm_impl_candidate: no associated type {:?} for {:?}", - assoc_ty.item.ident, obligation.predicate + assoc_ty.item.name, obligation.predicate ); return Progress { ty: tcx.ty_error(), obligations: nested }; } diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs index 81ee22c1de4..3c9e1bbcef2 100644 --- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs +++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs @@ -200,7 +200,7 @@ impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> { // severe performance implications for large opaque types with // late-bound regions. See `issue-88862` benchmark. ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => { - // Only normalize `impl Trait` after type-checking, usually in codegen. + // Only normalize `impl Trait` outside of type inference, usually in codegen. match self.param_env.reveal() { Reveal::UserFacing => ty.try_super_fold_with(self), diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index b573c4b4390..db86041f618 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -305,15 +305,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } else if lang_items.unsize_trait() == Some(def_id) { self.assemble_candidates_for_unsizing(obligation, &mut candidates); } else if lang_items.drop_trait() == Some(def_id) - && obligation.predicate.skip_binder().constness == ty::BoundConstness::ConstIfConst + && obligation.predicate.is_const_if_const() { - if obligation.param_env.constness() == hir::Constness::Const { - self.assemble_const_drop_candidates(obligation, stack, &mut candidates)?; - } else { - debug!("passing ~const Drop bound; in non-const context"); - // `~const Drop` when we are not in a const context has no effect. 
- candidates.vec.push(ConstDropCandidate) - } + self.assemble_const_drop_candidates(obligation, &mut candidates); } else { if lang_items.clone_trait() == Some(def_id) { // Same builtin conditions as `Copy`, i.e., every type which has builtin support @@ -918,139 +912,77 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } } - fn assemble_const_drop_candidates<'a>( + fn assemble_const_drop_candidates( &mut self, obligation: &TraitObligation<'tcx>, - obligation_stack: &TraitObligationStack<'a, 'tcx>, candidates: &mut SelectionCandidateSet<'tcx>, - ) -> Result<(), SelectionError<'tcx>> { - let mut stack: Vec<(Ty<'tcx>, usize)> = vec![(obligation.self_ty().skip_binder(), 0)]; - - while let Some((ty, depth)) = stack.pop() { - let mut noreturn = false; - - self.check_recursion_depth(depth, obligation)?; - let mut new_candidates = SelectionCandidateSet { vec: Vec::new(), ambiguous: false }; - let mut copy_obligation = - obligation.with(obligation.predicate.rebind(ty::TraitPredicate { - trait_ref: ty::TraitRef { - def_id: self.tcx().require_lang_item(hir::LangItem::Copy, None), - substs: self.tcx().mk_substs_trait(ty, &[]), - }, - constness: ty::BoundConstness::NotConst, - polarity: ty::ImplPolarity::Positive, - })); - copy_obligation.recursion_depth = depth + 1; - self.assemble_candidates_from_impls(©_obligation, &mut new_candidates); - let copy_conditions = self.copy_clone_conditions(©_obligation); - self.assemble_builtin_bound_candidates(copy_conditions, &mut new_candidates); - let copy_stack = self.push_stack(obligation_stack.list(), ©_obligation); - self.assemble_candidates_from_caller_bounds(©_stack, &mut new_candidates)?; - - let const_drop_obligation = - obligation.with(obligation.predicate.rebind(ty::TraitPredicate { - trait_ref: ty::TraitRef { - def_id: self.tcx().require_lang_item(hir::LangItem::Drop, None), - substs: self.tcx().mk_substs_trait(ty, &[]), - }, - constness: ty::BoundConstness::ConstIfConst, - polarity: ty::ImplPolarity::Positive, - })); - - let const_drop_stack = self.push_stack(obligation_stack.list(), &const_drop_obligation); - self.assemble_candidates_from_caller_bounds(&const_drop_stack, &mut new_candidates)?; - - if !new_candidates.vec.is_empty() { - noreturn = true; - } - debug!(?new_candidates.vec, "assemble_const_drop_candidates"); - - match ty.kind() { - ty::Int(_) - | ty::Uint(_) - | ty::Float(_) - | ty::Infer(ty::IntVar(_)) - | ty::Infer(ty::FloatVar(_)) - | ty::FnPtr(_) - | ty::Never - | ty::Ref(..) - | ty::FnDef(..) - | ty::RawPtr(_) - | ty::Bool - | ty::Char - | ty::Str - | ty::Foreign(_) => {} // Do nothing. These types satisfy `const Drop`. 
- - ty::Adt(def, subst) => { - let mut set = SelectionCandidateSet { vec: Vec::new(), ambiguous: false }; - self.assemble_candidates_from_impls( - &obligation.with(obligation.predicate.map_bound(|mut pred| { - pred.trait_ref.substs = self.tcx().mk_substs_trait(ty, &[]); - pred - })), - &mut set, - ); - stack.extend(def.all_fields().map(|f| (f.ty(self.tcx(), subst), depth + 1))); - - debug!(?set.vec, "assemble_const_drop_candidates - ty::Adt"); - if set.vec.into_iter().any(|candidate| { - if let SelectionCandidate::ImplCandidate(did) = candidate { - matches!(self.tcx().impl_constness(did), hir::Constness::NotConst) - } else { - false - } - }) { - if !noreturn { - // has non-const Drop - return Ok(()); - } - debug!("not returning"); - } - } - - ty::Array(ty, _) => stack.push((ty, depth + 1)), - - ty::Tuple(_) => stack.extend(ty.tuple_fields().map(|t| (t, depth + 1))), + ) { + // If the predicate is `~const Drop` in a non-const environment, we don't actually need + // to check anything. We'll short-circuit checking any obligations in confirmation, too. + if obligation.param_env.constness() == hir::Constness::NotConst { + candidates.vec.push(ConstDropCandidate(None)); + return; + } - ty::Closure(_, substs) => { - let substs = substs.as_closure(); - let ty = self.infcx.shallow_resolve(substs.tupled_upvars_ty()); - stack.push((ty, depth + 1)); - } + let self_ty = self.infcx().shallow_resolve(obligation.self_ty()); + match self_ty.skip_binder().kind() { + ty::Opaque(..) + | ty::Dynamic(..) + | ty::Error(_) + | ty::Bound(..) + | ty::Param(_) + | ty::Placeholder(_) + | ty::Projection(_) => { + // We don't know if these are `~const Drop`, at least + // not structurally... so don't push a candidate. + } - ty::Generator(_, substs, _) => { - let substs = substs.as_generator(); - let ty = self.infcx.shallow_resolve(substs.tupled_upvars_ty()); + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Infer(ty::IntVar(_)) + | ty::Infer(ty::FloatVar(_)) + | ty::Str + | ty::RawPtr(_) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(_) + | ty::Never + | ty::Foreign(_) + | ty::Array(..) + | ty::Slice(_) + | ty::Closure(..) + | ty::Generator(..) + | ty::Tuple(_) + | ty::GeneratorWitness(_) => { + // These are built-in, and cannot have a custom `impl const Drop`. + candidates.vec.push(ConstDropCandidate(None)); + } - stack.push((ty, depth + 1)); - stack.push((substs.witness(), depth + 1)); - } + ty::Adt(..) => { + // Find a custom `impl Drop` impl, if it exists + let relevant_impl = self.tcx().find_map_relevant_impl( + obligation.predicate.def_id(), + obligation.predicate.skip_binder().trait_ref.self_ty(), + Some, + ); - ty::GeneratorWitness(tys) => stack.extend( - self.tcx().erase_late_bound_regions(*tys).iter().map(|t| (t, depth + 1)), - ), - - ty::Slice(ty) => stack.push((ty, depth + 1)), - - ty::Opaque(..) - | ty::Dynamic(..) - | ty::Error(_) - | ty::Bound(..) - | ty::Infer(_) - | ty::Placeholder(_) - | ty::Projection(..) - | ty::Param(..) => { - if !noreturn { - return Ok(()); + if let Some(impl_def_id) = relevant_impl { + // Check that `impl Drop` is actually const, if there is a custom impl + if self.tcx().impl_constness(impl_def_id) == hir::Constness::Const { + candidates.vec.push(ConstDropCandidate(Some(impl_def_id))); } - debug!("not returning"); + } else { + // Otherwise check the ADT like a built-in type (structurally) + candidates.vec.push(ConstDropCandidate(None)); } } - debug!(?stack, "assemble_const_drop_candidates - in loop"); - } - // all types have passed. 
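At the source level, the rewritten `~const Drop` candidate assembly corresponds roughly to the following. This is a sketch only: it uses nightly-era, feature-gated syntax, the exact gates and spelling may differ, and the names are illustrative.

    #![feature(const_trait_impl)]

    struct Plain(u32);           // no `Drop` impl: checked structurally
                                 // -> ConstDropCandidate(None)

    struct Custom;
    impl const Drop for Custom { // custom const impl
        fn drop(&mut self) {}    // -> ConstDropCandidate(Some(impl_def_id))
    }

    // Inside a const context the `~const Drop` bound is proven as above;
    // in a non-const context it is satisfied trivially.
    const fn consume<T: ~const Drop>(_t: T) {}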
- candidates.vec.push(ConstDropCandidate); - Ok(()) + ty::Infer(_) => { + candidates.ambiguous = true; + } + } } } diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index 669b6023397..639884844b2 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -72,15 +72,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // CheckPredicate(&A: Super) // CheckPredicate(A: ~const Super) // <- still const env, failure // ``` - if obligation.param_env.constness() == Constness::Const - && obligation.predicate.skip_binder().constness == ty::BoundConstness::NotConst - { + if obligation.param_env.is_const() && !obligation.predicate.is_const_if_const() { new_obligation = TraitObligation { cause: obligation.cause.clone(), param_env: obligation.param_env.without_const(), ..*obligation }; - obligation = &new_obligation; } @@ -159,7 +156,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { Ok(ImplSource::TraitUpcasting(data)) } - ConstDropCandidate => Ok(ImplSource::ConstDrop(ImplSourceConstDropData)), + ConstDropCandidate(def_id) => { + let data = self.confirm_const_drop_candidate(obligation, def_id)?; + Ok(ImplSource::ConstDrop(data)) + } } } @@ -657,7 +657,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { _ => bug!("closure candidate for non-closure {:?}", obligation), }; - let obligation_predicate = obligation.predicate.to_poly_trait_ref(); + let obligation_predicate = obligation.predicate; let Normalized { value: obligation_predicate, mut obligations } = ensure_sufficient_stack(|| { normalize_with_depth( @@ -687,7 +687,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { obligations.extend(self.confirm_poly_trait_refs( obligation.cause.clone(), obligation.param_env, - obligation_predicate, + obligation_predicate.to_poly_trait_ref(), trait_ref, )?); @@ -1087,4 +1087,128 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { Ok(ImplSourceBuiltinData { nested }) } + + fn confirm_const_drop_candidate( + &mut self, + obligation: &TraitObligation<'tcx>, + impl_def_id: Option<DefId>, + ) -> Result<ImplSourceConstDropData<PredicateObligation<'tcx>>, SelectionError<'tcx>> { + // `~const Drop` in a non-const environment is always trivially true, since our type is `Drop` + if obligation.param_env.constness() == Constness::NotConst { + return Ok(ImplSourceConstDropData { nested: vec![] }); + } + + let tcx = self.tcx(); + let self_ty = self.infcx.shallow_resolve(obligation.self_ty()); + + let mut nested = vec![]; + let cause = obligation.derived_cause(BuiltinDerivedObligation); + + // If we have a custom `impl const Drop`, then + // first check it like a regular impl candidate + if let Some(impl_def_id) = impl_def_id { + nested.extend(self.confirm_impl_candidate(obligation, impl_def_id).nested); + } + + // We want to confirm the ADT's fields if we have an ADT + let mut stack = match *self_ty.skip_binder().kind() { + ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(tcx, substs)).collect(), + _ => vec![self_ty.skip_binder()], + }; + + while let Some(nested_ty) = stack.pop() { + match *nested_ty.kind() { + // We know these types are trivially drop + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Infer(ty::IntVar(_)) + | ty::Infer(ty::FloatVar(_)) + | ty::Str + | ty::RawPtr(_) + | ty::Ref(..) + | ty::FnDef(..) 
+ | ty::FnPtr(_) + | ty::Never + | ty::Foreign(_) => {} + + // These types are built-in, so we can fast-track by registering + // nested predicates for their constituient type(s) + ty::Array(ty, _) | ty::Slice(ty) => { + stack.push(ty); + } + ty::Tuple(tys) => { + stack.extend(tys.iter().map(|ty| ty.expect_ty())); + } + ty::Closure(_, substs) => { + stack.push(substs.as_closure().tupled_upvars_ty()); + } + ty::Generator(_, substs, _) => { + let generator = substs.as_generator(); + stack.extend([generator.tupled_upvars_ty(), generator.witness()]); + } + ty::GeneratorWitness(tys) => { + stack.extend(tcx.erase_late_bound_regions(tys).to_vec()); + } + + // If we have a projection type, make sure to normalize it so we replace it + // with a fresh infer variable + ty::Projection(..) => { + self.infcx.commit_unconditionally(|_| { + let predicate = normalize_with_depth_to( + self, + obligation.param_env, + cause.clone(), + obligation.recursion_depth + 1, + self_ty + .rebind(ty::TraitPredicate { + trait_ref: ty::TraitRef { + def_id: self.tcx().require_lang_item(LangItem::Drop, None), + substs: self.tcx().mk_substs_trait(nested_ty, &[]), + }, + constness: ty::BoundConstness::ConstIfConst, + polarity: ty::ImplPolarity::Positive, + }) + .to_predicate(tcx), + &mut nested, + ); + + nested.push(Obligation::with_depth( + cause.clone(), + obligation.recursion_depth + 1, + obligation.param_env, + predicate, + )); + }); + } + + // If we have any other type (e.g. an ADT), just register a nested obligation + // since it's either not `const Drop` (and we raise an error during selection), + // or it's an ADT (and we need to check for a custom impl during selection) + _ => { + let predicate = self_ty + .rebind(ty::TraitPredicate { + trait_ref: ty::TraitRef { + def_id: self.tcx().require_lang_item(LangItem::Drop, None), + substs: self.tcx().mk_substs_trait(nested_ty, &[]), + }, + constness: ty::BoundConstness::ConstIfConst, + polarity: ty::ImplPolarity::Positive, + }) + .to_predicate(tcx); + + nested.push(Obligation::with_depth( + cause.clone(), + obligation.recursion_depth + 1, + obligation.param_env, + predicate, + )); + } + } + } + + Ok(ImplSourceConstDropData { nested }) + } } diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 1414c742635..47427395b93 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -765,14 +765,38 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { debug!(?result, "CACHE MISS"); self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result); - stack.cache().on_completion(stack.dfn, |fresh_trait_pred, provisional_result| { - self.insert_evaluation_cache( - param_env, - fresh_trait_pred, - dep_node, - provisional_result.max(result), - ); - }); + stack.cache().on_completion( + stack.dfn, + |fresh_trait_pred, provisional_result, provisional_dep_node| { + // Create a new `DepNode` that has dependencies on: + // * The `DepNode` for the original evaluation that resulted in a provisional cache + // entry being crated + // * The `DepNode` for the *current* evaluation, which resulted in us completing + // provisional caches entries and inserting them into the evaluation cache + // + // This ensures that when a query reads this entry from the evaluation cache, + // it will end up (transitively) dependening on all of the incr-comp dependencies + // created during the evaluation of this trait. 
For example, evaluating a trait + // will usually require us to invoke `type_of(field_def_id)` to determine the + // constituent types, and we want any queries reading from this evaluation + // cache entry to end up with a transitive `type_of(field_def_id`)` dependency. + // + // By using `in_task`, we're also creating an edge from the *current* query + // to the newly-created `combined_dep_node`. This is probably redundant, + // but it's better to add too many dep graph edges than to add too few + // dep graph edges. + let ((), combined_dep_node) = self.in_task(|this| { + this.tcx().dep_graph.read_index(provisional_dep_node); + this.tcx().dep_graph.read_index(dep_node); + }); + self.insert_evaluation_cache( + param_env, + fresh_trait_pred, + combined_dep_node, + provisional_result.max(result), + ); + }, + ); } else { debug!(?result, "PROVISIONAL"); debug!( @@ -781,7 +805,13 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { fresh_trait_pred, stack.depth, reached_depth, ); - stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result); + stack.cache().insert_provisional( + stack.dfn, + reached_depth, + fresh_trait_pred, + result, + dep_node, + ); } Ok(result) @@ -1143,9 +1173,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ImplCandidate(def_id) if tcx.impl_constness(def_id) == hir::Constness::Const => {} // const param - ParamCandidate(trait_pred) - if trait_pred.skip_binder().constness - == ty::BoundConstness::ConstIfConst => {} + ParamCandidate(trait_pred) if trait_pred.is_const_if_const() => {} // auto trait impl AutoImplCandidate(..) => {} // generator, this will raise error in other places @@ -1153,7 +1181,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { GeneratorCandidate => {} // FnDef where the function is const FnPointerCandidate { is_const: true } => {} - ConstDropCandidate => {} + ConstDropCandidate(_) => {} _ => { // reject all other types of candidates continue; @@ -1537,7 +1565,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { }; // (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`, - // and `DiscriminantKindCandidate` to anything else. + // `DiscriminantKindCandidate`, and `ConstDropCandidate` to anything else. // // This is a fix for #53123 and prevents winnowing from accidentally extending the // lifetime of a variable. @@ -1554,7 +1582,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate | PointeeCandidate - | ConstDropCandidate, + | ConstDropCandidate(_), _, ) => true, ( @@ -1562,7 +1590,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { BuiltinCandidate { has_nested: false } | DiscriminantKindCandidate | PointeeCandidate - | ConstDropCandidate, + | ConstDropCandidate(_), ) => false, (ParamCandidate(other), ParamCandidate(victim)) => { @@ -2383,7 +2411,7 @@ impl<'tcx> TraitObligationExt<'tcx> for TraitObligation<'tcx> { // chain. Ideally, we should have a way to configure this either // by using -Z verbose or just a CLI argument. let derived_cause = DerivedObligationCause { - parent_trait_ref: obligation.predicate.to_poly_trait_ref(), + parent_trait_pred: obligation.predicate, parent_code: obligation.cause.clone_code(), }; let derived_code = variant(derived_cause); @@ -2506,6 +2534,11 @@ struct ProvisionalEvaluation { from_dfn: usize, reached_depth: usize, result: EvaluationResult, + /// The `DepNodeIndex` created for the `evaluate_stack` call for this provisional + /// evaluation. 
When we create an entry in the evaluation cache using this provisional + /// cache entry (see `on_completion`), we use this `dep_node` to ensure that future reads from + /// the cache will have all of the necessary incr comp dependencies tracked. + dep_node: DepNodeIndex, } impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> { @@ -2548,6 +2581,7 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> { reached_depth: usize, fresh_trait_pred: ty::PolyTraitPredicate<'tcx>, result: EvaluationResult, + dep_node: DepNodeIndex, ) { debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional"); @@ -2573,7 +2607,10 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> { } } - map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result }); + map.insert( + fresh_trait_pred, + ProvisionalEvaluation { from_dfn, reached_depth, result, dep_node }, + ); } /// Invoked when the node with dfn `dfn` does not get a successful @@ -2624,7 +2661,7 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> { fn on_completion( &self, dfn: usize, - mut op: impl FnMut(ty::PolyTraitPredicate<'tcx>, EvaluationResult), + mut op: impl FnMut(ty::PolyTraitPredicate<'tcx>, EvaluationResult, DepNodeIndex), ) { debug!(?dfn, "on_completion"); @@ -2633,7 +2670,7 @@ impl<'tcx> ProvisionalEvaluationCache<'tcx> { { debug!(?fresh_trait_pred, ?eval, "on_completion"); - op(fresh_trait_pred, eval.result); + op(fresh_trait_pred, eval.result, eval.dep_node); } } } diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs index 195a4a4a653..2c5e7e40cc8 100644 --- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs @@ -506,12 +506,21 @@ crate fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Option<St let mut pretty_predicates = Vec::with_capacity(predicates.len() + types_without_default_bounds.len()); - for (p, _) in predicates { + for (mut p, _) in predicates { if let Some(poly_trait_ref) = p.to_opt_poly_trait_pred() { if Some(poly_trait_ref.def_id()) == sized_trait { types_without_default_bounds.remove(poly_trait_ref.self_ty().skip_binder()); continue; } + + if ty::BoundConstness::ConstIfConst == poly_trait_ref.skip_binder().constness { + let new_trait_pred = poly_trait_ref.map_bound(|mut trait_pred| { + trait_pred.constness = ty::BoundConstness::NotConst; + trait_pred + }); + + p = tcx.mk_predicate(new_trait_pred.map_bound(ty::PredicateKind::Trait)) + } } pretty_predicates.push(p.to_string()); } diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs index 6a355b567e0..493cb199f11 100644 --- a/compiler/rustc_trait_selection/src/traits/wf.rs +++ b/compiler/rustc_trait_selection/src/traits/wf.rs @@ -306,10 +306,9 @@ impl<'a, 'tcx> WfPredicates<'a, 'tcx> { let extend = |obligation: traits::PredicateObligation<'tcx>| { let mut cause = cause.clone(); - if let Some(parent_trait_ref) = obligation.predicate.to_opt_poly_trait_pred() { + if let Some(parent_trait_pred) = obligation.predicate.to_opt_poly_trait_pred() { let derived_cause = traits::DerivedObligationCause { - // FIXME(fee1-dead): when improving error messages, change this to PolyTraitPredicate - parent_trait_ref: parent_trait_ref.map_bound(|t| t.trait_ref), + parent_trait_pred, parent_code: obligation.cause.clone_code(), }; *cause.make_mut_code() = diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs index 
781a639b09e..4142c999ca7 100644 --- a/compiler/rustc_ty_utils/src/assoc.rs +++ b/compiler/rustc_ty_utils/src/assoc.rs @@ -100,7 +100,7 @@ fn associated_item_from_trait_item_ref( }; ty::AssocItem { - ident: trait_item_ref.ident, + name: trait_item_ref.ident.name, kind, vis: tcx.visibility(def_id), defaultness: trait_item_ref.defaultness, @@ -124,7 +124,7 @@ fn associated_item_from_impl_item_ref( }; ty::AssocItem { - ident: impl_item_ref.ident, + name: impl_item_ref.ident.name, kind, vis: tcx.visibility(def_id), defaultness: impl_item_ref.defaultness, diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index fef83190468..b882a940d40 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -149,7 +149,7 @@ fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> { // kind of an "idempotent" action, but I'm not sure where would be // a better place. In practice, we construct environments for // every fn once during type checking, and we'll abort if there - // are any errors at that point, so after type checking you can be + // are any errors at that point, so outside of type inference you can be // sure that this will succeed without errors anyway. if tcx.sess.opts.debugging_opts.chalk { diff --git a/compiler/rustc_typeck/src/astconv/errors.rs b/compiler/rustc_typeck/src/astconv/errors.rs index b532c41642c..a49d6e24f26 100644 --- a/compiler/rustc_typeck/src/astconv/errors.rs +++ b/compiler/rustc_typeck/src/astconv/errors.rs @@ -214,7 +214,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { .map(|r| self.tcx().associated_items(r.def_id()).in_definition_order()) .flatten() .filter_map( - |item| if item.kind == ty::AssocKind::Type { Some(item.ident.name) } else { None }, + |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None }, ) .collect(); @@ -270,7 +270,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let trait_def_id = assoc_item.container.id(); names.push(format!( "`{}` (from trait `{}`)", - assoc_item.ident, + assoc_item.name, tcx.def_path_str(trait_def_id), )); } @@ -327,11 +327,11 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let mut names: FxHashMap<_, usize> = FxHashMap::default(); for item in assoc_items { types_count += 1; - *names.entry(item.ident.name).or_insert(0) += 1; + *names.entry(item.name).or_insert(0) += 1; } let mut dupes = false; for item in assoc_items { - let prefix = if names[&item.ident.name] > 1 { + let prefix = if names[&item.name] > 1 { let trait_def_id = item.container.id(); dupes = true; format!("{}::", tcx.def_path_str(trait_def_id)) @@ -339,7 +339,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { String::new() }; if let Some(sp) = tcx.hir().span_if_local(item.def_id) { - err.span_label(sp, format!("`{}{}` defined here", prefix, item.ident)); + err.span_label(sp, format!("`{}{}` defined here", prefix, item.name)); } } if potential_assoc_types.len() == assoc_items.len() { @@ -350,14 +350,14 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // `Iterator<Item = isize>`. 
for (potential, item) in iter::zip(&potential_assoc_types, assoc_items) { if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(*potential) { - suggestions.push((*potential, format!("{} = {}", item.ident, snippet))); + suggestions.push((*potential, format!("{} = {}", item.name, snippet))); } } } else if let (Ok(snippet), false) = (tcx.sess.source_map().span_to_snippet(*span), dupes) { let types: Vec<_> = - assoc_items.iter().map(|item| format!("{} = Type", item.ident)).collect(); + assoc_items.iter().map(|item| format!("{} = Type", item.name)).collect(); let code = if snippet.ends_with('>') { // The user wrote `Trait<'a>` or similar and we don't have a type we can // suggest, but at least we can clue them to the correct syntax @@ -388,17 +388,17 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let mut names: FxHashMap<_, usize> = FxHashMap::default(); for item in assoc_items { types_count += 1; - *names.entry(item.ident.name).or_insert(0) += 1; + *names.entry(item.name).or_insert(0) += 1; } let mut label = vec![]; for item in assoc_items { - let postfix = if names[&item.ident.name] > 1 { + let postfix = if names[&item.name] > 1 { let trait_def_id = item.container.id(); format!(" (from trait `{}`)", tcx.def_path_str(trait_def_id)) } else { String::new() }; - label.push(format!("`{}`{}", item.ident, postfix)); + label.push(format!("`{}`{}", item.name, postfix)); } if !label.is_empty() { err.span_label( diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs index 956696546da..05ff7f818c7 100644 --- a/compiler/rustc_typeck/src/astconv/generics.rs +++ b/compiler/rustc_typeck/src/astconv/generics.rs @@ -445,7 +445,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { let named_type_param_count = param_counts.types - has_self as usize - synth_type_param_count; let infer_lifetimes = - gen_pos != GenericArgPosition::Type && !gen_args.has_lifetime_params(); + (gen_pos != GenericArgPosition::Type || infer_args) && !gen_args.has_lifetime_params(); if gen_pos != GenericArgPosition::Type && !gen_args.bindings.is_empty() { Self::prohibit_assoc_ty_binding(tcx, gen_args.bindings[0].span); diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs index d9b3f51b5bd..16fc9a01a27 100644 --- a/compiler/rustc_typeck/src/astconv/mod.rs +++ b/compiler/rustc_typeck/src/astconv/mod.rs @@ -482,7 +482,20 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { ) -> subst::GenericArg<'tcx> { let tcx = self.astconv.tcx(); match param.kind { - GenericParamDefKind::Lifetime => tcx.lifetimes.re_static.into(), + GenericParamDefKind::Lifetime => self + .astconv + .re_infer(Some(param), self.span) + .unwrap_or_else(|| { + debug!(?param, "unelided lifetime in signature"); + + // This indicates an illegal lifetime in a non-assoc-trait position + tcx.sess.delay_span_bug(self.span, "unelided lifetime in signature"); + + // Supply some dummy value. We don't have an + // `re_error`, annoyingly, so use `'static`. + tcx.lifetimes.re_static + }) + .into(), GenericParamDefKind::Type { has_default, .. } => { if !infer_args && has_default { // No type parameter provided, but a default exists. 
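The suggestion machinery above produces diagnostics like the following minimal example (the help wording is approximate; `Type` is the placeholder used when no snippet is available):

    fn takes_iter(_: Box<dyn Iterator>) {}
    //~^ ERROR the value of the associated type `Item` (from trait `Iterator`) must be specified
    //~| HELP specify the associated type: `dyn Iterator<Item = Type>`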
@@ -1137,7 +1150,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { .filter_by_name_unhygienic(assoc_ident.name) .find(|i| { (i.kind == ty::AssocKind::Type || i.kind == ty::AssocKind::Const) - && i.ident.normalize_to_macros_2_0() == assoc_ident + && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident }) .expect("missing associated type"); // FIXME(associated_const_equality): need to handle assoc_consts here as well. @@ -1176,7 +1189,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { // Include substitutions for generic parameters of associated types let projection_ty = candidate.map_bound(|trait_ref| { - let ident = Ident::new(assoc_ty.ident.name, binding.item_name.span); + let ident = Ident::new(assoc_ty.name, binding.item_name.span); let item_segment = hir::PathSegment { ident, hir_id: Some(binding.hir_id), @@ -1868,7 +1881,7 @@ impl<'o, 'tcx> dyn AstConv<'tcx> + 'o { .in_definition_order() .find(|i| { i.kind.namespace() == Namespace::TypeNS - && i.ident.normalize_to_macros_2_0() == assoc_ident + && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident }) .expect("missing associated type"); diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs index eea8f40635d..0fea0afb572 100644 --- a/compiler/rustc_typeck/src/check/callee.rs +++ b/compiler/rustc_typeck/src/check/callee.rs @@ -307,6 +307,36 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } + /// Give appropriate suggestion when encountering `[("a", 0) ("b", 1)]`, where the + /// likely intention is to create an array containing tuples. + fn maybe_suggest_bad_array_definition( + &self, + err: &mut DiagnosticBuilder<'a>, + call_expr: &'tcx hir::Expr<'tcx>, + callee_expr: &'tcx hir::Expr<'tcx>, + ) -> bool { + let hir_id = self.tcx.hir().get_parent_node(call_expr.hir_id); + let parent_node = self.tcx.hir().get(hir_id); + if let ( + hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Array(_), .. 
}), + hir::ExprKind::Tup(exp), + hir::ExprKind::Call(_, args), + ) = (parent_node, &callee_expr.kind, &call_expr.kind) + { + if args.len() == exp.len() { + let start = callee_expr.span.shrink_to_hi(); + err.span_suggestion( + start, + "consider separating array elements with a comma", + ",".to_string(), + Applicability::MaybeIncorrect, + ); + return true; + } + } + false + } + fn confirm_builtin_call( &self, call_expr: &'tcx hir::Expr<'tcx>, @@ -422,7 +452,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { _ => Res::Err, }; - err.span_label(call_expr.span, "call expression requires function"); + if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) { + err.span_label(call_expr.span, "call expression requires function"); + } if let Some(span) = self.tcx.hir().res_span(def) { let callee_ty = callee_ty.to_string(); diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs index eb49cc0233d..18a0a8767d4 100644 --- a/compiler/rustc_typeck/src/check/check.rs +++ b/compiler/rustc_typeck/src/check/check.rs @@ -999,7 +999,7 @@ fn check_impl_items_against_trait<'tcx>( if is_implemented_here { let trait_item = tcx.associated_item(trait_item_id); - if required_items.contains(&trait_item.ident) { + if required_items.contains(&trait_item.ident(tcx)) { must_implement_one_of = None; } } diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs index 94648d5702c..74910234b7e 100644 --- a/compiler/rustc_typeck/src/check/compare_method.rs +++ b/compiler/rustc_typeck/src/check/compare_method.rs @@ -300,7 +300,7 @@ fn compare_predicate_entailment<'tcx>( cause.span(tcx), E0053, "method `{}` has an incompatible type for trait", - trait_m.ident + trait_m.name ); match &terr { TypeError::ArgumentMutability(0) | TypeError::ArgumentSorts(_, 0) @@ -452,7 +452,7 @@ fn check_region_bounds_on_impl_item<'tcx>( tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait { span, item_kind, - ident: impl_m.ident, + ident: impl_m.ident(tcx), generics_span, }); return Err(ErrorReported); @@ -540,14 +540,14 @@ fn compare_self_type<'tcx>( impl_m_span, E0185, "method `{}` has a `{}` declaration in the impl, but not in the trait", - trait_m.ident, + trait_m.name, self_descr ); err.span_label(impl_m_span, format!("`{}` used in impl", self_descr)); if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) { err.span_label(span, format!("trait method declared without `{}`", self_descr)); } else { - err.note_trait_signature(trait_m.ident.to_string(), trait_m.signature(tcx)); + err.note_trait_signature(trait_m.name.to_string(), trait_m.signature(tcx)); } err.emit(); return Err(ErrorReported); @@ -560,14 +560,14 @@ fn compare_self_type<'tcx>( impl_m_span, E0186, "method `{}` has a `{}` declaration in the trait, but not in the impl", - trait_m.ident, + trait_m.name, self_descr ); err.span_label(impl_m_span, format!("expected `{}` in impl", self_descr)); if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) { err.span_label(span, format!("`{}` used in trait", self_descr)); } else { - err.note_trait_signature(trait_m.ident.to_string(), trait_m.signature(tcx)); + err.note_trait_signature(trait_m.name.to_string(), trait_m.signature(tcx)); } err.emit(); return Err(ErrorReported); @@ -640,7 +640,7 @@ fn compare_number_of_generics<'tcx>( "{} `{}` has {} {kind} parameter{} but its trait \ declaration has {} {kind} parameter{}", item_kind, - trait_.ident, + trait_.name, impl_count, pluralize!(impl_count), trait_count, @@ -747,7 +747,7 @@ fn 
compare_number_of_method_arguments<'tcx>( impl_span, E0050, "method `{}` has {} but the declaration in trait `{}` has {}", - trait_m.ident, + trait_m.name, potentially_plural_count(impl_number_args, "parameter"), tcx.def_path_str(trait_m.def_id), trait_number_args @@ -761,7 +761,7 @@ fn compare_number_of_method_arguments<'tcx>( ), ); } else { - err.note_trait_signature(trait_m.ident.to_string(), trait_m.signature(tcx)); + err.note_trait_signature(trait_m.name.to_string(), trait_m.signature(tcx)); } err.span_label( impl_span, @@ -811,7 +811,7 @@ fn compare_synthetic_generics<'tcx>( impl_span, E0643, "method `{}` has incompatible signature for trait", - trait_m.ident + trait_m.name ); err.span_label(trait_span, "declaration in trait here"); match (impl_synthetic, trait_synthetic) { @@ -965,7 +965,7 @@ fn compare_const_param_types<'tcx>( *impl_span, E0053, "method `{}` has an incompatible const parameter type for trait", - trait_m.ident + trait_m.name ); err.span_note( trait_span.map_or_else(|| trait_item_span.unwrap_or(*impl_span), |span| *span), @@ -1053,7 +1053,7 @@ crate fn compare_const_impl<'tcx>( cause.span, E0326, "implemented const `{}` has an incompatible type for trait", - trait_c.ident + trait_c.name ); let trait_c_span = trait_c.def_id.as_local().map(|trait_c_def_id| { diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_typeck/src/check/dropck.rs index c8986aa7f53..89866c20b61 100644 --- a/compiler/rustc_typeck/src/check/dropck.rs +++ b/compiler/rustc_typeck/src/check/dropck.rs @@ -229,7 +229,13 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( let p = p.kind(); match (predicate.skip_binder(), p.skip_binder()) { (ty::PredicateKind::Trait(a), ty::PredicateKind::Trait(b)) => { - relator.relate(predicate.rebind(a), p.rebind(b)).is_ok() + // Since struct predicates cannot have ~const, project the impl predicate + // onto one that ignores the constness. This is equivalent to saying that + // we match a `Trait` bound on the struct with a `Trait` or `~const Trait` + // in the impl. 
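A heavily hedged sketch of the pattern this dropck relaxation is meant to accept (nightly-era `const_trait_impl` syntax; the trait and type names are invented and the exact feature gates may differ): the struct declares a plain `T: Bound` predicate, the const `Drop` impl strengthens it to `T: ~const Bound`, and dropck now matches the two by stripping the constness from the impl predicate.

    #![feature(const_trait_impl)]

    trait Bound {}

    struct Wrapper<T: Bound>(T);   // struct side: plain `T: Bound`

    impl<T: ~const Bound> const Drop for Wrapper<T> {
        fn drop(&mut self) {}      // impl side: `T: ~const Bound`, accepted after this change
    }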
+ let non_const_a = + ty::TraitPredicate { constness: ty::BoundConstness::NotConst, ..a }; + relator.relate(predicate.rebind(non_const_a), p.rebind(b)).is_ok() } (ty::PredicateKind::Projection(a), ty::PredicateKind::Projection(b)) => { relator.relate(predicate.rebind(a), p.rebind(b)).is_ok() diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs index 2cad8aab29e..0e1dbc53806 100644 --- a/compiler/rustc_typeck/src/check/expr.rs +++ b/compiler/rustc_typeck/src/check/expr.rs @@ -31,7 +31,7 @@ use rustc_hir as hir; use rustc_hir::def::{CtorKind, DefKind, Res}; use rustc_hir::def_id::DefId; use rustc_hir::intravisit::Visitor; -use rustc_hir::{ExprKind, QPath}; +use rustc_hir::{ExprKind, HirId, QPath}; use rustc_infer::infer; use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; use rustc_infer::infer::InferOk; @@ -1970,7 +1970,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, expr_ty={:?}", field, base, expr, expr_t ); - let mut err = self.no_such_field_err(field, expr_t); + let mut err = self.no_such_field_err(field, expr_t, base.hir_id); match *expr_t.peel_refs().kind() { ty::Array(_, len) => { @@ -2209,6 +2209,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { &self, field: Ident, expr_t: &'tcx ty::TyS<'tcx>, + id: HirId, ) -> DiagnosticBuilder<'_> { let span = field.span; debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t); @@ -2226,9 +2227,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // try to add a suggestion in case the field is a nested field of a field of the Adt if let Some((fields, substs)) = self.get_field_candidates(span, &expr_t) { for candidate_field in fields.iter() { - if let Some(field_path) = - self.check_for_nested_field(span, field, candidate_field, substs, vec![]) - { + if let Some(field_path) = self.check_for_nested_field( + span, + field, + candidate_field, + substs, + vec![], + self.tcx.parent_module(id).to_def_id(), + ) { let field_path_str = field_path .iter() .map(|id| id.name.to_ident_string()) @@ -2280,6 +2286,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { candidate_field: &ty::FieldDef, subst: SubstsRef<'tcx>, mut field_path: Vec<Ident>, + id: DefId, ) -> Option<Vec<Ident>> { debug!( "check_for_nested_field(span: {:?}, candidate_field: {:?}, field_path: {:?}", @@ -2299,10 +2306,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let field_ty = candidate_field.ty(self.tcx, subst); if let Some((nested_fields, subst)) = self.get_field_candidates(span, &field_ty) { for field in nested_fields.iter() { - let ident = field.ident(self.tcx).normalize_to_macros_2_0(); - if ident == target_field { - return Some(field_path); - } else { + let accessible = field.vis.is_accessible_from(id, self.tcx); + if accessible { + let ident = field.ident(self.tcx).normalize_to_macros_2_0(); + if ident == target_field { + return Some(field_path); + } let field_path = field_path.clone(); if let Some(path) = self.check_for_nested_field( span, @@ -2310,6 +2319,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { field, subst, field_path, + id, ) { return Some(path); } diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs index e42d94a6f40..1b93017c5aa 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs @@ -28,6 +28,11 @@ use crate::structured_errors::StructuredDiagnostic; use std::iter; use std::slice; +struct FnArgsAsTuple<'hir> { + first: &'hir hir::Expr<'hir>, + 
last: &'hir hir::Expr<'hir>, +} + impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub(in super::super) fn check_casts(&self) { let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut(); @@ -127,136 +132,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let expected_arg_count = formal_input_tys.len(); - let param_count_error = |expected_count: usize, - arg_count: usize, - error_code: &str, - c_variadic: bool, - sugg_unit: bool| { - let (span, start_span, args, ctor_of) = match &call_expr.kind { - hir::ExprKind::Call( - hir::Expr { - span, - kind: - hir::ExprKind::Path(hir::QPath::Resolved( - _, - hir::Path { res: Res::Def(DefKind::Ctor(of, _), _), .. }, - )), - .. - }, - args, - ) => (*span, *span, &args[..], Some(of)), - hir::ExprKind::Call(hir::Expr { span, .. }, args) => { - (*span, *span, &args[..], None) - } - hir::ExprKind::MethodCall(path_segment, args, _) => ( - path_segment.ident.span, - // `sp` doesn't point at the whole `foo.bar()`, only at `bar`. - path_segment - .args - .and_then(|args| args.args.iter().last()) - // Account for `foo.bar::<T>()`. - .map(|arg| { - // Skip the closing `>`. - tcx.sess - .source_map() - .next_point(tcx.sess.source_map().next_point(arg.span())) - }) - .unwrap_or(path_segment.ident.span), - &args[1..], // Skip the receiver. - None, // methods are never ctors - ), - k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k), - }; - let arg_spans = if provided_args.is_empty() { - // foo() - // ^^^-- supplied 0 arguments - // | - // expected 2 arguments - vec![tcx.sess.source_map().next_point(start_span).with_hi(call_span.hi())] - } else { - // foo(1, 2, 3) - // ^^^ - - - supplied 3 arguments - // | - // expected 2 arguments - args.iter().map(|arg| arg.span).collect::<Vec<Span>>() - }; - - let mut err = tcx.sess.struct_span_err_with_code( - span, - &format!( - "this {} takes {}{} but {} {} supplied", - match ctor_of { - Some(CtorOf::Struct) => "struct", - Some(CtorOf::Variant) => "enum variant", - None => "function", - }, - if c_variadic { "at least " } else { "" }, - potentially_plural_count(expected_count, "argument"), - potentially_plural_count(arg_count, "argument"), - if arg_count == 1 { "was" } else { "were" } - ), - DiagnosticId::Error(error_code.to_owned()), - ); - let label = format!("supplied {}", potentially_plural_count(arg_count, "argument")); - for (i, span) in arg_spans.into_iter().enumerate() { - err.span_label( - span, - if arg_count == 0 || i + 1 == arg_count { &label } else { "" }, - ); - } - - if let Some(def_id) = fn_def_id { - if let Some(def_span) = tcx.def_ident_span(def_id) { - let mut spans: MultiSpan = def_span.into(); - - let params = tcx - .hir() - .get_if_local(def_id) - .and_then(|node| node.body_id()) - .into_iter() - .map(|id| tcx.hir().body(id).params) - .flatten(); - - for param in params { - spans.push_span_label(param.span, String::new()); - } - - let def_kind = tcx.def_kind(def_id); - err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id))); - } - } - - if sugg_unit { - let sugg_span = tcx.sess.source_map().end_point(call_expr.span); - // remove closing `)` from the span - let sugg_span = sugg_span.shrink_to_lo(); - err.span_suggestion( - sugg_span, - "expected the unit value `()`; create it with empty parentheses", - String::from("()"), - Applicability::MachineApplicable, - ); - } else { - err.span_label( - span, - format!( - "expected {}{}", - if c_variadic { "at least " } else { "" }, - potentially_plural_count(expected_count, "argument") - ), - ); - } - err.emit(); - }; + // 
expected_count, arg_count, error_code, sugg_unit, sugg_tuple_wrap_args + let mut error: Option<(usize, usize, &str, bool, Option<FnArgsAsTuple<'_>>)> = None; + // If the arguments should be wrapped in a tuple (e.g. closures), unwrap them here let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments { let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]); match tuple_type.kind() { - ty::Tuple(arg_types) if arg_types.len() != provided_args.len() => { - param_count_error(arg_types.len(), provided_args.len(), "E0057", false, false); - (self.err_args(provided_args.len()), vec![]) - } + // We expected a tuple and got a tuple ty::Tuple(arg_types) => { + // Argument length differs + if arg_types.len() != provided_args.len() { + error = Some((arg_types.len(), provided_args.len(), "E0057", false, None)); + } let expected_input_tys = match expected_input_tys.get(0) { Some(&ty) => match ty.kind() { ty::Tuple(ref tys) => tys.iter().map(|k| k.expect_ty()).collect(), @@ -267,6 +155,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { (arg_types.iter().map(|k| k.expect_ty()).collect(), expected_input_tys) } _ => { + // Otherwise, there's a mismatch, so clear out what we're expecting, and set + // our input types to err_args so we don't blow up the error messages struct_span_err!( tcx.sess, call_span, @@ -284,7 +174,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if supplied_arg_count >= expected_arg_count { (formal_input_tys.to_vec(), expected_input_tys) } else { - param_count_error(expected_arg_count, supplied_arg_count, "E0060", true, false); + error = Some((expected_arg_count, supplied_arg_count, "E0060", false, None)); (self.err_args(supplied_arg_count), vec![]) } } else { @@ -296,8 +186,25 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } else { false }; - param_count_error(expected_arg_count, supplied_arg_count, "E0061", false, sugg_unit); + // are we passing elements of a tuple without the tuple parentheses? + let expected_input_tys = if expected_input_tys.is_empty() { + // In most cases we can use expected_input_tys, but some callers won't have the type + // information, in which case we fall back to the types from the input expressions.
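+ // (Hypothetical illustration, not part of this change: for `fn f(pair: (i32, i32))` called
+ // as `f(1, 2)`, the expected types may be absent at this point, so the tuple-wrap comparison
+ // in `suggested_tuple_wrap` below runs against `formal_input_tys` instead.)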
+ formal_input_tys + } else { + &*expected_input_tys + }; + + let sugg_tuple_wrap_args = self.suggested_tuple_wrap(expected_input_tys, provided_args); + + error = Some(( + expected_arg_count, + supplied_arg_count, + "E0061", + sugg_unit, + sugg_tuple_wrap_args, + )); (self.err_args(supplied_arg_count), vec![]) }; @@ -315,13 +222,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { assert_eq!(expected_input_tys.len(), formal_input_tys.len()); + let provided_arg_count: usize = provided_args.len(); + // Keep track of the fully coerced argument types - let mut final_arg_types: Vec<(usize, Ty<'_>, Ty<'_>)> = vec![]; + let mut final_arg_types: Vec<Option<(Ty<'_>, Ty<'_>)>> = vec![None; provided_arg_count]; // We introduce a helper function to demand that a given argument satisfy a given input // This is more complicated than just checking type equality, as arguments could be coerced // This version writes those types back so further type checking uses the narrowed types - let demand_compatible = |idx, final_arg_types: &mut Vec<(usize, Ty<'tcx>, Ty<'tcx>)>| { + let demand_compatible = |idx, final_arg_types: &mut Vec<Option<(Ty<'tcx>, Ty<'tcx>)>>| { let formal_input_ty: Ty<'tcx> = formal_input_tys[idx]; let expected_input_ty: Ty<'tcx> = expected_input_tys[idx]; let provided_arg = &provided_args[idx]; @@ -340,13 +249,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty); // Keep track of these for below - final_arg_types.push((idx, checked_ty, coerced_ty)); + final_arg_types[idx] = Some((checked_ty, coerced_ty)); // Cause selection errors caused by resolving a single argument to point at the // argument and not the call. This is otherwise redundant with the `demand_coerce` // call immediately after, but it lets us customize the span pointed to in the // fulfillment error to be more accurate. - let _ = + let coerced_ty = self.resolve_vars_with_obligations_and_mutate_fulfillment(coerced_ty, |errors| { self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr); self.point_at_arg_instead_of_call_if_possible( @@ -358,6 +267,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); }); + final_arg_types[idx] = Some((checked_ty, coerced_ty)); + // We're processing function arguments so we definitely want to use // two-phase borrows. self.demand_coerce(&provided_arg, checked_ty, coerced_ty, None, AllowTwoPhase::Yes); @@ -416,6 +327,133 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } + // If there was an error in parameter count, emit that here + if let Some((expected_count, arg_count, err_code, sugg_unit, sugg_tuple_wrap_args)) = error + { + let (span, start_span, args, ctor_of) = match &call_expr.kind { + hir::ExprKind::Call( + hir::Expr { + span, + kind: + hir::ExprKind::Path(hir::QPath::Resolved( + _, + hir::Path { res: Res::Def(DefKind::Ctor(of, _), _), .. }, + )), + .. + }, + args, + ) => (*span, *span, &args[..], Some(of)), + hir::ExprKind::Call(hir::Expr { span, .. }, args) => { + (*span, *span, &args[..], None) + } + hir::ExprKind::MethodCall(path_segment, args, _) => ( + path_segment.ident.span, + // `sp` doesn't point at the whole `foo.bar()`, only at `bar`. + path_segment + .args + .and_then(|args| args.args.iter().last()) + // Account for `foo.bar::<T>()`. + .map(|arg| { + // Skip the closing `>`. + tcx.sess + .source_map() + .next_point(tcx.sess.source_map().next_point(arg.span())) + }) + .unwrap_or(path_segment.ident.span), + &args[1..], // Skip the receiver. 
+ None, // methods are never ctors + ), + k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k), + }; + let arg_spans = if provided_args.is_empty() { + // foo() + // ^^^-- supplied 0 arguments + // | + // expected 2 arguments + vec![tcx.sess.source_map().next_point(start_span).with_hi(call_span.hi())] + } else { + // foo(1, 2, 3) + // ^^^ - - - supplied 3 arguments + // | + // expected 2 arguments + args.iter().map(|arg| arg.span).collect::<Vec<Span>>() + }; + let call_name = match ctor_of { + Some(CtorOf::Struct) => "struct", + Some(CtorOf::Variant) => "enum variant", + None => "function", + }; + let mut err = tcx.sess.struct_span_err_with_code( + span, + &format!( + "this {} takes {}{} but {} {} supplied", + call_name, + if c_variadic { "at least " } else { "" }, + potentially_plural_count(expected_count, "argument"), + potentially_plural_count(arg_count, "argument"), + if arg_count == 1 { "was" } else { "were" } + ), + DiagnosticId::Error(err_code.to_owned()), + ); + let label = format!("supplied {}", potentially_plural_count(arg_count, "argument")); + for (i, span) in arg_spans.into_iter().enumerate() { + err.span_label( + span, + if arg_count == 0 || i + 1 == arg_count { &label } else { "" }, + ); + } + if let Some(def_id) = fn_def_id { + if let Some(def_span) = tcx.def_ident_span(def_id) { + let mut spans: MultiSpan = def_span.into(); + + let params = tcx + .hir() + .get_if_local(def_id) + .and_then(|node| node.body_id()) + .into_iter() + .map(|id| tcx.hir().body(id).params) + .flatten(); + + for param in params { + spans.push_span_label(param.span, String::new()); + } + + let def_kind = tcx.def_kind(def_id); + err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id))); + } + } + if sugg_unit { + let sugg_span = tcx.sess.source_map().end_point(call_expr.span); + // remove closing `)` from the span + let sugg_span = sugg_span.shrink_to_lo(); + err.span_suggestion( + sugg_span, + "expected the unit value `()`; create it with empty parentheses", + String::from("()"), + Applicability::MachineApplicable, + ); + } else if let Some(FnArgsAsTuple { first, last }) = sugg_tuple_wrap_args { + err.multipart_suggestion( + "use parentheses to construct a tuple", + vec![ + (first.span.shrink_to_lo(), '('.to_string()), + (last.span.shrink_to_hi(), ')'.to_string()), + ], + Applicability::MachineApplicable, + ); + } else { + err.span_label( + span, + format!( + "expected {}{}", + if c_variadic { "at least " } else { "" }, + potentially_plural_count(expected_count, "argument") + ), + ); + } + err.emit(); + } + // We also need to make sure we at least write the ty of the other // arguments which we skipped above. if c_variadic { @@ -452,6 +490,35 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } + fn suggested_tuple_wrap( + &self, + expected_input_tys: &[Ty<'tcx>], + provided_args: &'tcx [hir::Expr<'tcx>], + ) -> Option<FnArgsAsTuple<'_>> { + let [expected_arg_type] = &expected_input_tys[..] 
else { return None }; + + let ty::Tuple(expected_elems) = self.resolve_vars_if_possible(*expected_arg_type).kind() + else { return None }; + + let expected_types: Vec<_> = expected_elems.iter().map(|k| k.expect_ty()).collect(); + let supplied_types: Vec<_> = provided_args.iter().map(|arg| self.check_expr(arg)).collect(); + + let all_match = iter::zip(expected_types, supplied_types) + .all(|(expected, supplied)| self.can_eq(self.param_env, expected, supplied).is_ok()); + + if all_match { + match provided_args { + [] => None, + [_] => unreachable!( + "shouldn't reach here - need count mismatch between 1-tuple and 1-argument" + ), + [first, .., last] => Some(FnArgsAsTuple { first, last }), + } + } else { + None + } + } + // AST fragment checking pub(in super::super) fn check_lit( &self, @@ -975,7 +1042,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { fn point_at_arg_instead_of_call_if_possible( &self, errors: &mut Vec<traits::FulfillmentError<'tcx>>, - final_arg_types: &[(usize, Ty<'tcx>, Ty<'tcx>)], + final_arg_types: &[Option<(Ty<'tcx>, Ty<'tcx>)>], expr: &'tcx hir::Expr<'tcx>, call_sp: Span, args: &'tcx [hir::Expr<'tcx>], @@ -1016,7 +1083,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ObligationCauseCode::BuiltinDerivedObligation(code) | ObligationCauseCode::ImplDerivedObligation(code) | ObligationCauseCode::DerivedObligation(code) => { - code.parent_trait_ref.self_ty().skip_binder().into() + code.parent_trait_pred.self_ty().skip_binder().into() } _ if let ty::PredicateKind::Trait(predicate) = error.obligation.predicate.kind().skip_binder() => { @@ -1030,8 +1097,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // `FulfillmentError`. let mut referenced_in = final_arg_types .iter() - .map(|&(i, checked_ty, _)| (i, checked_ty)) - .chain(final_arg_types.iter().map(|&(i, _, coerced_ty)| (i, coerced_ty))) + .enumerate() + .filter_map(|(i, arg)| match arg { + Some((checked_ty, coerce_ty)) => Some([(i, *checked_ty), (i, *coerce_ty)]), + _ => None, + }) + .flatten() .flat_map(|(i, ty)| { let ty = self.resolve_vars_if_possible(ty); // We walk the argument type because the argument's type could have diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs index be4c9ec99b9..86cf850d723 100644 --- a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs +++ b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs @@ -237,7 +237,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) { let mut suggestions = iter::zip(iter::repeat(&expr_text), &methods) .filter_map(|(receiver, method)| { - let method_call = format!(".{}()", method.ident); + let method_call = format!(".{}()", method.name); if receiver.ends_with(&method_call) { None // do not suggest code that is already there (#53348) } else { diff --git a/compiler/rustc_typeck/src/check/generator_interior.rs b/compiler/rustc_typeck/src/check/generator_interior.rs index 56b6dd9a284..c6b92db88ae 100644 --- a/compiler/rustc_typeck/src/check/generator_interior.rs +++ b/compiler/rustc_typeck/src/check/generator_interior.rs @@ -22,6 +22,11 @@ use tracing::debug; mod drop_ranges; +// FIXME(eholk): This flag is here to give a quick way to disable drop tracking in case we find +// unexpected breakages while it's still new. It should be removed before too long. For example, +// see #93161. 
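+// When the flag is `false`, the `is_dropped_at` check below is short-circuited, so a value
+// that is live across a `yield` is still recorded in the generator interior even when drop
+// tracking would have considered it already dropped at that yield point.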
+const ENABLE_DROP_TRACKING: bool = false; + struct InteriorVisitor<'a, 'tcx> { fcx: &'a FnCtxt<'a, 'tcx>, types: FxIndexSet<ty::GeneratorInteriorTypeCause<'tcx>>, @@ -77,7 +82,10 @@ impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> { yield_data.expr_and_pat_count, self.expr_count, source_span ); - if self.drop_ranges.is_dropped_at(hir_id, yield_data.expr_and_pat_count) + if ENABLE_DROP_TRACKING + && self + .drop_ranges + .is_dropped_at(hir_id, yield_data.expr_and_pat_count) { debug!("value is dropped at yield point; not recording"); return false; diff --git a/compiler/rustc_typeck/src/check/intrinsic.rs b/compiler/rustc_typeck/src/check/intrinsic.rs index 4c612ed5be5..74f6f50d412 100644 --- a/compiler/rustc_typeck/src/check/intrinsic.rs +++ b/compiler/rustc_typeck/src/check/intrinsic.rs @@ -297,6 +297,11 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { sym::const_allocate => { (0, vec![tcx.types.usize, tcx.types.usize], tcx.mk_mut_ptr(tcx.types.u8)) } + sym::const_deallocate => ( + 0, + vec![tcx.mk_mut_ptr(tcx.types.u8), tcx.types.usize, tcx.types.usize], + tcx.mk_unit(), + ), sym::ptr_offset_from => { (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.isize) diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_typeck/src/check/method/probe.rs index 86f3568d2e3..3815fd1992b 100644 --- a/compiler/rustc_typeck/src/check/method/probe.rs +++ b/compiler/rustc_typeck/src/check/method/probe.rs @@ -1033,7 +1033,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { true } }) - .map(|candidate| candidate.item.ident) + .map(|candidate| candidate.item.ident(self.tcx)) .filter(|&name| set.insert(name)) .collect(); @@ -1438,7 +1438,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { "<{} as {}>::{}", stable_pick.self_ty, self.tcx.def_path_str(def_id), - stable_pick.item.ident + stable_pick.item.name ), Applicability::MachineApplicable, ); @@ -1748,14 +1748,12 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { let best_name = { let names = applicable_close_candidates .iter() - .map(|cand| cand.ident.name) + .map(|cand| cand.name) .collect::<Vec<Symbol>>(); find_best_match_for_name(&names, self.method_name.unwrap().name, None) } .unwrap(); - Ok(applicable_close_candidates - .into_iter() - .find(|method| method.ident.name == best_name)) + Ok(applicable_close_candidates.into_iter().find(|method| method.name == best_name)) } }) } @@ -1906,8 +1904,13 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { .associated_items(def_id) .in_definition_order() .filter(|x| { - let dist = lev_distance(name.as_str(), x.ident.as_str()); - x.kind.namespace() == Namespace::ValueNS && dist > 0 && dist <= max_dist + if x.kind.namespace() != Namespace::ValueNS { + return false; + } + match lev_distance(name.as_str(), x.name.as_str(), max_dist) { + Some(d) => d > 0, + None => false, + } }) .copied() .collect() diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs index 96ab800afaf..58ea197d3e9 100644 --- a/compiler/rustc_typeck/src/check/method/suggest.rs +++ b/compiler/rustc_typeck/src/check/method/suggest.rs @@ -823,9 +823,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { _ => None, }) { - let parent_trait_ref = data.parent_trait_ref; + let parent_trait_ref = data.parent_trait_pred; let parent_def_id = parent_trait_ref.def_id(); - let path = parent_trait_ref.print_only_trait_path(); + let path = parent_trait_ref.print_modifiers_and_trait_path(); let tr_self_ty = parent_trait_ref.skip_binder().self_ty(); let mut candidates = vec![]; 
self.tcx.for_each_relevant_impl( @@ -1025,7 +1025,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { def_kind.article(), def_kind.descr(lev_candidate.def_id), ), - lev_candidate.ident.to_string(), + lev_candidate.name.to_string(), Applicability::MaybeIncorrect, ); } @@ -1480,7 +1480,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let skip = skippable.contains(&did); if pick.autoderefs == 0 && !skip { err.span_label( - pick.item.ident.span, + pick.item.ident(self.tcx).span, &format!("the method is available for `{}` here", rcvr_ty), ); } @@ -1514,7 +1514,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // an autoderef to `&self` if pick.autoderefs == 0 && !skip { err.span_label( - pick.item.ident.span, + pick.item.ident(self.tcx).span, &format!("the method is available for `{}` here", new_rcvr_t), ); err.multipart_suggestion( diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs index 34caabe44d6..6e0b902a00b 100644 --- a/compiler/rustc_typeck/src/check/mod.rs +++ b/compiler/rustc_typeck/src/check/mod.rs @@ -599,7 +599,7 @@ fn missing_items_err( ) { let missing_items_msg = missing_items .iter() - .map(|trait_item| trait_item.ident.to_string()) + .map(|trait_item| trait_item.name.to_string()) .collect::<Vec<_>>() .join("`, `"); @@ -628,7 +628,7 @@ fn missing_items_err( let msg = format!("implement the missing item: `{}`", snippet); let appl = Applicability::HasPlaceholders; if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) { - err.span_label(span, format!("`{}` from trait", trait_item.ident)); + err.span_label(span, format!("`{}` from trait", trait_item.name)); err.tool_only_span_suggestion(sugg_sp, &msg, code, appl); } else { err.span_suggestion_hidden(sugg_sp, &msg, code, appl); @@ -805,16 +805,16 @@ fn suggestion_signature(assoc: &ty::AssocItem, tcx: TyCtxt<'_>) -> String { fn_sig_suggestion( tcx, tcx.fn_sig(assoc.def_id).skip_binder(), - assoc.ident, + assoc.ident(tcx), tcx.predicates_of(assoc.def_id), assoc, ) } - ty::AssocKind::Type => format!("type {} = Type;", assoc.ident), + ty::AssocKind::Type => format!("type {} = Type;", assoc.name), ty::AssocKind::Const => { let ty = tcx.type_of(assoc.def_id); let val = expr::ty_kind_suggestion(ty).unwrap_or("value"); - format!("const {}: {} = {};", assoc.ident, ty, val) + format!("const {}: {} = {};", assoc.name, ty, val) } } } diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs index c20c457de85..74516acbfcf 100644 --- a/compiler/rustc_typeck/src/check/op.rs +++ b/compiler/rustc_typeck/src/check/op.rs @@ -549,16 +549,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { is_assign: IsAssign, op: hir::BinOp, ) -> bool { - let source_map = self.tcx.sess.source_map(); - let remove_borrow_msg = "String concatenation appends the string on the right to the \ - string on the left and may require reallocation. This \ - requires ownership of the string on the left"; - - let msg = "`to_owned()` can be used to create an owned `String` \ - from a string reference. String concatenation \ - appends the string on the right to the string \ - on the left and may require reallocation. 
This \ - requires ownership of the string on the left"; + let str_concat_note = "string concatenation requires an owned `String` on the left"; + let rm_borrow_msg = "remove the borrow to obtain an owned `String`"; + let to_owned_msg = "create an owned `String` from a string reference"; let string_type = self.tcx.get_diagnostic_item(sym::String); let is_std_string = |ty: Ty<'tcx>| match ty.ty_adt_def() { @@ -574,31 +567,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) => { if let IsAssign::No = is_assign { // Do not supply this message if `&str += &str` - err.span_label( - op.span, - "`+` cannot be used to concatenate two `&str` strings", - ); - match source_map.span_to_snippet(lhs_expr.span) { - Ok(lstring) => { - err.span_suggestion( - lhs_expr.span, - if lstring.starts_with('&') { - remove_borrow_msg - } else { - msg - }, - if let Some(stripped) = lstring.strip_prefix('&') { - // let a = String::new(); - // let _ = &a + "bar"; - stripped.to_string() - } else { - format!("{}.to_owned()", lstring) - }, - Applicability::MachineApplicable, - ) - } - _ => err.help(msg), - }; + err.span_label(op.span, "`+` cannot be used to concatenate two `&str` strings"); + err.note(str_concat_note); + if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind { + err.span_suggestion_verbose( + lhs_expr.span.until(lhs_inner_expr.span), + rm_borrow_msg, + "".to_owned(), + Applicability::MachineApplicable + ); + } else { + err.span_suggestion_verbose( + lhs_expr.span.shrink_to_hi(), + to_owned_msg, + ".to_owned()".to_owned(), + Applicability::MachineApplicable + ); + } } true } @@ -609,32 +594,30 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { op.span, "`+` cannot be used to concatenate a `&str` with a `String`", ); - match ( - source_map.span_to_snippet(lhs_expr.span), - source_map.span_to_snippet(rhs_expr.span), - is_assign, - ) { - (Ok(l), Ok(r), IsAssign::No) => { - let to_string = if let Some(stripped) = l.strip_prefix('&') { - // let a = String::new(); let b = String::new(); - // let _ = &a + b; - stripped.to_string() + match is_assign { + IsAssign::No => { + let sugg_msg; + let lhs_sugg = if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind { + sugg_msg = "remove the borrow on the left and add one on the right"; + (lhs_expr.span.until(lhs_inner_expr.span), "".to_owned()) } else { - format!("{}.to_owned()", l) + sugg_msg = "create an owned `String` on the left and add a borrow on the right"; + (lhs_expr.span.shrink_to_hi(), ".to_owned()".to_owned()) }; - err.multipart_suggestion( - msg, - vec![ - (lhs_expr.span, to_string), - (rhs_expr.span, format!("&{}", r)), - ], + let suggestions = vec![ + lhs_sugg, + (rhs_expr.span.shrink_to_lo(), "&".to_owned()), + ]; + err.multipart_suggestion_verbose( + sugg_msg, + suggestions, Applicability::MachineApplicable, ); } - _ => { - err.help(msg); + IsAssign::Yes => { + err.note(str_concat_note); } - }; + } true } _ => false, diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_typeck/src/check/wfcheck.rs index 606a2d6a24e..71f45320e49 100644 --- a/compiler/rustc_typeck/src/check/wfcheck.rs +++ b/compiler/rustc_typeck/src/check/wfcheck.rs @@ -312,7 +312,7 @@ fn check_gat_where_clauses( // of the function signature. In our example, the GAT in the return // type is `<Self as LendingIterator>::Item<'a>`, so 'a and Self are arguments. 
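// As a concrete sketch of the trait shape that example assumes (hypothetical, not code from
// this change):
//
//     trait LendingIterator {
//         type Item<'a> where Self: 'a;
//         fn next<'a>(&'a mut self) -> Option<Self::Item<'a>>;
//     }
//
// so `sig.output()` here is `Option<<Self as LendingIterator>::Item<'a>>`.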
let (regions, types) = - GATSubstCollector::visit(trait_item.def_id.to_def_id(), sig.output()); + GATSubstCollector::visit(tcx, trait_item.def_id.to_def_id(), sig.output()); // If both regions and types are empty, then this GAT isn't in the // return type, and we shouldn't try to do clause analysis @@ -602,6 +602,7 @@ fn resolve_regions_with_wf_tys<'tcx>( /// the two vectors, `regions` and `types` (depending on their kind). For each /// parameter `Pi` also track the index `i`. struct GATSubstCollector<'tcx> { + tcx: TyCtxt<'tcx>, gat: DefId, // Which region appears and which parameter index its subsituted for regions: FxHashSet<(ty::Region<'tcx>, usize)>, @@ -611,11 +612,16 @@ struct GATSubstCollector<'tcx> { impl<'tcx> GATSubstCollector<'tcx> { fn visit<T: TypeFoldable<'tcx>>( + tcx: TyCtxt<'tcx>, gat: DefId, t: T, ) -> (FxHashSet<(ty::Region<'tcx>, usize)>, FxHashSet<(Ty<'tcx>, usize)>) { - let mut visitor = - GATSubstCollector { gat, regions: FxHashSet::default(), types: FxHashSet::default() }; + let mut visitor = GATSubstCollector { + tcx, + gat, + regions: FxHashSet::default(), + types: FxHashSet::default(), + }; t.visit_with(&mut visitor); (visitor.regions, visitor.types) } @@ -624,6 +630,13 @@ impl<'tcx> GATSubstCollector<'tcx> { impl<'tcx> TypeVisitor<'tcx> for GATSubstCollector<'tcx> { type BreakTy = !; + fn visit_binder<T: TypeFoldable<'tcx>>( + &mut self, + t: &ty::Binder<'tcx, T>, + ) -> ControlFlow<Self::BreakTy> { + self.tcx.liberate_late_bound_regions(self.gat, t.clone()).visit_with(self) + } + fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { match t.kind() { ty::Projection(p) if p.item_def_id == self.gat => { @@ -851,7 +864,7 @@ fn check_associated_item( let hir_sig = sig_if_method.expect("bad signature for method"); check_fn_or_method( fcx, - item.ident.span, + item.ident(fcx.tcx).span, sig, hir_sig.decl, item.def_id, diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs index 59f211bd2c3..a409201372b 100644 --- a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs +++ b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs @@ -36,7 +36,7 @@ impl<'tcx> InherentOverlapChecker<'tcx> { for item1 in impl_items1.in_definition_order() { let collision = impl_items2 - .filter_by_name_unhygienic(item1.ident.name) + .filter_by_name_unhygienic(item1.name) .any(|item2| self.compare_hygienically(item1, item2)); if collision { @@ -50,7 +50,8 @@ impl<'tcx> InherentOverlapChecker<'tcx> { fn compare_hygienically(&self, item1: &ty::AssocItem, item2: &ty::AssocItem) -> bool { // Symbols and namespace match, compare hygienically. 
item1.kind.namespace() == item2.kind.namespace() - && item1.ident.normalize_to_macros_2_0() == item2.ident.normalize_to_macros_2_0() + && item1.ident(self.tcx).normalize_to_macros_2_0() + == item2.ident(self.tcx).normalize_to_macros_2_0() } fn check_for_common_items_in_impls( @@ -64,11 +65,11 @@ impl<'tcx> InherentOverlapChecker<'tcx> { for item1 in impl_items1.in_definition_order() { let collision = impl_items2 - .filter_by_name_unhygienic(item1.ident.name) + .filter_by_name_unhygienic(item1.name) .find(|item2| self.compare_hygienically(item1, item2)); if let Some(item2) = collision { - let name = item1.ident.normalize_to_macros_2_0(); + let name = item1.ident(self.tcx).normalize_to_macros_2_0(); let mut err = struct_span_err!( self.tcx.sess, self.tcx.span_of_impl(item1.def_id).unwrap(), @@ -181,11 +182,11 @@ impl<'tcx> ItemLikeVisitor<'_> for InherentOverlapChecker<'tcx> { let mut ids = impl_items .in_definition_order() .filter_map(|item| { - let entry = connected_region_ids.entry(item.ident.name); + let entry = connected_region_ids.entry(item.name); if let Entry::Occupied(e) = &entry { Some(*e.get()) } else { - idents_to_add.push(item.ident.name); + idents_to_add.push(item.name); None } }) |
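For reference, a minimal, hypothetical program (not part of this diff) of the kind the inherent-impl overlap check above rejects: two inherent impls of the same type define an item with the same name in the same namespace, which fails to compile with E0592.

    struct S;

    impl S {
        fn get(&self) -> u32 {
            1
        }
    }

    impl S {
        // Collides with the `get` in the first impl block:
        // error[E0592]: duplicate definitions with name `get`
        fn get(&self) -> u32 {
            2
        }
    }

    fn main() {}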
