author     bors <bors@rust-lang.org>  2023-02-06 20:20:53 +0000
committer  bors <bors@rust-lang.org>  2023-02-06 20:20:53 +0000
commit     e1eaa2d5d4d1f5b7b89561a940718058d414e89c
tree       1987fc60ff88039718ef203de7a201bc469fb077 /compiler
parent     7ff69b49dfcc0f7d37ee17e36f67c57168c44073
parent     5e958293e3d2bf3ee0b7e9101ea33c385ab704bc
Auto merge of #107738 - matthiaskrgr:rollup-o18lzi8, r=matthiaskrgr
Rollup of 9 pull requests

Successful merges:

 - #106477 (Refine error spans for "The trait bound `T: Trait` is not satisfied" when passing literal structs/tuples)
 - #107596 (Add nicer output to PGO build timer)
 - #107692 (Sort Generator `print-type-sizes` according to their yield points)
 - #107714 (Clarify wording on f64::round() and f32::round())
 - #107720 (end entry paragraph with a period (.))
 - #107724 (remove unused rustc_* imports)
 - #107725 (Turn MarkdownWithToc into a struct with named fields)
 - #107731 (interpret: move discriminant reading and writing to separate file)
 - #107735 (Add mailmap for commits made by xes@meta.com)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_ast_lowering/Cargo.toml | 2
-rw-r--r--  compiler/rustc_ast_pretty/Cargo.toml | 1
-rw-r--r--  compiler/rustc_codegen_llvm/Cargo.toml | 1
-rw-r--r--  compiler/rustc_codegen_ssa/Cargo.toml | 1
-rw-r--r--  compiler/rustc_const_eval/Cargo.toml | 1
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs | 238
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs | 1
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 153
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 87
-rw-r--r--  compiler/rustc_hir_analysis/Cargo.toml | 3
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs | 457
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs | 84
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs | 1
-rw-r--r--  compiler/rustc_hir_typeck/src/method/probe.rs | 1
-rw-r--r--  compiler/rustc_infer/Cargo.toml | 1
-rw-r--r--  compiler/rustc_infer/src/traits/util.rs | 50
-rw-r--r--  compiler/rustc_interface/Cargo.toml | 1
-rw-r--r--  compiler/rustc_middle/src/traits/mod.rs | 2
-rw-r--r--  compiler/rustc_mir_build/Cargo.toml | 1
-rw-r--r--  compiler/rustc_mir_dataflow/Cargo.toml | 1
-rw-r--r--  compiler/rustc_privacy/Cargo.toml | 1
-rw-r--r--  compiler/rustc_query_impl/Cargo.toml | 1
-rw-r--r--  compiler/rustc_session/src/code_stats.rs | 6
-rw-r--r--  compiler/rustc_trait_selection/Cargo.toml | 1
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/confirmation.rs | 1
-rw-r--r--  compiler/rustc_trait_selection/src/traits/select/mod.rs | 3
-rw-r--r--  compiler/rustc_traits/Cargo.toml | 1
-rw-r--r--  compiler/rustc_ty_utils/src/layout.rs | 11
28 files changed, 792 insertions(+), 320 deletions(-)
diff --git a/compiler/rustc_ast_lowering/Cargo.toml b/compiler/rustc_ast_lowering/Cargo.toml
index 6a59b9e6151..6e76c349a4a 100644
--- a/compiler/rustc_ast_lowering/Cargo.toml
+++ b/compiler/rustc_ast_lowering/Cargo.toml
@@ -7,7 +7,6 @@ edition = "2021"
 doctest = false
 
 [dependencies]
-rustc_arena = { path = "../rustc_arena" }
 rustc_ast = { path = "../rustc_ast" }
 rustc_ast_pretty = { path = "../rustc_ast_pretty" }
 rustc_data_structures = { path = "../rustc_data_structures" }
@@ -16,7 +15,6 @@ rustc_hir = { path = "../rustc_hir" }
 rustc_index = { path = "../rustc_index" }
 rustc_middle = { path = "../rustc_middle" }
 rustc_macros = { path = "../rustc_macros" }
-rustc_query_system = { path = "../rustc_query_system" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 rustc_target = { path = "../rustc_target" }
diff --git a/compiler/rustc_ast_pretty/Cargo.toml b/compiler/rustc_ast_pretty/Cargo.toml
index b4900dc39a8..d1513c114fe 100644
--- a/compiler/rustc_ast_pretty/Cargo.toml
+++ b/compiler/rustc_ast_pretty/Cargo.toml
@@ -7,5 +7,4 @@ edition = "2021"
 
 [dependencies]
 rustc_ast = { path = "../rustc_ast" }
-rustc_parse_format = { path = "../rustc_parse_format" }
 rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
index 9c1bcd431ec..773c0ebbe59 100644
--- a/compiler/rustc_codegen_llvm/Cargo.toml
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -30,7 +30,6 @@ rustc_macros = { path = "../rustc_macros" }
 rustc_metadata = { path = "../rustc_metadata" }
 rustc_query_system = { path = "../rustc_query_system" }
 rustc_session = { path = "../rustc_session" }
-rustc_serialize = { path = "../rustc_serialize" }
 rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
 rustc_target = { path = "../rustc_target" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
index 0d2d2ec68a2..c55991e00d3 100644
--- a/compiler/rustc_codegen_ssa/Cargo.toml
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -41,7 +41,6 @@ rustc_metadata = { path = "../rustc_metadata" }
 rustc_query_system = { path = "../rustc_query_system" }
 rustc_target = { path = "../rustc_target" }
 rustc_session = { path = "../rustc_session" }
-rustc_const_eval = { path = "../rustc_const_eval" }
 
 [dependencies.object]
 version = "0.30.1"
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
index 51489e29360..98ac36c1ced 100644
--- a/compiler/rustc_const_eval/Cargo.toml
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -19,7 +19,6 @@ rustc_infer = { path = "../rustc_infer" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_middle = { path = "../rustc_middle" }
 rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
-rustc_query_system = { path = "../rustc_query_system" }
 rustc_session = { path = "../rustc_session" }
 rustc_target = { path = "../rustc_target" }
 rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
new file mode 100644
index 00000000000..557e721249d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -0,0 +1,238 @@
+//! Functions for reading and writing discriminants of multi-variant layouts (enums and generators).
+
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{ImmTy, InterpCx, InterpResult, Machine, OpTy, PlaceTy, Scalar};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+    /// Writes the discriminant of the given variant.
+    #[instrument(skip(self), level = "trace")]
+    pub fn write_discriminant(
+        &mut self,
+        variant_index: VariantIdx,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx> {
+        // Layout computation excludes uninhabited variants from consideration
+        // therefore there's no way to represent those variants in the given layout.
+        // Essentially, uninhabited variants do not have a tag that corresponds to their
+        // discriminant, so we cannot do anything here.
+        // When evaluating we will always error before even getting here, but ConstProp 'executes'
+        // dead code, so we cannot ICE here.
+        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+            throw_ub!(UninhabitedEnumVariantWritten)
+        }
+
+        match dest.layout.variants {
+            abi::Variants::Single { index } => {
+                assert_eq!(index, variant_index);
+            }
+            abi::Variants::Multiple {
+                tag_encoding: TagEncoding::Direct,
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+                let discr_val =
+                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+                // raw discriminants for enums are isize or bigger during
+                // their computation, but the in-memory tag is the smallest possible
+                // representation
+                let size = tag_layout.size(self);
+                let tag_val = size.truncate(discr_val);
+
+                let tag_dest = self.place_field(dest, tag_field)?;
+                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+            }
+            abi::Variants::Multiple {
+                tag_encoding:
+                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
+                tag: tag_layout,
+                tag_field,
+                ..
+            } => {
+                // No need to validate the discriminant here because the
+                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+                if variant_index != untagged_variant {
+                    let variants_start = niche_variants.start().as_u32();
+                    let variant_index_relative = variant_index
+                        .as_u32()
+                        .checked_sub(variants_start)
+                        .expect("overflow computing relative variant idx");
+                    // We need to use machine arithmetic when taking into account `niche_start`:
+                    // tag_val = variant_index_relative + niche_start_val
+                    let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
+                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                    let variant_index_relative_val =
+                        ImmTy::from_uint(variant_index_relative, tag_layout);
+                    let tag_val = self.binary_op(
+                        mir::BinOp::Add,
+                        &variant_index_relative_val,
+                        &niche_start_val,
+                    )?;
+                    // Write result.
+                    let niche_dest = self.place_field(dest, tag_field)?;
+                    self.write_immediate(*tag_val, &niche_dest)?;
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Read discriminant, return the runtime value as well as the variant index.
+    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
+    #[instrument(skip(self), level = "trace")]
+    pub fn read_discriminant(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
+        trace!("read_discriminant_value {:#?}", op.layout);
+        // Get type and layout of the discriminant.
+        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+        trace!("discriminant type: {:?}", discr_layout.ty);
+
+        // We use "discriminant" to refer to the value associated with a particular enum variant.
+        // This is not to be confused with its "variant index", which is just determining its position in the
+        // declared list of variants -- they can differ with explicitly assigned discriminants.
+        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
+        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+            Variants::Single { index } => {
+                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+                    Some(discr) => {
+                        // This type actually has discriminants.
+                        assert_eq!(discr.ty, discr_layout.ty);
+                        Scalar::from_uint(discr.val, discr_layout.size)
+                    }
+                    None => {
+                        // On a type without actual discriminants, variant is 0.
+                        assert_eq!(index.as_u32(), 0);
+                        Scalar::from_uint(index.as_u32(), discr_layout.size)
+                    }
+                };
+                return Ok((discr, index));
+            }
+            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+                (tag, tag_encoding, tag_field)
+            }
+        };
+
+        // There are *three* layouts that come into play here:
+        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+        //   the `Scalar` we return.
+        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+        //   and used to interpret the value we read from the tag field.
+        //   For the return value, a cast to `discr_layout` is performed.
+        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+        // Get layout for tag.
+        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
+
+        // Read tag and sanity-check `tag_layout`.
+        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+        assert_eq!(tag_layout.size, tag_val.layout.size);
+        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        trace!("tag value: {}", tag_val);
+
+        // Figure out which discriminant and variant this corresponds to.
+        Ok(match *tag_encoding {
+            TagEncoding::Direct => {
+                let scalar = tag_val.to_scalar();
+                // Generate a specific error if `tag_val` is not an integer.
+                // (`tag_bits` itself is only used for error messages below.)
+                let tag_bits = scalar
+                    .try_to_int()
+                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+                    .assert_bits(tag_layout.size);
+                // Cast bits from tag layout to discriminant layout.
+                // After the checks we did above, this cannot fail, as
+                // discriminants are int-like.
+                let discr_val =
+                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
+                let discr_bits = discr_val.assert_bits(discr_layout.size);
+                // Convert discriminant to variant index, and catch invalid discriminants.
+                let index = match *op.layout.ty.kind() {
+                    ty::Adt(adt, _) => {
+                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+                    }
+                    ty::Generator(def_id, substs, _) => {
+                        let substs = substs.as_generator();
+                        substs
+                            .discriminants(def_id, *self.tcx)
+                            .find(|(_, var)| var.val == discr_bits)
+                    }
+                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+                }
+                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
+                // Return the cast value, and the index.
+                (discr_val, index.0)
+            }
+            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
+                let tag_val = tag_val.to_scalar();
+                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+                // discriminant (encoded in niche/tag) and variant index are the same.
+                let variants_start = niche_variants.start().as_u32();
+                let variants_end = niche_variants.end().as_u32();
+                let variant = match tag_val.try_to_int() {
+                    Err(dbg_val) => {
+                        // So this is a pointer then, and casting to an int failed.
+                        // Can only happen during CTFE.
+                        // The niche must be just 0, and the ptr not null, then we know this is
+                        // okay. Everything else, we conservatively reject.
+                        let ptr_valid = niche_start == 0
+                            && variants_start == variants_end
+                            && !self.scalar_may_be_null(tag_val)?;
+                        if !ptr_valid {
+                            throw_ub!(InvalidTag(dbg_val))
+                        }
+                        untagged_variant
+                    }
+                    Ok(tag_bits) => {
+                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
+                        // We need to use machine arithmetic to get the relative variant idx:
+                        // variant_index_relative = tag_val - niche_start_val
+                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+                        let variant_index_relative_val =
+                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+                        let variant_index_relative =
+                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+                        // Check if this is in the range that indicates an actual discriminant.
+                        if variant_index_relative <= u128::from(variants_end - variants_start) {
+                            let variant_index_relative = u32::try_from(variant_index_relative)
+                                .expect("we checked that this fits into a u32");
+                            // Then computing the absolute variant idx should not overflow any more.
+                            let variant_index = variants_start
+                                .checked_add(variant_index_relative)
+                                .expect("overflow computing absolute variant idx");
+                            let variants_len = op
+                                .layout
+                                .ty
+                                .ty_adt_def()
+                                .expect("tagged layout for non adt")
+                                .variants()
+                                .len();
+                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
+                            VariantIdx::from_u32(variant_index)
+                        } else {
+                            untagged_variant
+                        }
+                    }
+                };
+                // Compute the size of the scalar we need to return.
+                // No need to cast, because the variant index directly serves as discriminant and is
+                // encoded in the tag.
+                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+            }
+        })
+    }
+}
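The new `discriminant.rs` comments distinguish three notions: a variant's index (its position in the declared list of variants), its discriminant (the value associated with that variant), and the tag (how the discriminant is encoded in memory, either `TagEncoding::Direct` or `TagEncoding::Niche`). A minimal surface-level sketch of the first two, not part of this commit (the `HttpStatus` enum is invented purely for illustration):

```rust
// Illustrative only: variant index vs. discriminant value.
#[derive(Clone, Copy)]
enum HttpStatus {
    Ok = 200,       // variant index 0, discriminant 200
    NotFound = 404, // variant index 1, discriminant 404
}

fn main() {
    let s = HttpStatus::NotFound;
    // Casting a fieldless enum exposes its discriminant, not its variant index.
    assert_eq!(s as isize, 404);
    // `std::mem::discriminant` returns an opaque, comparable token.
    assert!(std::mem::discriminant(&s) == std::mem::discriminant(&HttpStatus::NotFound));
}
```

The tag itself stays a layout detail: for `TagEncoding::Direct` it is the discriminant truncated to the tag field's size, and for `TagEncoding::Niche` it is offset by `niche_start` and packed into another field's unused bit patterns, as the code above shows.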
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index 2e356f67bf3..86de4e4e32c 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -1,6 +1,7 @@
 //! An interpreter for MIR used in CTFE and by miri
 
 mod cast;
+mod discriminant;
 mod eval_context;
 mod intern;
 mod intrinsics;
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index a1b3985dce4..52613d5ca1f 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -4,13 +4,12 @@
 use either::{Either, Left, Right};
 
 use rustc_hir::def::Namespace;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
 use rustc_middle::ty::{ConstInt, Ty, ValTree};
 use rustc_middle::{mir, ty};
 use rustc_span::Span;
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
-use rustc_target::abi::{VariantIdx, Variants};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
 
 use super::{
     alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
@@ -657,154 +656,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         };
         Ok(OpTy { op, layout, align: Some(layout.align.abi) })
     }
-
-    /// Read discriminant, return the runtime value as well as the variant index.
-    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
-    pub fn read_discriminant(
-        &self,
-        op: &OpTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
-        trace!("read_discriminant_value {:#?}", op.layout);
-        // Get type and layout of the discriminant.
-        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
-        trace!("discriminant type: {:?}", discr_layout.ty);
-
-        // We use "discriminant" to refer to the value associated with a particular enum variant.
-        // This is not to be confused with its "variant index", which is just determining its position in the
-        // declared list of variants -- they can differ with explicitly assigned discriminants.
-        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
-        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
-        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
-            Variants::Single { index } => {
-                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
-                    Some(discr) => {
-                        // This type actually has discriminants.
-                        assert_eq!(discr.ty, discr_layout.ty);
-                        Scalar::from_uint(discr.val, discr_layout.size)
-                    }
-                    None => {
-                        // On a type without actual discriminants, variant is 0.
-                        assert_eq!(index.as_u32(), 0);
-                        Scalar::from_uint(index.as_u32(), discr_layout.size)
-                    }
-                };
-                return Ok((discr, index));
-            }
-            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
-                (tag, tag_encoding, tag_field)
-            }
-        };
-
-        // There are *three* layouts that come into play here:
-        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
-        //   the `Scalar` we return.
-        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
-        //   and used to interpret the value we read from the tag field.
-        //   For the return value, a cast to `discr_layout` is performed.
-        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
-        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
-
-        // Get layout for tag.
-        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
-
-        // Read tag and sanity-check `tag_layout`.
-        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
-        assert_eq!(tag_layout.size, tag_val.layout.size);
-        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
-        trace!("tag value: {}", tag_val);
-
-        // Figure out which discriminant and variant this corresponds to.
-        Ok(match *tag_encoding {
-            TagEncoding::Direct => {
-                let scalar = tag_val.to_scalar();
-                // Generate a specific error if `tag_val` is not an integer.
-                // (`tag_bits` itself is only used for error messages below.)
-                let tag_bits = scalar
-                    .try_to_int()
-                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
-                    .assert_bits(tag_layout.size);
-                // Cast bits from tag layout to discriminant layout.
-                // After the checks we did above, this cannot fail, as
-                // discriminants are int-like.
-                let discr_val =
-                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
-                let discr_bits = discr_val.assert_bits(discr_layout.size);
-                // Convert discriminant to variant index, and catch invalid discriminants.
-                let index = match *op.layout.ty.kind() {
-                    ty::Adt(adt, _) => {
-                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
-                    }
-                    ty::Generator(def_id, substs, _) => {
-                        let substs = substs.as_generator();
-                        substs
-                            .discriminants(def_id, *self.tcx)
-                            .find(|(_, var)| var.val == discr_bits)
-                    }
-                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
-                }
-                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
-                // Return the cast value, and the index.
-                (discr_val, index.0)
-            }
-            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
-                let tag_val = tag_val.to_scalar();
-                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
-                // discriminant (encoded in niche/tag) and variant index are the same.
-                let variants_start = niche_variants.start().as_u32();
-                let variants_end = niche_variants.end().as_u32();
-                let variant = match tag_val.try_to_int() {
-                    Err(dbg_val) => {
-                        // So this is a pointer then, and casting to an int failed.
-                        // Can only happen during CTFE.
-                        // The niche must be just 0, and the ptr not null, then we know this is
-                        // okay. Everything else, we conservatively reject.
-                        let ptr_valid = niche_start == 0
-                            && variants_start == variants_end
-                            && !self.scalar_may_be_null(tag_val)?;
-                        if !ptr_valid {
-                            throw_ub!(InvalidTag(dbg_val))
-                        }
-                        untagged_variant
-                    }
-                    Ok(tag_bits) => {
-                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
-                        // We need to use machine arithmetic to get the relative variant idx:
-                        // variant_index_relative = tag_val - niche_start_val
-                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
-                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
-                        let variant_index_relative_val =
-                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
-                        let variant_index_relative =
-                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
-                        // Check if this is in the range that indicates an actual discriminant.
-                        if variant_index_relative <= u128::from(variants_end - variants_start) {
-                            let variant_index_relative = u32::try_from(variant_index_relative)
-                                .expect("we checked that this fits into a u32");
-                            // Then computing the absolute variant idx should not overflow any more.
-                            let variant_index = variants_start
-                                .checked_add(variant_index_relative)
-                                .expect("overflow computing absolute variant idx");
-                            let variants_len = op
-                                .layout
-                                .ty
-                                .ty_adt_def()
-                                .expect("tagged layout for non adt")
-                                .variants()
-                                .len();
-                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
-                            VariantIdx::from_u32(variant_index)
-                        } else {
-                            untagged_variant
-                        }
-                    }
-                };
-                // Compute the size of the scalar we need to return.
-                // No need to cast, because the variant index directly serves as discriminant and is
-                // encoded in the tag.
-                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
-            }
-        })
-    }
 }
 
 // Some nodes are used a lot. Make sure they don't unintentionally get bigger.
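The relocated `read_discriminant` handles the `TagEncoding::Niche` case by subtracting `niche_start` from the tag and falling back to the `untagged_variant` when the result is out of range. One small, observable consequence of niche encoding, shown here only as an illustration and not part of this diff:

```rust
use std::mem::size_of;

fn main() {
    // Niche encoding: `None` is stored as the (otherwise impossible) null value
    // of the Box pointer, so no separate tag field is needed and the Option
    // stays pointer-sized.
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    // Direct encoding: `u64` has no spare bit patterns, so the tag needs its
    // own field and the Option grows.
    assert!(size_of::<Option<u64>>() > size_of::<u64>());
}
```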
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 8d4d0420cda..f82a41078d1 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -7,8 +7,8 @@ use either::{Either, Left, Right};
 use rustc_ast::Mutability;
 use rustc_middle::mir;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
-use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, VariantIdx};
 
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
@@ -767,87 +767,8 @@ where
         MPlaceTy { mplace, layout, align: layout.align.abi }
     }
 
-    /// Writes the discriminant of the given variant.
-    #[instrument(skip(self), level = "debug")]
-    pub fn write_discriminant(
-        &mut self,
-        variant_index: VariantIdx,
-        dest: &PlaceTy<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx> {
-        // Layout computation excludes uninhabited variants from consideration
-        // therefore there's no way to represent those variants in the given layout.
-        // Essentially, uninhabited variants do not have a tag that corresponds to their
-        // discriminant, so we cannot do anything here.
-        // When evaluating we will always error before even getting here, but ConstProp 'executes'
-        // dead code, so we cannot ICE here.
-        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
-            throw_ub!(UninhabitedEnumVariantWritten)
-        }
-
-        match dest.layout.variants {
-            abi::Variants::Single { index } => {
-                assert_eq!(index, variant_index);
-            }
-            abi::Variants::Multiple {
-                tag_encoding: TagEncoding::Direct,
-                tag: tag_layout,
-                tag_field,
-                ..
-            } => {
-                // No need to validate that the discriminant here because the
-                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
-
-                let discr_val =
-                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
-
-                // raw discriminants for enums are isize or bigger during
-                // their computation, but the in-memory tag is the smallest possible
-                // representation
-                let size = tag_layout.size(self);
-                let tag_val = size.truncate(discr_val);
-
-                let tag_dest = self.place_field(dest, tag_field)?;
-                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
-            }
-            abi::Variants::Multiple {
-                tag_encoding:
-                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
-                tag: tag_layout,
-                tag_field,
-                ..
-            } => {
-                // No need to validate that the discriminant here because the
-                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
-
-                if variant_index != untagged_variant {
-                    let variants_start = niche_variants.start().as_u32();
-                    let variant_index_relative = variant_index
-                        .as_u32()
-                        .checked_sub(variants_start)
-                        .expect("overflow computing relative variant idx");
-                    // We need to use machine arithmetic when taking into account `niche_start`:
-                    // tag_val = variant_index_relative + niche_start_val
-                    let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
-                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
-                    let variant_index_relative_val =
-                        ImmTy::from_uint(variant_index_relative, tag_layout);
-                    let tag_val = self.binary_op(
-                        mir::BinOp::Add,
-                        &variant_index_relative_val,
-                        &niche_start_val,
-                    )?;
-                    // Write result.
-                    let niche_dest = self.place_field(dest, tag_field)?;
-                    self.write_immediate(*tag_val, &niche_dest)?;
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Writes the discriminant of the given variant.
-    #[instrument(skip(self), level = "debug")]
+    /// Writes the aggregate to the destination.
+    #[instrument(skip(self), level = "trace")]
     pub fn write_aggregate(
         &mut self,
         kind: &mir::AggregateKind<'tcx>,
diff --git a/compiler/rustc_hir_analysis/Cargo.toml b/compiler/rustc_hir_analysis/Cargo.toml
index 0761d8cdbd8..c939c8303bf 100644
--- a/compiler/rustc_hir_analysis/Cargo.toml
+++ b/compiler/rustc_hir_analysis/Cargo.toml
@@ -15,9 +15,7 @@ rustc_middle = { path = "../rustc_middle" }
 rustc_attr = { path = "../rustc_attr" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }
-rustc_graphviz = { path = "../rustc_graphviz" }
 rustc_hir = { path = "../rustc_hir" }
-rustc_hir_pretty = { path = "../rustc_hir_pretty" }
 rustc_target = { path = "../rustc_target" }
 rustc_session = { path = "../rustc_session" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
@@ -27,6 +25,5 @@ rustc_index = { path = "../rustc_index" }
 rustc_infer = { path = "../rustc_infer" }
 rustc_trait_selection = { path = "../rustc_trait_selection" }
 rustc_lint = { path = "../rustc_lint" }
-rustc_serialize = { path = "../rustc_serialize" }
 rustc_type_ir = { path = "../rustc_type_ir" }
 rustc_feature = { path = "../rustc_feature" }
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
new file mode 100644
index 00000000000..2eab68050d4
--- /dev/null
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/adjust_fulfillment_errors.rs
@@ -0,0 +1,457 @@
+use crate::FnCtxt;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_middle::ty::{self, DefIdTree, Ty};
+use rustc_trait_selection::traits;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /**
+     * Recursively searches for the most-specific blamable expression.
+     * For example, if you have a chain of constraints like:
+     * - want `Vec<i32>: Copy`
+     * - because `Option<Vec<i32>>: Copy` needs `Vec<i32>: Copy` because `impl <T: Copy> Copy for Option<T>`
+     * - because `(Option<Vec<i32>, bool)` needs `Option<Vec<i32>>: Copy` because `impl <A: Copy, B: Copy> Copy for (A, B)`
+     * then if you pass in `(Some(vec![1, 2, 3]), false)`, this helper `point_at_specific_expr_if_possible`
+     * will find the expression `vec![1, 2, 3]` as the "most blameable" reason for this missing constraint.
+     *
+     * This function only updates the error span.
+     */
+    pub fn blame_specific_expr_if_possible(
+        &self,
+        error: &mut traits::FulfillmentError<'tcx>,
+        expr: &'tcx hir::Expr<'tcx>,
+    ) {
+        // Whether it succeeded or failed, it likely made some amount of progress.
+        // In the very worst case, it's just the same `expr` we originally passed in.
+        let expr = match self.blame_specific_expr_if_possible_for_obligation_cause_code(
+            &error.obligation.cause.code(),
+            expr,
+        ) {
+            Ok(expr) => expr,
+            Err(expr) => expr,
+        };
+
+        // Either way, use this expression to update the error span.
+        // If it doesn't overlap the existing span at all, use the original span.
+        // FIXME: It would possibly be better to do this more continuously, at each level...
+        error.obligation.cause.span = expr
+            .span
+            .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+            .unwrap_or(error.obligation.cause.span);
+    }
+
+    fn blame_specific_expr_if_possible_for_obligation_cause_code(
+        &self,
+        obligation_cause_code: &traits::ObligationCauseCode<'tcx>,
+        expr: &'tcx hir::Expr<'tcx>,
+    ) -> Result<&'tcx hir::Expr<'tcx>, &'tcx hir::Expr<'tcx>> {
+        match obligation_cause_code {
+            traits::ObligationCauseCode::ExprBindingObligation(_, _, _, _) => {
+                // This is the "root"; we assume that the `expr` is already pointing here.
+                // Therefore, we return `Ok` so that this `expr` can be refined further.
+                Ok(expr)
+            }
+            traits::ObligationCauseCode::ImplDerivedObligation(impl_derived) => self
+                .blame_specific_expr_if_possible_for_derived_predicate_obligation(
+                    impl_derived,
+                    expr,
+                ),
+            _ => {
+                // We don't recognize this kind of constraint, so we cannot refine the expression
+                // any further.
+                Err(expr)
+            }
+        }
+    }
+
+    /// We want to achieve the error span in the following example:
+    ///
+    /// ```ignore (just for demonstration)
+    /// struct Burrito<Filling> {
+    ///   filling: Filling,
+    /// }
+    /// impl <Filling: Delicious> Delicious for Burrito<Filling> {}
+    /// fn eat_delicious_food<Food: Delicious>(_food: Food) {}
+    ///
+    /// fn will_type_error() {
+    ///   eat_delicious_food(Burrito { filling: Kale });
+    /// } //                                    ^--- The trait bound `Kale: Delicious`
+    ///   //                                         is not satisfied
+    /// ```
+    ///
+    /// Without calling this function, the error span will cover the entire argument expression.
+    ///
+    /// Before we do any of this logic, we recursively call `point_at_specific_expr_if_possible` on the parent
+    /// obligation. Hence we refine the `expr` "outwards-in" and bail at the first kind of expression/impl we don't recognize.
+    ///
+    /// This function returns a `Result<&Expr, &Expr>` - either way, it returns the `Expr` whose span should be
+    /// reported as an error. If it is `Ok`, it means the expression was refined successfully. If it is `Err`, it may be
+    /// only a partial success - but it cannot be refined even further.
+    fn blame_specific_expr_if_possible_for_derived_predicate_obligation(
+        &self,
+        obligation: &traits::ImplDerivedObligationCause<'tcx>,
+        expr: &'tcx hir::Expr<'tcx>,
+    ) -> Result<&'tcx hir::Expr<'tcx>, &'tcx hir::Expr<'tcx>> {
+        // First, we attempt to refine the `expr` for our span using the parent obligation.
+        // If this cannot be done, then we are already stuck, so we stop early (hence the use
+        // of the `?` try operator here).
+        let expr = self.blame_specific_expr_if_possible_for_obligation_cause_code(
+            &*obligation.derived.parent_code,
+            expr,
+        )?;
+
+        // This is the "trait" (meaning, the predicate "proved" by this `impl`) which provides the `Self` type we care about.
+        // For the purposes of this function, we hope that it is a `struct` type, and that our current `expr` is a literal of
+        // that struct type.
+        let impl_trait_self_ref: Option<ty::TraitRef<'tcx>> =
+            self.tcx.impl_trait_ref(obligation.impl_def_id).map(|impl_def| impl_def.skip_binder());
+
+        let Some(impl_trait_self_ref) = impl_trait_self_ref else {
+            // It is possible that this is absent. In this case, we make no progress.
+            return Err(expr);
+        };
+
+        // We only really care about the `Self` type itself, which we extract from the ref.
+        let impl_self_ty: Ty<'tcx> = impl_trait_self_ref.self_ty();
+
+        let impl_predicates: ty::GenericPredicates<'tcx> =
+            self.tcx.predicates_of(obligation.impl_def_id);
+        let Some(impl_predicate_index) = obligation.impl_def_predicate_index else {
+            // We don't have the index, so we can only guess.
+            return Err(expr);
+        };
+
+        if impl_predicate_index >= impl_predicates.predicates.len() {
+            // This shouldn't happen, but since this is only a diagnostic improvement, avoid breaking things.
+            return Err(expr);
+        }
+        let relevant_broken_predicate: ty::PredicateKind<'tcx> =
+            impl_predicates.predicates[impl_predicate_index].0.kind().skip_binder();
+
+        match relevant_broken_predicate {
+            ty::PredicateKind::Clause(ty::Clause::Trait(broken_trait)) => {
+                // ...
+                self.blame_specific_part_of_expr_corresponding_to_generic_param(
+                    broken_trait.trait_ref.self_ty().into(),
+                    expr,
+                    impl_self_ty.into(),
+                )
+            }
+            _ => Err(expr),
+        }
+    }
+
+    /// Drills into `expr` to arrive at the equivalent location of `find_generic_param` in `in_ty`.
+    /// For example, given
+    /// - expr: `(Some(vec![1, 2, 3]), false)`
+    /// - param: `T`
+    /// - in_ty: `(Option<Vec<T>, bool)`
+    /// we would drill until we arrive at `vec![1, 2, 3]`.
+    ///
+    /// If successful, we return `Ok(refined_expr)`. If unsuccessful, we return `Err(partially_refined_expr)`,
+    /// which will go as far as possible. For example, given `(foo(), false)` instead, we would drill to
+    /// `foo()` and then return `Err("foo()")`.
+    ///
+    /// This means that you can (and should) use the `?` try operator to chain multiple calls to this
+    /// function with different types, since you can only continue drilling the second time if you
+    /// succeeded the first time.
+    fn blame_specific_part_of_expr_corresponding_to_generic_param(
+        &self,
+        param: ty::GenericArg<'tcx>,
+        expr: &'tcx hir::Expr<'tcx>,
+        in_ty: ty::GenericArg<'tcx>,
+    ) -> Result<&'tcx hir::Expr<'tcx>, &'tcx hir::Expr<'tcx>> {
+        if param == in_ty {
+            // The types match exactly, so we have drilled as far as we can.
+            return Ok(expr);
+        }
+
+        let ty::GenericArgKind::Type(in_ty) = in_ty.unpack() else {
+            return Err(expr);
+        };
+
+        if let (hir::ExprKind::Tup(expr_elements), ty::Tuple(in_ty_elements)) =
+            (&expr.kind, in_ty.kind())
+        {
+            if in_ty_elements.len() != expr_elements.len() {
+                return Err(expr);
+            }
+            // Find out which of `in_ty_elements` refer to `param`.
+            // FIXME: It may be better to take the first if there are multiple,
+            // just so that the error points to a smaller expression.
+            let Some((drill_expr, drill_ty)) = Self::is_iterator_singleton(expr_elements.iter().zip( in_ty_elements.iter()).filter(|(_expr_elem, in_ty_elem)| {
+                Self::find_param_in_ty((*in_ty_elem).into(), param)
+            })) else {
+                // The param is not mentioned, or it is mentioned in multiple indexes.
+                return Err(expr);
+            };
+
+            return self.blame_specific_part_of_expr_corresponding_to_generic_param(
+                param,
+                drill_expr,
+                drill_ty.into(),
+            );
+        }
+
+        if let (
+            hir::ExprKind::Struct(expr_struct_path, expr_struct_fields, _expr_struct_rest),
+            ty::Adt(in_ty_adt, in_ty_adt_generic_args),
+        ) = (&expr.kind, in_ty.kind())
+        {
+            // First, confirm that this struct is the same one as in the types, and if so,
+            // find the right variant.
+            let Res::Def(expr_struct_def_kind, expr_struct_def_id) = self.typeck_results.borrow().qpath_res(expr_struct_path, expr.hir_id) else {
+                return Err(expr);
+            };
+
+            let variant_def_id = match expr_struct_def_kind {
+                hir::def::DefKind::Struct => {
+                    if in_ty_adt.did() != expr_struct_def_id {
+                        // FIXME: Deal with type aliases?
+                        return Err(expr);
+                    }
+                    expr_struct_def_id
+                }
+                hir::def::DefKind::Variant => {
+                    // If this is a variant, its parent is the type definition.
+                    if in_ty_adt.did() != self.tcx.parent(expr_struct_def_id) {
+                        // FIXME: Deal with type aliases?
+                        return Err(expr);
+                    }
+                    expr_struct_def_id
+                }
+                _ => {
+                    return Err(expr);
+                }
+            };
+
+            // We need to know which of the generic parameters mentions our target param.
+            // We expect that at least one of them does, since it is expected to be mentioned.
+            let Some((drill_generic_index, generic_argument_type)) =
+                Self::is_iterator_singleton(
+                    in_ty_adt_generic_args.iter().enumerate().filter(
+                        |(_index, in_ty_generic)| {
+                            Self::find_param_in_ty(*in_ty_generic, param)
+                        },
+                    ),
+                ) else {
+                    return Err(expr);
+                };
+
+            let struct_generic_parameters: &ty::Generics = self.tcx.generics_of(in_ty_adt.did());
+            if drill_generic_index >= struct_generic_parameters.params.len() {
+                return Err(expr);
+            }
+
+            let param_to_point_at_in_struct = self.tcx.mk_param_from_def(
+                struct_generic_parameters.param_at(drill_generic_index, self.tcx),
+            );
+
+            // We make 3 steps:
+            // Suppose we have a type like
+            // ```ignore (just for demonstration)
+            // struct ExampleStruct<T> {
+            //   enabled: bool,
+            //   item: Option<(usize, T, bool)>,
+            // }
+            //
+            // f(ExampleStruct {
+            //   enabled: false,
+            //   item: Some((0, Box::new(String::new()), true)),
+            // });
+            // ```
+            // Here, `f` is passed an `ExampleStruct<Box<String>>`, but it requires
+            // `String: Copy`, which does not hold here.
+            //
+            // (1) First, we drill into `.item` and highlight that expression
+            // (2) Then we use the template type `Option<(usize, T, bool)>` to
+            //     drill into the `T`, arriving at a `Box<String>` expression.
+            // (3) Then we keep going, drilling into this expression using our
+            //     outer contextual information.
+
+            // (1) Find the (unique) field which mentions the type in our constraint:
+            let (field_expr, field_type) = self
+                .point_at_field_if_possible(
+                    in_ty_adt.did(),
+                    param_to_point_at_in_struct,
+                    variant_def_id,
+                    expr_struct_fields,
+                )
+                .ok_or(expr)?;
+
+            // (2) Continue drilling into the struct, ignoring the struct's
+            // generic argument types.
+            let expr = self.blame_specific_part_of_expr_corresponding_to_generic_param(
+                param_to_point_at_in_struct,
+                field_expr,
+                field_type.into(),
+            )?;
+
+            // (3) Continue drilling into the expression, having "passed
+            // through" the struct entirely.
+            return self.blame_specific_part_of_expr_corresponding_to_generic_param(
+                param,
+                expr,
+                generic_argument_type,
+            );
+        }
+
+        if let (
+            hir::ExprKind::Call(expr_callee, expr_args),
+            ty::Adt(in_ty_adt, in_ty_adt_generic_args),
+        ) = (&expr.kind, in_ty.kind())
+        {
+            let hir::ExprKind::Path(expr_callee_path) = &expr_callee.kind else {
+                // FIXME: This case overlaps with another one worth handling,
+                // which should happen above since it applies to non-ADTs:
+                // we can drill down into regular generic functions.
+                return Err(expr);
+            };
+            // This is (possibly) a constructor call, like `Some(...)` or `MyStruct(a, b, c)`.
+
+            let Res::Def(expr_struct_def_kind, expr_ctor_def_id) = self.typeck_results.borrow().qpath_res(expr_callee_path, expr_callee.hir_id) else {
+                return Err(expr);
+            };
+
+            let variant_def_id = match expr_struct_def_kind {
+                hir::def::DefKind::Ctor(hir::def::CtorOf::Struct, hir::def::CtorKind::Fn) => {
+                    if in_ty_adt.did() != self.tcx.parent(expr_ctor_def_id) {
+                        // FIXME: Deal with type aliases?
+                        return Err(expr);
+                    }
+                    self.tcx.parent(expr_ctor_def_id)
+                }
+                hir::def::DefKind::Ctor(hir::def::CtorOf::Variant, hir::def::CtorKind::Fn) => {
+                    // If this is a variant, its parent is the type definition.
+                    if in_ty_adt.did() != self.tcx.parent(expr_ctor_def_id) {
+                        // FIXME: Deal with type aliases?
+                        return Err(expr);
+                    }
+                    expr_ctor_def_id
+                }
+                _ => {
+                    return Err(expr);
+                }
+            };
+
+            // We need to know which of the generic parameters mentions our target param.
+            // We expect that at least one of them does, since it is expected to be mentioned.
+            let Some((drill_generic_index, generic_argument_type)) =
+                Self::is_iterator_singleton(
+                    in_ty_adt_generic_args.iter().enumerate().filter(
+                        |(_index, in_ty_generic)| {
+                            Self::find_param_in_ty(*in_ty_generic, param)
+                        },
+                    ),
+                ) else {
+                    return Err(expr);
+                };
+
+            let struct_generic_parameters: &ty::Generics = self.tcx.generics_of(in_ty_adt.did());
+            if drill_generic_index >= struct_generic_parameters.params.len() {
+                return Err(expr);
+            }
+
+            let param_to_point_at_in_struct = self.tcx.mk_param_from_def(
+                struct_generic_parameters.param_at(drill_generic_index, self.tcx),
+            );
+
+            // We make 3 steps:
+            // Suppose we have a type like
+            // ```ignore (just for demonstration)
+            // struct ExampleStruct<T> {
+            //   enabled: bool,
+            //   item: Option<(usize, T, bool)>,
+            // }
+            //
+            // f(ExampleStruct {
+            //   enabled: false,
+            //   item: Some((0, Box::new(String::new()), true)),
+            // });
+            // ```
+            // Here, `f` is passed an `ExampleStruct<Box<String>>`, but it requires
+            // `String: Copy`, which does not hold here.
+            //
+            // (1) First, we drill into `.item` and highlight that expression
+            // (2) Then we use the template type `Option<(usize, T, bool)>` to
+            //     drill into the `T`, arriving at a `Box<String>` expression.
+            // (3) Then we keep going, drilling into this expression using our
+            //     outer contextual information.
+
+            // (1) Find the (unique) field index which mentions the type in our constraint:
+            let Some((field_index, field_type)) = Self::is_iterator_singleton(
+                in_ty_adt
+                    .variant_with_id(variant_def_id)
+                    .fields
+                    .iter()
+                    .map(|field| field.ty(self.tcx, *in_ty_adt_generic_args))
+                    .enumerate()
+                    .filter(|(_index, field_type)| Self::find_param_in_ty((*field_type).into(), param))
+            ) else {
+                return Err(expr);
+            };
+
+            if field_index >= expr_args.len() {
+                return Err(expr);
+            }
+
+            // (2) Continue drilling into the struct, ignoring the struct's
+            // generic argument types.
+            let expr = self.blame_specific_part_of_expr_corresponding_to_generic_param(
+                param_to_point_at_in_struct,
+                &expr_args[field_index],
+                field_type.into(),
+            )?;
+
+            // (3) Continue drilling into the expression, having "passed
+            // through" the struct entirely.
+            return self.blame_specific_part_of_expr_corresponding_to_generic_param(
+                param,
+                expr,
+                generic_argument_type,
+            );
+        }
+
+        // At this point, none of the basic patterns matched.
+        // One major possibility which remains is that we have a function call.
+        // In this case, it's often possible to dive deeper into the call to find something to blame,
+        // but this is not always possible.
+
+        Err(expr)
+    }
+
+    // FIXME: This can be made into a private, non-impl function later.
+    /// Traverses the given ty (either a `ty::Ty` or a `ty::GenericArg`) and searches for references
+    /// to the given `param_to_point_at`. Returns `true` if it finds any use of the param.
+    pub fn find_param_in_ty(
+        ty: ty::GenericArg<'tcx>,
+        param_to_point_at: ty::GenericArg<'tcx>,
+    ) -> bool {
+        let mut walk = ty.walk();
+        while let Some(arg) = walk.next() {
+            if arg == param_to_point_at {
+                return true;
+            } else if let ty::GenericArgKind::Type(ty) = arg.unpack()
+                && let ty::Alias(ty::Projection, ..) = ty.kind()
+            {
+                // This logic may seem a bit strange, but typically when
+                // we have a projection type in a function signature, the
+                // argument that's being passed into that signature is
+                // not actually constraining that projection's substs in
+                // a meaningful way. So we skip it, and see improvements
+                // in some UI tests.
+                walk.skip_current_subtree();
+            }
+        }
+        false
+    }
+
+    // FIXME: This can be made into a private, non-impl function later.
+    /// Returns `Some(iterator.next())` if it has exactly one item, and `None` otherwise.
+    pub fn is_iterator_singleton<T>(mut iterator: impl Iterator<Item = T>) -> Option<T> {
+        match (iterator.next(), iterator.next()) {
+            (_, Some(_)) => None,
+            (first, _) => first,
+        }
+    }
+}
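The `is_iterator_singleton` helper above is what enforces the "exactly one field mentions the parameter" rule in step (1). Below is a minimal, self-contained sketch of that pattern using plain field names instead of rustc's `ty::GenericArg` machinery; the names and the filter predicate are purely illustrative.

// Illustrative sketch, not rustc code: the same "exactly one match" filter.
fn is_iterator_singleton<T>(mut iterator: impl Iterator<Item = T>) -> Option<T> {
    match (iterator.next(), iterator.next()) {
        (_, Some(_)) => None, // two or more items: ambiguous, give up
        (first, _) => first,  // zero or one item
    }
}

fn main() {
    let fields = ["len", "items", "capacity"];

    // Exactly one field mentions "item", so we get its index back.
    let unique = is_iterator_singleton(
        fields.iter().enumerate().filter(|(_, name)| name.contains("item")),
    );
    assert_eq!(unique.map(|(index, _)| index), Some(1));

    // More than one field matches, so no single field can be blamed.
    let ambiguous = is_iterator_singleton(fields.iter().filter(|name| name.len() > 3));
    assert_eq!(ambiguous, None);
}

Requiring a unique match keeps the blame precise: if several fields could explain the error, the code falls back to the less specific span instead of guessing.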
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
index 47ef106e750..1055ee953ea 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs
@@ -34,9 +34,10 @@ use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext}
 
 use std::iter;
 use std::mem;
-use std::ops::ControlFlow;
 use std::slice;
 
+use std::ops::ControlFlow;
+
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     pub(in super::super) fn check_casts(&mut self) {
         // don't hold the borrow to deferred_cast_checks while checking to avoid borrow checker errors
@@ -1843,7 +1844,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                         .into_iter()
                         .flatten()
                     {
-                        if self.point_at_arg_if_possible(
+                        if self.blame_specific_arg_if_possible(
                                 error,
                                 def_id,
                                 param,
@@ -1873,7 +1874,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     .into_iter()
                     .flatten()
                 {
-                    if self.point_at_arg_if_possible(
+                    if self.blame_specific_arg_if_possible(
                         error,
                         def_id,
                         param,
@@ -1898,16 +1899,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     for param in
                         [param_to_point_at, fallback_param_to_point_at, self_param_to_point_at]
                     {
-                        if let Some(param) = param
-                            && self.point_at_field_if_possible(
-                                error,
+                        if let Some(param) = param {
+                            let refined_expr = self.point_at_field_if_possible(
                                 def_id,
                                 param,
                                 variant_def_id,
                                 fields,
-                            )
-                        {
-                            return true;
+                            );
+
+                            match refined_expr {
+                                None => {}
+                                Some((refined_expr, _)) => {
+                                    error.obligation.cause.span = refined_expr
+                                        .span
+                                        .find_ancestor_in_same_ctxt(error.obligation.cause.span)
+                                        .unwrap_or(refined_expr.span);
+                                    return true;
+                                }
+                            }
                         }
                     }
                 }
@@ -1940,7 +1949,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 
-    fn point_at_arg_if_possible(
+    /// - `blame_specific_*` means that the function will recursively traverse the expression,
+    /// looking for the most-specific-possible span to blame.
+    ///
+    /// - `point_at_*` means that the function will only go "one level", pointing at the specific
+    /// expression mentioned.
+    ///
+    /// `blame_specific_arg_if_possible` will find the most-specific expression anywhere inside
+    /// the provided function call expression, and mark it as responsible for the fulfillment
+    /// error.
+    fn blame_specific_arg_if_possible(
         &self,
         error: &mut traits::FulfillmentError<'tcx>,
         def_id: DefId,
@@ -1959,13 +1977,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             .inputs()
             .iter()
             .enumerate()
-            .filter(|(_, ty)| find_param_in_ty(**ty, param_to_point_at))
+            .filter(|(_, ty)| Self::find_param_in_ty((**ty).into(), param_to_point_at))
             .collect();
         // If there's one field that references the given generic, great!
         if let [(idx, _)] = args_referencing_param.as_slice()
             && let Some(arg) = receiver
                 .map_or(args.get(*idx), |rcvr| if *idx == 0 { Some(rcvr) } else { args.get(*idx - 1) }) {
+
             error.obligation.cause.span = arg.span.find_ancestor_in_same_ctxt(error.obligation.cause.span).unwrap_or(arg.span);
+
+            if let hir::Node::Expr(arg_expr) = self.tcx.hir().get(arg.hir_id) {
+                // This is more specific than pointing at the entire argument.
+                self.blame_specific_expr_if_possible(error, arg_expr)
+            }
+
             error.obligation.cause.map_code(|parent_code| {
                 ObligationCauseCode::FunctionArgumentObligation {
                     arg_hir_id: arg.hir_id,
@@ -1983,14 +2008,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         false
     }
 
-    fn point_at_field_if_possible(
+    // FIXME: Make this private and move to mod adjust_fulfillment_errors
+    pub fn point_at_field_if_possible(
         &self,
-        error: &mut traits::FulfillmentError<'tcx>,
         def_id: DefId,
         param_to_point_at: ty::GenericArg<'tcx>,
         variant_def_id: DefId,
         expr_fields: &[hir::ExprField<'tcx>],
-    ) -> bool {
+    ) -> Option<(&'tcx hir::Expr<'tcx>, Ty<'tcx>)> {
         let def = self.tcx.adt_def(def_id);
 
         let identity_substs = ty::InternalSubsts::identity_for_item(self.tcx, def_id);
@@ -2000,7 +2025,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             .iter()
             .filter(|field| {
                 let field_ty = field.ty(self.tcx, identity_substs);
-                find_param_in_ty(field_ty, param_to_point_at)
+                Self::find_param_in_ty(field_ty.into(), param_to_point_at)
             })
             .collect();
 
@@ -2010,17 +2035,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 // same rules that check_expr_struct uses for macro hygiene.
                 if self.tcx.adjust_ident(expr_field.ident, variant_def_id) == field.ident(self.tcx)
                 {
-                    error.obligation.cause.span = expr_field
-                        .expr
-                        .span
-                        .find_ancestor_in_same_ctxt(error.obligation.cause.span)
-                        .unwrap_or(expr_field.span);
-                    return true;
+                    return Some((expr_field.expr, self.tcx.type_of(field.did)));
                 }
             }
         }
 
-        false
+        None
     }
 
     fn point_at_path_if_possible(
@@ -2240,23 +2260,3 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 }
-
-fn find_param_in_ty<'tcx>(ty: Ty<'tcx>, param_to_point_at: ty::GenericArg<'tcx>) -> bool {
-    let mut walk = ty.walk();
-    while let Some(arg) = walk.next() {
-        if arg == param_to_point_at {
-            return true;
-        } else if let ty::GenericArgKind::Type(ty) = arg.unpack()
-            && let ty::Alias(ty::Projection, ..) = ty.kind()
-        {
-            // This logic may seem a bit strange, but typically when
-            // we have a projection type in a function signature, the
-            // argument that's being passed into that signature is
-            // not actually constraining that projection's substs in
-            // a meaningful way. So we skip it, and see improvements
-            // in some UI tests.
-            walk.skip_current_subtree();
-        }
-    }
-    false
-}
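To make the `blame_specific_*` versus `point_at_*` distinction concrete, here is a toy sketch of the recursive idea: keep descending into sub-expressions while a blame predicate keeps matching, and settle on the last node that matched. The `Expr` enum and the predicate are invented stand-ins, not rustc's HIR types or the actual helper.

// Toy sketch of "blame the most specific sub-expression".
#[derive(Debug)]
enum Expr {
    Lit(i64),
    Field(&'static str, Box<Expr>), // e.g. `container.item`
    Call(&'static str, Vec<Expr>),  // e.g. `f(a, b)`
}

fn blame_specific_expr<'a>(expr: &'a Expr, blames: &dyn Fn(&Expr) -> bool) -> &'a Expr {
    let child = match expr {
        Expr::Field(_, inner) => {
            let inner: &Expr = inner;
            if blames(inner) { Some(inner) } else { None }
        }
        Expr::Call(_, args) => {
            // Recurse only when exactly one argument is blameworthy; otherwise
            // stay at the call itself (the same "singleton" rule as above).
            let mut matching = args.iter().filter(|arg| blames(*arg));
            match (matching.next(), matching.next()) {
                (Some(arg), None) => Some(arg),
                _ => None,
            }
        }
        Expr::Lit(_) => None,
    };
    match child {
        Some(inner) => blame_specific_expr(inner, blames),
        None => expr, // nothing more specific: blame the whole expression
    }
}

fn main() {
    // `f(x.item)`: only the field access matches, so blame `x.item`, not the call.
    let call = Expr::Call("f", vec![Expr::Field("item", Box::new(Expr::Lit(3)))]);
    let blamed = blame_specific_expr(&call, &|e| matches!(e, Expr::Field(..)));
    println!("{blamed:?}");
}

The same "recurse only on a unique match" rule applies to call arguments, which is why `blame_specific_arg_if_possible` above only refines the span when exactly one argument references the generic parameter.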
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
index 4940015ddd5..1e14eddd4c8 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs
@@ -1,4 +1,5 @@
 mod _impl;
+mod adjust_fulfillment_errors;
 mod arg_matrix;
 mod checks;
 mod suggestions;
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index 0b30bf957a3..9ab29a6778f 100644
--- a/compiler/rustc_hir_typeck/src/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -1563,6 +1563,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
                                     traits::ImplDerivedObligationCause {
                                         derived,
                                         impl_def_id,
+                                        impl_def_predicate_index: None,
                                         span,
                                     },
                                 ))
diff --git a/compiler/rustc_infer/Cargo.toml b/compiler/rustc_infer/Cargo.toml
index aced787d671..02ac83a5e8b 100644
--- a/compiler/rustc_infer/Cargo.toml
+++ b/compiler/rustc_infer/Cargo.toml
@@ -15,7 +15,6 @@ rustc_hir = { path = "../rustc_hir" }
 rustc_index = { path = "../rustc_index" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_serialize = { path = "../rustc_serialize" }
-rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 rustc_target = { path = "../rustc_target" }
 smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs
index cd5bde2a791..18a966449aa 100644
--- a/compiler/rustc_infer/src/traits/util.rs
+++ b/compiler/rustc_infer/src/traits/util.rs
@@ -145,30 +145,32 @@ impl<'tcx> Elaborator<'tcx> {
                 // Get predicates declared on the trait.
                 let predicates = tcx.super_predicates_of(data.def_id());
 
-                let obligations = predicates.predicates.iter().map(|&(mut pred, span)| {
-                    // when parent predicate is non-const, elaborate it to non-const predicates.
-                    if data.constness == ty::BoundConstness::NotConst {
-                        pred = pred.without_const(tcx);
-                    }
-
-                    let cause = obligation.cause.clone().derived_cause(
-                        bound_predicate.rebind(data),
-                        |derived| {
-                            traits::ImplDerivedObligation(Box::new(
-                                traits::ImplDerivedObligationCause {
-                                    derived,
-                                    impl_def_id: data.def_id(),
-                                    span,
-                                },
-                            ))
-                        },
-                    );
-                    predicate_obligation(
-                        pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
-                        obligation.param_env,
-                        cause,
-                    )
-                });
+                let obligations =
+                    predicates.predicates.iter().enumerate().map(|(index, &(mut pred, span))| {
+                        // when parent predicate is non-const, elaborate it to non-const predicates.
+                        if data.constness == ty::BoundConstness::NotConst {
+                            pred = pred.without_const(tcx);
+                        }
+
+                        let cause = obligation.cause.clone().derived_cause(
+                            bound_predicate.rebind(data),
+                            |derived| {
+                                traits::ImplDerivedObligation(Box::new(
+                                    traits::ImplDerivedObligationCause {
+                                        derived,
+                                        impl_def_id: data.def_id(),
+                                        impl_def_predicate_index: Some(index),
+                                        span,
+                                    },
+                                ))
+                            },
+                        );
+                        predicate_obligation(
+                            pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
+                            obligation.param_env,
+                            cause,
+                        )
+                    });
                 debug!(?data, ?obligations, "super_predicates");
 
                 // Only keep those bounds that we haven't already seen.
diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml
index 1199ff287c4..955ab3c4680 100644
--- a/compiler/rustc_interface/Cargo.toml
+++ b/compiler/rustc_interface/Cargo.toml
@@ -20,7 +20,6 @@ rustc_macros = { path = "../rustc_macros" }
 rustc_parse = { path = "../rustc_parse" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
-rustc_serialize = { path = "../rustc_serialize" }
 rustc_middle = { path = "../rustc_middle" }
 rustc_ast_lowering = { path = "../rustc_ast_lowering" }
 rustc_ast_passes = { path = "../rustc_ast_passes" }
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 75525059e90..c528929e756 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -475,6 +475,8 @@ pub enum WellFormedLoc {
 pub struct ImplDerivedObligationCause<'tcx> {
     pub derived: DerivedObligationCause<'tcx>,
     pub impl_def_id: DefId,
+    /// The index of the derived predicate in the parent impl's predicates.
+    pub impl_def_predicate_index: Option<usize>,
     pub span: Span,
 }
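A hedged sketch of how the new field is meant to flow, with simplified stand-in types rather than rustc's: code that elaborates an impl's predicates records each predicate's position via `enumerate`, while call sites with no meaningful position pass `None`.

struct Cause {
    impl_def_predicate_index: Option<usize>,
}

fn derive_causes(predicates: &[&str]) -> Vec<Cause> {
    predicates
        .iter()
        .enumerate()
        .map(|(index, _pred)| Cause { impl_def_predicate_index: Some(index) })
        .collect()
}

fn main() {
    // Two made-up predicates standing in for an impl's `where` clauses.
    let causes = derive_causes(&["T: Clone", "T: Send"]);
    for cause in &causes {
        match cause.impl_def_predicate_index {
            Some(i) => println!("derived from predicate #{i}"),
            None => println!("no specific predicate recorded"),
        }
    }
}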
 
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
index 4ad3343d303..f24b165d7c2 100644
--- a/compiler/rustc_mir_build/Cargo.toml
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -11,7 +11,6 @@ tracing = "0.1"
 either = "1"
 rustc_middle = { path = "../rustc_middle" }
 rustc_apfloat = { path = "../rustc_apfloat" }
-rustc_attr = { path = "../rustc_attr" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_index = { path = "../rustc_index" }
 rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_mir_dataflow/Cargo.toml b/compiler/rustc_mir_dataflow/Cargo.toml
index 324644b6792..68c61a18d72 100644
--- a/compiler/rustc_mir_dataflow/Cargo.toml
+++ b/compiler/rustc_mir_dataflow/Cargo.toml
@@ -19,6 +19,5 @@ rustc_index = { path = "../rustc_index" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_middle = { path = "../rustc_middle" }
 rustc_serialize = { path = "../rustc_serialize" }
-rustc_session = { path = "../rustc_session" }
 rustc_target = { path = "../rustc_target" }
 rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_privacy/Cargo.toml b/compiler/rustc_privacy/Cargo.toml
index 832fdc9f016..744cb77dd00 100644
--- a/compiler/rustc_privacy/Cargo.toml
+++ b/compiler/rustc_privacy/Cargo.toml
@@ -13,6 +13,5 @@ rustc_macros = { path = "../rustc_macros" }
 rustc_middle = { path = "../rustc_middle" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
-rustc_trait_selection = { path = "../rustc_trait_selection" }
 rustc_hir_analysis = { path = "../rustc_hir_analysis" }
 tracing = "0.1"
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
index 46e77626479..21732d26035 100644
--- a/compiler/rustc_query_impl/Cargo.toml
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -20,7 +20,6 @@ rustc-rayon-core = { version = "0.4.0", optional = true }
 rustc_serialize = { path = "../rustc_serialize" }
 rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
-rustc_target = { path = "../rustc_target" }
 thin-vec = "0.2.9"
 tracing = "0.1"
 
diff --git a/compiler/rustc_session/src/code_stats.rs b/compiler/rustc_session/src/code_stats.rs
index 87dfccdef2f..55178250472 100644
--- a/compiler/rustc_session/src/code_stats.rs
+++ b/compiler/rustc_session/src/code_stats.rs
@@ -84,7 +84,11 @@ impl CodeStats {
         // Sort variants so the largest ones are shown first. A stable sort is
         // used here so that source code order is preserved for all variants
         // that have the same size.
-        variants.sort_by(|info1, info2| info2.size.cmp(&info1.size));
+        // The exception is generators, whose variants are already ordered by
+        // their yield points in `variant_info_for_generator`.
+        if kind != DataTypeKind::Generator {
+            variants.sort_by(|info1, info2| info2.size.cmp(&info1.size));
+        }
         let info = TypeSizeInfo {
             kind,
             type_description: type_desc.to_string(),
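A minimal sketch (plain std types, not rustc's variant-info structures) of the ordering rule introduced here: a stable descending sort by size for ordinary types, and no re-sorting for generators, whose variants already arrive ordered by yield point.

#[derive(Debug)]
struct VariantInfo {
    name: &'static str,
    size: usize,
}

fn order_for_report(mut variants: Vec<VariantInfo>, is_generator: bool) -> Vec<VariantInfo> {
    if !is_generator {
        variants.sort_by(|a, b| b.size.cmp(&a.size)); // sort_by is a stable sort
    }
    variants
}

fn main() {
    let variants = vec![
        VariantInfo { name: "A", size: 8 },
        VariantInfo { name: "B", size: 16 },
        VariantInfo { name: "C", size: 8 },
    ];
    // Largest first; A still precedes C because the sort is stable.
    let ordered = order_for_report(variants, false);
    assert_eq!(ordered.iter().map(|v| v.name).collect::<Vec<_>>(), ["B", "A", "C"]);
}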
diff --git a/compiler/rustc_trait_selection/Cargo.toml b/compiler/rustc_trait_selection/Cargo.toml
index 90d879976c2..3f863038efb 100644
--- a/compiler/rustc_trait_selection/Cargo.toml
+++ b/compiler/rustc_trait_selection/Cargo.toml
@@ -16,7 +16,6 @@ rustc_errors = { path = "../rustc_errors" }
 rustc_hir = { path = "../rustc_hir" }
 rustc_index = { path = "../rustc_index" }
 rustc_infer = { path = "../rustc_infer" }
-rustc_lint_defs = { path = "../rustc_lint_defs" }
 rustc_macros = { path = "../rustc_macros" }
 rustc_query_system = { path = "../rustc_query_system" }
 rustc_serialize = { path = "../rustc_serialize" }
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
index 0a4136dc1cf..94d9eb8f587 100644
--- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -1190,6 +1190,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
                 ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
                     derived,
                     impl_def_id,
+                    impl_def_predicate_index: None,
                     span: obligation.cause.span,
                 }))
             });
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
index ad7d479896f..0c6b2406bbd 100644
--- a/compiler/rustc_trait_selection/src/traits/select/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -2608,11 +2608,12 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
         assert_eq!(predicates.parent, None);
         let predicates = predicates.instantiate_own(tcx, substs);
         let mut obligations = Vec::with_capacity(predicates.len());
-        for (predicate, span) in predicates {
+        for (index, (predicate, span)) in predicates.into_iter().enumerate() {
             let cause = cause.clone().derived_cause(parent_trait_pred, |derived| {
                 ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
                     derived,
                     impl_def_id: def_id,
+                    impl_def_predicate_index: Some(index),
                     span,
                 }))
             });
diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
index a432498abcc..eff6fb26dd4 100644
--- a/compiler/rustc_traits/Cargo.toml
+++ b/compiler/rustc_traits/Cargo.toml
@@ -5,7 +5,6 @@ edition = "2021"
 
 [dependencies]
 tracing = "0.1"
-rustc_attr = { path = "../rustc_attr" }
 rustc_middle = { path = "../rustc_middle" }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_hir = { path = "../rustc_hir" }
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 93c9c675c9a..2aeb255c164 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -970,7 +970,7 @@ fn variant_info_for_generator<'tcx>(
         })
         .collect();
 
-    let variant_infos: Vec<_> = generator
+    let mut variant_infos: Vec<_> = generator
         .variant_fields
         .iter_enumerated()
         .map(|(variant_idx, variant_def)| {
@@ -1033,6 +1033,15 @@ fn variant_info_for_generator<'tcx>(
             }
         })
         .collect();
+
+    // The first three variants are hardcoded to be `UNRESUMED`, `RETURNED` and `POISONED`.
+    // We move the `RETURNED` and `POISONED` elements to the end so that the
+    // variants are ordered according to the generator's yield points:
+    // first `Unresumed`, then the `SuspendN` variants, and finally `Returned` and `Panicked` (`POISONED`).
+    let end_states = variant_infos.drain(1..=2);
+    let end_states: Vec<_> = end_states.collect();
+    variant_infos.extend(end_states);
+
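As a standalone illustration of the `drain(1..=2)` + `extend` reordering used above (the variant names are only placeholders): `drain(1..=2)` removes the second and third elements while leaving the rest in place, and `extend` appends them at the end.

fn main() {
    let mut variants = vec!["Unresumed", "Returned", "Panicked", "Suspend0", "Suspend1"];
    // Pull out the end states (indices 1 and 2) and re-append them last.
    let end_states: Vec<_> = variants.drain(1..=2).collect();
    variants.extend(end_states);
    assert_eq!(variants, ["Unresumed", "Suspend0", "Suspend1", "Returned", "Panicked"]);
}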
     (
         variant_infos,
         match tag_encoding {