author    Ralf Jung <post@ralfj.de>  2024-06-11 07:51:38 +0200
committer Ralf Jung <post@ralfj.de>  2024-06-11 07:51:38 +0200
commit    6e06c49954df238c3b4fdcfba374a464019fc07f (patch)
tree      7cb7473849e0be0ae58f66b42a6d1505acb3bea7 /compiler
parent    9048a180cd941a25fd7ff0716e74c40701e4a486 (diff)
parent    fa1681c9f6a66f0240c46c98bfef6209c9d6df23 (diff)
Merge from rustc
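
Most of this merge is a rename of the integer accessors on `Scalar`/`ScalarInt`: the panicking `assert_bits`/`assert_uint`/`assert_int` become `to_bits`/`to_uint`/`to_int`, the fallible `try_to_u8` .. `try_to_u128` become panicking `to_u8` .. `to_u128`, and `Scalar::try_to_int` becomes `try_to_scalar_int`. A self-contained sketch of the resulting convention, using a hypothetical `MiniScalar` stand-in rather than rustc's actual types:

#[derive(Clone, Copy, Debug)]
struct MiniScalar {
    data: u128,
    size_bytes: u64,
}

impl MiniScalar {
    // Fallible form: reports the actual size on mismatch. This mirrors
    // `ScalarInt::try_to_bits`, which stays fallible so Miri can be
    // resilient to size mismatches instead of panicking.
    fn try_to_bits(self, target_size_bytes: u64) -> Result<u128, u64> {
        if self.size_bytes == target_size_bytes { Ok(self.data) } else { Err(self.size_bytes) }
    }

    // Infallible form: panics on size mismatch, mirroring the renamed
    // `ScalarInt::to_bits` (formerly `assert_bits`).
    fn to_bits(self, target_size_bytes: u64) -> u128 {
        self.try_to_bits(target_size_bytes).unwrap_or_else(|size| {
            panic!("expected int of size {target_size_bytes}, but got size {size}")
        })
    }

    // Fixed-width helper, mirroring the renamed `to_u32` (formerly `try_to_u32`).
    fn to_u32(self) -> u32 {
        self.to_bits(4).try_into().unwrap()
    }
}

fn main() {
    let x = MiniScalar { data: 42, size_bytes: 4 };
    assert_eq!(x.to_u32(), 42);
    assert!(x.try_to_bits(8).is_err()); // wrong size: an Err, not a panic
}

Only `try_to_bits` remains fallible; everything else now panics on a size mismatch, which the callers below rely on.
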
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_ast_ir/Cargo.toml | 1
-rw-r--r--  compiler/rustc_baked_icu_data/Cargo.toml | 1
-rw-r--r--  compiler/rustc_codegen_cranelift/src/base.rs | 7
-rw-r--r--  compiler/rustc_codegen_cranelift/src/constant.rs | 6
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs | 10
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs | 11
-rw-r--r--  compiler/rustc_codegen_cranelift/src/value_and_place.rs | 4
-rw-r--r--  compiler/rustc_codegen_gcc/src/common.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 12
-rw-r--r--  compiler/rustc_codegen_ssa/src/common.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 5
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs | 8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/discriminant.rs | 15
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 17
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs | 8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 13
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs | 2
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs | 5
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 4
-rw-r--r--  compiler/rustc_data_structures/Cargo.toml | 3
-rw-r--r--  compiler/rustc_driver_impl/Cargo.toml | 1
-rw-r--r--  compiler/rustc_expand/src/mbe/transcribe.rs | 10
-rw-r--r--  compiler/rustc_feature/src/unstable.rs | 2
-rw-r--r--  compiler/rustc_hir_typeck/src/demand.rs | 15
-rw-r--r--  compiler/rustc_hir_typeck/src/expr.rs | 35
-rw-r--r--  compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs | 20
-rw-r--r--  compiler/rustc_hir_typeck/src/method/probe.rs | 3
-rw-r--r--  compiler/rustc_hir_typeck/src/method/suggest.rs | 14
-rw-r--r--  compiler/rustc_hir_typeck/src/pat.rs | 5
-rw-r--r--  compiler/rustc_index_macros/Cargo.toml | 3
-rw-r--r--  compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs | 9
-rw-r--r--  compiler/rustc_middle/Cargo.toml | 3
-rw-r--r--  compiler/rustc_middle/src/mir/consts.rs | 14
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/value.rs | 33
-rw-r--r--  compiler/rustc_middle/src/thir.rs | 4
-rw-r--r--  compiler/rustc_middle/src/ty/consts.rs | 6
-rw-r--r--  compiler/rustc_middle/src/ty/consts/int.rs | 222
-rw-r--r--  compiler/rustc_middle/src/ty/consts/valtree.rs | 9
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs | 34
-rw-r--r--  compiler/rustc_middle/src/ty/print/pretty.rs | 4
-rw-r--r--  compiler/rustc_mir_build/Cargo.toml | 2
-rw-r--r--  compiler/rustc_mir_build/src/build/mod.rs | 15
-rw-r--r--  compiler/rustc_mir_build/src/thir/constant.rs | 8
-rw-r--r--  compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs | 7
-rw-r--r--  compiler/rustc_mir_transform/src/const_debuginfo.rs | 102
-rw-r--r--  compiler/rustc_mir_transform/src/dataflow_const_prop.rs | 16
-rw-r--r--  compiler/rustc_mir_transform/src/gvn.rs | 115
-rw-r--r--  compiler/rustc_mir_transform/src/known_panics_lint.rs | 25
-rw-r--r--  compiler/rustc_mir_transform/src/lib.rs | 4
-rw-r--r--  compiler/rustc_mir_transform/src/match_branches.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/promote_consts.rs | 6
-rw-r--r--  compiler/rustc_mir_transform/src/simplify_comparison_integral.rs | 2
-rw-r--r--  compiler/rustc_mir_transform/src/single_use_consts.rs | 199
-rw-r--r--  compiler/rustc_mir_transform/src/validate.rs | 6
-rw-r--r--  compiler/rustc_next_trait_solver/Cargo.toml | 15
-rw-r--r--  compiler/rustc_pattern_analysis/src/rustc.rs | 9
-rw-r--r--  compiler/rustc_query_impl/Cargo.toml | 3
-rw-r--r--  compiler/rustc_span/src/lib.rs | 25
-rw-r--r--  compiler/rustc_span/src/span_encoding.rs | 187
-rw-r--r--  compiler/rustc_span/src/symbol.rs | 1
-rw-r--r--  compiler/rustc_target/src/abi/mod.rs | 23
-rw-r--r--  compiler/rustc_transmute/src/layout/tree.rs | 2
-rw-r--r--  compiler/rustc_ty_utils/src/consts.rs | 2
-rw-r--r--  compiler/stable_mir/Cargo.toml | 1
67 files changed, 817 insertions, 548 deletions
diff --git a/compiler/rustc_ast_ir/Cargo.toml b/compiler/rustc_ast_ir/Cargo.toml
index e761b7adad3..a78c91e0615 100644
--- a/compiler/rustc_ast_ir/Cargo.toml
+++ b/compiler/rustc_ast_ir/Cargo.toml
@@ -9,7 +9,6 @@ rustc_data_structures = { path = "../rustc_data_structures", optional = true }
 rustc_macros = { path = "../rustc_macros", optional = true }
 rustc_serialize = { path = "../rustc_serialize", optional = true }
 rustc_span = { path = "../rustc_span", optional = true }
-smallvec = { version = "1.8.1" }
 # tidy-alphabetical-end
 
 [features]
diff --git a/compiler/rustc_baked_icu_data/Cargo.toml b/compiler/rustc_baked_icu_data/Cargo.toml
index 48af4e6f600..e6cfb4887c9 100644
--- a/compiler/rustc_baked_icu_data/Cargo.toml
+++ b/compiler/rustc_baked_icu_data/Cargo.toml
@@ -9,7 +9,6 @@ icu_list = "1.2"
 icu_locid = "1.2"
 icu_locid_transform = "1.3.2"
 icu_provider = "1.2"
-icu_provider_adapters = "1.2"
 zerovec = "0.10.0"
 # tidy-alphabetical-end
 
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 963e5de91ce..6d26ca0b899 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -832,9 +832,10 @@ fn codegen_stmt<'tcx>(
                     let val = match null_op {
                         NullOp::SizeOf => layout.size.bytes(),
                         NullOp::AlignOf => layout.align.abi.bytes(),
-                        NullOp::OffsetOf(fields) => {
-                            layout.offset_of_subfield(fx, fields.iter()).bytes()
-                        }
+                        NullOp::OffsetOf(fields) => fx
+                            .tcx
+                            .offset_of_subfield(ParamEnv::reveal_all(), layout, fields.iter())
+                            .bytes(),
                         NullOp::UbChecks => {
                             let val = fx.tcx.sess.ub_checks();
                             let val = CValue::by_val(
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
index ba98f2e772c..a53598018f4 100644
--- a/compiler/rustc_codegen_cranelift/src/constant.rs
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -110,7 +110,7 @@ pub(crate) fn codegen_const_value<'tcx>(
                 if fx.clif_type(layout.ty).is_some() {
                     return CValue::const_val(fx, layout, int);
                 } else {
-                    let raw_val = int.size().truncate(int.assert_bits(int.size()));
+                    let raw_val = int.size().truncate(int.to_bits(int.size()));
                     let val = match int.size().bytes() {
                         1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
                         2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
@@ -501,12 +501,12 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
                                             Ordering::Equal => scalar_int,
                                             Ordering::Less => match ty.kind() {
                                                 ty::Uint(_) => ScalarInt::try_from_uint(
-                                                    scalar_int.assert_uint(scalar_int.size()),
+                                                    scalar_int.to_uint(scalar_int.size()),
                                                     fx.layout_of(*ty).size,
                                                 )
                                                 .unwrap(),
                                                 ty::Int(_) => ScalarInt::try_from_int(
-                                                    scalar_int.assert_int(scalar_int.size()),
+                                                    scalar_int.to_int(scalar_int.size()),
                                                     fx.layout_of(*ty).size,
                                                 )
                                                 .unwrap(),
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 27b55ecc72e..d454f3c1de7 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -902,7 +902,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                         .span_fatal(span, "Index argument for `_mm_cmpestri` is not a constant");
                 };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -955,7 +955,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                         .span_fatal(span, "Index argument for `_mm_cmpestrm` is not a constant");
                 };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -1003,7 +1003,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                     );
                 };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -1040,7 +1040,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                     );
                 };
 
-            let imm8 = imm8.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", imm8));
+            let imm8 = imm8.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
@@ -1195,7 +1195,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                     .span_fatal(span, "Func argument for `_mm_sha1rnds4_epu32` is not a constant");
             };
 
-            let func = func.try_to_u8().unwrap_or_else(|_| panic!("kind not scalar: {:?}", func));
+            let func = func.to_u8();
 
             codegen_inline_asm_inner(
                 fx,
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
index 65eeaf156d8..ca910dccb0d 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -147,8 +147,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
 
             let total_len = lane_count * 2;
 
-            let indexes =
-                idx.iter().map(|idx| idx.unwrap_leaf().try_to_u32().unwrap()).collect::<Vec<u32>>();
+            let indexes = idx.iter().map(|idx| idx.unwrap_leaf().to_u32()).collect::<Vec<u32>>();
 
             for &idx in &indexes {
                 assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
@@ -282,9 +281,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 fx.tcx.dcx().span_fatal(span, "Index argument for `simd_insert` is not a constant");
             };
 
-            let idx: u32 = idx_const
-                .try_to_u32()
-                .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+            let idx: u32 = idx_const.to_u32();
             let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
             if u64::from(idx) >= lane_count {
                 fx.tcx.dcx().span_fatal(
@@ -330,9 +327,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
                 return;
             };
 
-            let idx = idx_const
-                .try_to_u32()
-                .unwrap_or_else(|_| panic!("kind not scalar: {:?}", idx_const));
+            let idx = idx_const.to_u32();
             let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
             if u64::from(idx) >= lane_count {
                 fx.tcx.dcx().span_fatal(
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index 512a96450a4..1aa28daeafc 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -327,7 +327,7 @@ impl<'tcx> CValue<'tcx> {
 
         let val = match layout.ty.kind() {
             ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
-                let const_val = const_val.assert_bits(layout.size);
+                let const_val = const_val.to_bits(layout.size);
                 let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                 let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                 fx.bcx.ins().iconcat(lsb, msb)
@@ -339,7 +339,7 @@ impl<'tcx> CValue<'tcx> {
             | ty::Ref(..)
             | ty::RawPtr(..)
             | ty::FnPtr(..) => {
-                let raw_val = const_val.size().truncate(const_val.assert_bits(layout.size));
+                let raw_val = const_val.size().truncate(const_val.to_bits(layout.size));
                 fx.bcx.ins().iconst(clif_ty, raw_val as i64)
             }
             ty::Float(FloatTy::F32) => {
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index 78d943192db..548c23cc794 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -166,7 +166,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
         match cv {
             Scalar::Int(int) => {
-                let data = int.assert_bits(layout.size(self));
+                let data = int.to_bits(layout.size(self));
 
                 // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
                 // the paths for floating-point values.
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index ab8036a1410..4ffc92eb633 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -244,7 +244,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
         match cv {
             Scalar::Int(int) => {
-                let data = int.assert_bits(layout.size(self));
+                let data = int.to_bits(layout.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
                 if matches!(layout.primitive(), Pointer(_)) {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 7b1038d5617..b5b0086f740 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -1109,10 +1109,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
     let arg_tys = sig.inputs();
 
-    // Vectors must be immediates (non-power-of-2 #[repr(packed)] are not)
-    for (ty, arg) in arg_tys.iter().zip(args) {
-        if ty.is_simd() && !matches!(arg.val, OperandValue::Immediate(_)) {
-            return_error!(InvalidMonomorphization::SimdArgument { span, name, ty: *ty });
+    // Sanity-check: all vector arguments must be immediates.
+    if cfg!(debug_assertions) {
+        for (ty, arg) in arg_tys.iter().zip(args) {
+            if ty.is_simd() {
+                assert!(matches!(arg.val, OperandValue::Immediate(_)));
+            }
         }
     }
 
@@ -1221,7 +1223,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             .iter()
             .enumerate()
             .map(|(arg_idx, val)| {
-                let idx = val.unwrap_leaf().try_to_i32().unwrap();
+                let idx = val.unwrap_leaf().to_i32();
                 if idx >= i32::try_from(total_len).unwrap() {
                     bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
                         span,
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
index e4a36b3f591..27b0f127e92 100644
--- a/compiler/rustc_codegen_ssa/src/common.rs
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -163,7 +163,7 @@ pub fn asm_const_to_str<'tcx>(
     let mir::ConstValue::Scalar(scalar) = const_value else {
         span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
     };
-    let value = scalar.assert_bits(ty_and_layout.size);
+    let value = scalar.assert_scalar_int().to_bits(ty_and_layout.size);
     match ty_and_layout.ty.kind() {
         ty::Uint(_) => value.to_string(),
         ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) {
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index c23867be3a1..ad6b3f1159d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -680,7 +680,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.cx().const_usize(val)
                     }
                     mir::NullOp::OffsetOf(fields) => {
-                        let val = layout.offset_of_subfield(bx.cx(), fields.iter()).bytes();
+                        let val = bx
+                            .tcx()
+                            .offset_of_subfield(bx.param_env(), layout, fields.iter())
+                            .bytes();
                         bx.cx().const_usize(val)
                     }
                     mir::NullOp::UbChecks => {
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 5312f1f946f..66993476bef 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -95,10 +95,10 @@ fn const_to_valtree_inner<'tcx>(
         }
         ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
             let val = ecx.read_immediate(place)?;
-            let val = val.to_scalar();
+            let val = val.to_scalar_int().unwrap();
             *num_nodes += 1;
 
-            Ok(ty::ValTree::Leaf(val.assert_int()))
+            Ok(ty::ValTree::Leaf(val))
         }
 
         ty::Pat(base, ..) => {
@@ -125,7 +125,7 @@ fn const_to_valtree_inner<'tcx>(
             let val = val.to_scalar();
             // We are in the CTFE machine, so ptr-to-int casts will fail.
             // This can only be `Ok` if `val` already is an integer.
-            let Ok(val) = val.try_to_int() else {
+            let Ok(val) = val.try_to_scalar_int() else {
                 return Err(ValTreeCreationError::NonSupportedType);
             };
             // It's just a ScalarInt!
@@ -411,7 +411,7 @@ fn valtree_into_mplace<'tcx>(
                 ty::Adt(def, _) if def.is_enum() => {
                     // First element of valtree corresponds to variant
                     let scalar_int = branches[0].unwrap_leaf();
-                    let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+                    let variant_idx = VariantIdx::from_u32(scalar_int.to_u32());
                     let variant = def.variant(variant_idx);
                     debug!(?variant);
 
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index 67fbf9642bf..0dbee8c1d94 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -123,14 +123,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 // (`tag_bits` itself is only used for error messages below.)
                 let tag_bits = tag_val
                     .to_scalar()
-                    .try_to_int()
+                    .try_to_scalar_int()
                     .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
-                    .assert_bits(tag_layout.size);
+                    .to_bits(tag_layout.size);
                 // Cast bits from tag layout to discriminant layout.
                 // After the checks we did above, this cannot fail, as
                 // discriminants are int-like.
                 let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
-                let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
+                let discr_bits = discr_val.to_scalar().to_bits(discr_layout.size)?;
                 // Convert discriminant to variant index, and catch invalid discriminants.
                 let index = match *ty.kind() {
                     ty::Adt(adt, _) => {
@@ -152,7 +152,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 // discriminant (encoded in niche/tag) and variant index are the same.
                 let variants_start = niche_variants.start().as_u32();
                 let variants_end = niche_variants.end().as_u32();
-                let variant = match tag_val.try_to_int() {
+                let variant = match tag_val.try_to_scalar_int() {
                     Err(dbg_val) => {
                         // So this is a pointer then, and casting to an int failed.
                         // Can only happen during CTFE.
@@ -167,7 +167,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                         untagged_variant
                     }
                     Ok(tag_bits) => {
-                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
+                        let tag_bits = tag_bits.to_bits(tag_layout.size);
                         // We need to use machine arithmetic to get the relative variant idx:
                         // variant_index_relative = tag_val - niche_start_val
                         let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
@@ -175,7 +175,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                         let variant_index_relative_val =
                             self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                         let variant_index_relative =
-                            variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
+                            variant_index_relative_val.to_scalar().to_bits(tag_val.layout.size)?;
                         // Check if this is in the range that indicates an actual discriminant.
                         if variant_index_relative <= u128::from(variants_end - variants_start) {
                             let variant_index_relative = u32::try_from(variant_index_relative)
@@ -294,8 +294,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     ImmTy::from_uint(variant_index_relative, tag_layout);
                 let tag = self
                     .binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
-                    .to_scalar()
-                    .assert_int();
+                    .to_scalar_int()?;
                 Ok(Some((tag, tag_field)))
             }
         }
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 18b76443cd9..dac5c10addc 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -519,7 +519,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
         // First, check x % y != 0 (or if that computation overflows).
         let rem = self.binary_op(BinOp::Rem, a, b)?;
-        if rem.to_scalar().assert_bits(a.layout.size) != 0 {
+        if rem.to_scalar().to_bits(a.layout.size)? != 0 {
             throw_ub_custom!(
                 fluent::const_eval_exact_div_has_remainder,
                 a = format!("{a}"),
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 521f28b7123..7eb73e9b52f 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -1344,7 +1344,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// Test if this value might be null.
     /// If the machine does not support ptr-to-int casts, this is conservative.
     pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
-        Ok(match scalar.try_to_int() {
+        Ok(match scalar.try_to_scalar_int() {
             Ok(int) => int.is_null(),
             Err(_) => {
                 // Can only happen during CTFE.
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index bbb2c2f3938..0a7e9853763 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -89,6 +89,12 @@ impl<Prov: Provenance> Immediate<Prov> {
 
     #[inline]
     #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+    pub fn to_scalar_int(self) -> ScalarInt {
+        self.to_scalar().try_to_scalar_int().unwrap()
+    }
+
+    #[inline]
+    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
     pub fn to_scalar_pair(self) -> (Scalar<Prov>, Scalar<Prov>) {
         match self {
             Immediate::ScalarPair(val1, val2) => (val1, val2),
@@ -220,19 +226,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     }
 
     #[inline]
-    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
-        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
-    }
-    #[inline]
     pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
         Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
     }
 
     #[inline]
-    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
-        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
-    }
-    #[inline]
     pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
         Self::from_scalar(Scalar::from_int(i, layout.size), layout)
     }
@@ -276,7 +274,8 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn to_const_int(self) -> ConstInt {
         assert!(self.layout.ty.is_integral());
-        let int = self.to_scalar().assert_int();
+        let int = self.imm.to_scalar_int();
+        assert_eq!(int.size(), self.layout.size);
         ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 6d005dfcd86..c821c98073d 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -95,10 +95,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let l = left.to_scalar_int()?;
         let r = right.to_scalar_int()?;
         // Prepare to convert the values to signed or unsigned form.
-        let l_signed = || l.assert_int(left.layout.size);
-        let l_unsigned = || l.assert_uint(left.layout.size);
-        let r_signed = || r.assert_int(right.layout.size);
-        let r_unsigned = || r.assert_uint(right.layout.size);
+        let l_signed = || l.to_int(left.layout.size);
+        let l_unsigned = || l.to_uint(left.layout.size);
+        let r_signed = || r.to_int(right.layout.size);
+        let r_unsigned = || r.to_uint(right.layout.size);
 
         let throw_ub_on_overflow = match bin_op {
             AddUnchecked => Some(sym::unchecked_add),
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 4a86ec3f57a..046ff34e3d0 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -499,13 +499,14 @@ where
         &self,
         mplace: &MPlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
-        // Basically we just transmute this place into an array following simd_size_and_type.
-        // (Transmuting is okay since this is an in-memory place. We also double-check the size
-        // stays the same.)
+        // Basically we want to transmute this place into an array following simd_size_and_type.
         let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
-        let array = Ty::new_array(self.tcx.tcx, e_ty, len);
-        let layout = self.layout_of(array)?;
-        let mplace = mplace.transmute(layout, self)?;
+        // Some SIMD types have padding, so `len` copies of `e_ty` do not cover the entire place.
+        // Therefore we cannot transmute, and instead we project at offset 0, which side-steps
+        // the size check.
+        let array_layout = self.layout_of(Ty::new_array(self.tcx.tcx, e_ty, len))?;
+        assert!(array_layout.size <= mplace.layout.size);
+        let mplace = mplace.offset(Size::ZERO, array_layout, self)?;
         Ok((mplace, len))
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 0e594914c3a..09e1a59dfa1 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -81,6 +81,8 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
         ecx: &InterpCx<'tcx, M>,
     ) -> InterpResult<'tcx, Self> {
         assert!(layout.is_sized());
+        // We sometimes do pointer arithmetic with this function, disregarding the source type.
+        // So we don't check the sizes here.
         self.offset_with_meta(offset, OffsetMode::Inbounds, MemPlaceMeta::None, layout, ecx)
     }
 
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index d0bb821862a..1baf62baa81 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -253,7 +253,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                         Scalar::from_target_usize(val, self)
                     }
                     mir::NullOp::OffsetOf(fields) => {
-                        let val = layout.offset_of_subfield(self, fields.iter()).bytes();
+                        let val = self
+                            .tcx
+                            .offset_of_subfield(self.param_env, layout, fields.iter())
+                            .bytes();
                         Scalar::from_target_usize(val, self)
                     }
                     mir::NullOp::UbChecks => Scalar::from_bool(self.tcx.sess.ub_checks()),
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 3407c7b8c79..f532f6bbe37 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -653,8 +653,8 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         let WrappingRange { start, end } = valid_range;
         let max_value = size.unsigned_int_max();
         assert!(end <= max_value);
-        let bits = match scalar.try_to_int() {
-            Ok(int) => int.assert_bits(size),
+        let bits = match scalar.try_to_scalar_int() {
+            Ok(int) => int.to_bits(size),
             Err(_) => {
                 // So this is a pointer then, and casting to an int failed.
                 // Can only happen during CTFE.
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 2b61e17efa2..6876046a583 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -16,7 +16,6 @@ libc = "0.2"
 measureme = "11"
 rustc-hash = "1.1.0"
 rustc-rayon = { version = "0.5.0", optional = true }
-rustc-rayon-core = { version = "0.5.0", optional = true }
 rustc_arena = { path = "../rustc_arena" }
 rustc_graphviz = { path = "../rustc_graphviz" }
 rustc_index = { path = "../rustc_index", package = "rustc_index" }
@@ -52,5 +51,5 @@ portable-atomic = "1.5.1"
 
 [features]
 # tidy-alphabetical-start
-rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "rustc-rayon", "rustc-rayon-core"]
+rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "rustc-rayon"]
 # tidy-alphabetical-end
diff --git a/compiler/rustc_driver_impl/Cargo.toml b/compiler/rustc_driver_impl/Cargo.toml
index 5f7504add8d..91cbffcd707 100644
--- a/compiler/rustc_driver_impl/Cargo.toml
+++ b/compiler/rustc_driver_impl/Cargo.toml
@@ -19,7 +19,6 @@ rustc_errors = { path = "../rustc_errors" }
 rustc_expand = { path = "../rustc_expand" }
 rustc_feature = { path = "../rustc_feature" }
 rustc_fluent_macro = { path = "../rustc_fluent_macro" }
-rustc_hir = { path = "../rustc_hir" }
 rustc_hir_analysis = { path = "../rustc_hir_analysis" }
 rustc_hir_pretty = { path = "../rustc_hir_pretty" }
 rustc_hir_typeck = { path = "../rustc_hir_typeck" }
diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs
index 3196b826085..25e961d6009 100644
--- a/compiler/rustc_expand/src/mbe/transcribe.rs
+++ b/compiler/rustc_expand/src/mbe/transcribe.rs
@@ -30,11 +30,11 @@ impl MutVisitor for Marker {
         // it's some advanced case with macro-generated macros. So if we cache the marked version
         // of that context once, we'll typically have a 100% cache hit rate after that.
         let Marker(expn_id, transparency, ref mut cache) = *self;
-        let data = span.data();
-        let marked_ctxt = *cache
-            .entry(data.ctxt)
-            .or_insert_with(|| data.ctxt.apply_mark(expn_id.to_expn_id(), transparency));
-        *span = data.with_ctxt(marked_ctxt);
+        span.update_ctxt(|ctxt| {
+            *cache
+                .entry(ctxt)
+                .or_insert_with(|| ctxt.apply_mark(expn_id.to_expn_id(), transparency))
+        });
     }
 }
 
diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs
index d67422849d8..2410019868a 100644
--- a/compiler/rustc_feature/src/unstable.rs
+++ b/compiler/rustc_feature/src/unstable.rs
@@ -559,6 +559,8 @@ declare_features! (
     (unstable, offset_of_enum, "1.75.0", Some(120141)),
     /// Allows using multiple nested field accesses in offset_of!
     (unstable, offset_of_nested, "1.77.0", Some(120140)),
+    /// Allows using fields with slice type in offset_of!
+    (unstable, offset_of_slice, "CURRENT_RUSTC_VERSION", Some(126151)),
     /// Allows using `#[optimize(X)]`.
     (unstable, optimize_attribute, "1.34.0", Some(54882)),
     /// Allows postfix match `expr.match { ... }`
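
The `offset_of_slice` gate added above allows `offset_of!` on fields of slice type: such fields are unsized, but their alignment (and hence their offset) is statically known. A minimal sketch of what the gate permits, assuming a nightly toolchain; `Packet` and its fields are illustrative, not from the commit:

#![feature(offset_of_slice)]

use std::mem::offset_of;

#[repr(C)]
struct Packet {
    header: u16,
    // Unsized slice tail: no statically known size, but a statically
    // known alignment, which is all `offset_of!` needs.
    payload: [u8],
}

fn main() {
    // Compiles only with the `offset_of_slice` feature enabled.
    println!("payload starts at byte {}", offset_of!(Packet, payload));
}
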
diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs
index 5d30b2a71e0..d2a5924c8bb 100644
--- a/compiler/rustc_hir_typeck/src/demand.rs
+++ b/compiler/rustc_hir_typeck/src/demand.rs
@@ -827,7 +827,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         ) else {
             return;
         };
-        let in_scope_methods = self.probe_for_name_many(
+
+        let Ok(in_scope_methods) = self.probe_for_name_many(
             probe::Mode::MethodCall,
             path.ident,
             Some(expected),
@@ -835,11 +836,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             self_ty,
             deref.hir_id,
             probe::ProbeScope::TraitsInScope,
-        );
+        ) else {
+            return;
+        };
+
         let other_methods_in_scope: Vec<_> =
             in_scope_methods.iter().filter(|c| c.item.def_id != pick.item.def_id).collect();
 
-        let all_methods = self.probe_for_name_many(
+        let Ok(all_methods) = self.probe_for_name_many(
             probe::Mode::MethodCall,
             path.ident,
             Some(expected),
@@ -847,7 +851,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             self_ty,
             deref.hir_id,
             probe::ProbeScope::AllTraits,
-        );
+        ) else {
+            return;
+        };
+
         let suggestions: Vec<_> = all_methods
             .into_iter()
             .filter(|c| c.item.def_id != pick.item.def_id)
diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs
index d5d360ca047..5b27ebe3416 100644
--- a/compiler/rustc_hir_typeck/src/expr.rs
+++ b/compiler/rustc_hir_typeck/src/expr.rs
@@ -3363,7 +3363,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
                     let field_ty = self.field_ty(expr.span, field, args);
 
-                    // FIXME: DSTs with static alignment should be allowed
+                    // Enums are anyway always sized. But just to safeguard against future
+                    // language extensions, let's double-check.
                     self.require_type_is_sized(field_ty, expr.span, ObligationCauseCode::Misc);
 
                     if field.vis.is_accessible_from(sub_def_scope, self.tcx) {
@@ -3391,8 +3392,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     {
                         let field_ty = self.field_ty(expr.span, field, args);
 
-                        // FIXME: DSTs with static alignment should be allowed
-                        self.require_type_is_sized(field_ty, expr.span, ObligationCauseCode::Misc);
+                        if self.tcx.features().offset_of_slice {
+                            self.require_type_has_static_alignment(
+                                field_ty,
+                                expr.span,
+                                ObligationCauseCode::Misc,
+                            );
+                        } else {
+                            self.require_type_is_sized(
+                                field_ty,
+                                expr.span,
+                                ObligationCauseCode::Misc,
+                            );
+                        }
 
                         if field.vis.is_accessible_from(def_scope, self.tcx) {
                             self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None);
@@ -3412,10 +3424,21 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     if let Ok(index) = field.as_str().parse::<usize>()
                         && field.name == sym::integer(index)
                     {
-                        for ty in tys.iter().take(index + 1) {
-                            self.require_type_is_sized(ty, expr.span, ObligationCauseCode::Misc);
-                        }
                         if let Some(&field_ty) = tys.get(index) {
+                            if self.tcx.features().offset_of_slice {
+                                self.require_type_has_static_alignment(
+                                    field_ty,
+                                    expr.span,
+                                    ObligationCauseCode::Misc,
+                                );
+                            } else {
+                                self.require_type_is_sized(
+                                    field_ty,
+                                    expr.span,
+                                    ObligationCauseCode::Misc,
+                                );
+                            }
+
                             field_indices.push((FIRST_VARIANT, index.into()));
                             current_container = field_ty;
 
diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
index 85c6d4dc12c..e354e1ec59c 100644
--- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
+++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs
@@ -386,6 +386,26 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         }
     }
 
+    pub fn require_type_has_static_alignment(
+        &self,
+        ty: Ty<'tcx>,
+        span: Span,
+        code: traits::ObligationCauseCode<'tcx>,
+    ) {
+        if !ty.references_error() {
+            let tail =
+                self.tcx.struct_tail_with_normalize(ty, |ty| self.normalize(span, ty), || {});
+            // Sized types have static alignment, and so do slices.
+            if tail.is_trivially_sized(self.tcx) || matches!(tail.kind(), ty::Slice(..)) {
+                // Nothing else is required here.
+            } else {
+                // We can't be sure, so let's require full `Sized`.
+                let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
+                self.require_type_meets(ty, span, code, lang_item);
+            }
+        }
+    }
+
     pub fn register_bound(
         &self,
         ty: Ty<'tcx>,
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs
index ab0f16bd87d..e842bba34bf 100644
--- a/compiler/rustc_hir_typeck/src/method/probe.rs
+++ b/compiler/rustc_hir_typeck/src/method/probe.rs
@@ -306,7 +306,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         self_ty: Ty<'tcx>,
         scope_expr_id: HirId,
         scope: ProbeScope,
-    ) -> Vec<Candidate<'tcx>> {
+    ) -> Result<Vec<Candidate<'tcx>>, MethodError<'tcx>> {
         self.probe_op(
             item_name.span,
             mode,
@@ -324,7 +324,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     .collect())
             },
         )
-        .unwrap()
     }
 
     pub(crate) fn probe_op<OP, R>(
diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs
index c1e14f7fb75..bbe4a8791c6 100644
--- a/compiler/rustc_hir_typeck/src/method/suggest.rs
+++ b/compiler/rustc_hir_typeck/src/method/suggest.rs
@@ -1640,10 +1640,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                         .unwrap_or(Ty::new_misc_error(self.tcx)),
                 );
 
-                // FIXME: `probe_for_name_many` searches for methods in inherent implementations,
-                // so it may return a candidate that doesn't belong to this `revr_ty`. We need to
-                // check whether the instantiated type matches the received one.
-                for _matched_method in self.probe_for_name_many(
+                let Ok(candidates) = self.probe_for_name_many(
                     Mode::MethodCall,
                     item_name,
                     None,
@@ -1651,7 +1648,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                     rcvr_ty,
                     source_expr.hir_id,
                     ProbeScope::TraitsInScope,
-                ) {
+                ) else {
+                    return;
+                };
+
+                // FIXME: `probe_for_name_many` searches for methods in inherent implementations,
+                // so it may return a candidate that doesn't belong to this `rcvr_ty`. We need to
+                // check whether the instantiated type matches the received one.
+                for _matched_method in candidates {
                     // found a match, push to stack
                     stack_methods.push(rcvr_ty);
                 }
diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs
index be91e7d45b6..9476dc70483 100644
--- a/compiler/rustc_hir_typeck/src/pat.rs
+++ b/compiler/rustc_hir_typeck/src/pat.rs
@@ -2385,11 +2385,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         min_len: u64,
     ) -> (Option<Ty<'tcx>>, Ty<'tcx>) {
         let len = match len.eval(self.tcx, self.param_env, span) {
-            // FIXME(BoxyUwU): Assert the `Ty` is a `usize`?
             Ok((_, val)) => val
                 .try_to_scalar()
-                .and_then(|scalar| scalar.try_to_int().ok())
-                .and_then(|int| int.try_to_target_usize(self.tcx).ok()),
+                .and_then(|scalar| scalar.try_to_scalar_int().ok())
+                .map(|int| int.to_target_usize(self.tcx)),
             Err(ErrorHandled::Reported(..)) => {
                 let guar = self.error_scrutinee_unfixed_length(span);
                 return (Some(Ty::new_error(self.tcx, guar)), arr_ty);
diff --git a/compiler/rustc_index_macros/Cargo.toml b/compiler/rustc_index_macros/Cargo.toml
index c4ca29db3c2..07ee81788ce 100644
--- a/compiler/rustc_index_macros/Cargo.toml
+++ b/compiler/rustc_index_macros/Cargo.toml
@@ -7,11 +7,10 @@ edition = "2021"
 proc-macro = true
 
 [dependencies]
-synstructure = "0.13.0"
 syn = { version = "2.0.9", features = ["full"] }
 proc-macro2 = "1"
 quote = "1"
 
 [features]
 default = ["nightly"]
-nightly = []
\ No newline at end of file
+nightly = []
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
index b88677b3a4e..effb4090692 100644
--- a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
+++ b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs
@@ -32,6 +32,15 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> {
                         diag.note("no two closures, even if identical, have the same type");
                         diag.help("consider boxing your closure and/or using it as a trait object");
                     }
+                    (ty::Coroutine(def_id1, ..), ty::Coroutine(def_id2, ..))
+                        if self.tcx.coroutine_is_async(def_id1)
+                            && self.tcx.coroutine_is_async(def_id2) =>
+                    {
+                        diag.note("no two async blocks, even if identical, have the same type");
+                        diag.help(
+                            "consider pinning your async block and casting it to a trait object",
+                        );
+                    }
                     (ty::Alias(ty::Opaque, ..), ty::Alias(ty::Opaque, ..)) => {
                         // Issue #63167
                         diag.note("distinct uses of `impl Trait` result in different opaque types");
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index d1cdabc293d..3dc592980fd 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -11,7 +11,6 @@ either = "1.5.0"
 field-offset = "0.3.5"
 gsgdt = "0.1.2"
 polonius-engine = "0.13.0"
-rustc-rayon = { version = "0.5.0", optional = true }
 rustc-rayon-core = { version = "0.5.0", optional = true }
 rustc_apfloat = "0.2.0"
 rustc_arena = { path = "../rustc_arena" }
@@ -41,5 +40,5 @@ tracing = "0.1"
 
 [features]
 # tidy-alphabetical-start
-rustc_use_parallel_compiler = ["rustc-rayon", "rustc-rayon-core"]
+rustc_use_parallel_compiler = ["rustc-rayon-core"]
 # tidy-alphabetical-end
diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs
index cc8979dd990..89f5acacf9d 100644
--- a/compiler/rustc_middle/src/mir/consts.rs
+++ b/compiler/rustc_middle/src/mir/consts.rs
@@ -84,11 +84,11 @@ impl<'tcx> ConstValue<'tcx> {
     }
 
     pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
-        self.try_to_scalar()?.try_to_int().ok()
+        self.try_to_scalar()?.try_to_scalar_int().ok()
     }
 
     pub fn try_to_bits(&self, size: Size) -> Option<u128> {
-        self.try_to_scalar_int()?.try_to_bits(size).ok()
+        Some(self.try_to_scalar_int()?.to_bits(size))
     }
 
     pub fn try_to_bool(&self) -> Option<bool> {
@@ -96,7 +96,7 @@ impl<'tcx> ConstValue<'tcx> {
     }
 
     pub fn try_to_target_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
-        self.try_to_scalar_int()?.try_to_target_usize(tcx).ok()
+        Some(self.try_to_scalar_int()?.to_target_usize(tcx))
     }
 
     pub fn try_to_bits_for_ty(
@@ -300,7 +300,7 @@ impl<'tcx> Const<'tcx> {
 
     #[inline]
     pub fn try_to_bits(self, size: Size) -> Option<u128> {
-        self.try_to_scalar_int()?.try_to_bits(size).ok()
+        Some(self.try_to_scalar_int()?.to_bits(size))
     }
 
     #[inline]
@@ -367,7 +367,7 @@ impl<'tcx> Const<'tcx> {
         tcx: TyCtxt<'tcx>,
         param_env: ty::ParamEnv<'tcx>,
     ) -> Option<ScalarInt> {
-        self.try_eval_scalar(tcx, param_env)?.try_to_int().ok()
+        self.try_eval_scalar(tcx, param_env)?.try_to_scalar_int().ok()
     }
 
     #[inline]
@@ -375,7 +375,7 @@ impl<'tcx> Const<'tcx> {
         let int = self.try_eval_scalar_int(tcx, param_env)?;
         let size =
             tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(self.ty())).ok()?.size;
-        int.try_to_bits(size).ok()
+        Some(int.to_bits(size))
     }
 
     /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
@@ -391,7 +391,7 @@ impl<'tcx> Const<'tcx> {
         tcx: TyCtxt<'tcx>,
         param_env: ty::ParamEnv<'tcx>,
     ) -> Option<u64> {
-        self.try_eval_scalar_int(tcx, param_env)?.try_to_target_usize(tcx).ok()
+        Some(self.try_eval_scalar_int(tcx, param_env)?.to_target_usize(tcx))
     }
 
     #[inline]
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 85357265687..70e5ad0635b 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -123,15 +123,11 @@ impl<Prov> Scalar<Prov> {
     }
 
     #[inline]
-    pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
-        ScalarInt::try_from_uint(i, size).map(Scalar::Int)
-    }
-
-    #[inline]
     pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
         let i = i.into();
-        Self::try_from_uint(i, size)
+        ScalarInt::try_from_uint(i, size)
             .unwrap_or_else(|| bug!("Unsigned value {:#x} does not fit in {} bits", i, size.bits()))
+            .into()
     }
 
     #[inline]
@@ -165,15 +161,11 @@ impl<Prov> Scalar<Prov> {
     }
 
     #[inline]
-    pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
-        ScalarInt::try_from_int(i, size).map(Scalar::Int)
-    }
-
-    #[inline]
     pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
         let i = i.into();
-        Self::try_from_int(i, size)
+        ScalarInt::try_from_int(i, size)
             .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
+            .into()
     }
 
     #[inline]
@@ -227,7 +219,7 @@ impl<Prov> Scalar<Prov> {
     }
 
     /// This is almost certainly not the method you want!  You should dispatch on the type
-    /// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
+    /// and use `to_{u8,u16,...}`/`to_pointer` to perform ptr-to-int / int-to-ptr casts as needed.
     ///
     /// This method only exists for the benefit of low-level operations that truly need to treat the
     /// scalar in whatever form it is.
@@ -289,7 +281,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
     /// The error type is `AllocId`, not `CtfeProvenance`, since `AllocId` is the "minimal"
     /// component all provenance types must have.
     #[inline]
-    pub fn try_to_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
+    pub fn try_to_scalar_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
         match self {
             Scalar::Int(int) => Ok(int),
             Scalar::Ptr(ptr, sz) => {
@@ -307,13 +299,13 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
 
     #[inline(always)]
     pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> {
-        self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
+        self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
     }
 
     #[inline(always)]
     #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
-    pub fn assert_int(self) -> ScalarInt {
-        self.try_to_int().unwrap()
+    pub fn assert_scalar_int(self) -> ScalarInt {
+        self.try_to_scalar_int().expect("got a pointer where a ScalarInt was expected")
     }
 
     /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
@@ -330,13 +322,6 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
         })
     }
 
-    #[inline(always)]
-    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
-    pub fn assert_bits(self, target_size: Size) -> u128 {
-        self.to_bits(target_size)
-            .unwrap_or_else(|_| panic!("assertion failed: {self:?} fits {target_size:?}"))
-    }
-
     pub fn to_bool(self) -> InterpResult<'tcx, bool> {
         let val = self.to_u8()?;
         match val {
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
index 454897aa672..7c8b0ec671a 100644
--- a/compiler/rustc_middle/src/thir.rs
+++ b/compiler/rustc_middle/src/thir.rs
@@ -1033,8 +1033,8 @@ impl<'tcx> PatRangeBoundary<'tcx> {
                 if let (Some(a), Some(b)) = (a.try_to_scalar_int(), b.try_to_scalar_int()) {
                     let sz = ty.primitive_size(tcx);
                     let cmp = match ty.kind() {
-                        ty::Uint(_) | ty::Char => a.assert_uint(sz).cmp(&b.assert_uint(sz)),
-                        ty::Int(_) => a.assert_int(sz).cmp(&b.assert_int(sz)),
+                        ty::Uint(_) | ty::Char => a.to_uint(sz).cmp(&b.to_uint(sz)),
+                        ty::Int(_) => a.to_int(sz).cmp(&b.to_int(sz)),
                         _ => unreachable!(),
                     };
                     return Some(cmp);
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index cc1daeb6419..12f0c38b054 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -376,7 +376,7 @@ impl<'tcx> Const<'tcx> {
         param_env: ParamEnv<'tcx>,
     ) -> Option<(Ty<'tcx>, ScalarInt)> {
         let (ty, scalar) = self.try_eval_scalar(tcx, param_env)?;
-        let val = scalar.try_to_int().ok()?;
+        let val = scalar.try_to_scalar_int().ok()?;
         Some((ty, val))
     }
 
@@ -388,7 +388,7 @@ impl<'tcx> Const<'tcx> {
         let (ty, scalar) = self.try_eval_scalar_int(tcx, param_env)?;
         let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
         // if `ty` does not depend on generic parameters, use an empty param_env
-        scalar.try_to_bits(size).ok()
+        Some(scalar.to_bits(size))
     }
 
     #[inline]
@@ -405,7 +405,7 @@ impl<'tcx> Const<'tcx> {
         param_env: ParamEnv<'tcx>,
     ) -> Option<u64> {
         let (_, scalar) = self.try_eval_scalar_int(tcx, param_env)?;
-        scalar.try_to_target_usize(tcx).ok()
+        Some(scalar.to_target_usize(tcx))
     }
 
     #[inline]
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index 40ac87873a0..52320dd141b 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -246,6 +246,10 @@ impl ScalarInt {
         Self::try_from_uint(i, tcx.data_layout.pointer_size)
     }
 
+    /// Try to convert this ScalarInt to the raw underlying bits.
+    /// Fails if the size is wrong. Generally a wrong size should lead to a panic,
+    /// but Miri sometimes wants to be resilient to size mismatches,
+    /// so the interpreter will generally use this `try` method.
     #[inline]
     pub fn try_to_bits(self, target_size: Size) -> Result<u128, Size> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
@@ -258,165 +262,149 @@ impl ScalarInt {
     }
 
     #[inline]
-    pub fn assert_bits(self, target_size: Size) -> u128 {
+    pub fn to_bits(self, target_size: Size) -> u128 {
         self.try_to_bits(target_size).unwrap_or_else(|size| {
             bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
         })
     }
 
-    /// Tries to convert the `ScalarInt` to an unsigned integer of the given size.
-    /// Fails if the size of the `ScalarInt` is not equal to `size` and returns the
-    /// `ScalarInt`s size in that case.
+    /// Extracts the bits from the scalar without checking the size.
     #[inline]
-    pub fn try_to_uint(self, size: Size) -> Result<u128, Size> {
-        self.try_to_bits(size)
+    pub fn to_bits_unchecked(self) -> u128 {
+        self.check_data();
+        self.data
     }
 
+    /// Converts the `ScalarInt` to an unsigned integer of the given size.
+    /// Panics if the size of the `ScalarInt` is not equal to `size`.
     #[inline]
-    pub fn assert_uint(self, size: Size) -> u128 {
-        self.assert_bits(size)
+    pub fn to_uint(self, size: Size) -> u128 {
+        self.to_bits(size)
     }
 
-    // Tries to convert the `ScalarInt` to `u8`. Fails if the `size` of the `ScalarInt`
-    // in not equal to 1 byte and returns the `size` value of the `ScalarInt` in
-    // that case.
+    /// Converts the `ScalarInt` to `u8`.
+    /// Panics if the size of the `ScalarInt` is not equal to 1 byte.
     #[inline]
-    pub fn try_to_u8(self) -> Result<u8, Size> {
-        self.try_to_uint(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap())
+    pub fn to_u8(self) -> u8 {
+        self.to_uint(Size::from_bits(8)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt`
-    /// in not equal to 2 bytes and returns the `size` value of the `ScalarInt` in
-    /// that case.
+    /// Converts the `ScalarInt` to `u16`.
+    /// Panics if the size of the `ScalarInt` is not equal to 2 bytes.
     #[inline]
-    pub fn try_to_u16(self) -> Result<u16, Size> {
-        self.try_to_uint(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap())
+    pub fn to_u16(self) -> u16 {
+        self.to_uint(Size::from_bits(16)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to `u32`. Fails if the `size` of the `ScalarInt`
-    /// in not equal to 4 bytes and returns the `size` value of the `ScalarInt` in
-    /// that case.
+    /// Converts the `ScalarInt` to `u32`.
+    /// Panics if the size of the `ScalarInt` is not equal to 4 bytes.
     #[inline]
-    pub fn try_to_u32(self) -> Result<u32, Size> {
-        self.try_to_uint(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap())
+    pub fn to_u32(self) -> u32 {
+        self.to_uint(Size::from_bits(32)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to `u64`. Fails if the `size` of the `ScalarInt`
-    /// in not equal to 8 bytes and returns the `size` value of the `ScalarInt` in
-    /// that case.
+    /// Converts the `ScalarInt` to `u64`.
+    /// Panics if the size of the `ScalarInt` is not equal to 8 bytes.
     #[inline]
-    pub fn try_to_u64(self) -> Result<u64, Size> {
-        self.try_to_uint(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap())
+    pub fn to_u64(self) -> u64 {
+        self.to_uint(Size::from_bits(64)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt`
-    /// in not equal to 16 bytes and returns the `size` value of the `ScalarInt` in
-    /// that case.
+    /// Converts the `ScalarInt` to `u128`.
+    /// Panics if the size of the `ScalarInt` is not equal to 16 bytes.
     #[inline]
-    pub fn try_to_u128(self) -> Result<u128, Size> {
-        self.try_to_uint(Size::from_bits(128))
+    pub fn to_u128(self) -> u128 {
+        self.to_uint(Size::from_bits(128))
     }
 
     #[inline]
-    pub fn try_to_target_usize(&self, tcx: TyCtxt<'_>) -> Result<u64, Size> {
-        self.try_to_uint(tcx.data_layout.pointer_size).map(|v| u64::try_from(v).unwrap())
+    pub fn to_target_usize(&self, tcx: TyCtxt<'_>) -> u64 {
+        self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap()
     }
 
-    // Tries to convert the `ScalarInt` to `bool`. Fails if the `size` of the `ScalarInt`
-    // in not equal to 1 byte or if the value is not 0 or 1 and returns the `size`
-    // value of the `ScalarInt` in that case.
+    /// Converts the `ScalarInt` to `bool`.
+    /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte.
+    /// Errors if it is not a valid `bool`.
     #[inline]
-    pub fn try_to_bool(self) -> Result<bool, Size> {
-        match self.try_to_u8()? {
+    pub fn try_to_bool(self) -> Result<bool, ()> {
+        match self.to_u8() {
             0 => Ok(false),
             1 => Ok(true),
-            _ => Err(self.size()),
+            _ => Err(()),
         }
     }
 
-    /// Tries to convert the `ScalarInt` to a signed integer of the given size.
-    /// Fails if the size of the `ScalarInt` is not equal to `size` and returns the
-    /// `ScalarInt`s size in that case.
-    #[inline]
-    pub fn try_to_int(self, size: Size) -> Result<i128, Size> {
-        let b = self.try_to_bits(size)?;
-        Ok(size.sign_extend(b) as i128)
-    }
-
+    /// Converts the `ScalarInt` to a signed integer of the given size.
+    /// Panics if the size of the `ScalarInt` is not equal to `size`.
     #[inline]
-    pub fn assert_int(self, size: Size) -> i128 {
-        let b = self.assert_bits(size);
+    pub fn to_int(self, size: Size) -> i128 {
+        let b = self.to_bits(size);
         size.sign_extend(b) as i128
     }
 
-    /// Tries to convert the `ScalarInt` to i8.
-    /// Fails if the size of the `ScalarInt` is not equal to 1 byte
-    /// and returns the `ScalarInt`s size in that case.
-    pub fn try_to_i8(self) -> Result<i8, Size> {
-        self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap())
+    /// Converts the `ScalarInt` to i8.
+    /// Panics if the size of the `ScalarInt` is not equal to 1 byte.
+    pub fn to_i8(self) -> i8 {
+        self.to_int(Size::from_bits(8)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to i16.
-    /// Fails if the size of the `ScalarInt` is not equal to 2 bytes
-    /// and returns the `ScalarInt`s size in that case.
-    pub fn try_to_i16(self) -> Result<i16, Size> {
-        self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap())
+    /// Converts the `ScalarInt` to i16.
+    /// Panics if the size of the `ScalarInt` is not equal to 2 bytes.
+    pub fn to_i16(self) -> i16 {
+        self.to_int(Size::from_bits(16)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to i32.
-    /// Fails if the size of the `ScalarInt` is not equal to 4 bytes
-    /// and returns the `ScalarInt`s size in that case.
-    pub fn try_to_i32(self) -> Result<i32, Size> {
-        self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap())
+    /// Converts the `ScalarInt` to i32.
+    /// Panics if the size of the `ScalarInt` is not equal to 4 bytes.
+    pub fn to_i32(self) -> i32 {
+        self.to_int(Size::from_bits(32)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to i64.
-    /// Fails if the size of the `ScalarInt` is not equal to 8 bytes
-    /// and returns the `ScalarInt`s size in that case.
-    pub fn try_to_i64(self) -> Result<i64, Size> {
-        self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap())
+    /// Converts the `ScalarInt` to i64.
+    /// Panics if the size of the `ScalarInt` is not equal to 8 bytes.
+    pub fn to_i64(self) -> i64 {
+        self.to_int(Size::from_bits(64)).try_into().unwrap()
     }
 
-    /// Tries to convert the `ScalarInt` to i128.
-    /// Fails if the size of the `ScalarInt` is not equal to 16 bytes
-    /// and returns the `ScalarInt`s size in that case.
-    pub fn try_to_i128(self) -> Result<i128, Size> {
-        self.try_to_int(Size::from_bits(128))
+    /// Converts the `ScalarInt` to i128.
+    /// Panics if the size of the `ScalarInt` is not equal to 16 bytes.
+    pub fn to_i128(self) -> i128 {
+        self.to_int(Size::from_bits(128))
     }
 
     #[inline]
-    pub fn try_to_target_isize(&self, tcx: TyCtxt<'_>) -> Result<i64, Size> {
-        self.try_to_int(tcx.data_layout.pointer_size).map(|v| i64::try_from(v).unwrap())
+    pub fn to_target_isize(&self, tcx: TyCtxt<'_>) -> i64 {
+        self.to_int(tcx.data_layout.pointer_size).try_into().unwrap()
     }
 
     #[inline]
-    pub fn try_to_float<F: Float>(self) -> Result<F, Size> {
+    pub fn to_float<F: Float>(self) -> F {
         // Going through `to_bits` to check size and truncation.
-        Ok(F::from_bits(self.try_to_bits(Size::from_bits(F::BITS))?))
+        F::from_bits(self.to_bits(Size::from_bits(F::BITS)))
     }
 
     #[inline]
-    pub fn try_to_f16(self) -> Result<Half, Size> {
-        self.try_to_float()
+    pub fn to_f16(self) -> Half {
+        self.to_float()
     }
 
     #[inline]
-    pub fn try_to_f32(self) -> Result<Single, Size> {
-        self.try_to_float()
+    pub fn to_f32(self) -> Single {
+        self.to_float()
     }
 
     #[inline]
-    pub fn try_to_f64(self) -> Result<Double, Size> {
-        self.try_to_float()
+    pub fn to_f64(self) -> Double {
+        self.to_float()
     }
 
     #[inline]
-    pub fn try_to_f128(self) -> Result<Quad, Size> {
-        self.try_to_float()
+    pub fn to_f128(self) -> Quad {
+        self.to_float()
     }
 }
 
-macro_rules! from {
+macro_rules! from_x_for_scalar_int {
     ($($ty:ty),*) => {
         $(
             impl From<$ty> for ScalarInt {
@@ -432,30 +420,29 @@ macro_rules! from {
     }
 }
 
-macro_rules! try_from {
+macro_rules! from_scalar_int_for_x {
     ($($ty:ty),*) => {
         $(
-            impl TryFrom<ScalarInt> for $ty {
-                type Error = Size;
+            impl From<ScalarInt> for $ty {
                 #[inline]
-                fn try_from(int: ScalarInt) -> Result<Self, Size> {
+                fn from(int: ScalarInt) -> Self {
                     // The `unwrap` cannot fail because to_bits (if it succeeds)
                     // is guaranteed to return a value that fits into the size.
-                    int.try_to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
-                       .map(|u| u.try_into().unwrap())
+                    int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+                       .try_into().unwrap()
                 }
             }
         )*
     }
 }
 
-from!(u8, u16, u32, u64, u128, bool);
-try_from!(u8, u16, u32, u64, u128);
+from_x_for_scalar_int!(u8, u16, u32, u64, u128, bool);
+from_scalar_int_for_x!(u8, u16, u32, u64, u128);
 
 impl TryFrom<ScalarInt> for bool {
-    type Error = Size;
+    type Error = ();
     #[inline]
-    fn try_from(int: ScalarInt) -> Result<Self, Size> {
+    fn try_from(int: ScalarInt) -> Result<Self, ()> {
         int.try_to_bool()
     }
 }
@@ -463,7 +450,7 @@ impl TryFrom<ScalarInt> for bool {
 impl From<char> for ScalarInt {
     #[inline]
     fn from(c: char) -> Self {
-        Self { data: c as u128, size: NonZero::new(std::mem::size_of::<char>() as u8).unwrap() }
+        (c as u32).into()
     }
 }
 
@@ -476,10 +463,7 @@ impl TryFrom<ScalarInt> for char {
 
     #[inline]
     fn try_from(int: ScalarInt) -> Result<Self, Self::Error> {
-        let Ok(bits) = int.try_to_bits(Size::from_bytes(std::mem::size_of::<char>())) else {
-            return Err(CharTryFromScalarInt);
-        };
-        match char::from_u32(bits.try_into().unwrap()) {
+        match char::from_u32(int.to_u32()) {
             Some(c) => Ok(c),
             None => Err(CharTryFromScalarInt),
         }
@@ -494,11 +478,10 @@ impl From<Half> for ScalarInt {
     }
 }
 
-impl TryFrom<ScalarInt> for Half {
-    type Error = Size;
+impl From<ScalarInt> for Half {
     #[inline]
-    fn try_from(int: ScalarInt) -> Result<Self, Size> {
-        int.try_to_bits(Size::from_bytes(2)).map(Self::from_bits)
+    fn from(int: ScalarInt) -> Self {
+        Self::from_bits(int.to_bits(Size::from_bytes(2)))
     }
 }
 
@@ -510,11 +493,10 @@ impl From<Single> for ScalarInt {
     }
 }
 
-impl TryFrom<ScalarInt> for Single {
-    type Error = Size;
+impl From<ScalarInt> for Single {
     #[inline]
-    fn try_from(int: ScalarInt) -> Result<Self, Size> {
-        int.try_to_bits(Size::from_bytes(4)).map(Self::from_bits)
+    fn from(int: ScalarInt) -> Self {
+        Self::from_bits(int.to_bits(Size::from_bytes(4)))
     }
 }
 
@@ -526,11 +508,10 @@ impl From<Double> for ScalarInt {
     }
 }
 
-impl TryFrom<ScalarInt> for Double {
-    type Error = Size;
+impl From<ScalarInt> for Double {
     #[inline]
-    fn try_from(int: ScalarInt) -> Result<Self, Size> {
-        int.try_to_bits(Size::from_bytes(8)).map(Self::from_bits)
+    fn from(int: ScalarInt) -> Self {
+        Self::from_bits(int.to_bits(Size::from_bytes(8)))
     }
 }
 
@@ -542,11 +523,10 @@ impl From<Quad> for ScalarInt {
     }
 }
 
-impl TryFrom<ScalarInt> for Quad {
-    type Error = Size;
+impl From<ScalarInt> for Quad {
     #[inline]
-    fn try_from(int: ScalarInt) -> Result<Self, Size> {
-        int.try_to_bits(Size::from_bytes(16)).map(Self::from_bits)
+    fn from(int: ScalarInt) -> Self {
+        Self::from_bits(int.to_bits(Size::from_bytes(16)))
     }
 }
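
The net effect of the renames above: `to_*` methods panic on a size mismatch, while the remaining `try_to_*` methods report the actual size. A standalone mock of that contract (`MiniScalar` and its methods are invented for illustration and only mirror the semantics; this is not the rustc `ScalarInt`):

```rust
#[derive(Copy, Clone)]
struct MiniScalar {
    data: u128,
    size_bytes: u32,
}

impl MiniScalar {
    /// Reports the stored size on mismatch, like `ScalarInt::try_to_bits`.
    fn try_to_bits(self, target_bytes: u32) -> Result<u128, u32> {
        if self.size_bytes == target_bytes { Ok(self.data) } else { Err(self.size_bytes) }
    }

    /// Panics on mismatch, like the renamed `ScalarInt::to_bits`.
    fn to_bits(self, target_bytes: u32) -> u128 {
        self.try_to_bits(target_bytes)
            .unwrap_or_else(|sz| panic!("expected int of size {target_bytes}, but got size {sz}"))
    }

    /// Sign-extends from the stored width, like `ScalarInt::to_int`.
    fn to_int(self, target_bytes: u32) -> i128 {
        let shift = 128 - 8 * target_bytes;
        ((self.to_bits(target_bytes) << shift) as i128) >> shift
    }
}

fn main() {
    let s = MiniScalar { data: 0xFF, size_bytes: 1 };
    assert_eq!(s.to_bits(1), 0xFF);
    assert_eq!(s.to_int(1), -1); // the same bits, sign-extended
    assert_eq!(s.try_to_bits(4), Err(1)); // wrong size: reported, not a panic
    // `s.to_bits(4)` would panic, which is now the default behavior.
}
```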
 
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
index 96bc5515a56..efc91357af8 100644
--- a/compiler/rustc_middle/src/ty/consts/valtree.rs
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -79,7 +79,7 @@ impl<'tcx> ValTree<'tcx> {
     }
 
     pub fn try_to_target_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
-        self.try_to_scalar_int().and_then(|s| s.try_to_target_usize(tcx).ok())
+        self.try_to_scalar_int().map(|s| s.to_target_usize(tcx))
     }
 
     /// Get the values inside the ValTree as a slice of bytes. This only works for
@@ -100,8 +100,9 @@ impl<'tcx> ValTree<'tcx> {
             _ => return None,
         }
 
-        Some(tcx.arena.alloc_from_iter(
-            self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().try_to_u8().unwrap()),
-        ))
+        Some(
+            tcx.arena
+                .alloc_from_iter(self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().to_u8())),
+        )
     }
 }
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 142872009bf..56945bf6be4 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1351,3 +1351,37 @@ pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
 }
 
 impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
+
+impl<'tcx> TyCtxt<'tcx> {
+    pub fn offset_of_subfield<I>(
+        self,
+        param_env: ty::ParamEnv<'tcx>,
+        mut layout: TyAndLayout<'tcx>,
+        indices: I,
+    ) -> Size
+    where
+        I: Iterator<Item = (VariantIdx, FieldIdx)>,
+    {
+        let cx = LayoutCx { tcx: self, param_env };
+        let mut offset = Size::ZERO;
+
+        for (variant, field) in indices {
+            layout = layout.for_variant(&cx, variant);
+            let index = field.index();
+            offset += layout.fields.offset(index);
+            layout = layout.field(&cx, index);
+            if !layout.is_sized() {
+                // If it is not sized, then the tail must still have at least a known static alignment.
+                let tail = self.struct_tail_erasing_lifetimes(layout.ty, param_env);
+                if !matches!(tail.kind(), ty::Slice(..)) {
+                    bug!(
+                        "offset of not-statically-aligned field (type {:?}) cannot be computed statically",
+                        layout.ty
+                    );
+                }
+            }
+        }
+
+        offset
+    }
+}
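
The new helper just walks the (variant, field) projection chain and sums the field offsets. The same arithmetic is visible with stable `std::mem::offset_of!` on a concrete type (a standalone sketch; it does not touch the rustc-internal `TyCtxt` API):

```rust
use std::mem::offset_of;

#[repr(C)]
struct Inner {
    a: u8,
    b: u32,
}

#[repr(C)]
struct Outer {
    x: u16,
    inner: Inner,
}

fn main() {
    // Offsets along a projection chain simply add up, which is what
    // `offset_of_subfield` computes from layout data.
    let nested = offset_of!(Outer, inner) + offset_of!(Inner, b);
    // `x` occupies 0..2, `inner` starts at 4 (u32 alignment), `b` sits at 4 within it.
    assert_eq!(nested, 8);
}
```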
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 49d46eb3c4b..662eafd0ccb 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1652,7 +1652,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
                         if let ty::ConstKind::Value(_, ty::ValTree::Leaf(int)) = len.kind() {
                             match self.tcx().try_get_global_alloc(prov.alloc_id()) {
                                 Some(GlobalAlloc::Memory(alloc)) => {
-                                    let len = int.assert_bits(self.tcx().data_layout.pointer_size);
+                                    let len = int.to_bits(self.tcx().data_layout.pointer_size);
                                     let range =
                                         AllocRange { start: offset, size: Size::from_bytes(len) };
                                     if let Ok(byte_str) =
@@ -1730,7 +1730,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
             }
             // Pointer types
             ty::Ref(..) | ty::RawPtr(_, _) | ty::FnPtr(_) => {
-                let data = int.assert_bits(self.tcx().data_layout.pointer_size);
+                let data = int.to_bits(self.tcx().data_layout.pointer_size);
                 self.typed_value(
                     |this| {
                         write!(this, "0x{data:x}")?;
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
index 77f27236437..5d828d0093f 100644
--- a/compiler/rustc_mir_build/Cargo.toml
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -5,7 +5,6 @@ edition = "2021"
 
 [dependencies]
 # tidy-alphabetical-start
-either = "1"
 itertools = "0.12"
 rustc_apfloat = "0.2.0"
 rustc_arena = { path = "../rustc_arena" }
@@ -24,6 +23,5 @@ rustc_session = { path = "../rustc_session" }
 rustc_span = { path = "../rustc_span" }
 rustc_target = { path = "../rustc_target" }
 rustc_trait_selection = { path = "../rustc_trait_selection" }
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 tracing = "0.1"
 # tidy-alphabetical-end
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
index 193f0d124bb..601e5d4d3dc 100644
--- a/compiler/rustc_mir_build/src/build/mod.rs
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -15,11 +15,10 @@ use rustc_index::{Idx, IndexSlice, IndexVec};
 use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
 use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
 use rustc_middle::middle::region;
-use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::mir::*;
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::thir::{self, ExprId, LintLevel, LocalVarId, Param, ParamId, PatKind, Thir};
-use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt};
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
 use rustc_middle::{bug, span_bug};
 use rustc_span::symbol::sym;
 use rustc_span::Span;
@@ -1014,14 +1013,14 @@ fn parse_float_into_constval<'tcx>(
     float_ty: ty::FloatTy,
     neg: bool,
 ) -> Option<ConstValue<'tcx>> {
-    parse_float_into_scalar(num, float_ty, neg).map(ConstValue::Scalar)
+    parse_float_into_scalar(num, float_ty, neg).map(|s| ConstValue::Scalar(s.into()))
 }
 
 pub(crate) fn parse_float_into_scalar(
     num: Symbol,
     float_ty: ty::FloatTy,
     neg: bool,
-) -> Option<Scalar> {
+) -> Option<ScalarInt> {
     let num = num.as_str();
     match float_ty {
         // FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64`
@@ -1030,7 +1029,7 @@ pub(crate) fn parse_float_into_scalar(
             if neg {
                 f = -f;
             }
-            Some(Scalar::from_f16(f))
+            Some(ScalarInt::from(f))
         }
         ty::FloatTy::F32 => {
             let Ok(rust_f) = num.parse::<f32>() else { return None };
@@ -1053,7 +1052,7 @@ pub(crate) fn parse_float_into_scalar(
                 f = -f;
             }
 
-            Some(Scalar::from_f32(f))
+            Some(ScalarInt::from(f))
         }
         ty::FloatTy::F64 => {
             let Ok(rust_f) = num.parse::<f64>() else { return None };
@@ -1076,7 +1075,7 @@ pub(crate) fn parse_float_into_scalar(
                 f = -f;
             }
 
-            Some(Scalar::from_f64(f))
+            Some(ScalarInt::from(f))
         }
         // FIXME(f16_f128): When available, compare to the library parser as with `f32` and `f64`
         ty::FloatTy::F128 => {
@@ -1084,7 +1083,7 @@ pub(crate) fn parse_float_into_scalar(
             if neg {
                 f = -f;
             }
-            Some(Scalar::from_f128(f))
+            Some(ScalarInt::from(f))
         }
     }
 }
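
A detail worth noting in `parse_float_into_scalar`: the literal token never carries the minus sign, so `neg` is applied after parsing. A standalone sketch of that flow with plain `f32` (the real code goes through `rustc_apfloat` types instead):

```rust
fn parse_float_literal(num: &str, neg: bool) -> Option<f32> {
    let mut f: f32 = num.parse().ok()?;
    if neg {
        f = -f;
    }
    Some(f)
}

fn main() {
    assert_eq!(parse_float_literal("1.5", true), Some(-1.5));
    // Negating after the parse preserves the sign of zero, too.
    assert!(parse_float_literal("0.0", true).unwrap().is_sign_negative());
}
```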
diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs
index 31bc72184ca..a98e046d4dc 100644
--- a/compiler/rustc_mir_build/src/thir/constant.rs
+++ b/compiler/rustc_mir_build/src/thir/constant.rs
@@ -58,11 +58,9 @@ pub(crate) fn lit_to_const<'tcx>(
         }
         (ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()),
         (ast::LitKind::Float(n, _), ty::Float(fty)) => {
-            let bits = parse_float_into_scalar(*n, *fty, neg)
-                .ok_or_else(|| {
-                    tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit))
-                })?
-                .assert_int();
+            let bits = parse_float_into_scalar(*n, *fty, neg).ok_or_else(|| {
+                tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit))
+            })?;
             ty::ValTree::from_scalar_int(bits)
         }
         (ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()),
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
index 36495101d3f..192d706bce2 100644
--- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -282,8 +282,7 @@ impl<'tcx> ConstToPat<'tcx> {
             }
             ty::Adt(adt_def, args) if adt_def.is_enum() => {
                 let (&variant_index, fields) = cv.unwrap_branch().split_first().unwrap();
-                let variant_index =
-                    VariantIdx::from_u32(variant_index.unwrap_leaf().try_to_u32().ok().unwrap());
+                let variant_index = VariantIdx::from_u32(variant_index.unwrap_leaf().to_u32());
                 PatKind::Variant {
                     adt_def: *adt_def,
                     args,
@@ -371,8 +370,8 @@ impl<'tcx> ConstToPat<'tcx> {
                 let v = cv.unwrap_leaf();
                 let is_nan = match flt {
                     ty::FloatTy::F16 => unimplemented!("f16_f128"),
-                    ty::FloatTy::F32 => v.try_to_f32().unwrap().is_nan(),
-                    ty::FloatTy::F64 => v.try_to_f64().unwrap().is_nan(),
+                    ty::FloatTy::F32 => v.to_f32().is_nan(),
+                    ty::FloatTy::F64 => v.to_f64().is_nan(),
                     ty::FloatTy::F128 => unimplemented!("f16_f128"),
                 };
                 if is_nan {
diff --git a/compiler/rustc_mir_transform/src/const_debuginfo.rs b/compiler/rustc_mir_transform/src/const_debuginfo.rs
deleted file mode 100644
index e4e4270c499..00000000000
--- a/compiler/rustc_mir_transform/src/const_debuginfo.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-//! Finds locals which are assigned once to a const and unused except for debuginfo and converts
-//! their debuginfo to use the const directly, allowing the local to be removed.
-
-use rustc_middle::{
-    mir::{
-        visit::{PlaceContext, Visitor},
-        Body, ConstOperand, Local, Location, Operand, Rvalue, StatementKind, VarDebugInfoContents,
-    },
-    ty::TyCtxt,
-};
-
-use crate::MirPass;
-use rustc_index::{bit_set::BitSet, IndexVec};
-
-pub struct ConstDebugInfo;
-
-impl<'tcx> MirPass<'tcx> for ConstDebugInfo {
-    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
-        sess.mir_opt_level() > 0
-    }
-
-    fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
-        trace!("running ConstDebugInfo on {:?}", body.source);
-
-        for (local, constant) in find_optimization_opportunities(body) {
-            for debuginfo in &mut body.var_debug_info {
-                if let VarDebugInfoContents::Place(p) = debuginfo.value {
-                    if p.local == local && p.projection.is_empty() {
-                        trace!(
-                            "changing debug info for {:?} from place {:?} to constant {:?}",
-                            debuginfo.name,
-                            p,
-                            constant
-                        );
-                        debuginfo.value = VarDebugInfoContents::Const(constant);
-                    }
-                }
-            }
-        }
-    }
-}
-
-struct LocalUseVisitor {
-    local_mutating_uses: IndexVec<Local, u8>,
-    local_assignment_locations: IndexVec<Local, Option<Location>>,
-}
-
-fn find_optimization_opportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, ConstOperand<'tcx>)> {
-    let mut visitor = LocalUseVisitor {
-        local_mutating_uses: IndexVec::from_elem(0, &body.local_decls),
-        local_assignment_locations: IndexVec::from_elem(None, &body.local_decls),
-    };
-
-    visitor.visit_body(body);
-
-    let mut locals_to_debuginfo = BitSet::new_empty(body.local_decls.len());
-    for debuginfo in &body.var_debug_info {
-        if let VarDebugInfoContents::Place(p) = debuginfo.value
-            && let Some(l) = p.as_local()
-        {
-            locals_to_debuginfo.insert(l);
-        }
-    }
-
-    let mut eligible_locals = Vec::new();
-    for (local, mutating_uses) in visitor.local_mutating_uses.drain_enumerated(..) {
-        if mutating_uses != 1 || !locals_to_debuginfo.contains(local) {
-            continue;
-        }
-
-        if let Some(location) = visitor.local_assignment_locations[local] {
-            let bb = &body[location.block];
-
-            // The value is assigned as the result of a call, not a constant
-            if bb.statements.len() == location.statement_index {
-                continue;
-            }
-
-            if let StatementKind::Assign(box (p, Rvalue::Use(Operand::Constant(box c)))) =
-                &bb.statements[location.statement_index].kind
-            {
-                if let Some(local) = p.as_local() {
-                    eligible_locals.push((local, *c));
-                }
-            }
-        }
-    }
-
-    eligible_locals
-}
-
-impl Visitor<'_> for LocalUseVisitor {
-    fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
-        if context.is_mutating_use() {
-            self.local_mutating_uses[local] = self.local_mutating_uses[local].saturating_add(1);
-
-            if context.is_place_assignment() {
-                self.local_assignment_locations[local] = Some(location);
-            }
-        }
-    }
-}
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index a8caead46f2..0fd85eb345d 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -10,7 +10,7 @@ use rustc_middle::bug;
 use rustc_middle::mir::interpret::{InterpResult, Scalar};
 use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
 use rustc_middle::mir::*;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_mir_dataflow::value_analysis::{
     Map, PlaceIndex, State, TrackElem, ValueAnalysis, ValueAnalysisWrapper, ValueOrPlace,
@@ -285,9 +285,11 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
                 let val = match null_op {
                     NullOp::SizeOf if layout.is_sized() => layout.size.bytes(),
                     NullOp::AlignOf if layout.is_sized() => layout.align.abi.bytes(),
-                    NullOp::OffsetOf(fields) => {
-                        layout.offset_of_subfield(&self.ecx, fields.iter()).bytes()
-                    }
+                    NullOp::OffsetOf(fields) => self
+                        .ecx
+                        .tcx
+                        .offset_of_subfield(self.ecx.param_env(), layout, fields.iter())
+                        .bytes(),
                     _ => return ValueOrPlace::Value(FlatSet::Top),
                 };
                 FlatSet::Elem(Scalar::from_target_usize(val, &self.tcx))
@@ -324,7 +326,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
             // This allows the set of visited edges to grow monotonically with the lattice.
             FlatSet::Bottom => TerminatorEdges::None,
             FlatSet::Elem(scalar) => {
-                let choice = scalar.assert_bits(scalar.size());
+                let choice = scalar.assert_scalar_int().to_bits_unchecked();
                 TerminatorEdges::Single(targets.target_for_value(choice))
             }
             FlatSet::Top => TerminatorEdges::SwitchInt { discr, targets },
@@ -607,7 +609,7 @@ fn propagatable_scalar(
     map: &Map,
 ) -> Option<Scalar> {
     if let FlatSet::Elem(value) = state.get_idx(place, map)
-        && value.try_to_int().is_ok()
+        && value.try_to_scalar_int().is_ok()
     {
         // Do not attempt to propagate pointers, as we may fail to preserve their identity.
         Some(value)
@@ -668,7 +670,7 @@ fn try_write_constant<'tcx>(
                 let FlatSet::Elem(Scalar::Int(discr)) = state.get_idx(discr, map) else {
                     throw_machine_stop_str!("discriminant with provenance")
                 };
-                let discr_bits = discr.assert_bits(discr.size());
+                let discr_bits = discr.to_bits(discr.size());
                 let Some((variant, _)) = def.discriminants(*ecx.tcx).find(|(_, var)| discr_bits == var.val) else {
                     throw_machine_stop_str!("illegal discriminant for enum")
                 };
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index acde16fcb75..121a3b99a39 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -83,8 +83,8 @@
 //! that contain `AllocId`s.
 
 use rustc_const_eval::const_eval::DummyMachine;
-use rustc_const_eval::interpret::{intern_const_alloc_for_constprop, MemoryKind};
-use rustc_const_eval::interpret::{ImmTy, InterpCx, OpTy, Projectable, Scalar};
+use rustc_const_eval::interpret::{intern_const_alloc_for_constprop, MemPlaceMeta, MemoryKind};
+use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable, Scalar};
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::graph::dominators::Dominators;
 use rustc_hir::def::DefKind;
@@ -95,11 +95,11 @@ use rustc_middle::bug;
 use rustc_middle::mir::interpret::GlobalAlloc;
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
-use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::DefId;
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{self, Abi, Size, VariantIdx, FIRST_VARIANT};
+use rustc_target::abi::{self, Abi, FieldIdx, Size, VariantIdx, FIRST_VARIANT};
 use smallvec::SmallVec;
 use std::borrow::Cow;
 
@@ -177,6 +177,12 @@ enum AggregateTy<'tcx> {
     Array,
     Tuple,
     Def(DefId, ty::GenericArgsRef<'tcx>),
+    RawPtr {
+        /// Needed for cast propagation.
+        data_pointer_ty: Ty<'tcx>,
+        /// The data pointer can be anything thin, so doesn't determine the output.
+        output_pointer_ty: Ty<'tcx>,
+    },
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
@@ -385,11 +391,22 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                     AggregateTy::Def(def_id, args) => {
                         self.tcx.type_of(def_id).instantiate(self.tcx, args)
                     }
+                    AggregateTy::RawPtr { output_pointer_ty, .. } => output_pointer_ty,
                 };
                 let variant = if ty.is_enum() { Some(variant) } else { None };
                 let ty = self.ecx.layout_of(ty).ok()?;
                 if ty.is_zst() {
                     ImmTy::uninit(ty).into()
+                } else if matches!(kind, AggregateTy::RawPtr { .. }) {
+                    // Pointers don't have fields, so don't `project_field` them.
+                    let data = self.ecx.read_pointer(fields[0]).ok()?;
+                    let meta = if fields[1].layout.is_zst() {
+                        MemPlaceMeta::None
+                    } else {
+                        MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?)
+                    };
+                    let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
+                    ImmTy::from_immediate(ptr_imm, ty).into()
                 } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
                     let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?;
                     let variant_dest = if let Some(variant) = variant {
@@ -471,7 +488,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let slice = self.evaluated[slice].as_ref()?;
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
                 let len = slice.len(&self.ecx).ok()?;
-                let imm = ImmTy::try_from_uint(len, usize_layout)?;
+                let imm = ImmTy::from_uint(len, usize_layout);
                 imm.into()
             }
             NullaryOp(null_op, ty) => {
@@ -484,13 +501,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 let val = match null_op {
                     NullOp::SizeOf => layout.size.bytes(),
                     NullOp::AlignOf => layout.align.abi.bytes(),
-                    NullOp::OffsetOf(fields) => {
-                        layout.offset_of_subfield(&self.ecx, fields.iter()).bytes()
-                    }
+                    NullOp::OffsetOf(fields) => self
+                        .ecx
+                        .tcx
+                        .offset_of_subfield(self.ecx.param_env(), layout, fields.iter())
+                        .bytes(),
                     NullOp::UbChecks => return None,
                 };
                 let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
-                let imm = ImmTy::try_from_uint(val, usize_layout)?;
+                let imm = ImmTy::from_uint(val, usize_layout);
                 imm.into()
             }
             UnaryOp(un_op, operand) => {
@@ -862,10 +881,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         rvalue: &mut Rvalue<'tcx>,
         location: Location,
     ) -> Option<VnIndex> {
-        let Rvalue::Aggregate(box ref kind, ref mut fields) = *rvalue else { bug!() };
+        let Rvalue::Aggregate(box ref kind, ref mut field_ops) = *rvalue else { bug!() };
 
         let tcx = self.tcx;
-        if fields.is_empty() {
+        if field_ops.is_empty() {
             let is_zst = match *kind {
                 AggregateKind::Array(..)
                 | AggregateKind::Tuple
@@ -884,13 +903,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
         }
 
-        let (ty, variant_index) = match *kind {
+        let (mut ty, variant_index) = match *kind {
             AggregateKind::Array(..) => {
-                assert!(!fields.is_empty());
+                assert!(!field_ops.is_empty());
                 (AggregateTy::Array, FIRST_VARIANT)
             }
             AggregateKind::Tuple => {
-                assert!(!fields.is_empty());
+                assert!(!field_ops.is_empty());
                 (AggregateTy::Tuple, FIRST_VARIANT)
             }
             AggregateKind::Closure(did, args)
@@ -901,15 +920,49 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             }
             // Do not track unions.
             AggregateKind::Adt(_, _, _, _, Some(_)) => return None,
-            // FIXME: Do the extra work to GVN `from_raw_parts`
-            AggregateKind::RawPtr(..) => return None,
+            AggregateKind::RawPtr(pointee_ty, mtbl) => {
+                assert_eq!(field_ops.len(), 2);
+                let data_pointer_ty = field_ops[FieldIdx::ZERO].ty(self.local_decls, self.tcx);
+                let output_pointer_ty = Ty::new_ptr(self.tcx, pointee_ty, mtbl);
+                (AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty }, FIRST_VARIANT)
+            }
         };
 
-        let fields: Option<Vec<_>> = fields
+        let fields: Option<Vec<_>> = field_ops
             .iter_mut()
             .map(|op| self.simplify_operand(op, location).or_else(|| self.new_opaque()))
             .collect();
-        let fields = fields?;
+        let mut fields = fields?;
+
+        if let AggregateTy::RawPtr { data_pointer_ty, output_pointer_ty } = &mut ty {
+            let mut was_updated = false;
+
+            // Any thin pointer of matching mutability is fine as the data pointer.
+            while let Value::Cast {
+                kind: CastKind::PtrToPtr,
+                value: cast_value,
+                from: cast_from,
+                to: _,
+            } = self.get(fields[0])
+                && let ty::RawPtr(from_pointee_ty, from_mtbl) = cast_from.kind()
+                && let ty::RawPtr(_, output_mtbl) = output_pointer_ty.kind()
+                && from_mtbl == output_mtbl
+                && from_pointee_ty.is_sized(self.tcx, self.param_env)
+            {
+                fields[0] = *cast_value;
+                *data_pointer_ty = *cast_from;
+                was_updated = true;
+            }
+
+            if was_updated {
+                if let Some(const_) = self.try_as_constant(fields[0]) {
+                    field_ops[FieldIdx::ZERO] = Operand::Constant(Box::new(const_));
+                } else if let Some(local) = self.try_as_local(fields[0], location) {
+                    field_ops[FieldIdx::ZERO] = Operand::Copy(Place::from(local));
+                    self.reused_locals.insert(local);
+                }
+            }
+        }
 
         if let AggregateTy::Array = ty
             && fields.len() > 4
@@ -941,6 +994,9 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             (UnOp::Not, Value::BinaryOp(BinOp::Ne, lhs, rhs)) => {
                 Value::BinaryOp(BinOp::Eq, *lhs, *rhs)
             }
+            (UnOp::PtrMetadata, Value::Aggregate(AggregateTy::RawPtr { .. }, _, fields)) => {
+                return Some(fields[1]);
+            }
             _ => return None,
         };
 
@@ -1092,6 +1148,23 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             return self.new_opaque();
         }
 
+        let mut was_updated = false;
+
+        // If this cast just casts away the metadata again, use the original data pointer instead.
+        if let PtrToPtr = kind
+            && let Value::Aggregate(AggregateTy::RawPtr { data_pointer_ty, .. }, _, fields) =
+                self.get(value)
+            && let ty::RawPtr(to_pointee, _) = to.kind()
+            && to_pointee.is_sized(self.tcx, self.param_env)
+        {
+            from = *data_pointer_ty;
+            value = fields[0];
+            was_updated = true;
+            if *data_pointer_ty == to {
+                return Some(fields[0]);
+            }
+        }
+
         if let PtrToPtr | PointerCoercion(MutToConstPointer) = kind
             && let Value::Cast { kind: inner_kind, value: inner_value, from: inner_from, to: _ } =
                 *self.get(value)
@@ -1100,9 +1173,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             from = inner_from;
             value = inner_value;
             *kind = PtrToPtr;
+            was_updated = true;
             if inner_from == to {
                 return Some(inner_value);
             }
+        }
+
+        if was_updated {
             if let Some(const_) = self.try_as_constant(value) {
                 *operand = Operand::Constant(Box::new(const_));
             } else if let Some(local) = self.try_as_local(value, location) {
@@ -1178,7 +1255,7 @@ fn op_to_prop_const<'tcx>(
     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
     if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
         && let Ok(scalar) = ecx.read_scalar(op)
-        && scalar.try_to_int().is_ok()
+        && scalar.try_to_scalar_int().is_ok()
     {
         return Some(ConstValue::Scalar(scalar));
     }
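
One visible payoff of modeling `AggregateKind::RawPtr` values: the new `UnOp::PtrMetadata` rule lets GVN see that the metadata of a freshly built wide pointer is just its length operand. A standalone sketch of the surface shape that benefits (the simplification itself happens on MIR):

```rust
fn len_roundtrip(p: *const u8, len: usize) -> usize {
    // MIR builds a `*const [u8]` aggregate from `(p, len)`; with the new rule,
    // GVN can fold the `.len()` below straight back to `len`.
    let wide = std::ptr::slice_from_raw_parts(p, len);
    wide.len()
}

fn main() {
    let data = [1u8, 2, 3];
    assert_eq!(len_roundtrip(data.as_ptr(), data.len()), 3);
}
```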
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 8b46658b322..6a20b46e7f9 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -356,15 +356,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 debug!("check_binary_op: reporting assert for {:?}", location);
                 let panic = AssertKind::Overflow(
                     op,
-                    match l {
-                        Some(l) => l.to_const_int(),
-                        // Invent a dummy value, the diagnostic ignores it anyway
-                        None => ConstInt::new(
-                            ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
-                            left_ty.is_signed(),
-                            left_ty.is_ptr_sized_integral(),
-                        ),
-                    },
+                    // Invent a dummy value; the diagnostic ignores it anyway.
+                    ConstInt::new(
+                        ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
+                        left_ty.is_signed(),
+                        left_ty.is_ptr_sized_integral(),
+                    ),
                     r.to_const_int(),
                 );
                 self.report_assert_as_lint(location, AssertLintKind::ArithmeticOverflow, panic);
@@ -625,9 +622,10 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 let val = match null_op {
                     NullOp::SizeOf => op_layout.size.bytes(),
                     NullOp::AlignOf => op_layout.align.abi.bytes(),
-                    NullOp::OffsetOf(fields) => {
-                        op_layout.offset_of_subfield(self, fields.iter()).bytes()
-                    }
+                    NullOp::OffsetOf(fields) => self
+                        .tcx
+                        .offset_of_subfield(self.param_env, op_layout, fields.iter())
+                        .bytes(),
                     NullOp::UbChecks => return None,
                 };
                 ImmTy::from_scalar(Scalar::from_target_usize(val, self), layout).into()
@@ -786,8 +784,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
-                    && let Ok(constant) = value_const.try_to_int()
-                    && let Ok(constant) = constant.try_to_bits(constant.size())
+                    && let Ok(constant) = value_const.to_bits(value_const.size())
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
index e4670633914..551760f4703 100644
--- a/compiler/rustc_mir_transform/src/lib.rs
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -55,7 +55,6 @@ mod remove_place_mention;
 // This pass is public to allow external drivers to perform MIR cleanup
 mod add_subtyping_projections;
 pub mod cleanup_post_borrowck;
-mod const_debuginfo;
 mod copy_prop;
 mod coroutine;
 mod cost_checker;
@@ -106,6 +105,7 @@ mod check_alignment;
 pub mod simplify;
 mod simplify_branches;
 mod simplify_comparison_integral;
+mod single_use_consts;
 mod sroa;
 mod unreachable_enum_branching;
 mod unreachable_prop;
@@ -593,7 +593,7 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
             &gvn::GVN,
             &simplify::SimplifyLocals::AfterGVN,
             &dataflow_const_prop::DataflowConstProp,
-            &const_debuginfo::ConstDebugInfo,
+            &single_use_consts::SingleUseConsts,
             &o1(simplify_branches::SimplifyConstCondition::AfterConstProp),
             &jump_threading::JumpThreading,
             &early_otherwise_branch::EarlyOtherwiseBranch,
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
index 1411d9be223..6ab4ec6fe7e 100644
--- a/compiler/rustc_mir_transform/src/match_branches.rs
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -372,7 +372,7 @@ impl<'tcx> SimplifyMatch<'tcx> for SimplifyToExp {
         }
 
         fn int_equal(l: ScalarInt, r: impl Into<u128>, size: Size) -> bool {
-            l.assert_int(l.size()) == ScalarInt::try_from_uint(r, size).unwrap().assert_int(size)
+            l.to_bits_unchecked() == ScalarInt::try_from_uint(r, size).unwrap().to_bits_unchecked()
         }
 
         // We first compare the two branches, and then the other branches need to fulfill the same conditions.
diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs
index 7ec59cc983f..ecd1179ca99 100644
--- a/compiler/rustc_mir_transform/src/promote_consts.rs
+++ b/compiler/rustc_mir_transform/src/promote_consts.rs
@@ -500,14 +500,14 @@ impl<'tcx> Validator<'_, 'tcx> {
                                 }
                                 _ => None,
                             };
-                            match rhs_val.map(|x| x.assert_uint(sz)) {
+                            match rhs_val.map(|x| x.to_uint(sz)) {
                                 // for the zero test, int vs uint does not matter
                                 Some(x) if x != 0 => {}        // okay
                                 _ => return Err(Unpromotable), // value not known or 0 -- not okay
                             }
                             // Furthermore, for signed divison, we also have to exclude `int::MIN / -1`.
                             if lhs_ty.is_signed() {
-                                match rhs_val.map(|x| x.assert_int(sz)) {
+                                match rhs_val.map(|x| x.to_int(sz)) {
                                     Some(-1) | None => {
                                         // The RHS is -1 or unknown, so we have to be careful.
                                         // But is the LHS int::MIN?
@@ -518,7 +518,7 @@ impl<'tcx> Validator<'_, 'tcx> {
                                             _ => None,
                                         };
                                         let lhs_min = sz.signed_int_min();
-                                        match lhs_val.map(|x| x.assert_int(sz)) {
+                                        match lhs_val.map(|x| x.to_int(sz)) {
                                             Some(x) if x != lhs_min => {}  // okay
                                             _ => return Err(Unpromotable), // value not known or int::MIN -- not okay
                                         }
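
The `int::MIN / -1` exclusion above guards the one signed quotient that overflows; promoting it would evaluate at compile time a division that must panic at runtime. A standalone check of that fact:

```rust
fn main() {
    // The mathematically correct result, 2_147_483_648, does not fit in i32,
    // so this is the single divisor/dividend pair the validator must reject.
    assert_eq!(i32::MIN.checked_div(-1), None);
    // Any other nonzero divisor is promotable as far as this check is concerned.
    assert_eq!(i32::MIN.checked_div(2), Some(i32::MIN / 2));
}
```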
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
index 03907babf2b..e174cccdad6 100644
--- a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -49,7 +49,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
                     let layout = tcx
                         .layout_of(param_env.and(opt.branch_value_ty))
                         .expect("if we have an evaluated constant we must know the layout");
-                    int.assert_bits(layout.size)
+                    int.to_bits(layout.size)
                 }
                 Scalar::Ptr(..) => continue,
             };
diff --git a/compiler/rustc_mir_transform/src/single_use_consts.rs b/compiler/rustc_mir_transform/src/single_use_consts.rs
new file mode 100644
index 00000000000..93736e55996
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/single_use_consts.rs
@@ -0,0 +1,199 @@
+use rustc_index::{bit_set::BitSet, IndexVec};
+use rustc_middle::bug;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+/// Various parts of MIR building introduce temporaries that are commonly not needed.
+///
+/// Notably, `if CONST` and `match CONST` put the constant into a used-once
+/// temporary, which obfuscates the structure for other passes and for codegen,
+/// which would rather see the constant directly.
+///
+/// At higher optimization levels, fancier passes like GVN handle this in a more
+/// general fashion, but this pass covers the easy cases cheaply enough to run in debug.
+///
+/// It only removes constants with a single use, because re-evaluating constants
+/// isn't always an improvement, especially for large ones.
+///
+/// It also removes *never*-used constants, since it already has all the information
+/// needed to do that, including updating the debug info.
+pub struct SingleUseConsts;
+
+impl<'tcx> MirPass<'tcx> for SingleUseConsts {
+    fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+        sess.mir_opt_level() > 0
+    }
+
+    fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        let mut finder = SingleUseConstsFinder {
+            ineligible_locals: BitSet::new_empty(body.local_decls.len()),
+            locations: IndexVec::from_elem(LocationPair::new(), &body.local_decls),
+            locals_in_debug_info: BitSet::new_empty(body.local_decls.len()),
+        };
+
+        finder.ineligible_locals.insert_range(..=Local::from_usize(body.arg_count));
+
+        finder.visit_body(body);
+
+        for (local, locations) in finder.locations.iter_enumerated() {
+            if finder.ineligible_locals.contains(local) {
+                continue;
+            }
+
+            let Some(init_loc) = locations.init_loc else {
+                continue;
+            };
+
+            // We're only changing an operand, not the terminator kinds or successors
+            let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+            let init_statement =
+                basic_blocks[init_loc.block].statements[init_loc.statement_index].replace_nop();
+            let StatementKind::Assign(place_and_rvalue) = init_statement.kind else {
+                bug!("No longer an assign?");
+            };
+            let (place, rvalue) = *place_and_rvalue;
+            assert_eq!(place.as_local(), Some(local));
+            let Rvalue::Use(operand) = rvalue else { bug!("No longer a use?") };
+
+            let mut replacer = LocalReplacer { tcx, local, operand: Some(operand) };
+
+            if finder.locals_in_debug_info.contains(local) {
+                for var_debug_info in &mut body.var_debug_info {
+                    replacer.visit_var_debug_info(var_debug_info);
+                }
+            }
+
+            let Some(use_loc) = locations.use_loc else { continue };
+
+            let use_block = &mut basic_blocks[use_loc.block];
+            if let Some(use_statement) = use_block.statements.get_mut(use_loc.statement_index) {
+                replacer.visit_statement(use_statement, use_loc);
+            } else {
+                replacer.visit_terminator(use_block.terminator_mut(), use_loc);
+            }
+
+            if replacer.operand.is_some() {
+                bug!(
+                    "operand wasn't used replacing local {local:?} with locations {locations:?} in body {body:#?}"
+                );
+            }
+        }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+struct LocationPair {
+    init_loc: Option<Location>,
+    use_loc: Option<Location>,
+}
+
+impl LocationPair {
+    fn new() -> Self {
+        Self { init_loc: None, use_loc: None }
+    }
+}
+
+struct SingleUseConstsFinder {
+    ineligible_locals: BitSet<Local>,
+    locations: IndexVec<Local, LocationPair>,
+    locals_in_debug_info: BitSet<Local>,
+}
+
+impl<'tcx> Visitor<'tcx> for SingleUseConstsFinder {
+    fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+        if let Some(local) = place.as_local()
+            && let Rvalue::Use(operand) = rvalue
+            && let Operand::Constant(_) = operand
+        {
+            let locations = &mut self.locations[local];
+            if locations.init_loc.is_some() {
+                self.ineligible_locals.insert(local);
+            } else {
+                locations.init_loc = Some(location);
+            }
+        } else {
+            self.super_assign(place, rvalue, location);
+        }
+    }
+
+    fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+        if let Some(place) = operand.place()
+            && let Some(local) = place.as_local()
+        {
+            let locations = &mut self.locations[local];
+            if locations.use_loc.is_some() {
+                self.ineligible_locals.insert(local);
+            } else {
+                locations.use_loc = Some(location);
+            }
+        } else {
+            self.super_operand(operand, location);
+        }
+    }
+
+    fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+        match &statement.kind {
+            // Storage markers are irrelevant to this.
+            StatementKind::StorageLive(_) | StatementKind::StorageDead(_) => {}
+            _ => self.super_statement(statement, location),
+        }
+    }
+
+    fn visit_var_debug_info(&mut self, var_debug_info: &VarDebugInfo<'tcx>) {
+        if let VarDebugInfoContents::Place(place) = &var_debug_info.value
+            && let Some(local) = place.as_local()
+        {
+            self.locals_in_debug_info.insert(local);
+        } else {
+            self.super_var_debug_info(var_debug_info);
+        }
+    }
+
+    fn visit_local(&mut self, local: Local, _context: PlaceContext, _location: Location) {
+        // If there's any path that gets here, rather than being understood elsewhere,
+        // then we'd better not do anything with this local.
+        self.ineligible_locals.insert(local);
+    }
+}
+
+struct LocalReplacer<'tcx> {
+    tcx: TyCtxt<'tcx>,
+    local: Local,
+    operand: Option<Operand<'tcx>>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for LocalReplacer<'tcx> {
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+
+    fn visit_operand(&mut self, operand: &mut Operand<'tcx>, _location: Location) {
+        if let Operand::Copy(place) | Operand::Move(place) = operand
+            && let Some(local) = place.as_local()
+            && local == self.local
+        {
+            *operand = self.operand.take().unwrap_or_else(|| {
+                bug!("there was a second use of the operand");
+            });
+        }
+    }
+
+    fn visit_var_debug_info(&mut self, var_debug_info: &mut VarDebugInfo<'tcx>) {
+        if let VarDebugInfoContents::Place(place) = &var_debug_info.value
+            && let Some(local) = place.as_local()
+            && local == self.local
+        {
+            let const_op = self
+                .operand
+                .as_ref()
+                .unwrap_or_else(|| {
+                    bug!("the operand was already stolen");
+                })
+                .constant()
+                .unwrap()
+                .clone();
+            var_debug_info.value = VarDebugInfoContents::Const(const_op);
+        }
+    }
+}
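
For intuition, the shape this pass targets looks as follows at the source level (a sketch; the rewrite happens on MIR, where the `if` first copies the constant into a one-use temporary):

```rust
const VERBOSE: bool = false;

fn log(msg: &str) {
    // MIR before the pass (simplified):
    //     _2 = const VERBOSE;
    //     switchInt(move _2) -> [...]
    // After SingleUseConsts the temporary is gone:
    //     switchInt(const VERBOSE) -> [...]
    if VERBOSE {
        println!("{msg}");
    }
}

fn main() {
    log("hello");
}
```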
diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs
index 3b4d4c93877..586c1254995 100644
--- a/compiler/rustc_mir_transform/src/validate.rs
+++ b/compiler/rustc_mir_transform/src/validate.rs
@@ -5,12 +5,12 @@ use rustc_index::bit_set::BitSet;
 use rustc_index::IndexVec;
 use rustc_infer::traits::Reveal;
 use rustc_middle::mir::coverage::CoverageKind;
-use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
 use rustc_middle::mir::*;
 use rustc_middle::ty::adjustment::PointerCoercion;
 use rustc_middle::ty::{
-    self, CoroutineArgsExt, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitableExt, Variance,
+    self, CoroutineArgsExt, InstanceDef, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt,
+    Variance,
 };
 use rustc_middle::{bug, span_bug};
 use rustc_target::abi::{Size, FIRST_VARIANT};
@@ -1478,7 +1478,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                 });
 
                 for (value, _) in targets.iter() {
-                    if Scalar::<()>::try_from_uint(value, size).is_none() {
+                    if ScalarInt::try_from_uint(value, size).is_none() {
                         self.fail(
                             location,
                             format!("the value {value:#x} is not a proper {switch_ty:?}"),
diff --git a/compiler/rustc_next_trait_solver/Cargo.toml b/compiler/rustc_next_trait_solver/Cargo.toml
index c30d21fd784..50dbc991f8f 100644
--- a/compiler/rustc_next_trait_solver/Cargo.toml
+++ b/compiler/rustc_next_trait_solver/Cargo.toml
@@ -5,22 +5,9 @@ edition = "2021"
 
 [dependencies]
 # tidy-alphabetical-start
-derivative = "2.2.0"
-rustc_ast_ir = { path = "../rustc_ast_ir", default-features = false }
-rustc_data_structures = { path = "../rustc_data_structures", optional = true }
-rustc_macros = { path = "../rustc_macros", optional = true }
-rustc_serialize = { path = "../rustc_serialize", optional = true }
 rustc_type_ir = { path = "../rustc_type_ir", default-features = false }
-rustc_type_ir_macros = { path = "../rustc_type_ir_macros" }
-tracing = "0.1"
 # tidy-alphabetical-end
 
 [features]
 default = ["nightly"]
-nightly = [
-    "rustc_type_ir/nightly",
-    "rustc_macros",
-    "rustc_serialize",
-    "rustc_data_structures",
-    "rustc_ast_ir/nightly",
-]
+nightly = ["rustc_type_ir/nightly"]
diff --git a/compiler/rustc_pattern_analysis/src/rustc.rs b/compiler/rustc_pattern_analysis/src/rustc.rs
index 81c5f355231..8391c694c64 100644
--- a/compiler/rustc_pattern_analysis/src/rustc.rs
+++ b/compiler/rustc_pattern_analysis/src/rustc.rs
@@ -6,11 +6,12 @@ use rustc_hir::def_id::DefId;
 use rustc_hir::HirId;
 use rustc_index::{Idx, IndexVec};
 use rustc_middle::middle::stability::EvalResult;
-use rustc_middle::mir::interpret::Scalar;
 use rustc_middle::mir::{self, Const};
 use rustc_middle::thir::{self, FieldPat, Pat, PatKind, PatRange, PatRangeBoundary};
 use rustc_middle::ty::layout::IntegerExt;
-use rustc_middle::ty::{self, FieldDef, OpaqueTypeKey, Ty, TyCtxt, TypeVisitableExt, VariantDef};
+use rustc_middle::ty::{
+    self, FieldDef, OpaqueTypeKey, ScalarInt, Ty, TyCtxt, TypeVisitableExt, VariantDef,
+};
 use rustc_middle::{bug, span_bug};
 use rustc_session::lint;
 use rustc_span::{ErrorGuaranteed, Span, DUMMY_SP};
@@ -701,9 +702,9 @@ impl<'p, 'tcx: 'p> RustcPatCtxt<'p, 'tcx> {
                     ty::Int(_) => miint.as_finite_int(size.bits()).unwrap(),
                     _ => miint.as_finite_uint().unwrap(),
                 };
-                match Scalar::try_from_uint(bits, size) {
+                match ScalarInt::try_from_uint(bits, size) {
                     Some(scalar) => {
-                        let value = mir::Const::from_scalar(tcx, scalar, ty.inner());
+                        let value = mir::Const::from_scalar(tcx, scalar.into(), ty.inner());
                         PatRangeBoundary::Finite(value)
                     }
                     // The value doesn't fit. Since `x >= 0` and 0 always encodes the minimum value
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
index c57f22a0da2..2bb1be22b98 100644
--- a/compiler/rustc_query_impl/Cargo.toml
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -7,7 +7,6 @@ edition = "2021"
 # tidy-alphabetical-start
 field-offset = "0.3.5"
 measureme = "11"
-rustc-rayon-core = { version = "0.5.0", optional = true }
 rustc_data_structures = { path = "../rustc_data_structures" }
 rustc_errors = { path = "../rustc_errors" }
 rustc_hir = { path = "../rustc_hir" }
@@ -23,5 +22,5 @@ tracing = "0.1"
 
 [features]
 # tidy-alphabetical-start
-rustc_use_parallel_compiler = ["rustc-rayon-core", "rustc_query_system/rustc_use_parallel_compiler"]
+rustc_use_parallel_compiler = ["rustc_query_system/rustc_use_parallel_compiler"]
 # tidy-alphabetical-end
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
index 82179a4a058..99fcaf917fe 100644
--- a/compiler/rustc_span/src/lib.rs
+++ b/compiler/rustc_span/src/lib.rs
@@ -521,7 +521,7 @@ impl SpanData {
         Span::new(self.lo, hi, self.ctxt, self.parent)
     }
     #[inline]
-    pub fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
+    fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
         Span::new(self.lo, self.hi, ctxt, self.parent)
     }
     #[inline]
@@ -576,8 +576,9 @@ impl Span {
         self.data().with_hi(hi)
     }
     #[inline]
-    pub fn with_ctxt(self, ctxt: SyntaxContext) -> Span {
-        self.data_untracked().with_ctxt(ctxt)
+    pub fn with_ctxt(mut self, ctxt: SyntaxContext) -> Span {
+        self.update_ctxt(|_| ctxt);
+        self
     }
     #[inline]
     pub fn parent(self) -> Option<LocalDefId> {
@@ -1058,9 +1059,9 @@ impl Span {
     }
 
     #[inline]
-    pub fn apply_mark(self, expn_id: ExpnId, transparency: Transparency) -> Span {
-        let span = self.data();
-        span.with_ctxt(span.ctxt.apply_mark(expn_id, transparency))
+    pub fn apply_mark(mut self, expn_id: ExpnId, transparency: Transparency) -> Span {
+        self.update_ctxt(|ctxt| ctxt.apply_mark(expn_id, transparency));
+        self
     }
 
     #[inline]
@@ -1108,15 +1109,15 @@ impl Span {
     }
 
     #[inline]
-    pub fn normalize_to_macros_2_0(self) -> Span {
-        let span = self.data();
-        span.with_ctxt(span.ctxt.normalize_to_macros_2_0())
+    pub fn normalize_to_macros_2_0(mut self) -> Span {
+        self.update_ctxt(|ctxt| ctxt.normalize_to_macros_2_0());
+        self
     }
 
     #[inline]
-    pub fn normalize_to_macro_rules(self) -> Span {
-        let span = self.data();
-        span.with_ctxt(span.ctxt.normalize_to_macro_rules())
+    pub fn normalize_to_macro_rules(mut self) -> Span {
+        self.update_ctxt(|ctxt| ctxt.normalize_to_macro_rules());
+        self
     }
 }
 
diff --git a/compiler/rustc_span/src/span_encoding.rs b/compiler/rustc_span/src/span_encoding.rs
index 6a028226631..52a1267f891 100644
--- a/compiler/rustc_span/src/span_encoding.rs
+++ b/compiler/rustc_span/src/span_encoding.rs
@@ -87,6 +87,45 @@ pub struct Span {
     ctxt_or_parent_or_marker: u16,
 }
 
+impl Span {
+    #[inline]
+    fn data_inline_ctxt(self) -> SpanData {
+        let len = self.len_with_tag_or_marker as u32;
+        debug_assert!(len <= MAX_LEN);
+        SpanData {
+            lo: BytePos(self.lo_or_index),
+            hi: BytePos(self.lo_or_index.debug_strict_add(len)),
+            ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
+            parent: None,
+        }
+    }
+    #[inline]
+    fn data_inline_parent(self) -> SpanData {
+        let len = (self.len_with_tag_or_marker & !PARENT_TAG) as u32;
+        debug_assert!(len <= MAX_LEN);
+        let parent = LocalDefId {
+            local_def_index: DefIndex::from_u32(self.ctxt_or_parent_or_marker as u32),
+        };
+        SpanData {
+            lo: BytePos(self.lo_or_index),
+            hi: BytePos(self.lo_or_index.debug_strict_add(len)),
+            ctxt: SyntaxContext::root(),
+            parent: Some(parent),
+        }
+    }
+    #[inline]
+    fn data_partially_interned(self) -> SpanData {
+        SpanData {
+            ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
+            ..with_span_interner(|interner| interner.spans[self.lo_or_index as usize])
+        }
+    }
+    #[inline]
+    fn data_interned(self) -> SpanData {
+        with_span_interner(|interner| interner.spans[self.lo_or_index as usize])
+    }
+}
+
 // `MAX_LEN` is chosen so that `PARENT_TAG | MAX_LEN` is distinct from
 // `BASE_LEN_INTERNED_MARKER`. (If `MAX_LEN` was 1 higher, this wouldn't be true.)
 const MAX_LEN: u32 = 0b0111_1111_1111_1110;
@@ -111,42 +150,49 @@ impl Span {
             std::mem::swap(&mut lo, &mut hi);
         }
 
-        let (lo2, len, ctxt2) = (lo.0, hi.0 - lo.0, ctxt.as_u32());
-
+        // A small len may enable one of the fully inline formats (or may not).
+        let (len, ctxt32) = (hi.0 - lo.0, ctxt.as_u32());
         if len <= MAX_LEN {
-            if ctxt2 <= MAX_CTXT && parent.is_none() {
+            if ctxt32 <= MAX_CTXT && parent.is_none() {
                 // Inline-context format.
                 return Span {
-                    lo_or_index: lo2,
+                    lo_or_index: lo.0,
                     len_with_tag_or_marker: len as u16,
-                    ctxt_or_parent_or_marker: ctxt2 as u16,
+                    ctxt_or_parent_or_marker: ctxt32 as u16,
                 };
-            } else if ctxt2 == SyntaxContext::root().as_u32()
+            } else if ctxt32 == 0
                 && let Some(parent) = parent
-                && let parent2 = parent.local_def_index.as_u32()
-                && parent2 <= MAX_CTXT
+                && let parent32 = parent.local_def_index.as_u32()
+                && parent32 <= MAX_CTXT
             {
                 // Inline-parent format.
                 return Span {
-                    lo_or_index: lo2,
+                    lo_or_index: lo.0,
                     len_with_tag_or_marker: PARENT_TAG | len as u16,
-                    ctxt_or_parent_or_marker: parent2 as u16,
+                    ctxt_or_parent_or_marker: parent32 as u16,
                 };
             }
         }
 
-        // Partially-interned or fully-interned format.
-        let index =
-            with_span_interner(|interner| interner.intern(&SpanData { lo, hi, ctxt, parent }));
-        let ctxt_or_parent_or_marker = if ctxt2 <= MAX_CTXT {
-            ctxt2 as u16 // partially-interned
-        } else {
-            CTXT_INTERNED_MARKER // fully-interned
+        // Otherwise, a small ctxt may enable the partially-interned format.
+        let index = |ctxt| {
+            with_span_interner(|interner| interner.intern(&SpanData { lo, hi, ctxt, parent }))
         };
-        Span {
-            lo_or_index: index,
-            len_with_tag_or_marker: BASE_LEN_INTERNED_MARKER,
-            ctxt_or_parent_or_marker,
+        if ctxt32 <= MAX_CTXT {
+            // Partially-interned format.
+            Span {
+                // The interned ctxt is never read in this format, so it can use any value.
+                lo_or_index: index(SyntaxContext::from_u32(u32::MAX)),
+                len_with_tag_or_marker: BASE_LEN_INTERNED_MARKER,
+                ctxt_or_parent_or_marker: ctxt32 as u16,
+            }
+        } else {
+            // Interned format.
+            Span {
+                lo_or_index: index(ctxt),
+                len_with_tag_or_marker: BASE_LEN_INTERNED_MARKER,
+                ctxt_or_parent_or_marker: CTXT_INTERNED_MARKER,
+            }
         }
     }
 
@@ -166,34 +212,17 @@ impl Span {
         if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
             if self.len_with_tag_or_marker & PARENT_TAG == 0 {
                 // Inline-context format.
-                let len = self.len_with_tag_or_marker as u32;
-                debug_assert!(len <= MAX_LEN);
-                SpanData {
-                    lo: BytePos(self.lo_or_index),
-                    hi: BytePos(self.lo_or_index.debug_strict_add(len)),
-                    ctxt: SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32),
-                    parent: None,
-                }
+                self.data_inline_ctxt()
             } else {
                 // Inline-parent format.
-                let len = (self.len_with_tag_or_marker & !PARENT_TAG) as u32;
-                debug_assert!(len <= MAX_LEN);
-                let parent = LocalDefId {
-                    local_def_index: DefIndex::from_u32(self.ctxt_or_parent_or_marker as u32),
-                };
-                SpanData {
-                    lo: BytePos(self.lo_or_index),
-                    hi: BytePos(self.lo_or_index.debug_strict_add(len)),
-                    ctxt: SyntaxContext::root(),
-                    parent: Some(parent),
-                }
+                self.data_inline_parent()
             }
+        } else if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
+            // Partially-interned format.
+            self.data_partially_interned()
         } else {
-            // Fully-interned or partially-interned format. In either case,
-            // the interned value contains all the data, so we don't need to
-            // distinguish them.
-            let index = self.lo_or_index;
-            with_span_interner(|interner| interner.spans[index as usize])
+            // Interned format.
+            self.data_interned()
         }
     }
 
@@ -214,27 +243,73 @@ impl Span {
         }
     }
 
+    // For optimization we are interested in cases in which the context is inline and the
+    // context update doesn't change the format. All non-inline or format-changing scenarios
+    // require accessing the interner and can fall back to `Span::new`.
+    #[inline]
+    pub fn update_ctxt(&mut self, update: impl FnOnce(SyntaxContext) -> SyntaxContext) {
+        let (updated_ctxt32, data);
+        if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
+            if self.len_with_tag_or_marker & PARENT_TAG == 0 {
+                // Inline-context format.
+                updated_ctxt32 =
+                    update(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)).as_u32();
+                // Any small new context, including zero, will preserve the format.
+                if updated_ctxt32 <= MAX_CTXT {
+                    self.ctxt_or_parent_or_marker = updated_ctxt32 as u16;
+                    return;
+                }
+                data = self.data_inline_ctxt();
+            } else {
+                // Inline-parent format.
+                updated_ctxt32 = update(SyntaxContext::root()).as_u32();
+                // The format is preserved only if the new context is zero.
+                if updated_ctxt32 == 0 {
+                    // Do nothing.
+                    return;
+                }
+                data = self.data_inline_parent();
+            }
+        } else if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
+            // Partially-interned format.
+            updated_ctxt32 =
+                update(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)).as_u32();
+            // Any small new context except zero will preserve the format.
+            // Zero may change the format to `InlineParent` if the parent and len are small enough.
+            if updated_ctxt32 <= MAX_CTXT && updated_ctxt32 != 0 {
+                self.ctxt_or_parent_or_marker = updated_ctxt32 as u16;
+                return;
+            }
+            data = self.data_partially_interned();
+        } else {
+            // Interned format.
+            data = self.data_interned();
+            updated_ctxt32 = update(data.ctxt).as_u32();
+        }
+
+        // We could not keep the span in the same inline format, so fall back to the complete logic.
+        *self = data.with_ctxt(SyntaxContext::from_u32(updated_ctxt32));
+    }
+
     // Returns either syntactic context, if it can be retrieved without taking the interner lock,
     // or an index into the interner if it cannot.
     #[inline]
     fn inline_ctxt(self) -> Result<SyntaxContext, usize> {
-        Ok(if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
+        if self.len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
             if self.len_with_tag_or_marker & PARENT_TAG == 0 {
                 // Inline-context format.
-                SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)
+                Ok(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32))
             } else {
-                // Inline-parent format. We know that the SyntaxContext is root.
-                SyntaxContext::root()
+                // Inline-parent format.
+                Ok(SyntaxContext::root())
             }
         } else if self.ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
-            // Partially-interned format. This path avoids looking up the
-            // interned value, and is the whole point of the
-            // partially-interned format.
-            SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32)
+            // Partially-interned format.
+            Ok(SyntaxContext::from_u32(self.ctxt_or_parent_or_marker as u32))
         } else {
-            // Fully-interned format.
-            return Err(self.lo_or_index as usize);
-        })
+            // Interned format.
+            Err(self.lo_or_index as usize)
+        }
     }
 
     /// This function is used as a fast path when decoding the full `SpanData` is not necessary.
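
For reference, the four encodings handled above are distinguished by the two discriminant fields alone. Below is a self-contained sketch of the dispatch shared by `data` and `update_ctxt`; the marker constants are stand-ins chosen to be consistent with the `MAX_LEN` comment above, not necessarily rustc's exact values:

    enum Format {
        InlineCtxt,
        InlineParent,
        PartiallyInterned,
        Interned,
    }

    fn format_of(len_with_tag_or_marker: u16, ctxt_or_parent_or_marker: u16) -> Format {
        // Assumed discriminants: a high tag bit for inline-parent and all-ones
        // markers for the interned forms.
        const PARENT_TAG: u16 = 0b1000_0000_0000_0000;
        const BASE_LEN_INTERNED_MARKER: u16 = 0b1111_1111_1111_1111;
        const CTXT_INTERNED_MARKER: u16 = 0b1111_1111_1111_1111;

        if len_with_tag_or_marker != BASE_LEN_INTERNED_MARKER {
            if len_with_tag_or_marker & PARENT_TAG == 0 {
                Format::InlineCtxt
            } else {
                Format::InlineParent
            }
        } else if ctxt_or_parent_or_marker != CTXT_INTERNED_MARKER {
            Format::PartiallyInterned
        } else {
            Format::Interned
        }
    }

    fn main() {
        // A small len with a small ctxt field is the inline-context format.
        assert!(matches!(format_of(10, 3), Format::InlineCtxt));
        // The len marker plus the ctxt marker is the fully interned format.
        assert!(matches!(format_of(u16::MAX, u16::MAX), Format::Interned));
    }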
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 93594264167..e245dfb9f5d 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -1304,6 +1304,7 @@ symbols! {
         offset_of,
         offset_of_enum,
         offset_of_nested,
+        offset_of_slice,
         ok_or_else,
         omit_gdb_pretty_printer_section,
         on,
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 666efd9deca..737e9a8eab0 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -256,29 +256,6 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         Ty::is_transparent(self)
     }
 
-    pub fn offset_of_subfield<C, I>(self, cx: &C, indices: I) -> Size
-    where
-        Ty: TyAbiInterface<'a, C>,
-        I: Iterator<Item = (VariantIdx, FieldIdx)>,
-    {
-        let mut layout = self;
-        let mut offset = Size::ZERO;
-
-        for (variant, field) in indices {
-            layout = layout.for_variant(cx, variant);
-            let index = field.index();
-            offset += layout.fields.offset(index);
-            layout = layout.field(cx, index);
-            assert!(
-                layout.is_sized(),
-                "offset of unsized field (type {:?}) cannot be computed statically",
-                layout.ty
-            );
-        }
-
-        offset
-    }
-
     /// Finds the one field that is not a 1-ZST.
     /// Returns `None` if there are multiple non-1-ZST fields or only 1-ZST-fields.
     pub fn non_1zst_field<C>(&self, cx: &C) -> Option<(usize, Self)>
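
The removed `offset_of_subfield` walked a chain of `(variant, field)` indices, accumulating each field's offset while descending into its layout and asserting every intermediate layout is sized. A minimal sketch of that accumulation on a toy layout type, ignoring enum variants:

    struct Layout {
        field_offsets: Vec<u64>,
        field_layouts: Vec<Layout>,
    }

    fn leaf() -> Layout {
        Layout { field_offsets: vec![], field_layouts: vec![] }
    }

    fn offset_of_path(mut layout: &Layout, path: &[usize]) -> u64 {
        let mut offset = 0;
        for &field in path {
            // Add this field's offset, then descend into its layout.
            offset += layout.field_offsets[field];
            layout = &layout.field_layouts[field];
        }
        offset
    }

    fn main() {
        // Inner { x: u8 at 0, y: u16 at 2 }; Outer { a: u32 at 0, inner at 4 }.
        let inner = Layout { field_offsets: vec![0, 2], field_layouts: vec![leaf(), leaf()] };
        let outer = Layout { field_offsets: vec![0, 4], field_layouts: vec![leaf(), inner] };
        // outer.inner.y sits at 4 + 2 = 6.
        assert_eq!(offset_of_path(&outer, &[1, 1]), 6);
    }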
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
index 604b68d2cd4..eae1a9dfaa2 100644
--- a/compiler/rustc_transmute/src/layout/tree.rs
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -420,7 +420,7 @@ pub(crate) mod rustc {
         fn from_tag(tag: ScalarInt, tcx: TyCtxt<'tcx>) -> Self {
             use rustc_target::abi::Endian;
             let size = tag.size();
-            let bits = tag.assert_bits(size);
+            let bits = tag.to_bits(size);
             let bytes: [u8; 16];
             let bytes = match tcx.data_layout.endian {
                 Endian::Little => {
diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs
index 1aec40e95f6..58f812fc7cf 100644
--- a/compiler/rustc_ty_utils/src/consts.rs
+++ b/compiler/rustc_ty_utils/src/consts.rs
@@ -47,7 +47,7 @@ fn destructure_const<'tcx>(
         ty::Adt(def, args) => {
             let (variant_idx, branches) = if def.is_enum() {
                 let (head, rest) = branches.split_first().unwrap();
-                (VariantIdx::from_u32(head.unwrap_leaf().try_to_u32().unwrap()), rest)
+                (VariantIdx::from_u32(head.unwrap_leaf().to_u32()), rest)
             } else {
                 (FIRST_VARIANT, branches)
             };
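
The switch from `try_to_u32` to `to_u32` trades a caller-side unwrap for an assertion inside the accessor. A toy sketch of the contract change (hypothetical helpers, not the `ScalarInt` API):

    fn try_to_u32(bits: u128, size_in_bytes: u64) -> Result<u32, u64> {
        // The fallible form reports a size mismatch to the caller.
        if size_in_bytes == 4 { Ok(bits as u32) } else { Err(size_in_bytes) }
    }

    fn to_u32(bits: u128, size_in_bytes: u64) -> u32 {
        // The asserting form treats a size mismatch as a compiler bug.
        try_to_u32(bits, size_in_bytes).expect("scalar is not 4 bytes wide")
    }

    fn main() {
        assert_eq!(to_u32(7, 4), 7);
    }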
diff --git a/compiler/stable_mir/Cargo.toml b/compiler/stable_mir/Cargo.toml
index c61e217bf9f..4ed61152736 100644
--- a/compiler/stable_mir/Cargo.toml
+++ b/compiler/stable_mir/Cargo.toml
@@ -4,5 +4,4 @@ version = "0.1.0-preview"
 edition = "2021"
 
 [dependencies]
-tracing = "0.1"
 scoped-tls = "1.0"