Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_ast/src/ast.rs                          5
-rw-r--r--  compiler/rustc_ast_lowering/src/asm.rs                16
-rw-r--r--  compiler/rustc_ast_passes/src/feature_gate.rs         18
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs                50
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs            31
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs       2
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs          19
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs            8
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs             21
-rw-r--r--  compiler/rustc_codegen_ssa/src/meth.rs                10
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs           14
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs        2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs        9
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs          8
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs           12
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/builder.rs      12
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp       7
-rw-r--r--  compiler/rustc_parse/src/parser/pat.rs                 2
-rw-r--r--  compiler/rustc_span/src/symbol.rs                      3
-rw-r--r--  compiler/rustc_target/src/asm/aarch64.rs              86
-rw-r--r--  compiler/rustc_target/src/asm/mod.rs                   6
-rw-r--r--  compiler/rustc_target/src/asm/riscv.rs                34
-rw-r--r--  compiler/rustc_target/src/asm/x86.rs                  26
-rw-r--r--  compiler/rustc_typeck/src/check/upvar.rs             397
24 files changed, 529 insertions, 269 deletions
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
index 03282fd5164..f851725058d 100644
--- a/compiler/rustc_ast/src/ast.rs
+++ b/compiler/rustc_ast/src/ast.rs
@@ -677,7 +677,9 @@ pub enum BindingMode {
 
 #[derive(Clone, Encodable, Decodable, Debug)]
 pub enum RangeEnd {
+    /// `..=` or `...`
     Included(RangeSyntax),
+    /// `..`
     Excluded,
 }
 
@@ -689,6 +691,7 @@ pub enum RangeSyntax {
     DotDotEq,
 }
 
+/// All the different flavors of pattern that Rust recognizes.
 #[derive(Clone, Encodable, Decodable, Debug)]
 pub enum PatKind {
     /// Represents a wildcard pattern (`_`).
@@ -729,7 +732,7 @@ pub enum PatKind {
     /// A literal.
     Lit(P<Expr>),
 
-    /// A range pattern (e.g., `1...2`, `1..=2` or `1..2`).
+    /// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
     Range(Option<P<Expr>>, Option<P<Expr>>, Spanned<RangeEnd>),
 
     /// A slice pattern `[a, b, c]`.
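For context, a sketch of the range-pattern forms the updated doc comment enumerates. Only the inclusive form is stable at this point; the commented forms are feature-gated, and `...` is deprecated legacy syntax for `..=`:

    fn classify(n: u32) -> &'static str {
        match n {
            0..=9 => "inclusive, RangeEnd::Included(DotDotEq)",
            // 10..20 => "exclusive `..`",    // gated: exclusive_range_pattern
            // 20..   => "half-open `X..`",   // gated: half_open_range_patterns
            _ => "wildcard",
        }
    }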
diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs
index 1c3fae2afe7..9ea09a2cf31 100644
--- a/compiler/rustc_ast_lowering/src/asm.rs
+++ b/compiler/rustc_ast_lowering/src/asm.rs
@@ -199,6 +199,22 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
                     }
                 );
 
+                // Some register classes can only be used as clobbers. This
+                // means that we disallow passing a value in/out of the asm and
+                // require that the operand name an explicit register, not a
+                // register class.
+                if reg_class.is_clobber_only(asm_arch.unwrap())
+                    && !(is_clobber && matches!(reg, asm::InlineAsmRegOrRegClass::Reg(_)))
+                {
+                    let msg = format!(
+                        "register class `{}` can only be used as a clobber, \
+                             not as an input or output",
+                        reg_class.name()
+                    );
+                    sess.struct_span_err(op_sp, &msg).emit();
+                    continue;
+                }
+
                 if !is_clobber {
                     // Validate register classes against currently enabled target
                     // features. We check that at least one type is available for
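In user terms, the new check accepts a clobber-only register class only when the operand names an explicit register and is used purely as a clobber. A minimal sketch, assuming a nightly toolchain of this era with `#![feature(asm)]`:

    #[cfg(target_arch = "x86_64")]
    unsafe fn clear_mmx_state() {
        // Accepted: explicit register, discarded output, i.e. a pure clobber.
        asm!("emms", out("mm0") _);
    }
    // Rejected by the check above, since the *class* cannot carry a value:
    //
    //     asm!("", out(mmx_reg) _);
    //     error: register class `mmx_reg` can only be used as a clobber,
    //            not as an input or output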
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 3f98944d850..3e757e3843e 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -565,6 +565,22 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
 
     fn visit_pat(&mut self, pattern: &'a ast::Pat) {
         match &pattern.kind {
+            PatKind::Slice(pats) => {
+                for pat in pats {
+                    let inner_pat = match &pat.kind {
+                        PatKind::Ident(.., Some(pat)) => pat,
+                        _ => pat,
+                    };
+                    if let PatKind::Range(Some(_), None, Spanned { .. }) = inner_pat.kind {
+                        gate_feature_post!(
+                            &self,
+                            half_open_range_patterns,
+                            pat.span,
+                            "`X..` patterns in slices are experimental"
+                        );
+                    }
+                }
+            }
             PatKind::Box(..) => {
                 gate_feature_post!(
                     &self,
@@ -573,7 +589,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
                     "box pattern syntax is experimental"
                 );
             }
-            PatKind::Range(_, _, Spanned { node: RangeEnd::Excluded, .. }) => {
+            PatKind::Range(_, Some(_), Spanned { node: RangeEnd::Excluded, .. }) => {
                 gate_feature_post!(
                     &self,
                     exclusive_range_pattern,
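Illustratively, the slice case the new gate catches, including through a binding like `a @ 5..`, which the `PatKind::Ident` arm unwraps (none of this compiles without the feature):

    //     match &[1u32, 2, 3][..] {
    //         [5.., ..]     => {}  // error: `X..` patterns in slices are experimental
    //         [a @ 5.., ..] => {}  // also caught, via the PatKind::Ident arm
    //         _ => {}
    //     }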
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index ecf62ed213d..7bd9397d649 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -128,6 +128,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
         let mut clobbers = vec![];
         let mut output_types = vec![];
         let mut op_idx = FxHashMap::default();
+        let mut clobbered_x87 = false;
         for (idx, op) in operands.iter().enumerate() {
             match *op {
                 InlineAsmOperandRef::Out { reg, late, place } => {
@@ -150,7 +151,27 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     let ty = if let Some(ref place) = place {
                         layout = Some(&place.layout);
                         llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
-                    } else if !is_target_supported(reg.reg_class()) {
+                    } else if matches!(
+                        reg.reg_class(),
+                        InlineAsmRegClass::X86(
+                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
+                        )
+                    ) {
+                        // Special handling for x87/mmx registers: we always
+                        // clobber the whole set if one register is marked as
+                        // clobbered. This is due to the way LLVM handles the
+                        // FP stack in inline assembly.
+                        if !clobbered_x87 {
+                            clobbered_x87 = true;
+                            clobbers.push("~{st}".to_string());
+                            for i in 1..=7 {
+                                clobbers.push(format!("~{{st({})}}", i));
+                            }
+                        }
+                        continue;
+                    } else if !is_target_supported(reg.reg_class())
+                        || reg.reg_class().is_clobber_only(asm_arch)
+                    {
                         // We turn discarded outputs into clobber constraints
                         // if the target feature needed by the register class is
                         // disabled. This is necessary otherwise LLVM will try
@@ -565,6 +586,9 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>)
             InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
             InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+                unreachable!("clobber-only")
+            }
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
@@ -586,6 +610,9 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>)
             InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+                unreachable!("clobber-only")
+            }
             InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
@@ -593,6 +620,9 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>)
             | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
             InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+            InlineAsmRegClass::X86(
+                X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
+            ) => unreachable!("clobber-only"),
             InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
             InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
@@ -617,6 +647,9 @@ fn modifier_to_llvm(
         | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             if modifier == Some('v') { None } else { modifier }
         }
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
@@ -639,6 +672,9 @@ fn modifier_to_llvm(
         InlineAsmRegClass::PowerPC(_) => None,
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
         | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
             None if arch == InlineAsmArch::X86_64 => Some('q'),
@@ -663,6 +699,9 @@ fn modifier_to_llvm(
             _ => unreachable!(),
         },
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
         InlineAsmRegClass::Bpf(_) => None,
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
@@ -681,6 +720,9 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
         | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             cx.type_vector(cx.type_i64(), 2)
         }
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
@@ -704,6 +746,9 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
         InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
@@ -711,6 +756,9 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
         InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
+            unreachable!("clobber-only")
+        }
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
         InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
         InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
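A standalone sketch of the clobber set built by the new `clobbered_x87` branch: the first clobbered x87/MMX register expands to the entire FP stack, since LLVM cannot track individual `st` registers in inline assembly:

    fn x87_clobber_set() -> Vec<String> {
        let mut clobbers = vec!["~{st}".to_string()];
        for i in 1..=7 {
            clobbers.push(format!("~{{st({})}}", i));
        }
        clobbers // ["~{st}", "~{st(1)}", ..., "~{st(7)}"]
    }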
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 582c9354041..2bb0ce68b17 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -410,17 +410,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
             load
         }
     }
 
-    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
             load
         }
@@ -428,6 +428,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn atomic_load(
         &mut self,
+        ty: &'ll Type,
         ptr: &'ll Value,
         order: rustc_codegen_ssa::common::AtomicOrdering,
         size: Size,
@@ -435,6 +436,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe {
             let load = llvm::LLVMRustBuildAtomicLoad(
                 self.llbuilder,
+                ty,
                 ptr,
                 UNNAMED,
                 AtomicOrdering::from_generic(order),
@@ -486,7 +488,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(place.llval, place.align);
+                let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
                 if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
@@ -498,7 +500,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = self.struct_gep(place.llval, i as u64);
-                let load = self.load(llptr, align);
+                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+                let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
                 self.to_immediate_scalar(load, scalar)
             };
@@ -815,13 +818,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
@@ -848,13 +845,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memmove.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
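The shape of the API change, as a self-contained sketch with placeholder types (not rustc's real ones): load sites must now state the element type explicitly rather than recovering it from the pointer's pointee type, which is what LLVM's opaque-pointer migration requires:

    #[derive(Clone, Copy)] struct Type;
    #[derive(Clone, Copy)] struct Value;

    trait BuilderMethods {
        // Before: fn load(&mut self, ptr: Value, align: u64) -> Value;
        // After: callers must say what they are loading.
        fn load(&mut self, ty: Type, ptr: Value, align: u64) -> Value;
        fn volatile_load(&mut self, ty: Type, ptr: Value) -> Value;
    }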
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 38f50a6d621..de3f719b816 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -20,7 +20,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_,
         // LLVM to keep around the reference to the global.
         let indices = [bx.const_i32(0), bx.const_i32(0)];
         let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-        let volatile_load_instruction = bx.volatile_load(element);
+        let volatile_load_instruction = bx.volatile_load(bx.type_i8(), element);
         unsafe {
             llvm::LLVMSetAlignment(volatile_load_instruction, 1);
         }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9a968659e2f..a48a694b630 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -162,11 +162,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = substs.type_at(0);
-                let mut ptr = args[0].immediate();
-                if let PassMode::Cast(ty) = fn_abi.ret.mode {
-                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
-                }
-                let load = self.volatile_load(ptr);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                    let llty = ty.llvm_type(self);
+                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+                    self.volatile_load(llty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+                };
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
@@ -319,9 +322,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                     let integer_ty = self.type_ix(layout.size.bits());
                     let ptr_ty = self.type_ptr_to(integer_ty);
                     let a_ptr = self.bitcast(a, ptr_ty);
-                    let a_val = self.load(a_ptr, layout.align.abi);
+                    let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
                     let b_ptr = self.bitcast(b, ptr_ty);
-                    let b_val = self.load(b_ptr, layout.align.abi);
+                    let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
                     self.icmp(IntPredicate::IntEQ, a_val, b_val)
                 } else {
                     let i8p_ty = self.type_i8p();
@@ -540,7 +543,7 @@ fn codegen_msvc_try(
         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
         let flags = bx.const_i32(8);
         let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = catchpad_rust.load(slot, ptr_align);
+        let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
         catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
         catchpad_rust.catch_ret(&funclet, caught.llbb());
 
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 91923251018..2ade66ac41e 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1385,7 +1385,12 @@ extern "C" {
         Val: &'a Value,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildLoad2(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
 
     pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
 
@@ -1631,6 +1636,7 @@ extern "C" {
     // Atomic Operations
     pub fn LLVMRustBuildAtomicLoad(
         B: &Builder<'a>,
+        ElementType: &'a Type,
         PointerVal: &'a Value,
         Name: *const c_char,
         Order: AtomicOrdering,
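For reference, the LLVM-C prototype this binding mirrors; the untyped `LLVMBuildLoad` was later deprecated upstream as part of the opaque-pointer transition:

    // From llvm-c/Core.h:
    //     LLVMValueRef LLVMBuildLoad2(LLVMBuilderRef B, LLVMTypeRef Ty,
    //                                 LLVMValueRef PointerVal, const char *Name);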
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 39d08fbee3b..9df1bd7d1d9 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -32,14 +32,15 @@ fn emit_direct_ptr_va_arg(
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+    let va_list_ty = bx.type_i8p();
+    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
     let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
         bx.bitcast(list.immediate(), va_list_ptr_ty)
     } else {
         list.immediate()
     };
 
-    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
@@ -82,10 +83,10 @@ fn emit_ptr_va_arg(
     let (addr, addr_align) =
         emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
     if indirect {
-        let tmp_ret = bx.load(addr, addr_align);
-        bx.load(tmp_ret, align.abi)
+        let tmp_ret = bx.load(llty, addr, addr_align);
+        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
     } else {
-        bx.load(addr, addr_align)
+        bx.load(llty, addr, addr_align)
     }
 }
 
@@ -118,7 +119,7 @@ fn emit_aapcs_va_arg(
     };
 
     // if the offset >= 0 then the value will be on the stack
-    let mut reg_off_v = bx.load(reg_off, offset_align);
+    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
     let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
     bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());
 
@@ -139,8 +140,9 @@ fn emit_aapcs_va_arg(
     let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
 
+    let top_type = bx.type_i8p();
     let top = in_reg.struct_gep(va_list_addr, reg_top_index);
-    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+    let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
@@ -149,8 +151,9 @@ fn emit_aapcs_va_arg(
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
         reg_addr = in_reg.gep(reg_addr, &[offset]);
     }
-    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
-    let reg_value = in_reg.load(reg_addr, layout.align.abi);
+    let reg_type = layout.llvm_type(bx);
+    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+    let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
     in_reg.br(&end.llbb());
 
     // On Stack block
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index 63245a94c8e..b392b2c4ab8 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -20,10 +20,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+        let llty = bx.fn_ptr_backend_type(fn_abi);
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, ptr_align);
+        let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
@@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+        let llty = bx.type_isize();
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, usize_align);
+        let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
         ptr
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 2cb28d7361c..b584801a62d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -260,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
-                    bx.load(llval, align)
+                    bx.load(bx.backend_type(op.layout), llval, align)
                 } else {
                     op.immediate_or_packed_pair(&mut bx)
                 }
@@ -287,8 +287,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         llval
                     }
                 };
-                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
-                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+                let ty = bx.cast_backend_type(&cast_ty);
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+                bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
             }
         };
         bx.ret(llval);
@@ -1086,15 +1087,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
-                llval = bx.load(addr, align.min(arg.layout.align.abi));
+                let llty = bx.cast_backend_type(&ty);
+                let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+                llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
                 // may have a type we don't treat as immediate, but the ABI
                 // used for this call is passing it by-value. In that case,
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
-                llval = bx.load(llval, align);
+                llval = bx.load(bx.backend_type(arg.layout), llval, align);
                 if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, 0..2);
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 6bb20545f07..c139f915e6c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -274,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 match *elem {
                     mir::ProjectionElem::Deref => {
                         indirect_offsets.push(Size::ZERO);
-                        place = place.project_deref(bx);
+                        place = bx.load_operand(place).deref(bx.cx());
                     }
                     mir::ProjectionElem::Field(field, _) => {
                         let i = field.index();
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 8502309b90e..56ff1b3934c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             if ty.is_unsafe_ptr() {
                                 // Some platforms do not support atomic operations on pointers,
                                 // so we cast to integer first...
-                                let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                                let llty = bx.type_isize();
+                                let ptr_llty = bx.type_ptr_to(llty);
                                 source = bx.pointercast(source, ptr_llty);
-                            }
-                            let result = bx.atomic_load(source, order, size);
-                            if ty.is_unsafe_ptr() {
+                                let result = bx.atomic_load(llty, source, order, size);
                                 // ... and then cast the result back to a pointer
                                 bx.inttoptr(result, bx.backend_type(layout))
                             } else {
-                                result
+                                bx.atomic_load(bx.backend_type(layout), source, order, size)
                             }
                         } else {
                             return invalid_monomorphization(ty);
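Conceptually (not rustc code), the restructured branch now handles the pointer case in one arm: load at a pointer-sized integer type, then `inttoptr` back. In user-level Rust the same idea looks like:

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn load_ptr(slot: &AtomicUsize) -> *mut u8 {
        // atomic load at integer type, then cast back to a pointer
        slot.load(Ordering::SeqCst) as *mut u8
    }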
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 25e84c38ed3..3c42b2cc2ea 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -289,6 +289,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         }
         match self {
             OperandValue::Ref(r, None, source_align) => {
+                if flags.contains(MemFlags::NONTEMPORAL) {
+                    // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+                    let ty = bx.backend_type(dest.layout);
+                    let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+                    let val = bx.load(ty, ptr, source_align);
+                    bx.store_with_flags(val, dest.llval, dest.align, flags);
+                    return;
+                }
                 base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
             }
             OperandValue::Ref(_, Some(_), _) => {
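The load-plus-store fallback now lives at the one call site that needs it, since LLVM has nontemporal loads and stores but no nontemporal memcpy. For context, the kind of user-visible nontemporal store this supports, via a `std::arch` intrinsic on x86_64:

    #[cfg(target_arch = "x86_64")]
    unsafe fn stream_store(dst: *mut i64, v: i64) {
        // MOVNTI: a store that bypasses the cache hierarchy
        std::arch::x86_64::_mm_stream_si64(dst, v);
    }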
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index a9e7ebf6d43..66d9d1a1e0c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -402,18 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }
 
-    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
-        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
-        let layout = bx.layout_of(target_ty.ty);
-
-        PlaceRef {
-            llval: bx.load(self.llval, self.align),
-            llextra: None,
-            layout,
-            align: layout.align.abi,
-        }
-    }
-
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 29b2db5d4d7..f0c232a97bc 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -137,9 +137,15 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
     fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
 
-    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> Self::Value;
     fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
     -> OperandRef<'tcx, Self::Value>;
 
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 9b757eb40c1..4cdc8a4155b 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -349,11 +349,10 @@ extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
 }
 
 extern "C" LLVMValueRef
-LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
-                        LLVMAtomicOrdering Order) {
+LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
+                        const char *Name, LLVMAtomicOrdering Order) {
   Value *Ptr = unwrap(Source);
-  Type *Ty = Ptr->getType()->getPointerElementType();
-  LoadInst *LI = unwrap(B)->CreateLoad(Ty, Ptr, Name);
+  LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
   LI->setAtomic(fromRust(Order));
   return wrap(LI);
 }
diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs
index 418122202be..566677d032a 100644
--- a/compiler/rustc_parse/src/parser/pat.rs
+++ b/compiler/rustc_parse/src/parser/pat.rs
@@ -715,7 +715,6 @@ impl<'a> Parser<'a> {
         } else if self.eat(&token::DotDotEq) {
             RangeEnd::Included(RangeSyntax::DotDotEq)
         } else if self.eat(&token::DotDot) {
-            self.sess.gated_spans.gate(sym::exclusive_range_pattern, self.prev_token.span);
             RangeEnd::Excluded
         } else {
             return None;
@@ -735,7 +734,6 @@ impl<'a> Parser<'a> {
             Some(self.parse_pat_range_end()?)
         } else {
             // Parsing e.g. `X..`.
-            self.sess.gated_spans.gate(sym::half_open_range_patterns, begin.span.to(re.span));
             if let RangeEnd::Included(_) = re.node {
                 // FIXME(Centril): Consider semantic errors instead in `ast_validation`.
                 // Possibly also do this for `X..=` in *expression* contexts.
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 9051c9d69b5..cfa0c79004c 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -756,6 +756,7 @@ symbols! {
         minnumf64,
         mips_target_feature,
         misc,
+        mmx_reg,
         modifiers,
         module,
         module_path,
@@ -899,6 +900,7 @@ symbols! {
         prefetch_read_instruction,
         prefetch_write_data,
         prefetch_write_instruction,
+        preg,
         prelude,
         prelude_import,
         preserves_flags,
@@ -1343,6 +1345,7 @@ symbols! {
         wrapping_sub,
         wreg,
         write_bytes,
+        x87_reg,
         xmm_reg,
         ymm_reg,
         zmm_reg,
diff --git a/compiler/rustc_target/src/asm/aarch64.rs b/compiler/rustc_target/src/asm/aarch64.rs
index f180eea01a3..76e50678314 100644
--- a/compiler/rustc_target/src/asm/aarch64.rs
+++ b/compiler/rustc_target/src/asm/aarch64.rs
@@ -7,6 +7,7 @@ def_reg_class! {
         reg,
         vreg,
         vreg_low16,
+        preg,
     }
 }
 
@@ -15,6 +16,7 @@ impl AArch64InlineAsmRegClass {
         match self {
             Self::reg => &['w', 'x'],
             Self::vreg | Self::vreg_low16 => &['b', 'h', 's', 'd', 'q', 'v'],
+            Self::preg => &[],
         }
     }
 
@@ -40,6 +42,7 @@ impl AArch64InlineAsmRegClass {
                 128 => Some(('q', "q0")),
                 _ => None,
             },
+            Self::preg => None,
         }
     }
 
@@ -47,6 +50,7 @@ impl AArch64InlineAsmRegClass {
         match self {
             Self::reg => Some(('x', "x0")),
             Self::vreg | Self::vreg_low16 => Some(('v', "v0")),
+            Self::preg => None,
         }
     }
 
@@ -61,6 +65,7 @@ impl AArch64InlineAsmRegClass {
                     VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF32(2), VecF64(1),
                     VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
             },
+            Self::preg => &[],
         }
     }
 }
@@ -95,38 +100,55 @@ def_regs! {
         x27: reg = ["x27", "w27"],
         x28: reg = ["x28", "w28"],
         x30: reg = ["x30", "w30", "lr", "wlr"],
-        v0: vreg, vreg_low16 = ["v0", "b0", "h0", "s0", "d0", "q0"],
-        v1: vreg, vreg_low16 = ["v1", "b1", "h1", "s1", "d1", "q1"],
-        v2: vreg, vreg_low16 = ["v2", "b2", "h2", "s2", "d2", "q2"],
-        v3: vreg, vreg_low16 = ["v3", "b3", "h3", "s3", "d3", "q3"],
-        v4: vreg, vreg_low16 = ["v4", "b4", "h4", "s4", "d4", "q4"],
-        v5: vreg, vreg_low16 = ["v5", "b5", "h5", "s5", "d5", "q5"],
-        v6: vreg, vreg_low16 = ["v6", "b6", "h6", "s6", "d6", "q6"],
-        v7: vreg, vreg_low16 = ["v7", "b7", "h7", "s7", "d7", "q7"],
-        v8: vreg, vreg_low16 = ["v8", "b8", "h8", "s8", "d8", "q8"],
-        v9: vreg, vreg_low16 = ["v9", "b9", "h9", "s9", "d9", "q9"],
-        v10: vreg, vreg_low16 = ["v10", "b10", "h10", "s10", "d10", "q10"],
-        v11: vreg, vreg_low16 = ["v11", "b11", "h11", "s11", "d11", "q11"],
-        v12: vreg, vreg_low16 = ["v12", "b12", "h12", "s12", "d12", "q12"],
-        v13: vreg, vreg_low16 = ["v13", "b13", "h13", "s13", "d13", "q13"],
-        v14: vreg, vreg_low16 = ["v14", "b14", "h14", "s14", "d14", "q14"],
-        v15: vreg, vreg_low16 = ["v15", "b15", "h15", "s15", "d15", "q15"],
-        v16: vreg = ["v16", "b16", "h16", "s16", "d16", "q16"],
-        v17: vreg = ["v17", "b17", "h17", "s17", "d17", "q17"],
-        v18: vreg = ["v18", "b18", "h18", "s18", "d18", "q18"],
-        v19: vreg = ["v19", "b19", "h19", "s19", "d19", "q19"],
-        v20: vreg = ["v20", "b20", "h20", "s20", "d20", "q20"],
-        v21: vreg = ["v21", "b21", "h21", "s21", "d21", "q21"],
-        v22: vreg = ["v22", "b22", "h22", "s22", "d22", "q22"],
-        v23: vreg = ["v23", "b23", "h23", "s23", "d23", "q23"],
-        v24: vreg = ["v24", "b24", "h24", "s24", "d24", "q24"],
-        v25: vreg = ["v25", "b25", "h25", "s25", "d25", "q25"],
-        v26: vreg = ["v26", "b26", "h26", "s26", "d26", "q26"],
-        v27: vreg = ["v27", "b27", "h27", "s27", "d27", "q27"],
-        v28: vreg = ["v28", "b28", "h28", "s28", "d28", "q28"],
-        v29: vreg = ["v29", "b29", "h29", "s29", "d29", "q29"],
-        v30: vreg = ["v30", "b30", "h30", "s30", "d30", "q30"],
-        v31: vreg = ["v31", "b31", "h31", "s31", "d31", "q31"],
+        v0: vreg, vreg_low16 = ["v0", "b0", "h0", "s0", "d0", "q0", "z0"],
+        v1: vreg, vreg_low16 = ["v1", "b1", "h1", "s1", "d1", "q1", "z1"],
+        v2: vreg, vreg_low16 = ["v2", "b2", "h2", "s2", "d2", "q2", "z2"],
+        v3: vreg, vreg_low16 = ["v3", "b3", "h3", "s3", "d3", "q3", "z3"],
+        v4: vreg, vreg_low16 = ["v4", "b4", "h4", "s4", "d4", "q4", "z4"],
+        v5: vreg, vreg_low16 = ["v5", "b5", "h5", "s5", "d5", "q5", "z5"],
+        v6: vreg, vreg_low16 = ["v6", "b6", "h6", "s6", "d6", "q6", "z6"],
+        v7: vreg, vreg_low16 = ["v7", "b7", "h7", "s7", "d7", "q7", "z7"],
+        v8: vreg, vreg_low16 = ["v8", "b8", "h8", "s8", "d8", "q8", "z8"],
+        v9: vreg, vreg_low16 = ["v9", "b9", "h9", "s9", "d9", "q9", "z9"],
+        v10: vreg, vreg_low16 = ["v10", "b10", "h10", "s10", "d10", "q10", "z10"],
+        v11: vreg, vreg_low16 = ["v11", "b11", "h11", "s11", "d11", "q11", "z11"],
+        v12: vreg, vreg_low16 = ["v12", "b12", "h12", "s12", "d12", "q12", "z12"],
+        v13: vreg, vreg_low16 = ["v13", "b13", "h13", "s13", "d13", "q13", "z13"],
+        v14: vreg, vreg_low16 = ["v14", "b14", "h14", "s14", "d14", "q14", "z14"],
+        v15: vreg, vreg_low16 = ["v15", "b15", "h15", "s15", "d15", "q15", "z15"],
+        v16: vreg = ["v16", "b16", "h16", "s16", "d16", "q16", "z16"],
+        v17: vreg = ["v17", "b17", "h17", "s17", "d17", "q17", "z17"],
+        v18: vreg = ["v18", "b18", "h18", "s18", "d18", "q18", "z18"],
+        v19: vreg = ["v19", "b19", "h19", "s19", "d19", "q19", "z19"],
+        v20: vreg = ["v20", "b20", "h20", "s20", "d20", "q20", "z20"],
+        v21: vreg = ["v21", "b21", "h21", "s21", "d21", "q21", "z21"],
+        v22: vreg = ["v22", "b22", "h22", "s22", "d22", "q22", "z22"],
+        v23: vreg = ["v23", "b23", "h23", "s23", "d23", "q23", "z23"],
+        v24: vreg = ["v24", "b24", "h24", "s24", "d24", "q24", "z24"],
+        v25: vreg = ["v25", "b25", "h25", "s25", "d25", "q25", "z25"],
+        v26: vreg = ["v26", "b26", "h26", "s26", "d26", "q26", "z26"],
+        v27: vreg = ["v27", "b27", "h27", "s27", "d27", "q27", "z27"],
+        v28: vreg = ["v28", "b28", "h28", "s28", "d28", "q28", "z28"],
+        v29: vreg = ["v29", "b29", "h29", "s29", "d29", "q29", "z29"],
+        v30: vreg = ["v30", "b30", "h30", "s30", "d30", "q30", "z30"],
+        v31: vreg = ["v31", "b31", "h31", "s31", "d31", "q31", "z31"],
+        p0: preg = ["p0"],
+        p1: preg = ["p1"],
+        p2: preg = ["p2"],
+        p3: preg = ["p3"],
+        p4: preg = ["p4"],
+        p5: preg = ["p5"],
+        p6: preg = ["p6"],
+        p7: preg = ["p7"],
+        p8: preg = ["p8"],
+        p9: preg = ["p9"],
+        p10: preg = ["p10"],
+        p11: preg = ["p11"],
+        p12: preg = ["p12"],
+        p13: preg = ["p13"],
+        p14: preg = ["p14"],
+        p15: preg = ["p15"],
+        ffr: preg = ["ffr"],
         #error = ["x18", "w18"] =>
             "x18 is used as a reserved register on some targets and cannot be used as an operand for inline asm",
         #error = ["x19", "w19"] =>
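Illustratively (assuming SVE hardware and the unstable `asm!` of this era), predicate registers and `ffr` can now be named, but only as clobbers; the new `z` aliases let the vector registers be referred to by their SVE names:

    //     asm!(
    //         "whilelt p0.s, wzr, {0:w}",
    //         in(reg) len,
    //         out("p0") _,   // accepted: explicit preg used as a clobber
    //     );
    //     // out(preg) _ and out("p0") value are both rejected: clobber-only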
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
index 305ea7d50e6..b52fa5bbcb2 100644
--- a/compiler/rustc_target/src/asm/mod.rs
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -533,6 +533,12 @@ impl InlineAsmRegClass {
             Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
         }
     }
+
+    /// Returns whether registers in this class can only be used as clobbers
+    /// and not as inputs/outputs.
+    pub fn is_clobber_only(self, arch: InlineAsmArch) -> bool {
+        self.supported_types(arch).is_empty()
+    }
 }
 
 #[derive(
diff --git a/compiler/rustc_target/src/asm/riscv.rs b/compiler/rustc_target/src/asm/riscv.rs
index e276a9175f9..314bd01de12 100644
--- a/compiler/rustc_target/src/asm/riscv.rs
+++ b/compiler/rustc_target/src/asm/riscv.rs
@@ -7,6 +7,7 @@ def_reg_class! {
     RiscV RiscVInlineAsmRegClass {
         reg,
         freg,
+        vreg,
     }
 }
 
@@ -44,6 +45,7 @@ impl RiscVInlineAsmRegClass {
                 }
             }
             Self::freg => types! { "f": F32; "d": F64; },
+            Self::vreg => &[],
         }
     }
 }
@@ -120,6 +122,38 @@ def_regs! {
         f29: freg = ["f29", "ft9"],
         f30: freg = ["f30", "ft10"],
         f31: freg = ["f31", "ft11"],
+        v0: vreg = ["v0"],
+        v1: vreg = ["v1"],
+        v2: vreg = ["v2"],
+        v3: vreg = ["v3"],
+        v4: vreg = ["v4"],
+        v5: vreg = ["v5"],
+        v6: vreg = ["v6"],
+        v7: vreg = ["v7"],
+        v8: vreg = ["v8"],
+        v9: vreg = ["v9"],
+        v10: vreg = ["v10"],
+        v11: vreg = ["v11"],
+        v12: vreg = ["v12"],
+        v13: vreg = ["v13"],
+        v14: vreg = ["v14"],
+        v15: vreg = ["v15"],
+        v16: vreg = ["v16"],
+        v17: vreg = ["v17"],
+        v18: vreg = ["v18"],
+        v19: vreg = ["v19"],
+        v20: vreg = ["v20"],
+        v21: vreg = ["v21"],
+        v22: vreg = ["v22"],
+        v23: vreg = ["v23"],
+        v24: vreg = ["v24"],
+        v25: vreg = ["v25"],
+        v26: vreg = ["v26"],
+        v27: vreg = ["v27"],
+        v28: vreg = ["v28"],
+        v29: vreg = ["v29"],
+        v30: vreg = ["v30"],
+        v31: vreg = ["v31"],
         #error = ["x9", "s1"] =>
             "s1 is used internally by LLVM and cannot be used as an operand for inline asm",
         #error = ["x8", "s0", "fp"] =>
diff --git a/compiler/rustc_target/src/asm/x86.rs b/compiler/rustc_target/src/asm/x86.rs
index 48f83ca7cd4..5e3828d7d85 100644
--- a/compiler/rustc_target/src/asm/x86.rs
+++ b/compiler/rustc_target/src/asm/x86.rs
@@ -12,6 +12,8 @@ def_reg_class! {
         ymm_reg,
         zmm_reg,
         kreg,
+        mmx_reg,
+        x87_reg,
     }
 }
 
@@ -35,6 +37,7 @@ impl X86InlineAsmRegClass {
             Self::reg_byte => &[],
             Self::xmm_reg | Self::ymm_reg | Self::zmm_reg => &['x', 'y', 'z'],
             Self::kreg => &[],
+            Self::mmx_reg | Self::x87_reg => &[],
         }
     }
 
@@ -73,6 +76,7 @@ impl X86InlineAsmRegClass {
                 _ => Some(('x', "xmm0")),
             },
             Self::kreg => None,
+            Self::mmx_reg | Self::x87_reg => None,
         }
     }
 
@@ -90,6 +94,7 @@ impl X86InlineAsmRegClass {
             Self::ymm_reg => Some(('y', "ymm0")),
             Self::zmm_reg => Some(('z', "zmm0")),
             Self::kreg => None,
+            Self::mmx_reg | Self::x87_reg => None,
         }
     }
 
@@ -125,6 +130,7 @@ impl X86InlineAsmRegClass {
                 "avx512f": I8, I16;
                 "avx512bw": I32, I64;
             },
+            Self::mmx_reg | Self::x87_reg => &[],
         }
     }
 }
@@ -285,16 +291,28 @@ def_regs! {
         k5: kreg = ["k5"],
         k6: kreg = ["k6"],
         k7: kreg = ["k7"],
+        mm0: mmx_reg = ["mm0"],
+        mm1: mmx_reg = ["mm1"],
+        mm2: mmx_reg = ["mm2"],
+        mm3: mmx_reg = ["mm3"],
+        mm4: mmx_reg = ["mm4"],
+        mm5: mmx_reg = ["mm5"],
+        mm6: mmx_reg = ["mm6"],
+        mm7: mmx_reg = ["mm7"],
+        st0: x87_reg = ["st(0)", "st"],
+        st1: x87_reg = ["st(1)"],
+        st2: x87_reg = ["st(2)"],
+        st3: x87_reg = ["st(3)"],
+        st4: x87_reg = ["st(4)"],
+        st5: x87_reg = ["st(5)"],
+        st6: x87_reg = ["st(6)"],
+        st7: x87_reg = ["st(7)"],
         #error = ["bp", "bpl", "ebp", "rbp"] =>
             "the frame pointer cannot be used as an operand for inline asm",
         #error = ["sp", "spl", "esp", "rsp"] =>
             "the stack pointer cannot be used as an operand for inline asm",
         #error = ["ip", "eip", "rip"] =>
             "the instruction pointer cannot be used as an operand for inline asm",
-        #error = ["st", "st(0)", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)"] =>
-            "x87 registers are not currently supported as operands for inline asm",
-        #error = ["mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"] =>
-            "MMX registers are not currently supported as operands for inline asm",
         #error = ["k0"] =>
             "the k0 AVX mask register cannot be used as an operand for inline asm",
     }
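Taken together with the codegen change above: naming even one x87 or MMX register as a clobber causes the whole FP stack, `st` through `st(7)`, to be treated as clobbered, because the MMX registers alias the x87 stack. Sketch:

    //     asm!("fldz; fstp st(0)", out("st(0)") _);  // clobbers st..st(7)
    //     asm!("emms", out("mm0") _);                // likewise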
diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_typeck/src/check/upvar.rs
index 4d6d1da194f..c42ca936e97 100644
--- a/compiler/rustc_typeck/src/check/upvar.rs
+++ b/compiler/rustc_typeck/src/check/upvar.rs
@@ -50,6 +50,7 @@ use rustc_span::sym;
 use rustc_span::{MultiSpan, Span, Symbol};
 use rustc_trait_selection::infer::InferCtxtExt;
 
+use rustc_data_structures::stable_map::FxHashMap;
 use rustc_data_structures::stable_set::FxHashSet;
 use rustc_index::vec::Idx;
 use rustc_target::abi::VariantIdx;
@@ -81,6 +82,22 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     }
 }
 
+/// Intermediate format to store the hir_id pointing to the use that resulted in the
+/// corresponding place being captured, and a String containing the captured value's
+/// name (e.g., `a.b.c`).
+type CapturesInfo = (Option<hir::HirId>, String);
+
+/// Intermediate format to store the information needed to generate the migration lint.
+/// The tuple contains the hir_id pointing to the use that resulted in the corresponding
+/// place being captured, a String containing the captured value's name (e.g., `a.b.c`),
+/// and a String containing the reason why migration is needed for that capture.
+type MigrationNeededForCapture = (Option<hir::HirId>, String, String);
+
+/// Intermediate format to store the hir id of the root variable and a Vec containing
+/// information on why the root variable should be fully captured.
+type MigrationDiagnosticInfo = (hir::HirId, Vec<MigrationNeededForCapture>);
+
 struct InferBorrowKindVisitor<'a, 'tcx> {
     fcx: &'a FnCtxt<'a, 'tcx>,
 }
@@ -498,18 +515,58 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
             let local_def_id = closure_def_id.expect_local();
             let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
+            let closure_span = self.tcx.hir().span(closure_hir_id);
+            let closure_head_span = self.tcx.sess.source_map().guess_head_span(closure_span);
             self.tcx.struct_span_lint_hir(
                 lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
                 closure_hir_id,
-                span,
+                closure_head_span,
                 |lint| {
                     let mut diagnostics_builder = lint.build(
                         format!(
-                            "{} will change in Rust 2021",
+                            "changes to closure capture in Rust 2021 will affect {}",
                             reasons
                         )
                         .as_str(),
                     );
+                    for (var_hir_id, diagnostics_info) in need_migrations.iter() {
+                        // Labels all the usage of the captured variable and why they are responsible
+                        // for migration being needed
+                        for (captured_hir_id, captured_name, reasons) in diagnostics_info.iter() {
+                            if let Some(captured_hir_id) = captured_hir_id {
+                                let cause_span = self.tcx.hir().span(*captured_hir_id);
+                                diagnostics_builder.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it only captures `{}`",
+                                    self.tcx.hir().name(*var_hir_id),
+                                    captured_name,
+                                ));
+                            }
+
+                            // Add a label pointing to where a captured variable affected by drop order
+                            // is dropped
+                            if reasons.contains("drop order") {
+                                let drop_location_span = drop_location_span(self.tcx, &closure_hir_id);
+
+                                diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{}` would be dropped here, but in Rust 2021, only `{}` would be dropped here alongside the closure",
+                                    self.tcx.hir().name(*var_hir_id),
+                                    captured_name,
+                                ));
+                            }
+
+                            // Add a label explaining why a closure no longer implements a trait
+                            if reasons.contains("trait implementation") {
+                                let missing_trait = &reasons[..reasons.find("trait implementation").unwrap() - 1];
+
+                                diagnostics_builder.span_label(closure_head_span, format!("in Rust 2018, this closure would implement {} as `{}` implements {}, but in Rust 2021, this closure would no longer implement {} as `{}` does not implement {}",
+                                    missing_trait,
+                                    self.tcx.hir().name(*var_hir_id),
+                                    missing_trait,
+                                    missing_trait,
+                                    captured_name,
+                                    missing_trait,
+                                ));
+                            }
+                        }
+                    }
                     diagnostics_builder.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
                     let closure_body_span = self.tcx.hir().span(body_id.hir_id);
                     let (sugg, app) =
@@ -556,13 +613,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
         if auto_trait_reasons.len() > 0 {
             reasons = format!(
-                "{} trait implementation",
+                "{} trait implementation for closure",
                 auto_trait_reasons.clone().into_iter().collect::<Vec<&str>>().join(", ")
             );
         }
 
         if auto_trait_reasons.len() > 0 && drop_reason {
-            reasons = format!("{}, and ", reasons);
+            reasons = format!("{} and ", reasons);
         }
 
         if drop_reason {
@@ -572,20 +629,35 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         reasons
     }
 
-    /// Returns true if migration is needed for trait for the provided var_hir_id
-    fn need_2229_migrations_for_trait(
+    /// Figures out the list of root variables (and their types) that aren't completely
+    /// captured by the closure when `capture_disjoint_fields` is enabled and auto-traits
+    /// differ between the root variable and the captured paths.
+    ///
+    /// Returns a HashMap mapping each `CapturesInfo` to a HashSet of the trait names
+    /// for which migration is needed for the provided var_hir_id, or None if no
+    /// migration is needed.
+    fn compute_2229_migrations_for_trait(
         &self,
         min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
         var_hir_id: hir::HirId,
-        check_trait: Option<DefId>,
         closure_clause: hir::CaptureBy,
-    ) -> bool {
+    ) -> Option<FxHashMap<CapturesInfo, FxHashSet<&str>>> {
+        let auto_traits_def_id = vec![
+            self.tcx.lang_items().clone_trait(),
+            self.tcx.lang_items().sync_trait(),
+            self.tcx.get_diagnostic_item(sym::send_trait),
+            self.tcx.lang_items().unpin_trait(),
+            self.tcx.get_diagnostic_item(sym::unwind_safe_trait),
+            self.tcx.get_diagnostic_item(sym::ref_unwind_safe_trait),
+        ];
+        let auto_traits =
+            vec!["`Clone`", "`Sync`", "`Send`", "`Unpin`", "`UnwindSafe`", "`RefUnwindSafe`"];
+
         let root_var_min_capture_list = if let Some(root_var_min_capture_list) =
             min_captures.and_then(|m| m.get(&var_hir_id))
         {
             root_var_min_capture_list
         } else {
-            return false;
+            return None;
         };
 
         let ty = self.infcx.resolve_vars_if_possible(self.node_ty(var_hir_id));
@@ -604,19 +676,26 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
             }
         };
 
-        let obligation_should_hold = check_trait
-            .map(|check_trait| {
-                self.infcx
-                    .type_implements_trait(
-                        check_trait,
-                        ty,
-                        self.tcx.mk_substs_trait(ty, &[]),
-                        self.param_env,
-                    )
-                    .must_apply_modulo_regions()
-            })
-            .unwrap_or(false);
+        let mut obligations_should_hold = Vec::new();
+        // Check, for each of the auto traits, whether the root variable implements it
+        for check_trait in auto_traits_def_id.iter() {
+            obligations_should_hold.push(
+                check_trait
+                    .map(|check_trait| {
+                        self.infcx
+                            .type_implements_trait(
+                                check_trait,
+                                ty,
+                                self.tcx.mk_substs_trait(ty, &[]),
+                                self.param_env,
+                            )
+                            .must_apply_modulo_regions()
+                    })
+                    .unwrap_or(false),
+            );
+        }
 
+        let mut problematic_captures = FxHashMap::default();
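+        // Each entry maps a capture's (path_expr_id, string representation) to the
+        // set of auto traits the root variable implements but the capture does not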
         // Check whether captured fields also implement the trait
         for capture in root_var_min_capture_list.iter() {
             let ty = apply_capture_kind_on_capture_ty(
@@ -625,106 +704,46 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 capture.info.capture_kind,
             );
 
-            let obligation_holds_for_capture = check_trait
-                .map(|check_trait| {
-                    self.infcx
-                        .type_implements_trait(
-                            check_trait,
-                            ty,
-                            self.tcx.mk_substs_trait(ty, &[]),
-                            self.param_env,
-                        )
-                        .must_apply_modulo_regions()
-                })
-                .unwrap_or(false);
-
-            if !obligation_holds_for_capture && obligation_should_hold {
-                return true;
+            // Check, for each of the auto traits, whether the captured path implements it
+            let mut obligations_holds_for_capture = Vec::new();
+            for check_trait in auto_traits_def_id.iter() {
+                obligations_holds_for_capture.push(
+                    check_trait
+                        .map(|check_trait| {
+                            self.infcx
+                                .type_implements_trait(
+                                    check_trait,
+                                    ty,
+                                    self.tcx.mk_substs_trait(ty, &[]),
+                                    self.param_env,
+                                )
+                                .must_apply_modulo_regions()
+                        })
+                        .unwrap_or(false),
+                );
             }
-        }
-        false
-    }
-
-    /// Figures out the list of root variables (and their types) that aren't completely
-    /// captured by the closure when `capture_disjoint_fields` is enabled and auto-traits
-    /// differ between the root variable and the captured paths.
-    ///
-    /// The output list would include a root variable if:
-    /// - It would have been captured into the closure when `capture_disjoint_fields` wasn't
-    ///   enabled, **and**
-    /// - It wasn't completely captured by the closure, **and**
-    /// - One of the paths captured does not implement all the auto-traits its root variable
-    ///   implements.
-    fn compute_2229_migrations_for_trait(
-        &self,
-        min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
-        var_hir_id: hir::HirId,
-        closure_clause: hir::CaptureBy,
-    ) -> Option<FxHashSet<&str>> {
-        let tcx = self.infcx.tcx;
-
-        // Check whether catpured fields also implement the trait
-        let mut auto_trait_reasons = FxHashSet::default();
-
-        if self.need_2229_migrations_for_trait(
-            min_captures,
-            var_hir_id,
-            tcx.lang_items().clone_trait(),
-            closure_clause,
-        ) {
-            auto_trait_reasons.insert("`Clone`");
-        }
-
-        if self.need_2229_migrations_for_trait(
-            min_captures,
-            var_hir_id,
-            tcx.lang_items().sync_trait(),
-            closure_clause,
-        ) {
-            auto_trait_reasons.insert("`Sync`");
-        }
 
-        if self.need_2229_migrations_for_trait(
-            min_captures,
-            var_hir_id,
-            tcx.get_diagnostic_item(sym::send_trait),
-            closure_clause,
-        ) {
-            auto_trait_reasons.insert("`Send`");
-        }
+            let mut capture_problems = FxHashSet::default();
 
-        if self.need_2229_migrations_for_trait(
-            min_captures,
-            var_hir_id,
-            tcx.lang_items().unpin_trait(),
-            closure_clause,
-        ) {
-            auto_trait_reasons.insert("`Unpin`");
-        }
-
-        if self.need_2229_migrations_for_trait(
-            min_captures,
-            var_hir_id,
-            tcx.get_diagnostic_item(sym::unwind_safe_trait),
-            closure_clause,
-        ) {
-            auto_trait_reasons.insert("`UnwindSafe`");
-        }
+            // Record every auto trait that the root variable implements but the
+            // capture does not
+            for (idx, _) in obligations_should_hold.iter().enumerate() {
+                if !obligations_holds_for_capture[idx] && obligations_should_hold[idx] {
+                    capture_problems.insert(auto_traits[idx]);
+                }
+            }
 
-        if self.need_2229_migrations_for_trait(
-            min_captures,
-            var_hir_id,
-            tcx.get_diagnostic_item(sym::ref_unwind_safe_trait),
-            closure_clause,
-        ) {
-            auto_trait_reasons.insert("`RefUnwindSafe`");
+            if capture_problems.len() > 0 {
+                problematic_captures.insert(
+                    (capture.info.path_expr_id, capture.to_string(self.tcx)),
+                    capture_problems,
+                );
+            }
         }
-
-        if auto_trait_reasons.len() > 0 {
-            return Some(auto_trait_reasons);
+        if problematic_captures.len() > 0 {
+            return Some(problematic_captures);
         }
-
-        return None;
+        None
     }
 
     /// Figures out the list of root variables (and their types) that aren't completely
@@ -737,8 +756,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// - It wasn't completely captured by the closure, **and**
     /// - One of the paths starting at this root variable, that is not captured needs Drop.
     ///
-    /// This function only returns true for significant drops. A type is considerent to have a
-    /// significant drop if it's Drop implementation is not annotated by `rustc_insignificant_dtor`.
+    /// This function only returns a HashSet of CapturesInfo for significant drops. If there
+    /// are no significant drops, then None is returned.
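+    ///
+    /// A minimal sketch of the drop-reorder case (the types are illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// struct Guard;
+    /// impl Drop for Guard { fn drop(&mut self) { println!("dropped"); } }
+    ///
+    /// let t = (Guard, Guard);
+    /// let c = move || { let _t0 = t.0; };
+    /// // 2018: all of `t` moves into the closure, so `t.1` drops when `c` is dropped.
+    /// // 2021: only `t.0` moves in, so `t.1` drops at the end of the enclosing block.
+    /// ```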
     fn compute_2229_migrations_for_drop(
         &self,
         closure_def_id: DefId,
@@ -746,11 +765,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
         closure_clause: hir::CaptureBy,
         var_hir_id: hir::HirId,
-    ) -> bool {
+    ) -> Option<FxHashSet<CapturesInfo>> {
         let ty = self.infcx.resolve_vars_if_possible(self.node_ty(var_hir_id));
 
         if !ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id.expect_local())) {
-            return false;
+            return None;
         }
 
         let root_var_min_capture_list = if let Some(root_var_min_capture_list) =
@@ -763,21 +782,29 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
 
             match closure_clause {
                 // Only migrate if closure is a move closure
-                hir::CaptureBy::Value => return true,
+                hir::CaptureBy::Value => return Some(FxHashSet::default()),
                 hir::CaptureBy::Ref => {}
             }
 
-            return false;
+            return None;
         };
 
-        let projections_list = root_var_min_capture_list
-            .iter()
-            .filter_map(|captured_place| match captured_place.info.capture_kind {
+        let mut projections_list = Vec::new();
+        let mut diagnostics_info = FxHashSet::default();
+
+        for captured_place in root_var_min_capture_list.iter() {
+            match captured_place.info.capture_kind {
                 // Only care about captures that are moved into the closure
-                ty::UpvarCapture::ByValue(..) => Some(captured_place.place.projections.as_slice()),
-                ty::UpvarCapture::ByRef(..) => None,
-            })
-            .collect::<Vec<_>>();
+                ty::UpvarCapture::ByValue(..) => {
+                    projections_list.push(captured_place.place.projections.as_slice());
+                    diagnostics_info.insert((
+                        captured_place.info.path_expr_id,
+                        captured_place.to_string(self.tcx),
+                    ));
+                }
+                ty::UpvarCapture::ByRef(..) => {}
+            }
+        }
 
         let is_moved = !projections_list.is_empty();
 
@@ -793,10 +820,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
                 projections_list,
             )
         {
-            return true;
+            return Some(diagnostics_info);
         }
 
-        return false;
+        return None;
     }
 
     /// Figures out the list of root variables (and their types) that aren't completely
@@ -812,15 +839,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     /// - One of the paths captured does not implement all the auto-traits its root variable
     ///   implements.
     ///
-    /// Returns a tuple containing a vector of HirIds as well as a String containing the reason
-    /// why root variables whose HirId is contained in the vector should be fully captured.
+    /// Returns a tuple containing a vector of MigrationDiagnosticInfo, as well as a String
+    /// containing the reason why root variables whose HirId is contained in the vector should
+    /// be fully captured
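+    /// (e.g. "`Send` trait implementation for closure")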
     fn compute_2229_migrations(
         &self,
         closure_def_id: DefId,
         closure_span: Span,
         closure_clause: hir::CaptureBy,
         min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
-    ) -> (Vec<hir::HirId>, String) {
+    ) -> (Vec<MigrationDiagnosticInfo>, String) {
         let upvars = if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) {
             upvars
         } else {
@@ -828,38 +856,79 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         };
 
         let mut need_migrations = Vec::new();
-        let mut auto_trait_reasons = FxHashSet::default();
-        let mut drop_reorder_reason = false;
+        let mut auto_trait_migration_reasons = FxHashSet::default();
+        let mut drop_migration_needed = false;
 
         // Perform auto-trait analysis
         for (&var_hir_id, _) in upvars.iter() {
-            let mut need_migration = false;
-            if let Some(trait_migration_cause) =
+            let mut responsible_captured_hir_ids = Vec::new();
+
+            let auto_trait_diagnostic = if let Some(diagnostics_info) =
                 self.compute_2229_migrations_for_trait(min_captures, var_hir_id, closure_clause)
             {
-                need_migration = true;
-                auto_trait_reasons.extend(trait_migration_cause);
+                diagnostics_info
+            } else {
+                FxHashMap::default()
+            };
+
+            let drop_reorder_diagnostic = if let Some(diagnostics_info) = self
+                .compute_2229_migrations_for_drop(
+                    closure_def_id,
+                    closure_span,
+                    min_captures,
+                    closure_clause,
+                    var_hir_id,
+                ) {
+                drop_migration_needed = true;
+                diagnostics_info
+            } else {
+                FxHashSet::default()
+            };
+
+            // Combine all the captures responsible for needing migrations into one HashSet
+            let mut capture_diagnostic = drop_reorder_diagnostic.clone();
+            for key in auto_trait_diagnostic.keys() {
+                capture_diagnostic.insert(key.clone());
             }
 
-            if self.compute_2229_migrations_for_drop(
-                closure_def_id,
-                closure_span,
-                min_captures,
-                closure_clause,
-                var_hir_id,
-            ) {
-                need_migration = true;
-                drop_reorder_reason = true;
+            let mut capture_diagnostic = capture_diagnostic.into_iter().collect::<Vec<_>>();
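+            // Sort the combined captures so the emitted diagnostics are deterministic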
+            capture_diagnostic.sort();
+            for captured_info in capture_diagnostic.iter() {
+                // Get the auto trait reasons why migration is needed because of this
+                // capture, if there are any
+                let capture_trait_reasons =
+                    if let Some(reasons) = auto_trait_diagnostic.get(captured_info) {
+                        reasons.clone()
+                    } else {
+                        FxHashSet::default()
+                    };
+
+                // Check if migration is needed because of drop reorder as a result of that capture
+                let capture_drop_reorder_reason = drop_reorder_diagnostic.contains(captured_info);
+
+                // Accumulate all the reasons why the root variable should be fully
+                // captured as a result of auto trait implementation issues
+                auto_trait_migration_reasons.extend(capture_trait_reasons.clone());
+
+                responsible_captured_hir_ids.push((
+                    captured_info.0,
+                    captured_info.1.clone(),
+                    self.compute_2229_migrations_reasons(
+                        capture_trait_reasons,
+                        capture_drop_reorder_reason,
+                    ),
+                ));
             }
 
-            if need_migration {
-                need_migrations.push(var_hir_id);
+            if capture_diagnostic.len() > 0 {
+                need_migrations.push((var_hir_id, responsible_captured_hir_ids));
             }
         }
-
         (
             need_migrations,
-            self.compute_2229_migrations_reasons(auto_trait_reasons, drop_reorder_reason),
+            self.compute_2229_migrations_reasons(
+                auto_trait_migration_reasons,
+                drop_migration_needed,
+            ),
         )
     }
 
@@ -1320,6 +1389,26 @@ fn apply_capture_kind_on_capture_ty(
     }
 }
 
+/// Returns the Span where the value with the provided HirId would be dropped
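+/// (typically the closing brace of the enclosing function body or block)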
+fn drop_location_span(tcx: TyCtxt<'tcx>, hir_id: &hir::HirId) -> Span {
+    let owner_id = tcx.hir().get_enclosing_scope(*hir_id).unwrap();
+
+    let owner_node = tcx.hir().get(owner_id);
+    let owner_span = match owner_node {
+        hir::Node::Item(item) => match item.kind {
+            hir::ItemKind::Fn(_, _, owner_id) => tcx.hir().span(owner_id.hir_id),
+            _ => {
+                bug!("Drop location span error: need to handle more ItemKind {:?}", item.kind);
+            }
+        },
+        hir::Node::Block(block) => tcx.hir().span(block.hir_id),
+        _ => {
+            bug!("Drop location span error: need to handle more Node {:?}", owner_node);
+        }
+    };
+    tcx.sess.source_map().end_point(owner_span)
+}
+
 struct InferBorrowKind<'a, 'tcx> {
     fcx: &'a FnCtxt<'a, 'tcx>,
 
@@ -1877,10 +1966,10 @@ fn should_do_rust_2021_incompatible_closure_captures_analysis(
 /// - s2: Comma separated names of the variables being migrated.
 fn migration_suggestion_for_2229(
     tcx: TyCtxt<'_>,
-    need_migrations: &Vec<hir::HirId>,
+    need_migrations: &Vec<MigrationDiagnosticInfo>,
 ) -> (String, String) {
     let need_migrations_variables =
-        need_migrations.iter().map(|v| var_name(tcx, *v)).collect::<Vec<_>>();
+        need_migrations.iter().map(|(v, _)| var_name(tcx, *v)).collect::<Vec<_>>();
 
     let migration_ref_concat =
         need_migrations_variables.iter().map(|v| format!("&{}", v)).collect::<Vec<_>>().join(", ");