-rw-r--r--  .github/workflows/ci.yml | 4
-rw-r--r--  Cargo.lock | 8
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/state.rs | 11
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 26
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 5
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 8
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 8
-rw-r--r--  compiler/rustc_driver/src/pretty.rs | 3
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp | 32
-rw-r--r--  compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp | 16
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs | 12
-rw-r--r--  compiler/rustc_mir/Cargo.toml | 3
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/counters.rs | 5
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/debug.rs | 16
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/graph.rs | 16
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/mod.rs | 5
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/spans.rs | 16
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml | 12
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs | 6
-rw-r--r--  compiler/rustc_mir/src/transform/coverage/tests.rs | 714
-rw-r--r--  compiler/rustc_mir/src/transform/inline.rs | 41
-rw-r--r--  compiler/rustc_mir/src/transform/validate.rs | 15
-rw-r--r--  compiler/rustc_mir_build/src/build/matches/test.rs | 11
-rw-r--r--  compiler/rustc_save_analysis/src/sig.rs | 4
-rw-r--r--  compiler/rustc_target/src/spec/mod.rs | 16
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs | 57
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs | 159
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs | 128
-rw-r--r--  library/alloc/src/collections/vec_deque/macros.rs | 19
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs (renamed from library/alloc/src/collections/vec_deque.rs) | 501
-rw-r--r--  library/alloc/src/collections/vec_deque/pair_slices.rs | 66
-rw-r--r--  library/alloc/src/collections/vec_deque/ring_slices.rs | 56
-rw-r--r--  library/std/src/sys/wasm/mutex_atomics.rs | 2
-rw-r--r--  src/bootstrap/dist.rs | 16
-rw-r--r--  src/bootstrap/lib.rs | 1
-rw-r--r--  src/bootstrap/native.rs | 4
-rw-r--r--  src/ci/docker/host-x86_64/x86_64-gnu-llvm-9/Dockerfile (renamed from src/ci/docker/host-x86_64/x86_64-gnu-llvm-8/Dockerfile) | 5
-rw-r--r--  src/ci/github-actions/ci.yml | 4
-rw-r--r--  src/test/codegen/abi-efiapi.rs | 1
-rw-r--r--  src/test/codegen/force-unwind-tables.rs | 1
-rw-r--r--  src/test/mir-opt/inline/inline-compatibility.rs | 22
-rw-r--r--  src/test/mir-opt/inline/inline-generator.rs | 16
-rw-r--r--  src/test/mir-opt/inline/inline_compatibility.inlined_no_sanitize.Inline.diff | 20
-rw-r--r--  src/test/mir-opt/inline/inline_compatibility.inlined_target_feature.Inline.diff | 20
-rw-r--r--  src/test/mir-opt/inline/inline_compatibility.not_inlined_c_variadic.Inline.diff | 25
-rw-r--r--  src/test/mir-opt/inline/inline_compatibility.not_inlined_no_sanitize.Inline.diff | 16
-rw-r--r--  src/test/mir-opt/inline/inline_compatibility.not_inlined_target_feature.Inline.diff | 16
-rw-r--r--  src/test/mir-opt/inline/inline_generator.main.Inline.diff | 123
-rw-r--r--  src/test/pretty/qpath-associated-type-bound.rs | 16
-rw-r--r--  src/test/rustdoc/raw-ident-eliminate-r-hashtag.rs | 22
-rw-r--r--  src/test/ui/hygiene/panic-location.run.stderr | 2
-rw-r--r--  src/test/ui/issues/issue-50865-private-impl-trait/auxiliary/lib.rs | 4
-rw-r--r--  src/test/ui/match/issue-72680.rs | 65
-rw-r--r--  src/test/ui/mir/mir-inlining/ice-issue-77306-1.rs | 18
-rw-r--r--  src/test/ui/sanitize/new-llvm-pass-manager-thin-lto.rs | 1
-rw-r--r--  src/test/ui/weird-exprs.rs | 7
56 files changed, 1732 insertions, 694 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 84e39a4189e..f10b6ca7ea9 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -43,7 +43,7 @@ jobs:
           - name: mingw-check
             os: ubuntu-latest-xl
             env: {}
-          - name: x86_64-gnu-llvm-8
+          - name: x86_64-gnu-llvm-9
             os: ubuntu-latest-xl
             env: {}
           - name: x86_64-gnu-tools
@@ -265,7 +265,7 @@ jobs:
           - name: x86_64-gnu-distcheck
             os: ubuntu-latest-xl
             env: {}
-          - name: x86_64-gnu-llvm-8
+          - name: x86_64-gnu-llvm-9
             env:
               RUST_BACKTRACE: 1
             os: ubuntu-latest-xl
diff --git a/Cargo.lock b/Cargo.lock
index c72c7b8481b..f02ae7ef6cf 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -722,6 +722,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9a21fa21941700a3cd8fcb4091f361a6a712fac632f85d9f487cc892045d55c6"
 
 [[package]]
+name = "coverage_test_macros"
+version = "0.0.0"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
 name = "cpuid-bool"
 version = "0.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3922,6 +3929,7 @@ dependencies = [
 name = "rustc_mir"
 version = "0.0.0"
 dependencies = [
+ "coverage_test_macros",
  "either",
  "itertools 0.9.0",
  "polonius-engine",
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
index a566200c338..958219cbebf 100644
--- a/compiler/rustc_ast_pretty/src/pprust/state.rs
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -2327,11 +2327,12 @@ impl<'a> State<'a> {
             self.print_path(path, false, depth);
         }
         self.s.word(">");
-        self.s.word("::");
-        let item_segment = path.segments.last().unwrap();
-        self.print_ident(item_segment.ident);
-        if let Some(ref args) = item_segment.args {
-            self.print_generic_args(args, colons_before_params)
+        for item_segment in &path.segments[qself.position..] {
+            self.s.word("::");
+            self.print_ident(item_segment.ident);
+            if let Some(ref args) = item_segment.args {
+                self.print_generic_args(args, colons_before_params)
+            }
         }
     }
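
The change above makes the AST pretty-printer emit every path segment after the qself position instead of only the final segment, so intermediate segments of a qualified path (e.g. a made-up `<T as Tr>::A::B`) are no longer dropped. A minimal, standalone sketch of that loop (hypothetical helper, not rustc's API):

fn print_qpath(segments: &[&str], qself_position: usize) -> String {
    // Segments before `qself_position` belong to the `<...>` part;
    // the remaining segments are printed after the closing ">".
    let mut out = String::from(">");
    for segment in &segments[qself_position..] {
        out.push_str("::");
        out.push_str(segment);
    }
    out
}

fn main() {
    // The old code printed only the last segment (">::B");
    // the loop now keeps the intermediate "A" segment as well.
    assert_eq!(print_qpath(&["Tr", "A", "B"], 1), ">::A::B");
}
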
 
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 87bcce07b34..e06e2d45665 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -144,25 +144,6 @@ fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
     );
 }
 
-fn translate_obsolete_target_features(feature: &str) -> &str {
-    const LLVM9_FEATURE_CHANGES: &[(&str, &str)] =
-        &[("+fp-only-sp", "-fp64"), ("-fp-only-sp", "+fp64"), ("+d16", "-d32"), ("-d16", "+d32")];
-    if llvm_util::get_major_version() >= 9 {
-        for &(old, new) in LLVM9_FEATURE_CHANGES {
-            if feature == old {
-                return new;
-            }
-        }
-    } else {
-        for &(old, new) in LLVM9_FEATURE_CHANGES {
-            if feature == new {
-                return old;
-            }
-        }
-    }
-    feature
-}
-
 pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
     const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
 
@@ -172,12 +153,7 @@ pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
         .target_feature
         .split(',')
         .filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)));
-    sess.target
-        .features
-        .split(',')
-        .chain(cmdline)
-        .filter(|l| !l.is_empty())
-        .map(translate_obsolete_target_features)
+    sess.target.features.split(',').chain(cmdline).filter(|l| !l.is_empty())
 }
 
 pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 6237a4c0020..6f956c3bcc1 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -377,11 +377,6 @@ fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
 }
 
 pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool {
-    // We only support the new pass manager starting with LLVM 9.
-    if llvm_util::get_major_version() < 9 {
-        return false;
-    }
-
     // The new pass manager is disabled by default.
     config.new_llvm_pass_manager
 }
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index b6e922ca545..8dd40308075 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -100,11 +100,6 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
     }
 }
 
-fn strip_function_ptr_alignment(data_layout: String) -> String {
-    // FIXME: Make this more general.
-    data_layout.replace("-Fi8-", "-")
-}
-
 fn strip_x86_address_spaces(data_layout: String) -> String {
     data_layout.replace("-p270:32:32-p271:32:32-p272:64:64-", "-")
 }
@@ -119,9 +114,6 @@ pub unsafe fn create_module(
     let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
 
     let mut target_data_layout = sess.target.data_layout.clone();
-    if llvm_util::get_major_version() < 9 {
-        target_data_layout = strip_function_ptr_alignment(target_data_layout);
-    }
     if llvm_util::get_major_version() < 10
         && (sess.target.arch == "x86" || sess.target.arch == "x86_64")
     {
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index ab70f72dc61..cc71b6289fa 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -104,7 +104,7 @@ unsafe fn configure_llvm(sess: &Session) {
         }
     }
 
-    if sess.opts.debugging_opts.llvm_time_trace && get_major_version() >= 9 {
+    if sess.opts.debugging_opts.llvm_time_trace {
         // time-trace is not thread safe and running it in parallel will cause seg faults.
         if !sess.opts.debugging_opts.no_parallel_llvm {
             bug!("`-Z llvm-time-trace` requires `-Z no-parallel-llvm`")
@@ -122,10 +122,8 @@ unsafe fn configure_llvm(sess: &Session) {
 
 pub fn time_trace_profiler_finish(file_name: &str) {
     unsafe {
-        if get_major_version() >= 9 {
-            let file_name = CString::new(file_name).unwrap();
-            llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr());
-        }
+        let file_name = CString::new(file_name).unwrap();
+        llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr());
     }
 }
 
diff --git a/compiler/rustc_driver/src/pretty.rs b/compiler/rustc_driver/src/pretty.rs
index b0fbf1e03f5..f74e1fe61e7 100644
--- a/compiler/rustc_driver/src/pretty.rs
+++ b/compiler/rustc_driver/src/pretty.rs
@@ -32,9 +32,6 @@ use crate::abort_on_err;
 // Note that since the `&PrinterSupport` is freshly constructed on each
 // call, it would not make sense to try to attach the lifetime of `self`
 // to the lifetime of the `&PrinterObject`.
-//
-// (The `use_once_payload` is working around the current lack of once
-// functions in the compiler.)
 
 /// Constructs a `PrinterSupport` object and passes it to `f`.
 fn call_with_pp_support<'tcx, A, F>(
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 71ca4f23bbb..01d76bb3e94 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -16,9 +16,7 @@
 #include "llvm/Object/ObjectFile.h"
 #include "llvm/Object/IRObjectFile.h"
 #include "llvm/Passes/PassBuilder.h"
-#if LLVM_VERSION_GE(9, 0)
 #include "llvm/Passes/StandardInstrumentations.h"
-#endif
 #include "llvm/Support/CBindingWrapping.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/Host.h"
@@ -31,15 +29,11 @@
 #include "llvm-c/Transforms/PassManagerBuilder.h"
 
 #include "llvm/Transforms/Instrumentation.h"
-#if LLVM_VERSION_GE(9, 0)
 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
 #include "llvm/Support/TimeProfiler.h"
-#endif
 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
-#if LLVM_VERSION_GE(9, 0)
 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
-#endif
 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
 
 using namespace llvm;
@@ -73,20 +67,18 @@ extern "C" void LLVMTimeTraceProfilerInitialize() {
   timeTraceProfilerInitialize(
       /* TimeTraceGranularity */ 0,
       /* ProcName */ "rustc");
-#elif LLVM_VERSION_GE(9, 0)
+#else
   timeTraceProfilerInitialize();
 #endif
 }
 
 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
-#if LLVM_VERSION_GE(9, 0)
   StringRef FN(FileName);
   std::error_code EC;
   raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
 
   timeTraceProfilerWrite(OS);
   timeTraceProfilerCleanup();
-#endif
 }
 
 enum class LLVMRustPassKind {
@@ -127,22 +119,14 @@ extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover)
 extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
   const bool CompileKernel = false;
 
-#if LLVM_VERSION_GE(9, 0)
   return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
-#else
-  return wrap(createAddressSanitizerModulePass(CompileKernel, Recover));
-#endif
 }
 
 extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
-#if LLVM_VERSION_GE(9, 0)
   const bool CompileKernel = false;
 
   return wrap(createMemorySanitizerLegacyPassPass(
       MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
-#else
-  return wrap(createMemorySanitizerLegacyPassPass(TrackOrigins, Recover));
-#endif
 }
 
 extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
@@ -657,8 +641,6 @@ extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmS
                                                       const char*);     // IR name
 extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
 
-#if LLVM_VERSION_GE(9, 0)
-
 std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
   if (any_isa<const Module *>(WrappedIr))
     return any_cast<const Module *>(WrappedIr)->getName().str();
@@ -706,7 +688,6 @@ void LLVMSelfProfileInitializeCallbacks(
         AfterPassCallback(LlvmSelfProfiler);
       });
 }
-#endif
 
 enum class LLVMRustOptStage {
   PreLinkNoLTO,
@@ -739,7 +720,6 @@ LLVMRustOptimizeWithNewPassManager(
     void* LlvmSelfProfiler,
     LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
     LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
-#if LLVM_VERSION_GE(9, 0)
   Module *TheModule = unwrap(ModuleRef);
   TargetMachine *TM = unwrap(TMRef);
   PassBuilder::OptimizationLevel OptLevel = fromRust(OptLevelRust);
@@ -970,11 +950,6 @@ LLVMRustOptimizeWithNewPassManager(
     UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
 
   MPM.run(*TheModule, MAM);
-#else
-  // The new pass manager has been available for a long time,
-  // but we don't bother supporting it on old LLVM versions.
-  report_fatal_error("New pass manager only supported since LLVM 9");
-#endif
 }
 
 // Callback to demangle function name
@@ -1325,12 +1300,9 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
                               GlobalValue::LinkageTypes NewLinkage) {
     Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
   };
-#if LLVM_VERSION_GE(9, 0)
+
   thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
                                   Ret->GUIDPreservedSymbols);
-#else
-  thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage);
-#endif
 
   // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
   // callback below. This callback below will dictate the linkage for all
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 938eb19faef..9b0c176b692 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -124,9 +124,7 @@ extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M,
   return wrap(unwrap(M)
                   ->getOrInsertFunction(StringRef(Name, NameLen),
                                         unwrap<FunctionType>(FunctionTy))
-#if LLVM_VERSION_GE(9, 0)
                   .getCallee()
-#endif
   );
 }
 
@@ -251,11 +249,7 @@ extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr,
 extern "C" void LLVMRustAddByValCallSiteAttr(LLVMValueRef Instr, unsigned Index,
                                              LLVMTypeRef Ty) {
   CallBase *Call = unwrap<CallBase>(Instr);
-#if LLVM_VERSION_GE(9, 0)
   Attribute Attr = Attribute::getWithByValType(Call->getContext(), unwrap(Ty));
-#else
-  Attribute Attr = Attribute::get(Call->getContext(), Attribute::ByVal);
-#endif
   Call->addAttribute(Index, Attr);
 }
 
@@ -296,11 +290,7 @@ extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn,
 extern "C" void LLVMRustAddByValAttr(LLVMValueRef Fn, unsigned Index,
                                      LLVMTypeRef Ty) {
   Function *F = unwrap<Function>(Fn);
-#if LLVM_VERSION_GE(9, 0)
   Attribute Attr = Attribute::getWithByValType(F->getContext(), unwrap(Ty));
-#else
-  Attribute Attr = Attribute::get(F->getContext(), Attribute::ByVal);
-#endif
   F->addAttribute(Index, Attr);
 }
 
@@ -616,11 +606,9 @@ static DISubprogram::DISPFlags fromRust(LLVMRustDISPFlags SPFlags) {
   if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagOptimized)) {
     Result |= DISubprogram::DISPFlags::SPFlagOptimized;
   }
-#if LLVM_VERSION_GE(9, 0)
   if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagMainSubprogram)) {
     Result |= DISubprogram::DISPFlags::SPFlagMainSubprogram;
   }
-#endif
 
   return Result;
 }
@@ -744,10 +732,6 @@ extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFunction(
       DITemplateParameterArray(unwrap<MDTuple>(TParam));
   DISubprogram::DISPFlags llvmSPFlags = fromRust(SPFlags);
   DINode::DIFlags llvmFlags = fromRust(Flags);
-#if LLVM_VERSION_LT(9, 0)
-  if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagMainSubprogram))
-    llvmFlags |= DINode::DIFlags::FlagMainSubprogram;
-#endif
   DISubprogram *Sub = Builder->createFunction(
       unwrapDI<DIScope>(Scope),
       StringRef(Name, NameLen),
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 06e69a0009b..db11746971d 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -611,6 +611,18 @@ pub struct TyS<'tcx> {
     outer_exclusive_binder: ty::DebruijnIndex,
 }
 
+impl<'tcx> TyS<'tcx> {
+    /// A constructor used only for internal testing.
+    #[allow(rustc::usage_of_ty_tykind)]
+    pub fn make_for_test(
+        kind: TyKind<'tcx>,
+        flags: TypeFlags,
+        outer_exclusive_binder: ty::DebruijnIndex,
+    ) -> TyS<'tcx> {
+        TyS { kind, flags, outer_exclusive_binder }
+    }
+}
+
 // `TyS` is used a lot. Make sure it doesn't unintentionally get bigger.
 #[cfg(target_arch = "x86_64")]
 static_assert_size!(TyS<'_>, 32);
diff --git a/compiler/rustc_mir/Cargo.toml b/compiler/rustc_mir/Cargo.toml
index 487668cfa11..9bfd1da0391 100644
--- a/compiler/rustc_mir/Cargo.toml
+++ b/compiler/rustc_mir/Cargo.toml
@@ -31,3 +31,6 @@ rustc_ast = { path = "../rustc_ast" }
 rustc_span = { path = "../rustc_span" }
 rustc_apfloat = { path = "../rustc_apfloat" }
 smallvec = { version = "1.0", features = ["union", "may_dangle"] }
+
+[dev-dependencies]
+coverage_test_macros = { path = "src/transform/coverage/test_macros" }
diff --git a/compiler/rustc_mir/src/transform/coverage/counters.rs b/compiler/rustc_mir/src/transform/coverage/counters.rs
index d6c2f7f7aaf..20f6a16e0f7 100644
--- a/compiler/rustc_mir/src/transform/coverage/counters.rs
+++ b/compiler/rustc_mir/src/transform/coverage/counters.rs
@@ -14,7 +14,7 @@ use rustc_middle::mir::coverage::*;
 
 /// Manages the counter and expression indexes/IDs to generate `CoverageKind` components for MIR
 /// `Coverage` statements.
-pub(crate) struct CoverageCounters {
+pub(super) struct CoverageCounters {
     function_source_hash: u64,
     next_counter_id: u32,
     num_expressions: u32,
@@ -37,7 +37,7 @@ impl CoverageCounters {
         self.debug_counters.enable();
     }
 
-    /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlocks` directly or
+    /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
     /// indirectly associated with `CoverageSpans`, and returns additional `Expression`s
     /// representing intermediate values.
     pub fn make_bcb_counters(
@@ -120,7 +120,6 @@ struct BcbCounters<'a> {
     basic_coverage_blocks: &'a mut CoverageGraph,
 }
 
-// FIXME(richkadel): Add unit tests for `BcbCounters` functions/algorithms.
 impl<'a> BcbCounters<'a> {
     fn new(
         coverage_counters: &'a mut CoverageCounters,
diff --git a/compiler/rustc_mir/src/transform/coverage/debug.rs b/compiler/rustc_mir/src/transform/coverage/debug.rs
index ffa795134e2..7f1dc3844b2 100644
--- a/compiler/rustc_mir/src/transform/coverage/debug.rs
+++ b/compiler/rustc_mir/src/transform/coverage/debug.rs
@@ -127,7 +127,7 @@ pub const NESTED_INDENT: &str = "    ";
 
 const RUSTC_COVERAGE_DEBUG_OPTIONS: &str = "RUSTC_COVERAGE_DEBUG_OPTIONS";
 
-pub(crate) fn debug_options<'a>() -> &'a DebugOptions {
+pub(super) fn debug_options<'a>() -> &'a DebugOptions {
     static DEBUG_OPTIONS: SyncOnceCell<DebugOptions> = SyncOnceCell::new();
 
     &DEBUG_OPTIONS.get_or_init(|| DebugOptions::from_env())
@@ -136,7 +136,7 @@ pub(crate) fn debug_options<'a>() -> &'a DebugOptions {
 /// Parses and maintains coverage-specific debug options captured from the environment variable
 /// "RUSTC_COVERAGE_DEBUG_OPTIONS", if set.
 #[derive(Debug, Clone)]
-pub(crate) struct DebugOptions {
+pub(super) struct DebugOptions {
     pub allow_unused_expressions: bool,
     counter_format: ExpressionFormat,
 }
@@ -250,7 +250,7 @@ impl Default for ExpressionFormat {
 ///
 /// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
 /// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
-pub(crate) struct DebugCounters {
+pub(super) struct DebugCounters {
     some_counters: Option<FxHashMap<ExpressionOperandId, DebugCounter>>,
 }
 
@@ -386,7 +386,7 @@ impl DebugCounter {
 
 /// If enabled, this data structure captures additional debugging information used when generating
 /// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
-pub(crate) struct GraphvizData {
+pub(super) struct GraphvizData {
     some_bcb_to_coverage_spans_with_counters:
         Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, CoverageKind)>>>,
     some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<CoverageKind>>>,
@@ -496,7 +496,7 @@ impl GraphvizData {
 /// directly or indirectly, to compute the coverage counts for all `CoverageSpan`s, and any that are
 /// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
 /// and/or a `CoverageGraph` graphviz output).
-pub(crate) struct UsedExpressions {
+pub(super) struct UsedExpressions {
     some_used_expression_operands:
         Option<FxHashMap<ExpressionOperandId, Vec<InjectedExpressionId>>>,
     some_unused_expressions:
@@ -626,7 +626,7 @@ impl UsedExpressions {
 }
 
 /// Generates the MIR pass `CoverageSpan`-specific spanview dump file.
-pub(crate) fn dump_coverage_spanview(
+pub(super) fn dump_coverage_spanview(
     tcx: TyCtxt<'tcx>,
     mir_body: &mir::Body<'tcx>,
     basic_coverage_blocks: &CoverageGraph,
@@ -666,7 +666,7 @@ fn span_viewables(
 }
 
 /// Generates the MIR pass coverage-specific graphviz dump file.
-pub(crate) fn dump_coverage_graphviz(
+pub(super) fn dump_coverage_graphviz(
     tcx: TyCtxt<'tcx>,
     mir_body: &mir::Body<'tcx>,
     pass_name: &str,
@@ -815,7 +815,7 @@ fn bcb_to_string_sections(
 
 /// Returns a simple string representation of a `TerminatorKind` variant, independent of any
 /// values it might hold.
-pub(crate) fn term_type(kind: &TerminatorKind<'tcx>) -> &'static str {
+pub(super) fn term_type(kind: &TerminatorKind<'tcx>) -> &'static str {
     match kind {
         TerminatorKind::Goto { .. } => "Goto",
         TerminatorKind::SwitchInt { .. } => "SwitchInt",
diff --git a/compiler/rustc_mir/src/transform/coverage/graph.rs b/compiler/rustc_mir/src/transform/coverage/graph.rs
index c2ed2cbb100..9d375633dcf 100644
--- a/compiler/rustc_mir/src/transform/coverage/graph.rs
+++ b/compiler/rustc_mir/src/transform/coverage/graph.rs
@@ -17,7 +17,8 @@ const ID_SEPARATOR: &str = ",";
 /// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
 /// set of additional counters--if needed--to count incoming edges, if there are more than one.
 /// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
-pub(crate) struct CoverageGraph {
+#[derive(Debug)]
+pub(super) struct CoverageGraph {
     bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
     bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
     pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
@@ -275,7 +276,7 @@ impl graph::WithPredecessors for CoverageGraph {
 
 rustc_index::newtype_index! {
     /// A node in the [control-flow graph][CFG] of CoverageGraph.
-    pub(crate) struct BasicCoverageBlock {
+    pub(super) struct BasicCoverageBlock {
         DEBUG_FORMAT = "bcb{}",
     }
 }
@@ -305,7 +306,7 @@ rustc_index::newtype_index! {
 /// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
 /// significance.
 #[derive(Debug, Clone)]
-pub(crate) struct BasicCoverageBlockData {
+pub(super) struct BasicCoverageBlockData {
     pub basic_blocks: Vec<BasicBlock>,
     pub counter_kind: Option<CoverageKind>,
     edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
@@ -431,7 +432,7 @@ impl BasicCoverageBlockData {
 /// the specific branching BCB, representing the edge between the two. The latter case
 /// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
 #[derive(Clone, Copy, PartialEq, Eq)]
-pub(crate) struct BcbBranch {
+pub(super) struct BcbBranch {
     pub edge_from_bcb: Option<BasicCoverageBlock>,
     pub target_bcb: BasicCoverageBlock,
 }
@@ -498,9 +499,8 @@ fn bcb_filtered_successors<'a, 'tcx>(
 /// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
 /// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
 /// ensures a loop is completely traversed before processing Blocks after the end of the loop.
-// FIXME(richkadel): Add unit tests for TraversalContext.
 #[derive(Debug)]
-pub(crate) struct TraversalContext {
+pub(super) struct TraversalContext {
     /// From one or more backedges returning to a loop header.
     pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
 
@@ -510,7 +510,7 @@ pub(crate) struct TraversalContext {
     pub worklist: Vec<BasicCoverageBlock>,
 }
 
-pub(crate) struct TraverseCoverageGraphWithLoops {
+pub(super) struct TraverseCoverageGraphWithLoops {
     pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
     pub context_stack: Vec<TraversalContext>,
     visited: BitSet<BasicCoverageBlock>,
@@ -642,7 +642,7 @@ impl TraverseCoverageGraphWithLoops {
     }
 }
 
-fn find_loop_backedges(
+pub(super) fn find_loop_backedges(
     basic_coverage_blocks: &CoverageGraph,
 ) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
     let num_bcbs = basic_coverage_blocks.num_nodes();
diff --git a/compiler/rustc_mir/src/transform/coverage/mod.rs b/compiler/rustc_mir/src/transform/coverage/mod.rs
index c55349239b0..192bb6680e4 100644
--- a/compiler/rustc_mir/src/transform/coverage/mod.rs
+++ b/compiler/rustc_mir/src/transform/coverage/mod.rs
@@ -5,6 +5,9 @@ mod debug;
 mod graph;
 mod spans;
 
+#[cfg(test)]
+mod tests;
+
 use counters::CoverageCounters;
 use graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
 use spans::{CoverageSpan, CoverageSpans};
@@ -31,7 +34,7 @@ use rustc_span::{CharPos, Pos, SourceFile, Span, Symbol};
 
 /// A simple error message wrapper for `coverage::Error`s.
 #[derive(Debug)]
-pub(crate) struct Error {
+struct Error {
     message: String,
 }
 
diff --git a/compiler/rustc_mir/src/transform/coverage/spans.rs b/compiler/rustc_mir/src/transform/coverage/spans.rs
index cda4fc12544..95c49922262 100644
--- a/compiler/rustc_mir/src/transform/coverage/spans.rs
+++ b/compiler/rustc_mir/src/transform/coverage/spans.rs
@@ -17,7 +17,7 @@ use rustc_span::{BytePos, Span, SyntaxContext};
 use std::cmp::Ordering;
 
 #[derive(Debug, Copy, Clone)]
-pub(crate) enum CoverageStatement {
+pub(super) enum CoverageStatement {
     Statement(BasicBlock, Span, usize),
     Terminator(BasicBlock, Span),
 }
@@ -66,7 +66,7 @@ impl CoverageStatement {
 /// or is subsumed by the `Span` associated with this `CoverageSpan`, and its `BasicBlock`
 /// `is_dominated_by()` the `BasicBlock`s in this `CoverageSpan`.
 #[derive(Debug, Clone)]
-pub(crate) struct CoverageSpan {
+pub(super) struct CoverageSpan {
     pub span: Span,
     pub bcb: BasicCoverageBlock,
     pub coverage_statements: Vec<CoverageStatement>,
@@ -214,7 +214,7 @@ pub struct CoverageSpans<'a, 'tcx> {
 }
 
 impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
-    pub(crate) fn generate_coverage_spans(
+    pub(super) fn generate_coverage_spans(
         mir_body: &'a mir::Body<'tcx>,
         body_span: Span,
         basic_coverage_blocks: &'a CoverageGraph,
@@ -645,7 +645,10 @@ impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
     }
 }
 
-fn filtered_statement_span(statement: &'a Statement<'tcx>, body_span: Span) -> Option<Span> {
+pub(super) fn filtered_statement_span(
+    statement: &'a Statement<'tcx>,
+    body_span: Span,
+) -> Option<Span> {
     match statement.kind {
         // These statements have spans that are often outside the scope of the executed source code
         // for their parent `BasicBlock`.
@@ -686,7 +689,10 @@ fn filtered_statement_span(statement: &'a Statement<'tcx>, body_span: Span) -> O
     }
 }
 
-fn filtered_terminator_span(terminator: &'a Terminator<'tcx>, body_span: Span) -> Option<Span> {
+pub(super) fn filtered_terminator_span(
+    terminator: &'a Terminator<'tcx>,
+    body_span: Span,
+) -> Option<Span> {
     match terminator.kind {
         // These terminators have spans that don't positively contribute to computing a reasonable
         // span of actually executed source code. (For example, SwitchInt terminators extracted from
diff --git a/compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml b/compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml
new file mode 100644
index 00000000000..a9d6f0c803d
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/test_macros/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+authors = ["The Rust Project Developers"]
+name = "coverage_test_macros"
+version = "0.0.0"
+edition = "2018"
+
+[lib]
+proc-macro = true
+doctest = false
+
+[dependencies]
+proc-macro2 = "1"
diff --git a/compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs b/compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs
new file mode 100644
index 00000000000..3d6095d2738
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/test_macros/src/lib.rs
@@ -0,0 +1,6 @@
+use proc_macro::TokenStream;
+
+#[proc_macro]
+pub fn let_bcb(item: TokenStream) -> TokenStream {
+    format!("let bcb{} = graph::BasicCoverageBlock::from_usize({});", item, item).parse().unwrap()
+}
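
For reference, a call such as `let_bcb!(2);` in the tests that follow expands to a single `let` binding, per the `format!` string above. A self-contained sketch of that expansion (the `graph` module here is a hypothetical stand-in for the real `rustc_mir::transform::coverage::graph`, included only so the snippet compiles on its own):

mod graph {
    // Stand-in for the real BasicCoverageBlock newtype index.
    #[derive(Debug, PartialEq)]
    pub struct BasicCoverageBlock(usize);
    impl BasicCoverageBlock {
        pub fn from_usize(i: usize) -> Self {
            BasicCoverageBlock(i)
        }
    }
}

fn main() {
    // `let_bcb!(2);` expands to exactly this statement:
    let bcb2 = graph::BasicCoverageBlock::from_usize(2);
    assert_eq!(bcb2, graph::BasicCoverageBlock::from_usize(2));
}
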
diff --git a/compiler/rustc_mir/src/transform/coverage/tests.rs b/compiler/rustc_mir/src/transform/coverage/tests.rs
new file mode 100644
index 00000000000..d36f1b8e5f6
--- /dev/null
+++ b/compiler/rustc_mir/src/transform/coverage/tests.rs
@@ -0,0 +1,714 @@
+//! This module hosts a selection of "unit tests" for components of the `InstrumentCoverage` MIR
+//! pass.
+//!
+//! The tests construct a few "mock" objects, as needed, to support the `InstrumentCoverage`
+//! functions and algorithms. Mocked objects include instances of `mir::Body`; including
+//! `Terminator`s of various `kind`s, and `Span` objects. Some functions used by or used on
+//! real, runtime versions of these mocked-up objects have constraints (such as cross-thread
+//! limitations) and deep dependencies on other elements of the full Rust compiler (which is
+//! *not* constructed or mocked for these tests).
+//!
+//! Of particular note, attempting to simply print elements of the `mir::Body` with default
+//! `Debug` formatting can fail because some `Debug` format implementations require the
+//! `TyCtxt`, obtained via a static global variable that is *not* set for these tests.
+//! Initializing the global type context is prohibitively complex for the scope and scale of these
+//! tests (essentially requiring initializing the entire compiler).
+//!
+//! Also note that some basic features of `Span` rely on the `Span`'s own "session globals", which
+//! are unrelated to the `TyCtxt` global. Without initializing the `Span` session globals, some
+//! basic, coverage-specific features would be impossible to test, but thankfully initializing these
+//! globals is comparatively simple. The easiest way is to wrap the test in a closure argument
+//! to: `rustc_span::with_default_session_globals(|| { test_here(); })`.
+
+use super::counters;
+use super::debug;
+use super::graph;
+use super::spans;
+
+use coverage_test_macros::let_bcb;
+
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_data_structures::graph::WithSuccessors;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::coverage::CoverageKind;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, DebruijnIndex, TyS, TypeFlags};
+use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP};
+
+// All `TEMP_BLOCK` targets should be replaced before calling `to_body() -> mir::Body`.
+const TEMP_BLOCK: BasicBlock = BasicBlock::MAX;
+
+fn dummy_ty() -> &'static TyS<'static> {
+    thread_local! {
+        static DUMMY_TYS: &'static TyS<'static> = Box::leak(box TyS::make_for_test(
+            ty::Bool,
+            TypeFlags::empty(),
+            DebruijnIndex::from_usize(0),
+        ));
+    }
+
+    &DUMMY_TYS.with(|tys| *tys)
+}
+
+struct MockBlocks<'tcx> {
+    blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+    dummy_place: Place<'tcx>,
+    next_local: usize,
+}
+
+impl<'tcx> MockBlocks<'tcx> {
+    fn new() -> Self {
+        Self {
+            blocks: IndexVec::new(),
+            dummy_place: Place { local: RETURN_PLACE, projection: ty::List::empty() },
+            next_local: 0,
+        }
+    }
+
+    fn new_temp(&mut self) -> Local {
+        let index = self.next_local;
+        self.next_local += 1;
+        Local::new(index)
+    }
+
+    fn push(&mut self, kind: TerminatorKind<'tcx>) -> BasicBlock {
+        let next_lo = if let Some(last) = self.blocks.last() {
+            self.blocks[last].terminator().source_info.span.hi()
+        } else {
+            BytePos(1)
+        };
+        let next_hi = next_lo + BytePos(1);
+        self.blocks.push(BasicBlockData {
+            statements: vec![],
+            terminator: Some(Terminator {
+                source_info: SourceInfo::outermost(Span::with_root_ctxt(next_lo, next_hi)),
+                kind,
+            }),
+            is_cleanup: false,
+        })
+    }
+
+    fn link(&mut self, from_block: BasicBlock, to_block: BasicBlock) {
+        match self.blocks[from_block].terminator_mut().kind {
+            TerminatorKind::Assert { ref mut target, .. }
+            | TerminatorKind::Call { destination: Some((_, ref mut target)), .. }
+            | TerminatorKind::Drop { ref mut target, .. }
+            | TerminatorKind::DropAndReplace { ref mut target, .. }
+            | TerminatorKind::FalseEdge { real_target: ref mut target, .. }
+            | TerminatorKind::FalseUnwind { real_target: ref mut target, .. }
+            | TerminatorKind::Goto { ref mut target }
+            | TerminatorKind::InlineAsm { destination: Some(ref mut target), .. }
+            | TerminatorKind::Yield { resume: ref mut target, .. } => *target = to_block,
+            ref invalid => bug!("Invalid from_block: {:?}", invalid),
+        }
+    }
+
+    fn add_block_from(
+        &mut self,
+        some_from_block: Option<BasicBlock>,
+        to_kind: TerminatorKind<'tcx>,
+    ) -> BasicBlock {
+        let new_block = self.push(to_kind);
+        if let Some(from_block) = some_from_block {
+            self.link(from_block, new_block);
+        }
+        new_block
+    }
+
+    fn set_branch(&mut self, switchint: BasicBlock, branch_index: usize, to_block: BasicBlock) {
+        match self.blocks[switchint].terminator_mut().kind {
+            TerminatorKind::SwitchInt { ref mut targets, .. } => {
+                let mut branches = targets.iter().collect::<Vec<_>>();
+                let otherwise = if branch_index == branches.len() {
+                    to_block
+                } else {
+                    let old_otherwise = targets.otherwise();
+                    if branch_index > branches.len() {
+                        branches.push((branches.len() as u128, old_otherwise));
+                        while branches.len() < branch_index {
+                            branches.push((branches.len() as u128, TEMP_BLOCK));
+                        }
+                        to_block
+                    } else {
+                        branches[branch_index] = (branch_index as u128, to_block);
+                        old_otherwise
+                    }
+                };
+                *targets = SwitchTargets::new(branches.into_iter(), otherwise);
+            }
+            ref invalid => bug!("Invalid BasicBlock kind or no to_block: {:?}", invalid),
+        }
+    }
+
+    fn call(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+        self.add_block_from(
+            some_from_block,
+            TerminatorKind::Call {
+                func: Operand::Copy(self.dummy_place.clone()),
+                args: vec![],
+                destination: Some((self.dummy_place.clone(), TEMP_BLOCK)),
+                cleanup: None,
+                from_hir_call: false,
+                fn_span: DUMMY_SP,
+            },
+        )
+    }
+
+    fn goto(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+        self.add_block_from(some_from_block, TerminatorKind::Goto { target: TEMP_BLOCK })
+    }
+
+    fn switchint(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+        let switchint_kind = TerminatorKind::SwitchInt {
+            discr: Operand::Move(Place::from(self.new_temp())),
+            switch_ty: dummy_ty(),
+            targets: SwitchTargets::static_if(0, TEMP_BLOCK, TEMP_BLOCK),
+        };
+        self.add_block_from(some_from_block, switchint_kind)
+    }
+
+    fn return_(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+        self.add_block_from(some_from_block, TerminatorKind::Return)
+    }
+
+    fn to_body(self) -> Body<'tcx> {
+        Body::new_cfg_only(self.blocks)
+    }
+}
+
+fn debug_basic_blocks(mir_body: &Body<'tcx>) -> String {
+    format!(
+        "{:?}",
+        mir_body
+            .basic_blocks()
+            .iter_enumerated()
+            .map(|(bb, data)| {
+                let term = &data.terminator();
+                let kind = &term.kind;
+                let span = term.source_info.span;
+                let sp = format!("(span:{},{})", span.lo().to_u32(), span.hi().to_u32());
+                match kind {
+                    TerminatorKind::Assert { target, .. }
+                    | TerminatorKind::Call { destination: Some((_, target)), .. }
+                    | TerminatorKind::Drop { target, .. }
+                    | TerminatorKind::DropAndReplace { target, .. }
+                    | TerminatorKind::FalseEdge { real_target: target, .. }
+                    | TerminatorKind::FalseUnwind { real_target: target, .. }
+                    | TerminatorKind::Goto { target }
+                    | TerminatorKind::InlineAsm { destination: Some(target), .. }
+                    | TerminatorKind::Yield { resume: target, .. } => {
+                        format!("{}{:?}:{} -> {:?}", sp, bb, debug::term_type(kind), target)
+                    }
+                    TerminatorKind::SwitchInt { targets, .. } => {
+                        format!("{}{:?}:{} -> {:?}", sp, bb, debug::term_type(kind), targets)
+                    }
+                    _ => format!("{}{:?}:{}", sp, bb, debug::term_type(kind)),
+                }
+            })
+            .collect::<Vec<_>>()
+    )
+}
+
+static PRINT_GRAPHS: bool = false;
+
+fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) {
+    if PRINT_GRAPHS {
+        println!(
+            "digraph {} {{\n{}\n}}",
+            name,
+            mir_body
+                .basic_blocks()
+                .iter_enumerated()
+                .map(|(bb, data)| {
+                    format!(
+                        "    {:?} [label=\"{:?}: {}\"];\n{}",
+                        bb,
+                        bb,
+                        debug::term_type(&data.terminator().kind),
+                        mir_body
+                            .successors(bb)
+                            .map(|successor| { format!("    {:?} -> {:?};", bb, successor) })
+                            .collect::<Vec<_>>()
+                            .join("\n")
+                    )
+                })
+                .collect::<Vec<_>>()
+                .join("\n")
+        );
+    }
+}
+
+fn print_coverage_graphviz(
+    name: &str,
+    mir_body: &Body<'_>,
+    basic_coverage_blocks: &graph::CoverageGraph,
+) {
+    if PRINT_GRAPHS {
+        println!(
+            "digraph {} {{\n{}\n}}",
+            name,
+            basic_coverage_blocks
+                .iter_enumerated()
+                .map(|(bcb, bcb_data)| {
+                    format!(
+                        "    {:?} [label=\"{:?}: {}\"];\n{}",
+                        bcb,
+                        bcb,
+                        debug::term_type(&bcb_data.terminator(mir_body).kind),
+                        basic_coverage_blocks
+                            .successors(bcb)
+                            .map(|successor| { format!("    {:?} -> {:?};", bcb, successor) })
+                            .collect::<Vec<_>>()
+                            .join("\n")
+                    )
+                })
+                .collect::<Vec<_>>()
+                .join("\n")
+        );
+    }
+}
+
+/// Create a mock `Body` with a simple flow.
+fn goto_switchint() -> Body<'a> {
+    let mut blocks = MockBlocks::new();
+    let start = blocks.call(None);
+    let goto = blocks.goto(Some(start));
+    let switchint = blocks.switchint(Some(goto));
+    let then_call = blocks.call(None);
+    let else_call = blocks.call(None);
+    blocks.set_branch(switchint, 0, then_call);
+    blocks.set_branch(switchint, 1, else_call);
+    blocks.return_(Some(then_call));
+    blocks.return_(Some(else_call));
+
+    let mir_body = blocks.to_body();
+    print_mir_graphviz("mir_goto_switchint", &mir_body);
+    /* Graphviz character plots created using: `graph-easy --as=boxart`:
+                        ┌────────────────┐
+                        │   bb0: Call    │
+                        └────────────────┘
+                          │
+                          │
+                          ▼
+                        ┌────────────────┐
+                        │   bb1: Goto    │
+                        └────────────────┘
+                          │
+                          │
+                          ▼
+    ┌─────────────┐     ┌────────────────┐
+    │  bb4: Call  │ ◀── │ bb2: SwitchInt │
+    └─────────────┘     └────────────────┘
+      │                   │
+      │                   │
+      ▼                   ▼
+    ┌─────────────┐     ┌────────────────┐
+    │ bb6: Return │     │   bb3: Call    │
+    └─────────────┘     └────────────────┘
+                          │
+                          │
+                          ▼
+                        ┌────────────────┐
+                        │  bb5: Return   │
+                        └────────────────┘
+    */
+    mir_body
+}
+
+macro_rules! assert_successors {
+    ($basic_coverage_blocks:ident, $i:ident, [$($successor:ident),*]) => {
+        let mut successors = $basic_coverage_blocks.successors[$i].clone();
+        successors.sort_unstable();
+        assert_eq!(successors, vec![$($successor),*]);
+    }
+}
+
+#[test]
+fn test_covgraph_goto_switchint() {
+    let mir_body = goto_switchint();
+    if false {
+        println!("basic_blocks = {}", debug_basic_blocks(&mir_body));
+    }
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    print_coverage_graphviz("covgraph_goto_switchint ", &mir_body, &basic_coverage_blocks);
+    /*
+    ┌──────────────┐     ┌─────────────────┐
+    │ bcb2: Return │ ◀── │ bcb0: SwitchInt │
+    └──────────────┘     └─────────────────┘
+                           │
+                           │
+                           ▼
+                         ┌─────────────────┐
+                         │  bcb1: Return   │
+                         └─────────────────┘
+    */
+    assert_eq!(
+        basic_coverage_blocks.num_nodes(),
+        3,
+        "basic_coverage_blocks: {:?}",
+        basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+    );
+
+    let_bcb!(0);
+    let_bcb!(1);
+    let_bcb!(2);
+
+    assert_successors!(basic_coverage_blocks, bcb0, [bcb1, bcb2]);
+    assert_successors!(basic_coverage_blocks, bcb1, []);
+    assert_successors!(basic_coverage_blocks, bcb2, []);
+}
+
+/// Create a mock `Body` with a loop.
+fn switchint_then_loop_else_return() -> Body<'a> {
+    let mut blocks = MockBlocks::new();
+    let start = blocks.call(None);
+    let switchint = blocks.switchint(Some(start));
+    let then_call = blocks.call(None);
+    blocks.set_branch(switchint, 0, then_call);
+    let backedge_goto = blocks.goto(Some(then_call));
+    blocks.link(backedge_goto, switchint);
+    let else_return = blocks.return_(None);
+    blocks.set_branch(switchint, 1, else_return);
+
+    let mir_body = blocks.to_body();
+    print_mir_graphviz("mir_switchint_then_loop_else_return", &mir_body);
+    /*
+                        ┌────────────────┐
+                        │   bb0: Call    │
+                        └────────────────┘
+                          │
+                          │
+                          ▼
+    ┌─────────────┐     ┌────────────────┐
+    │ bb4: Return │ ◀── │ bb1: SwitchInt │ ◀┐
+    └─────────────┘     └────────────────┘  │
+                          │                 │
+                          │                 │
+                          ▼                 │
+                        ┌────────────────┐  │
+                        │   bb2: Call    │  │
+                        └────────────────┘  │
+                          │                 │
+                          │                 │
+                          ▼                 │
+                        ┌────────────────┐  │
+                        │   bb3: Goto    │ ─┘
+                        └────────────────┘
+    */
+    mir_body
+}
+
+#[test]
+fn test_covgraph_switchint_then_loop_else_return() {
+    let mir_body = switchint_then_loop_else_return();
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    print_coverage_graphviz(
+        "covgraph_switchint_then_loop_else_return",
+        &mir_body,
+        &basic_coverage_blocks,
+    );
+    /*
+                       ┌─────────────────┐
+                       │   bcb0: Call    │
+                       └─────────────────┘
+                         │
+                         │
+                         ▼
+    ┌────────────┐     ┌─────────────────┐
+    │ bcb3: Goto │ ◀── │ bcb1: SwitchInt │ ◀┐
+    └────────────┘     └─────────────────┘  │
+      │                  │                  │
+      │                  │                  │
+      │                  ▼                  │
+      │                ┌─────────────────┐  │
+      │                │  bcb2: Return   │  │
+      │                └─────────────────┘  │
+      │                                     │
+      └─────────────────────────────────────┘
+    */
+    assert_eq!(
+        basic_coverage_blocks.num_nodes(),
+        4,
+        "basic_coverage_blocks: {:?}",
+        basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+    );
+
+    let_bcb!(0);
+    let_bcb!(1);
+    let_bcb!(2);
+    let_bcb!(3);
+
+    assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
+    assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
+    assert_successors!(basic_coverage_blocks, bcb2, []);
+    assert_successors!(basic_coverage_blocks, bcb3, [bcb1]);
+}
+
+/// Create a mock `Body` with nested loops.
+fn switchint_loop_then_inner_loop_else_break() -> Body<'a> {
+    let mut blocks = MockBlocks::new();
+    let start = blocks.call(None);
+    let switchint = blocks.switchint(Some(start));
+    let then_call = blocks.call(None);
+    blocks.set_branch(switchint, 0, then_call);
+    let else_return = blocks.return_(None);
+    blocks.set_branch(switchint, 1, else_return);
+
+    let inner_start = blocks.call(Some(then_call));
+    let inner_switchint = blocks.switchint(Some(inner_start));
+    let inner_then_call = blocks.call(None);
+    blocks.set_branch(inner_switchint, 0, inner_then_call);
+    let inner_backedge_goto = blocks.goto(Some(inner_then_call));
+    blocks.link(inner_backedge_goto, inner_switchint);
+    let inner_else_break_goto = blocks.goto(None);
+    blocks.set_branch(inner_switchint, 1, inner_else_break_goto);
+
+    let backedge_goto = blocks.goto(Some(inner_else_break_goto));
+    blocks.link(backedge_goto, switchint);
+
+    let mir_body = blocks.to_body();
+    print_mir_graphviz("mir_switchint_loop_then_inner_loop_else_break", &mir_body);
+    /*
+                        ┌────────────────┐
+                        │   bb0: Call    │
+                        └────────────────┘
+                          │
+                          │
+                          ▼
+    ┌─────────────┐     ┌────────────────┐
+    │ bb3: Return │ ◀── │ bb1: SwitchInt │ ◀─────┐
+    └─────────────┘     └────────────────┘       │
+                          │                      │
+                          │                      │
+                          ▼                      │
+                        ┌────────────────┐       │
+                        │   bb2: Call    │       │
+                        └────────────────┘       │
+                          │                      │
+                          │                      │
+                          ▼                      │
+                        ┌────────────────┐       │
+                        │   bb4: Call    │       │
+                        └────────────────┘       │
+                          │                      │
+                          │                      │
+                          ▼                      │
+    ┌─────────────┐     ┌────────────────┐       │
+    │  bb8: Goto  │ ◀── │ bb5: SwitchInt │ ◀┐    │
+    └─────────────┘     └────────────────┘  │    │
+      │                   │                 │    │
+      │                   │                 │    │
+      ▼                   ▼                 │    │
+    ┌─────────────┐     ┌────────────────┐  │    │
+    │  bb9: Goto  │ ─┐  │   bb6: Call    │  │    │
+    └─────────────┘  │  └────────────────┘  │    │
+                     │    │                 │    │
+                     │    │                 │    │
+                     │    ▼                 │    │
+                     │  ┌────────────────┐  │    │
+                     │  │   bb7: Goto    │ ─┘    │
+                     │  └────────────────┘       │
+                     │                           │
+                     └───────────────────────────┘
+    */
+    mir_body
+}
+
+#[test]
+fn test_covgraph_switchint_loop_then_inner_loop_else_break() {
+    let mir_body = switchint_loop_then_inner_loop_else_break();
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    print_coverage_graphviz(
+        "covgraph_switchint_loop_then_inner_loop_else_break",
+        &mir_body,
+        &basic_coverage_blocks,
+    );
+    /*
+                         ┌─────────────────┐
+                         │   bcb0: Call    │
+                         └─────────────────┘
+                           │
+                           │
+                           ▼
+    ┌──────────────┐     ┌─────────────────┐
+    │ bcb2: Return │ ◀── │ bcb1: SwitchInt │ ◀┐
+    └──────────────┘     └─────────────────┘  │
+                           │                  │
+                           │                  │
+                           ▼                  │
+                         ┌─────────────────┐  │
+                         │   bcb3: Call    │  │
+                         └─────────────────┘  │
+                           │                  │
+                           │                  │
+                           ▼                  │
+    ┌──────────────┐     ┌─────────────────┐  │
+    │  bcb6: Goto  │ ◀── │ bcb4: SwitchInt │ ◀┼────┐
+    └──────────────┘     └─────────────────┘  │    │
+      │                    │                  │    │
+      │                    │                  │    │
+      │                    ▼                  │    │
+      │                  ┌─────────────────┐  │    │
+      │                  │   bcb5: Goto    │ ─┘    │
+      │                  └─────────────────┘       │
+      │                                            │
+      └────────────────────────────────────────────┘
+    */
+    assert_eq!(
+        basic_coverage_blocks.num_nodes(),
+        7,
+        "basic_coverage_blocks: {:?}",
+        basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+    );
+
+    let_bcb!(0);
+    let_bcb!(1);
+    let_bcb!(2);
+    let_bcb!(3);
+    let_bcb!(4);
+    let_bcb!(5);
+    let_bcb!(6);
+
+    assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
+    assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
+    assert_successors!(basic_coverage_blocks, bcb2, []);
+    assert_successors!(basic_coverage_blocks, bcb3, [bcb4]);
+    assert_successors!(basic_coverage_blocks, bcb4, [bcb5, bcb6]);
+    assert_successors!(basic_coverage_blocks, bcb5, [bcb1]);
+    assert_successors!(basic_coverage_blocks, bcb6, [bcb4]);
+}
+
+#[test]
+fn test_find_loop_backedges_none() {
+    let mir_body = goto_switchint();
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    if false {
+        println!(
+            "basic_coverage_blocks = {:?}",
+            basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+        );
+        println!("successors = {:?}", basic_coverage_blocks.successors);
+    }
+    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
+    assert_eq!(
+        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
+        0,
+        "backedges: {:?}",
+        backedges
+    );
+}
+
+#[test]
+fn test_find_loop_backedges_one() {
+    let mir_body = switchint_then_loop_else_return();
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
+    assert_eq!(
+        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
+        1,
+        "backedges: {:?}",
+        backedges
+    );
+
+    let_bcb!(1);
+    let_bcb!(3);
+
+    assert_eq!(backedges[bcb1], vec![bcb3]);
+}
+
+#[test]
+fn test_find_loop_backedges_two() {
+    let mir_body = switchint_loop_then_inner_loop_else_break();
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
+    assert_eq!(
+        backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
+        2,
+        "backedges: {:?}",
+        backedges
+    );
+
+    let_bcb!(1);
+    let_bcb!(4);
+    let_bcb!(5);
+    let_bcb!(6);
+
+    assert_eq!(backedges[bcb1], vec![bcb5]);
+    assert_eq!(backedges[bcb4], vec![bcb6]);
+}
+
+#[test]
+fn test_traverse_coverage_with_loops() {
+    let mir_body = switchint_loop_then_inner_loop_else_break();
+    let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+    let mut traversed_in_order = Vec::new();
+    let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
+    while let Some(bcb) = traversal.next(&basic_coverage_blocks) {
+        traversed_in_order.push(bcb);
+    }
+
+    let_bcb!(6);
+
+    // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except*
+    // bcb6, are inside the first loop.
+    assert_eq!(
+        *traversed_in_order.last().expect("should have elements"),
+        bcb6,
+        "bcb6 should not be visited until all nodes inside the first loop have been visited"
+    );
+}
+
+fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
+    let mut some_span: Option<Span> = None;
+    for (_, data) in mir_body.basic_blocks().iter_enumerated() {
+        let term_span = data.terminator().source_info.span;
+        if let Some(span) = some_span.as_mut() {
+            *span = span.to(term_span);
+        } else {
+            some_span = Some(term_span)
+        }
+    }
+    some_span.expect("body must have at least one BasicBlock")
+}
+
+#[test]
+fn test_make_bcb_counters() {
+    rustc_span::with_default_session_globals(|| {
+        let mir_body = goto_switchint();
+        let body_span = synthesize_body_span_from_terminators(&mir_body);
+        let mut basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+        let mut coverage_spans = Vec::new();
+        for (bcb, data) in basic_coverage_blocks.iter_enumerated() {
+            if let Some(span) =
+                spans::filtered_terminator_span(data.terminator(&mir_body), body_span)
+            {
+                coverage_spans.push(spans::CoverageSpan::for_terminator(span, bcb, data.last_bb()));
+            }
+        }
+        let mut coverage_counters = counters::CoverageCounters::new(0);
+        let intermediate_expressions = coverage_counters
+            .make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans)
+            .expect("should be Ok");
+        assert_eq!(intermediate_expressions.len(), 0);
+
+        let_bcb!(1);
+        assert_eq!(
+            1, // coincidentally, bcb1 has a `Counter` with id = 1
+            match basic_coverage_blocks[bcb1].counter().expect("should have a counter") {
+                CoverageKind::Counter { id, .. } => id,
+                _ => panic!("expected a Counter"),
+            }
+            .as_u32()
+        );
+
+        let_bcb!(2);
+        assert_eq!(
+            2, // coincidentally, bcb2 has a `Counter` with id = 2
+            match basic_coverage_blocks[bcb2].counter().expect("should have a counter") {
+                CoverageKind::Counter { id, .. } => id,
+                _ => panic!("expected a Counter"),
+            }
+            .as_u32()
+        );
+    });
+}
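Note on the helpers used in the tests above: `let_bcb!(n)` and `assert_successors!` come from the small `test_macros` proc-macro crate added alongside these tests. As a rough sketch (hypothetical expansion, not the in-tree implementation), `let_bcb!(1)` binds a local named after the index, roughly:

    let bcb1 = graph::BasicCoverageBlock::from_usize(1);

and `assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3])` compares the graph's successor list for `bcb1` against the expected nodes.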
diff --git a/compiler/rustc_mir/src/transform/inline.rs b/compiler/rustc_mir/src/transform/inline.rs
index 7737672dbde..aae98f5b6d8 100644
--- a/compiler/rustc_mir/src/transform/inline.rs
+++ b/compiler/rustc_mir/src/transform/inline.rs
@@ -7,6 +7,7 @@ use rustc_index::vec::Idx;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
 use rustc_middle::mir::visit::*;
 use rustc_middle::mir::*;
+use rustc_middle::ty::subst::Subst;
 use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
 use rustc_span::{hygiene::ExpnKind, ExpnData, Span};
 use rustc_target::spec::abi::Abi;
@@ -28,6 +29,7 @@ pub struct Inline;
 #[derive(Copy, Clone, Debug)]
 struct CallSite<'tcx> {
     callee: Instance<'tcx>,
+    fn_sig: ty::PolyFnSig<'tcx>,
     block: BasicBlock,
     target: Option<BasicBlock>,
     source_info: SourceInfo,
@@ -173,22 +175,23 @@ impl Inliner<'tcx> {
 
         // Only consider direct calls to functions
         let terminator = bb_data.terminator();
-        if let TerminatorKind::Call { func: ref op, ref destination, .. } = terminator.kind {
-            if let ty::FnDef(callee_def_id, substs) = *op.ty(caller_body, self.tcx).kind() {
-                // To resolve an instance its substs have to be fully normalized, so
-                // we do this here.
-                let normalized_substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
+        if let TerminatorKind::Call { ref func, ref destination, .. } = terminator.kind {
+            let func_ty = func.ty(caller_body, self.tcx);
+            if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+                // To resolve an instance its substs have to be fully normalized.
+                let substs = self.tcx.normalize_erasing_regions(self.param_env, substs);
                 let callee =
-                    Instance::resolve(self.tcx, self.param_env, callee_def_id, normalized_substs)
-                        .ok()
-                        .flatten()?;
+                    Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;
 
                 if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
                     return None;
                 }
 
+                let fn_sig = self.tcx.fn_sig(def_id).subst(self.tcx, substs);
+
                 return Some(CallSite {
                     callee,
+                    fn_sig,
                     block: bb,
                     target: destination.map(|(_, target)| target),
                     source_info: terminator.source_info,
@@ -203,9 +206,8 @@ impl Inliner<'tcx> {
         debug!("should_inline({:?})", callsite);
         let tcx = self.tcx;
 
-        // Cannot inline generators which haven't been transformed yet
-        if callee_body.yield_ty.is_some() {
-            debug!("    yield ty present - not inlining");
+        if callsite.fn_sig.c_variadic() {
+            debug!("callee is variadic - not inlining");
             return false;
         }
 
@@ -218,11 +220,7 @@ impl Inliner<'tcx> {
             return false;
         }
 
-        let self_no_sanitize =
-            self.codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer;
-        let callee_no_sanitize =
-            codegen_fn_attrs.no_sanitize & self.tcx.sess.opts.debugging_opts.sanitizer;
-        if self_no_sanitize != callee_no_sanitize {
+        if self.codegen_fn_attrs.no_sanitize != codegen_fn_attrs.no_sanitize {
             debug!("`callee has incompatible no_sanitize attribute - not inlining");
             return false;
         }
@@ -256,9 +254,9 @@ impl Inliner<'tcx> {
             self.tcx.sess.opts.debugging_opts.inline_mir_threshold
         };
 
-        // Significantly lower the threshold for inlining cold functions
         if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
-            threshold /= 5;
+            debug!("#[cold] present - not inlining");
+            return false;
         }
 
         // Give a bonus to functions with a small number of blocks,
@@ -447,7 +445,7 @@ impl Inliner<'tcx> {
                 };
 
                 // Copy the arguments if needed.
-                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body);
+                let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);
 
                 let mut integrator = Integrator {
                     args: &args,
@@ -528,6 +526,7 @@ impl Inliner<'tcx> {
         args: Vec<Operand<'tcx>>,
         callsite: &CallSite<'tcx>,
         caller_body: &mut Body<'tcx>,
+        callee_body: &Body<'tcx>,
     ) -> Vec<Local> {
         let tcx = self.tcx;
 
@@ -554,9 +553,7 @@ impl Inliner<'tcx> {
         //     tmp2 = tuple_tmp.2
         //
         // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
-        // FIXME(eddyb) make this check for `"rust-call"` ABI combined with
-        // `callee_body.spread_arg == None`, instead of special-casing closures.
-        if tcx.is_closure(callsite.callee.def_id()) {
+        if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
             let mut args = args.into_iter();
             let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
             let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
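For context on the new `Abi::RustCall`/`spread_arg` check: a callee using the "rust-call" ABI (for example a closure invoked through the `Fn` traits) receives its user arguments packed into a single tuple operand, which the inliner must spread back out into temporaries. A hedged, user-level illustration of the call shape involved (plain Rust, not compiler internals):

    // A closure call goes through the "rust-call" ABI: the user arguments are
    // bundled into one tuple, roughly `Fn::call(&f, (a, b))` at the MIR level, so
    // an inliner that sees this ABI must unpack the tuple into per-argument
    // temporaries (tmp0 = tuple.0, tmp1 = tuple.1) before splicing in the body.
    fn apply<F: Fn(i32, i32) -> i32>(f: F, a: i32, b: i32) -> i32 {
        f(a, b)
    }

    fn main() {
        assert_eq!(apply(|x, y| x + y, 2, 3), 5);
    }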
diff --git a/compiler/rustc_mir/src/transform/validate.rs b/compiler/rustc_mir/src/transform/validate.rs
index e1e6e71acb5..876ecee80c6 100644
--- a/compiler/rustc_mir/src/transform/validate.rs
+++ b/compiler/rustc_mir/src/transform/validate.rs
@@ -38,7 +38,9 @@ pub struct Validator {
 impl<'tcx> MirPass<'tcx> for Validator {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
         let def_id = body.source.def_id();
-        let param_env = tcx.param_env(def_id);
+        // We need to use `param_env_reveal_all_normalized`, as some optimizations
+        // change types in ways that require unfolding opaque types.
+        let param_env = tcx.param_env_reveal_all_normalized(def_id);
         let mir_phase = self.mir_phase;
 
         let always_live_locals = AlwaysLiveLocals::new(body);
@@ -79,7 +81,6 @@ pub fn equal_up_to_regions(
     }
 
     // Normalize lifetimes away on both sides, then compare.
-    let param_env = param_env.with_reveal_all_normalized(tcx);
     let normalize = |ty: Ty<'tcx>| {
         tcx.normalize_erasing_regions(
             param_env,
@@ -167,17 +168,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
             return true;
         }
         // Normalize projections and things like that.
-        // FIXME: We need to reveal_all, as some optimizations change types in ways
-        // that require unfolding opaque types.
-        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
-        let src = self.tcx.normalize_erasing_regions(param_env, src);
-        let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+        let src = self.tcx.normalize_erasing_regions(self.param_env, src);
+        let dest = self.tcx.normalize_erasing_regions(self.param_env, dest);
 
         // Type-changing assignments can happen when subtyping is used. While
         // all normal lifetimes are erased, higher-ranked types with their
         // late-bound lifetimes are still around and can lead to type
         // differences. So we compare ignoring lifetimes.
-        equal_up_to_regions(self.tcx, param_env, src, dest)
+        equal_up_to_regions(self.tcx, self.param_env, src, dest)
     }
 }
 
@@ -358,6 +356,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
             }
             TerminatorKind::Call { func, args, destination, cleanup, .. } => {
                 let func_ty = func.ty(&self.body.local_decls, self.tcx);
+                let func_ty = self.tcx.normalize_erasing_regions(self.param_env, func_ty);
                 match func_ty.kind() {
                     ty::FnPtr(..) | ty::FnDef(..) => {}
                     _ => self.fail(
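The switch to `param_env_reveal_all_normalized` and the extra normalization of `func_ty` matter because optimized MIR may assign a value of a concrete type to a place whose declared type is still an opaque `impl Trait`. A small, hedged example of user code that produces this shape once inlining reveals the hidden type:

    // After `make_iter` is inlined, the assigned value has the concrete type
    // `std::ops::Range<i32>`, while the destination was declared with the opaque
    // `impl Iterator<Item = i32>`. The validator only accepts the assignment once
    // opaque types are revealed (Reveal::All) and normalized away.
    fn make_iter() -> impl Iterator<Item = i32> {
        0..10
    }

    fn main() {
        let total: i32 = make_iter().sum();
        assert_eq!(total, 45);
    }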
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
index 7bea8220ada..07173f41cd6 100644
--- a/compiler/rustc_mir_build/src/build/matches/test.rs
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -671,6 +671,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             (&TestKind::Range { .. }, _) => None,
 
             (&TestKind::Eq { .. } | &TestKind::Len { .. }, _) => {
+                // The call to `self.test(&match_pair)` below is not actually used to generate any
+                // MIR. Instead, we just want to compare with `test` (the parameter of the method)
+                // to see if it is the same.
+                //
+                // However, at this point we can still encounter or-patterns that were extracted
+                // from previous calls to `sort_candidate`, so we need to manually address that
+                // case to avoid panicking in `self.test()`.
+                if let PatKind::Or { .. } = &*match_pair.pattern.kind {
+                    return None;
+                }
+
                 // These are all binary tests.
                 //
                 // FIXME(#29623) we can be more clever here
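The early return added above keeps or-patterns out of the equality/length comparison. A hedged illustration of a match mixing or-patterns with equality tests, in the spirit of the `issue-72680` regression test added in this diff (the exact reproducer may differ):

    // Or-patterns next to literal (equality) tests exercise the `sort_candidate`
    // path guarded above while the match is lowered to MIR.
    fn check(s: &str, flags: (bool, bool)) -> bool {
        matches!((s, flags), ("foo", (true, _)) | ("foo", (_, true)))
    }

    fn main() {
        assert!(check("foo", (true, false)));
        assert!(!check("bar", (true, true)));
    }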
diff --git a/compiler/rustc_save_analysis/src/sig.rs b/compiler/rustc_save_analysis/src/sig.rs
index 1bf8160e4c3..2f82d0546ba 100644
--- a/compiler/rustc_save_analysis/src/sig.rs
+++ b/compiler/rustc_save_analysis/src/sig.rs
@@ -21,7 +21,7 @@
 // references.
 //
 // Signatures do not include visibility info. I'm not sure if this is a feature
-// or an ommission (FIXME).
+// or an omission (FIXME).
 //
 // FIXME where clauses need implementing, defs/refs in generics are mostly missing.
 
@@ -677,7 +677,7 @@ impl<'hir> Sig for hir::Variant<'hir> {
         let mut text = self.ident.to_string();
         match self.data {
             hir::VariantData::Struct(fields, r) => {
-                let id = parent_id.unwrap();
+                let id = parent_id.ok_or("Missing id for Variant's parent")?;
                 let name_def = SigElement {
                     id: id_from_hir_id(id, scx),
                     start: offset,
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
index b37783c5820..045da7963c5 100644
--- a/compiler/rustc_target/src/spec/mod.rs
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -1444,8 +1444,8 @@ impl Target {
         }
 
         key!(is_builtin, bool);
-        key!(endian = "target_endian");
-        key!(c_int_width = "target_c_int_width");
+        key!(endian = "target-endian");
+        key!(c_int_width = "target-c-int-width");
         key!(os);
         key!(env);
         key!(vendor);
@@ -1482,7 +1482,7 @@ impl Target {
         key!(exe_suffix);
         key!(staticlib_prefix);
         key!(staticlib_suffix);
-        key!(os_family = "target_family", optional);
+        key!(os_family = "target-family", optional);
         key!(abi_return_struct_as_int, bool);
         key!(is_like_osx, bool);
         key!(is_like_solaris, bool);
@@ -1527,7 +1527,7 @@ impl Target {
         key!(limit_rdylib_exports, bool);
         key!(override_export_symbols, opt_list);
         key!(merge_functions, MergeFunctions)?;
-        key!(mcount = "target_mcount");
+        key!(mcount = "target-mcount");
         key!(llvm_abiname);
         key!(relax_elf_relocations, bool);
         key!(llvm_args, list);
@@ -1679,8 +1679,8 @@ impl ToJson for Target {
         target_val!(data_layout);
 
         target_option_val!(is_builtin);
-        target_option_val!(endian, "target_endian");
-        target_option_val!(c_int_width, "target_c_int_width");
+        target_option_val!(endian, "target-endian");
+        target_option_val!(c_int_width, "target-c-int-width");
         target_option_val!(os);
         target_option_val!(env);
         target_option_val!(vendor);
@@ -1717,7 +1717,7 @@ impl ToJson for Target {
         target_option_val!(exe_suffix);
         target_option_val!(staticlib_prefix);
         target_option_val!(staticlib_suffix);
-        target_option_val!(os_family, "target_family");
+        target_option_val!(os_family, "target-family");
         target_option_val!(abi_return_struct_as_int);
         target_option_val!(is_like_osx);
         target_option_val!(is_like_solaris);
@@ -1762,7 +1762,7 @@ impl ToJson for Target {
         target_option_val!(limit_rdylib_exports);
         target_option_val!(override_export_symbols);
         target_option_val!(merge_functions);
-        target_option_val!(mcount, "target_mcount");
+        target_option_val!(mcount, "target-mcount");
         target_option_val!(llvm_abiname);
         target_option_val!(relax_elf_relocations);
         target_option_val!(llvm_args);
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
new file mode 100644
index 00000000000..465b058cd98
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -0,0 +1,57 @@
+use core::fmt;
+use core::iter::FusedIterator;
+
+use super::VecDeque;
+
+/// An owning iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: VecDeque::into_iter
+#[derive(Clone)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<T> {
+    pub(crate) inner: VecDeque<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("IntoIter").field(&self.inner).finish()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<T> {
+        self.inner.pop_front()
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = self.inner.len();
+        (len, Some(len))
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<T> {
+        self.inner.pop_back()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {
+    fn is_empty(&self) -> bool {
+        self.inner.is_empty()
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
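A brief usage sketch for the iterator moved into this file; nothing here is new API, it only shows how `IntoIter` drains the deque from either end:

    use std::collections::VecDeque;

    fn main() {
        let deque: VecDeque<i32> = (1..=4).collect();
        let mut it = deque.into_iter();          // consumes the deque
        assert_eq!(it.next(), Some(1));          // pops from the front
        assert_eq!(it.next_back(), Some(4));     // pops from the back
        assert_eq!(it.size_hint(), (2, Some(2)));
        assert_eq!(it.collect::<Vec<_>>(), vec![2, 3]);
    }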
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
new file mode 100644
index 00000000000..ad31b991cb6
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -0,0 +1,159 @@
+use core::fmt;
+use core::iter::FusedIterator;
+use core::ops::Try;
+
+use super::{count, wrap_index, RingSlices};
+
+/// An iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter`] method on [`super::VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter`]: super::VecDeque::iter
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+    pub(crate) ring: &'a [T],
+    pub(crate) tail: usize,
+    pub(crate) head: usize,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        f.debug_tuple("Iter").field(&front).field(&back).finish()
+    }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+    fn clone(&self) -> Self {
+        Iter { ring: self.ring, tail: self.tail, head: self.head }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+    type Item = &'a T;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a T> {
+        if self.tail == self.head {
+            return None;
+        }
+        let tail = self.tail;
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+        unsafe { Some(self.ring.get_unchecked(tail)) }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = count(self.tail, self.head, self.ring.len());
+        (len, Some(len))
+    }
+
+    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        accum = front.iter().fold(accum, &mut f);
+        back.iter().fold(accum, &mut f)
+    }
+
+    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Ok = B>,
+    {
+        let (mut iter, final_res);
+        if self.tail <= self.head {
+            // single slice self.ring[self.tail..self.head]
+            iter = self.ring[self.tail..self.head].iter();
+            final_res = iter.try_fold(init, &mut f);
+        } else {
+            // two slices: self.ring[self.tail..], self.ring[..self.head]
+            let (front, back) = self.ring.split_at(self.tail);
+            let mut back_iter = back.iter();
+            let res = back_iter.try_fold(init, &mut f);
+            let len = self.ring.len();
+            self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
+            iter = front[..self.head].iter();
+            final_res = iter.try_fold(res?, &mut f);
+        }
+        self.tail = self.head - iter.len();
+        final_res
+    }
+
+    fn nth(&mut self, n: usize) -> Option<Self::Item> {
+        if n >= count(self.tail, self.head, self.ring.len()) {
+            self.tail = self.head;
+            None
+        } else {
+            self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+            self.next()
+        }
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a T> {
+        self.next_back()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a T> {
+        if self.tail == self.head {
+            return None;
+        }
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+        unsafe { Some(self.ring.get_unchecked(self.head)) }
+    }
+
+    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        accum = back.iter().rfold(accum, &mut f);
+        front.iter().rfold(accum, &mut f)
+    }
+
+    fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+    where
+        Self: Sized,
+        F: FnMut(B, Self::Item) -> R,
+        R: Try<Ok = B>,
+    {
+        let (mut iter, final_res);
+        if self.tail <= self.head {
+            // single slice self.ring[self.tail..self.head]
+            iter = self.ring[self.tail..self.head].iter();
+            final_res = iter.try_rfold(init, &mut f);
+        } else {
+            // two slices: self.ring[self.tail..], self.ring[..self.head]
+            let (front, back) = self.ring.split_at(self.tail);
+            let mut front_iter = front[..self.head].iter();
+            let res = front_iter.try_rfold(init, &mut f);
+            self.head = front_iter.len();
+            iter = back.iter();
+            final_res = iter.try_rfold(res?, &mut f);
+        }
+        self.head = self.tail + iter.len();
+        final_res
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+    fn is_empty(&self) -> bool {
+        self.head == self.tail
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
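The `wrap_index`/`count` arithmetic used by `Iter` relies on the ring buffer capacity being a power of two, so masking with `size - 1` is equivalent to a modulo. A standalone sketch of the same index math (free functions mirroring the private helpers, shown only for illustration):

    // Mirrors of the private helpers; valid only when `size` is a power of two.
    fn wrap_index(index: usize, size: usize) -> usize {
        debug_assert!(size.is_power_of_two());
        index & (size - 1)
    }

    // Number of elements between `tail` (inclusive) and `head` (exclusive).
    fn count(tail: usize, head: usize, size: usize) -> usize {
        head.wrapping_sub(tail) & (size - 1)
    }

    fn main() {
        let size = 8;
        let (tail, head) = (6, 2);                   // contents wrap past the end
        assert_eq!(count(tail, head, size), 4);      // slots 6, 7, 0, 1
        assert_eq!(wrap_index(tail.wrapping_add(1), size), 7);
        assert_eq!(wrap_index(head.wrapping_sub(3), size), 7);
    }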
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
new file mode 100644
index 00000000000..3d0c3094e26
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -0,0 +1,128 @@
+use core::fmt;
+use core::iter::FusedIterator;
+use core::marker::PhantomData;
+
+use super::{count, wrap_index, RingSlices};
+
+/// A mutable iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`super::VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: super::VecDeque::iter_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+    // Internal safety invariant: the entire slice is dereferencable.
+    pub(crate) ring: *mut [T],
+    pub(crate) tail: usize,
+    pub(crate) head: usize,
+    pub(crate) phantom: PhantomData<&'a mut [T]>,
+}
+
+// SAFETY: we do nothing thread-local and there is no interior mutability,
+// so the usual structural `Send`/`Sync` apply.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+        // The `IterMut` invariant also ensures everything is dereferencable.
+        let (front, back) = unsafe { (&*front, &*back) };
+        f.debug_tuple("IterMut").field(&front).field(&back).finish()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+    type Item = &'a mut T;
+
+    #[inline]
+    fn next(&mut self) -> Option<&'a mut T> {
+        if self.tail == self.head {
+            return None;
+        }
+        let tail = self.tail;
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+
+        unsafe {
+            let elem = self.ring.get_unchecked_mut(tail);
+            Some(&mut *elem)
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        let len = count(self.tail, self.head, self.ring.len());
+        (len, Some(len))
+    }
+
+    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+        // The `IterMut` invariant also ensures everything is dereferencable.
+        let (front, back) = unsafe { (&mut *front, &mut *back) };
+        accum = front.iter_mut().fold(accum, &mut f);
+        back.iter_mut().fold(accum, &mut f)
+    }
+
+    fn nth(&mut self, n: usize) -> Option<Self::Item> {
+        if n >= count(self.tail, self.head, self.ring.len()) {
+            self.tail = self.head;
+            None
+        } else {
+            self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+            self.next()
+        }
+    }
+
+    #[inline]
+    fn last(mut self) -> Option<&'a mut T> {
+        self.next_back()
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+    #[inline]
+    fn next_back(&mut self) -> Option<&'a mut T> {
+        if self.tail == self.head {
+            return None;
+        }
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+
+        unsafe {
+            let elem = self.ring.get_unchecked_mut(self.head);
+            Some(&mut *elem)
+        }
+    }
+
+    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+    where
+        F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+        // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+        // The `IterMut` invariant also ensures everything is dereferencable.
+        let (front, back) = unsafe { (&mut *front, &mut *back) };
+        accum = back.iter_mut().rfold(accum, &mut f);
+        front.iter_mut().rfold(accum, &mut f)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+    fn is_empty(&self) -> bool {
+        self.head == self.tail
+    }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
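A short usage sketch for `IterMut`: the raw `*mut [T]` plus `PhantomData` layout above lets the iterator hand out disjoint `&mut T` references; from the caller's side it behaves like any mutable iterator:

    use std::collections::VecDeque;

    fn main() {
        let mut deque: VecDeque<i32> = (1..=3).collect();
        // Each element is yielded exactly once as `&mut i32`, front to back.
        for x in deque.iter_mut() {
            *x *= 10;
        }
        assert_eq!(deque.iter().copied().collect::<Vec<_>>(), vec![10, 20, 30]);
    }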
diff --git a/library/alloc/src/collections/vec_deque/macros.rs b/library/alloc/src/collections/vec_deque/macros.rs
new file mode 100644
index 00000000000..0d59d312cf4
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/macros.rs
@@ -0,0 +1,19 @@
+macro_rules! __impl_slice_eq1 {
+    ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
+        #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")]
+        impl<A, B, $($vars)*> PartialEq<$rhs> for $lhs
+        where
+            A: PartialEq<B>,
+            $($constraints)*
+        {
+            fn eq(&self, other: &$rhs) -> bool {
+                if self.len() != other.len() {
+                    return false;
+                }
+                let (sa, sb) = self.as_slices();
+                let (oa, ob) = other[..].split_at(sa.len());
+                sa == oa && sb == ob
+            }
+        }
+    }
+}
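The `__impl_slice_eq1!` macro moved into this file generates the `PartialEq` impls that let a `VecDeque` be compared directly against vectors and slices. A small usage sketch of the comparisons those expansions enable:

    use std::collections::VecDeque;

    fn main() {
        let deque: VecDeque<i32> = (1..=3).collect();
        assert_eq!(deque, vec![1, 2, 3]);        // VecDeque<A> == Vec<B>
        assert_eq!(deque, &[1, 2, 3][..]);       // VecDeque<A> == &[B]
        // Both comparisons walk the deque's two ring-buffer halves via `as_slices`.
    }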
diff --git a/library/alloc/src/collections/vec_deque.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 22b02a4f849..1c183858e7a 100644
--- a/library/alloc/src/collections/vec_deque.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -7,16 +7,13 @@
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
-// ignore-tidy-filelength
-
-use core::array;
 use core::cmp::{self, Ordering};
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::iter::{repeat_with, FromIterator, FusedIterator};
+use core::iter::{repeat_with, FromIterator};
 use core::marker::PhantomData;
-use core::mem::{self, replace, ManuallyDrop};
-use core::ops::{Index, IndexMut, Range, RangeBounds, Try};
+use core::mem::{self, ManuallyDrop};
+use core::ops::{Index, IndexMut, Range, RangeBounds};
 use core::ptr::{self, NonNull};
 use core::slice;
 
@@ -24,11 +21,37 @@ use crate::collections::TryReserveError;
 use crate::raw_vec::RawVec;
 use crate::vec::Vec;
 
+#[macro_use]
+mod macros;
+
 #[stable(feature = "drain", since = "1.6.0")]
 pub use self::drain::Drain;
 
 mod drain;
 
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iter_mut::IterMut;
+
+mod iter_mut;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iter::Iter;
+
+mod iter;
+
+use self::pair_slices::PairSlices;
+
+mod pair_slices;
+
+use self::ring_slices::RingSlices;
+
+mod ring_slices;
+
 #[cfg(test)]
 mod tests;
 
@@ -68,67 +91,6 @@ pub struct VecDeque<T> {
     buf: RawVec<T>,
 }
 
-/// PairSlices pairs up equal length slice parts of two deques
-///
-/// For example, given deques "A" and "B" with the following division into slices:
-///
-/// A: [0 1 2] [3 4 5]
-/// B: [a b] [c d e]
-///
-/// It produces the following sequence of matching slices:
-///
-/// ([0 1], [a b])
-/// (\[2\], \[c\])
-/// ([3 4], [d e])
-///
-/// and the uneven remainder of either A or B is skipped.
-struct PairSlices<'a, 'b, T> {
-    a0: &'a mut [T],
-    a1: &'a mut [T],
-    b0: &'b [T],
-    b1: &'b [T],
-}
-
-impl<'a, 'b, T> PairSlices<'a, 'b, T> {
-    fn from(to: &'a mut VecDeque<T>, from: &'b VecDeque<T>) -> Self {
-        let (a0, a1) = to.as_mut_slices();
-        let (b0, b1) = from.as_slices();
-        PairSlices { a0, a1, b0, b1 }
-    }
-
-    fn has_remainder(&self) -> bool {
-        !self.b0.is_empty()
-    }
-
-    fn remainder(self) -> impl Iterator<Item = &'b [T]> {
-        array::IntoIter::new([self.b0, self.b1])
-    }
-}
-
-impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
-    type Item = (&'a mut [T], &'b [T]);
-    fn next(&mut self) -> Option<Self::Item> {
-        // Get next part length
-        let part = cmp::min(self.a0.len(), self.b0.len());
-        if part == 0 {
-            return None;
-        }
-        let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
-        let (q0, q1) = self.b0.split_at(part);
-
-        // Move a1 into a0, if it's empty (and b1, b0 the same way).
-        self.a0 = p1;
-        self.b0 = q1;
-        if self.a0.is_empty() {
-            self.a0 = replace(&mut self.a1, &mut []);
-        }
-        if self.b0.is_empty() {
-            self.b0 = replace(&mut self.b1, &[]);
-        }
-        Some((p0, q0))
-    }
-}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<T: Clone> Clone for VecDeque<T> {
     fn clone(&self) -> VecDeque<T> {
@@ -2605,61 +2567,6 @@ fn wrap_index(index: usize, size: usize) -> usize {
     index & (size - 1)
 }
 
-/// Returns the two slices that cover the `VecDeque`'s valid range
-trait RingSlices: Sized {
-    fn slice(self, from: usize, to: usize) -> Self;
-    fn split_at(self, i: usize) -> (Self, Self);
-
-    fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
-        let contiguous = tail <= head;
-        if contiguous {
-            let (empty, buf) = buf.split_at(0);
-            (buf.slice(tail, head), empty)
-        } else {
-            let (mid, right) = buf.split_at(tail);
-            let (left, _) = mid.split_at(head);
-            (right, left)
-        }
-    }
-}
-
-impl<T> RingSlices for &[T] {
-    fn slice(self, from: usize, to: usize) -> Self {
-        &self[from..to]
-    }
-    fn split_at(self, i: usize) -> (Self, Self) {
-        (*self).split_at(i)
-    }
-}
-
-impl<T> RingSlices for &mut [T] {
-    fn slice(self, from: usize, to: usize) -> Self {
-        &mut self[from..to]
-    }
-    fn split_at(self, i: usize) -> (Self, Self) {
-        (*self).split_at_mut(i)
-    }
-}
-
-impl<T> RingSlices for *mut [T] {
-    fn slice(self, from: usize, to: usize) -> Self {
-        assert!(from <= to && to < self.len());
-        // Not using `get_unchecked_mut` to keep this a safe operation.
-        let len = to - from;
-        ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
-    }
-
-    fn split_at(self, mid: usize) -> (Self, Self) {
-        let len = self.len();
-        let ptr = self.as_mut_ptr();
-        assert!(mid <= len);
-        (
-            ptr::slice_from_raw_parts_mut(ptr, mid),
-            ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
-        )
-    }
-}
-
 /// Calculate the number of elements left to be read in the buffer
 #[inline]
 fn count(tail: usize, head: usize, size: usize) -> usize {
@@ -2667,336 +2574,6 @@ fn count(tail: usize, head: usize, size: usize) -> usize {
     (head.wrapping_sub(tail)) & (size - 1)
 }
 
-/// An iterator over the elements of a `VecDeque`.
-///
-/// This `struct` is created by the [`iter`] method on [`VecDeque`]. See its
-/// documentation for more.
-///
-/// [`iter`]: VecDeque::iter
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Iter<'a, T: 'a> {
-    ring: &'a [T],
-    tail: usize,
-    head: usize,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
-        f.debug_tuple("Iter").field(&front).field(&back).finish()
-    }
-}
-
-// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Clone for Iter<'_, T> {
-    fn clone(&self) -> Self {
-        Iter { ring: self.ring, tail: self.tail, head: self.head }
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> Iterator for Iter<'a, T> {
-    type Item = &'a T;
-
-    #[inline]
-    fn next(&mut self) -> Option<&'a T> {
-        if self.tail == self.head {
-            return None;
-        }
-        let tail = self.tail;
-        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
-        unsafe { Some(self.ring.get_unchecked(tail)) }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = count(self.tail, self.head, self.ring.len());
-        (len, Some(len))
-    }
-
-    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
-    where
-        F: FnMut(Acc, Self::Item) -> Acc,
-    {
-        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
-        accum = front.iter().fold(accum, &mut f);
-        back.iter().fold(accum, &mut f)
-    }
-
-    fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
-    where
-        Self: Sized,
-        F: FnMut(B, Self::Item) -> R,
-        R: Try<Ok = B>,
-    {
-        let (mut iter, final_res);
-        if self.tail <= self.head {
-            // single slice self.ring[self.tail..self.head]
-            iter = self.ring[self.tail..self.head].iter();
-            final_res = iter.try_fold(init, &mut f);
-        } else {
-            // two slices: self.ring[self.tail..], self.ring[..self.head]
-            let (front, back) = self.ring.split_at(self.tail);
-            let mut back_iter = back.iter();
-            let res = back_iter.try_fold(init, &mut f);
-            let len = self.ring.len();
-            self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
-            iter = front[..self.head].iter();
-            final_res = iter.try_fold(res?, &mut f);
-        }
-        self.tail = self.head - iter.len();
-        final_res
-    }
-
-    fn nth(&mut self, n: usize) -> Option<Self::Item> {
-        if n >= count(self.tail, self.head, self.ring.len()) {
-            self.tail = self.head;
-            None
-        } else {
-            self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
-            self.next()
-        }
-    }
-
-    #[inline]
-    fn last(mut self) -> Option<&'a T> {
-        self.next_back()
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
-    #[inline]
-    fn next_back(&mut self) -> Option<&'a T> {
-        if self.tail == self.head {
-            return None;
-        }
-        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
-        unsafe { Some(self.ring.get_unchecked(self.head)) }
-    }
-
-    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
-    where
-        F: FnMut(Acc, Self::Item) -> Acc,
-    {
-        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
-        accum = back.iter().rfold(accum, &mut f);
-        front.iter().rfold(accum, &mut f)
-    }
-
-    fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
-    where
-        Self: Sized,
-        F: FnMut(B, Self::Item) -> R,
-        R: Try<Ok = B>,
-    {
-        let (mut iter, final_res);
-        if self.tail <= self.head {
-            // single slice self.ring[self.tail..self.head]
-            iter = self.ring[self.tail..self.head].iter();
-            final_res = iter.try_rfold(init, &mut f);
-        } else {
-            // two slices: self.ring[self.tail..], self.ring[..self.head]
-            let (front, back) = self.ring.split_at(self.tail);
-            let mut front_iter = front[..self.head].iter();
-            let res = front_iter.try_rfold(init, &mut f);
-            self.head = front_iter.len();
-            iter = back.iter();
-            final_res = iter.try_rfold(res?, &mut f);
-        }
-        self.head = self.tail + iter.len();
-        final_res
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for Iter<'_, T> {
-    fn is_empty(&self) -> bool {
-        self.head == self.tail
-    }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for Iter<'_, T> {}
-
-/// A mutable iterator over the elements of a `VecDeque`.
-///
-/// This `struct` is created by the [`iter_mut`] method on [`VecDeque`]. See its
-/// documentation for more.
-///
-/// [`iter_mut`]: VecDeque::iter_mut
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct IterMut<'a, T: 'a> {
-    // Internal safety invariant: the entire slice is dereferencable.
-    ring: *mut [T],
-    tail: usize,
-    head: usize,
-    phantom: PhantomData<&'a mut [T]>,
-}
-
-// SAFETY: we do nothing thread-local and there is no interior mutability,
-// so the usual structural `Send`/`Sync` apply.
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Send> Send for IterMut<'_, T> {}
-#[stable(feature = "rust1", since = "1.0.0")]
-unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
-        // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
-        // The `IterMut` invariant also ensures everything is dereferencable.
-        let (front, back) = unsafe { (&*front, &*back) };
-        f.debug_tuple("IterMut").field(&front).field(&back).finish()
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> Iterator for IterMut<'a, T> {
-    type Item = &'a mut T;
-
-    #[inline]
-    fn next(&mut self) -> Option<&'a mut T> {
-        if self.tail == self.head {
-            return None;
-        }
-        let tail = self.tail;
-        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
-
-        unsafe {
-            let elem = self.ring.get_unchecked_mut(tail);
-            Some(&mut *elem)
-        }
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = count(self.tail, self.head, self.ring.len());
-        (len, Some(len))
-    }
-
-    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
-    where
-        F: FnMut(Acc, Self::Item) -> Acc,
-    {
-        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
-        // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
-        // The `IterMut` invariant also ensures everything is dereferencable.
-        let (front, back) = unsafe { (&mut *front, &mut *back) };
-        accum = front.iter_mut().fold(accum, &mut f);
-        back.iter_mut().fold(accum, &mut f)
-    }
-
-    fn nth(&mut self, n: usize) -> Option<Self::Item> {
-        if n >= count(self.tail, self.head, self.ring.len()) {
-            self.tail = self.head;
-            None
-        } else {
-            self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
-            self.next()
-        }
-    }
-
-    #[inline]
-    fn last(mut self) -> Option<&'a mut T> {
-        self.next_back()
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
-    #[inline]
-    fn next_back(&mut self) -> Option<&'a mut T> {
-        if self.tail == self.head {
-            return None;
-        }
-        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
-
-        unsafe {
-            let elem = self.ring.get_unchecked_mut(self.head);
-            Some(&mut *elem)
-        }
-    }
-
-    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
-    where
-        F: FnMut(Acc, Self::Item) -> Acc,
-    {
-        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
-        // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
-        // The `IterMut` invariant also ensures everything is dereferencable.
-        let (front, back) = unsafe { (&mut *front, &mut *back) };
-        accum = back.iter_mut().rfold(accum, &mut f);
-        front.iter_mut().rfold(accum, &mut f)
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for IterMut<'_, T> {
-    fn is_empty(&self) -> bool {
-        self.head == self.tail
-    }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for IterMut<'_, T> {}
-
-/// An owning iterator over the elements of a `VecDeque`.
-///
-/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
-/// (provided by the `IntoIterator` trait). See its documentation for more.
-///
-/// [`into_iter`]: VecDeque::into_iter
-#[derive(Clone)]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct IntoIter<T> {
-    inner: VecDeque<T>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        f.debug_tuple("IntoIter").field(&self.inner).finish()
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Iterator for IntoIter<T> {
-    type Item = T;
-
-    #[inline]
-    fn next(&mut self) -> Option<T> {
-        self.inner.pop_front()
-    }
-
-    #[inline]
-    fn size_hint(&self) -> (usize, Option<usize>) {
-        let len = self.inner.len();
-        (len, Some(len))
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> DoubleEndedIterator for IntoIter<T> {
-    #[inline]
-    fn next_back(&mut self) -> Option<T> {
-        self.inner.pop_back()
-    }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for IntoIter<T> {
-    fn is_empty(&self) -> bool {
-        self.inner.is_empty()
-    }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for IntoIter<T> {}
-
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A: PartialEq> PartialEq for VecDeque<A> {
     fn eq(&self, other: &VecDeque<A>) -> bool {
@@ -3039,26 +2616,6 @@ impl<A: PartialEq> PartialEq for VecDeque<A> {
 #[stable(feature = "rust1", since = "1.0.0")]
 impl<A: Eq> Eq for VecDeque<A> {}
 
-macro_rules! __impl_slice_eq1 {
-    ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
-        #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")]
-        impl<A, B, $($vars)*> PartialEq<$rhs> for $lhs
-        where
-            A: PartialEq<B>,
-            $($constraints)*
-        {
-            fn eq(&self, other: &$rhs) -> bool {
-                if self.len() != other.len() {
-                    return false;
-                }
-                let (sa, sb) = self.as_slices();
-                let (oa, ob) = other[..].split_at(sa.len());
-                sa == oa && sb == ob
-            }
-        }
-    }
-}
-
 __impl_slice_eq1! { [] VecDeque<A>, Vec<B>, }
 __impl_slice_eq1! { [] VecDeque<A>, &[B], }
 __impl_slice_eq1! { [] VecDeque<A>, &mut [B], }
diff --git a/library/alloc/src/collections/vec_deque/pair_slices.rs b/library/alloc/src/collections/vec_deque/pair_slices.rs
new file mode 100644
index 00000000000..812765d0b0d
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/pair_slices.rs
@@ -0,0 +1,66 @@
+use core::array;
+use core::cmp::{self};
+use core::mem::replace;
+
+use super::VecDeque;
+
+/// PairSlices pairs up equal length slice parts of two deques
+///
+/// For example, given deques "A" and "B" with the following division into slices:
+///
+/// A: [0 1 2] [3 4 5]
+/// B: [a b] [c d e]
+///
+/// It produces the following sequence of matching slices:
+///
+/// ([0 1], [a b])
+/// (\[2\], \[c\])
+/// ([3 4], [d e])
+///
+/// and the uneven remainder of either A or B is skipped.
+pub struct PairSlices<'a, 'b, T> {
+    pub(crate) a0: &'a mut [T],
+    pub(crate) a1: &'a mut [T],
+    pub(crate) b0: &'b [T],
+    pub(crate) b1: &'b [T],
+}
+
+impl<'a, 'b, T> PairSlices<'a, 'b, T> {
+    pub fn from(to: &'a mut VecDeque<T>, from: &'b VecDeque<T>) -> Self {
+        let (a0, a1) = to.as_mut_slices();
+        let (b0, b1) = from.as_slices();
+        PairSlices { a0, a1, b0, b1 }
+    }
+
+    pub fn has_remainder(&self) -> bool {
+        !self.b0.is_empty()
+    }
+
+    pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
+        array::IntoIter::new([self.b0, self.b1])
+    }
+}
+
+impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
+    type Item = (&'a mut [T], &'b [T]);
+    fn next(&mut self) -> Option<Self::Item> {
+        // Get next part length
+        let part = cmp::min(self.a0.len(), self.b0.len());
+        if part == 0 {
+            return None;
+        }
+        let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
+        let (q0, q1) = self.b0.split_at(part);
+
+        // Move a1 into a0, if it's empty (and b1, b0 the same way).
+        self.a0 = p1;
+        self.b0 = q1;
+        if self.a0.is_empty() {
+            self.a0 = replace(&mut self.a1, &mut []);
+        }
+        if self.b0.is_empty() {
+            self.b0 = replace(&mut self.b1, &[]);
+        }
+        Some((p0, q0))
+    }
+}
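To make the pairing scheme from the doc comment concrete, here is a hedged standalone re-implementation of the same chunking logic over plain immutable slices (a hypothetical helper, not the in-tree type), reproducing the `[0 1 2][3 4 5]` versus `[a b][c d e]` example:

    // Pair up equal-length chunks of (a0, a1) against (b0, b1); the uneven
    // remainder of either side is skipped, as in the PairSlices doc comment.
    fn matched_part_lengths(a: (&[i32], &[i32]), b: (&[i32], &[i32])) -> Vec<usize> {
        let (mut a0, mut a1) = a;
        let (mut b0, mut b1) = b;
        let mut parts = Vec::new();
        loop {
            let part = a0.len().min(b0.len());
            if part == 0 {
                break;
            }
            parts.push(part);
            a0 = &a0[part..];
            b0 = &b0[part..];
            // Once the first slice of a side is exhausted, continue with its second.
            if a0.is_empty() {
                a0 = a1;
                a1 = &[];
            }
            if b0.is_empty() {
                b0 = b1;
                b1 = &[];
            }
        }
        parts
    }

    fn main() {
        // A: [0 1 2] [3 4 5],  B: [a b] [c d e]  =>  matched parts of length 2, 1, 2
        let a = (&[0, 1, 2][..], &[3, 4, 5][..]);
        let b = (&[10, 11][..], &[12, 13, 14][..]);
        assert_eq!(matched_part_lengths(a, b), vec![2, 1, 2]);
    }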
diff --git a/library/alloc/src/collections/vec_deque/ring_slices.rs b/library/alloc/src/collections/vec_deque/ring_slices.rs
new file mode 100644
index 00000000000..dd0fa7d6074
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/ring_slices.rs
@@ -0,0 +1,56 @@
+use core::ptr::{self};
+
+/// Returns the two slices that cover the `VecDeque`'s valid range
+pub trait RingSlices: Sized {
+    fn slice(self, from: usize, to: usize) -> Self;
+    fn split_at(self, i: usize) -> (Self, Self);
+
+    fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
+        let contiguous = tail <= head;
+        if contiguous {
+            let (empty, buf) = buf.split_at(0);
+            (buf.slice(tail, head), empty)
+        } else {
+            let (mid, right) = buf.split_at(tail);
+            let (left, _) = mid.split_at(head);
+            (right, left)
+        }
+    }
+}
+
+impl<T> RingSlices for &[T] {
+    fn slice(self, from: usize, to: usize) -> Self {
+        &self[from..to]
+    }
+    fn split_at(self, i: usize) -> (Self, Self) {
+        (*self).split_at(i)
+    }
+}
+
+impl<T> RingSlices for &mut [T] {
+    fn slice(self, from: usize, to: usize) -> Self {
+        &mut self[from..to]
+    }
+    fn split_at(self, i: usize) -> (Self, Self) {
+        (*self).split_at_mut(i)
+    }
+}
+
+impl<T> RingSlices for *mut [T] {
+    fn slice(self, from: usize, to: usize) -> Self {
+        assert!(from <= to && to < self.len());
+        // Not using `get_unchecked_mut` to keep this a safe operation.
+        let len = to - from;
+        ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
+    }
+
+    fn split_at(self, mid: usize) -> (Self, Self) {
+        let len = self.len();
+        let ptr = self.as_mut_ptr();
+        assert!(mid <= len);
+        (
+            ptr::slice_from_raw_parts_mut(ptr, mid),
+            ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
+        )
+    }
+}
diff --git a/library/std/src/sys/wasm/mutex_atomics.rs b/library/std/src/sys/wasm/mutex_atomics.rs
index 479182ffa44..5ff0ec052b6 100644
--- a/library/std/src/sys/wasm/mutex_atomics.rs
+++ b/library/std/src/sys/wasm/mutex_atomics.rs
@@ -138,7 +138,7 @@ impl ReentrantMutex {
                 self.owner.swap(0, SeqCst);
                 // SAFETY: the caller must guarantee that `self.ptr()` is a valid i32.
                 unsafe {
-                    wasm32::atomic_notify(self.ptr() as *mut i32, 1);
+                    wasm32::memory_atomic_notify(self.ptr() as *mut i32, 1);
                 } // wake up one waiter, if any
             }
             ref mut n => *n -= 1,
diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs
index 5ce5737f318..514be9e6864 100644
--- a/src/bootstrap/dist.rs
+++ b/src/bootstrap/dist.rs
@@ -2357,6 +2357,22 @@ fn maybe_install_llvm(builder: &Builder<'_>, target: TargetSelection, dst_libdir
         return;
     }
 
+    if let Some(config) = builder.config.target_config.get(&target) {
+        if config.llvm_config.is_some() {
+            // If the LLVM was externally provided, then we don't currently copy
+            // artifacts into the sysroot. This is not necessarily the right
+            // choice (in particular, it will require the LLVM dylib to be in
+            // the linker's load path at runtime), but the common use case for
+            // external LLVMs is distribution-provided LLVMs, and in that case
+            // they're usually in the standard search path (e.g., /usr/lib), so
+            // copying them here is likely to cause problems: we may end up
+            // with the wrong files, and it isn't what distributions want.
+            //
+            // This behavior may be revisited in the future though.
+            return;
+        }
+    }
+
     // On macOS, rustc (and LLVM tools) link to an unversioned libLLVM.dylib
     // instead of libLLVM-11-rust-....dylib, as on linux. It's not entirely
     // clear why this is the case, though. llvm-config will emit the versioned
diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs
index 9f037890483..ca140b9d278 100644
--- a/src/bootstrap/lib.rs
+++ b/src/bootstrap/lib.rs
@@ -178,6 +178,7 @@ const LLVM_TOOLS: &[&str] = &[
     "llvm-size",     // used to prints the size of the linker sections of a program
     "llvm-strip",    // used to discard symbols from binary files to reduce their size
     "llvm-ar",       // used for creating and modifying archive files
+    "llvm-as",       // used to convert LLVM assembly to LLVM bitcode
     "llvm-dis",      // used to disassemble LLVM bitcode
     "llc",           // used to compile LLVM bytecode
     "opt",           // used to optimize LLVM bytecode
diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs
index 6dc83c7d70a..d716b23af60 100644
--- a/src/bootstrap/native.rs
+++ b/src/bootstrap/native.rs
@@ -348,11 +348,11 @@ fn check_llvm_version(builder: &Builder<'_>, llvm_config: &Path) {
     let version = output(cmd.arg("--version"));
     let mut parts = version.split('.').take(2).filter_map(|s| s.parse::<u32>().ok());
     if let (Some(major), Some(_minor)) = (parts.next(), parts.next()) {
-        if major >= 8 {
+        if major >= 9 {
             return;
         }
     }
-    panic!("\n\nbad LLVM version: {}, need >=8.0\n\n", version)
+    panic!("\n\nbad LLVM version: {}, need >=9.0\n\n", version)
 }
 
 fn configure_cmake(
diff --git a/src/ci/docker/host-x86_64/x86_64-gnu-llvm-8/Dockerfile b/src/ci/docker/host-x86_64/x86_64-gnu-llvm-9/Dockerfile
index bd046f802c8..0ab1f727a29 100644
--- a/src/ci/docker/host-x86_64/x86_64-gnu-llvm-8/Dockerfile
+++ b/src/ci/docker/host-x86_64/x86_64-gnu-llvm-9/Dockerfile
@@ -13,7 +13,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
   cmake \
   sudo \
   gdb \
-  llvm-8-tools \
+  llvm-9-tools \
+  llvm-9-dev \
   libedit-dev \
   libssl-dev \
   pkg-config \
@@ -27,7 +28,7 @@ RUN sh /scripts/sccache.sh
 # using llvm-link-shared due to libffi issues -- see #34486
 ENV RUST_CONFIGURE_ARGS \
       --build=x86_64-unknown-linux-gnu \
-      --llvm-root=/usr/lib/llvm-8 \
+      --llvm-root=/usr/lib/llvm-9 \
       --enable-llvm-link-shared \
       --set rust.thin-lto-import-instr-limit=10
 
diff --git a/src/ci/github-actions/ci.yml b/src/ci/github-actions/ci.yml
index 031000d147c..9eea6243dfa 100644
--- a/src/ci/github-actions/ci.yml
+++ b/src/ci/github-actions/ci.yml
@@ -280,7 +280,7 @@ jobs:
           - name: mingw-check
             <<: *job-linux-xl
 
-          - name: x86_64-gnu-llvm-8
+          - name: x86_64-gnu-llvm-9
             <<: *job-linux-xl
 
           - name: x86_64-gnu-tools
@@ -412,7 +412,7 @@ jobs:
           - name: x86_64-gnu-distcheck
             <<: *job-linux-xl
 
-          - name: x86_64-gnu-llvm-8
+          - name: x86_64-gnu-llvm-9
             env:
               RUST_BACKTRACE: 1
             <<: *job-linux-xl
diff --git a/src/test/codegen/abi-efiapi.rs b/src/test/codegen/abi-efiapi.rs
index 1c0b77ad9c7..6cb2728359b 100644
--- a/src/test/codegen/abi-efiapi.rs
+++ b/src/test/codegen/abi-efiapi.rs
@@ -1,7 +1,6 @@
 // Checks if the correct annotation for the efiapi ABI is passed to llvm.
 
 // revisions:x86_64 i686 aarch64 arm riscv
-// min-llvm-version: 9.0
 // needs-llvm-components: aarch64 arm riscv
 
 //[x86_64] compile-flags: --target x86_64-unknown-uefi
diff --git a/src/test/codegen/force-unwind-tables.rs b/src/test/codegen/force-unwind-tables.rs
index eba4a7469f9..4c0a5602c6d 100644
--- a/src/test/codegen/force-unwind-tables.rs
+++ b/src/test/codegen/force-unwind-tables.rs
@@ -1,4 +1,3 @@
-// min-llvm-version: 8.0
 // compile-flags: -C no-prepopulate-passes -C force-unwind-tables=y
 
 #![crate_type="lib"]
diff --git a/src/test/mir-opt/inline/inline-compatibility.rs b/src/test/mir-opt/inline/inline-compatibility.rs
index ff9049edb4f..30aff0a64ef 100644
--- a/src/test/mir-opt/inline/inline-compatibility.rs
+++ b/src/test/mir-opt/inline/inline-compatibility.rs
@@ -1,12 +1,11 @@
 // Checks that only functions with compatible attributes are inlined.
 //
 // only-x86_64
-// needs-sanitizer-address
-// compile-flags: -Zsanitizer=address
 
 #![crate_type = "lib"]
 #![feature(no_sanitize)]
 #![feature(target_feature_11)]
+#![feature(c_variadic)]
 
 // EMIT_MIR inline_compatibility.inlined_target_feature.Inline.diff
 #[target_feature(enable = "sse2")]
@@ -35,5 +34,22 @@ pub unsafe fn not_inlined_no_sanitize() {
 pub unsafe fn target_feature() {}
 
 #[inline]
-#[no_sanitize(address, memory)]
+#[no_sanitize(address)]
 pub unsafe fn no_sanitize() {}
+
+// EMIT_MIR inline_compatibility.not_inlined_c_variadic.Inline.diff
+pub unsafe fn not_inlined_c_variadic() {
+    let s = sum(4u32, 4u32, 30u32, 200u32, 1000u32);
+}
+
+#[no_mangle]
+#[inline(always)]
+unsafe extern "C" fn sum(n: u32, mut vs: ...) -> u32 {
+    let mut s = 0;
+    let mut i = 0;
+    while i != n {
+        s += vs.arg::<u32>();
+        i += 1;
+    }
+    s
+}
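
The new not_inlined_c_variadic case above checks that the MIR inliner skips calls to C-variadic functions. As a standalone illustration of what such a function looks like when actually run, here is a minimal sketch on a nightly toolchain of the period with `#![feature(c_variadic)]`; the `main` function, the `total` binding, and the argument values are illustrative and not part of the diff above.

    #![feature(c_variadic)]

    // A C-variadic function: one fixed argument `n`, then `n` u32 values
    // pulled off the variadic list one at a time. Such functions must be
    // declared `unsafe extern "C"`.
    unsafe extern "C" fn sum(n: u32, mut vs: ...) -> u32 {
        let mut s = 0;
        let mut i = 0;
        while i != n {
            s += vs.arg::<u32>(); // each call reads the next variadic u32
            i += 1;
        }
        s
    }

    fn main() {
        // SAFETY: exactly three u32 variadic arguments are passed, matching `n`.
        let total = unsafe { sum(3, 1u32, 2u32, 3u32) };
        assert_eq!(total, 6);
    }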
diff --git a/src/test/mir-opt/inline/inline-generator.rs b/src/test/mir-opt/inline/inline-generator.rs
new file mode 100644
index 00000000000..d11b3e548f7
--- /dev/null
+++ b/src/test/mir-opt/inline/inline-generator.rs
@@ -0,0 +1,16 @@
+// ignore-wasm32-bare compiled with panic=abort by default
+#![feature(generators, generator_trait)]
+
+use std::ops::Generator;
+use std::pin::Pin;
+
+// EMIT_MIR inline_generator.main.Inline.diff
+fn main() {
+    let _r = Pin::new(&mut g()).resume(false);
+}
+
+#[inline(always)]
+pub fn g() -> impl Generator<bool> {
+    #[inline(always)]
+    |a| { yield if a { 7 } else { 13 } }
+}
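
The inline-generator.rs test above relies on generator resume arguments: the first resume argument binds the closure parameter, and each later resume argument becomes the value of the suspended `yield` expression. The sketch below is a standalone illustration of that behaviour on a nightly toolchain of the period; the second `resume` call and the asserted values are not taken from the test, they follow the state machine visible in the emitted MIR further down.

    #![feature(generators, generator_trait)]

    use std::ops::{Generator, GeneratorState};
    use std::pin::Pin;

    fn main() {
        // Movable generator (no borrows held across `yield`), so it is Unpin
        // and Pin::new on a plain &mut is enough.
        let mut gen = |a: bool| {
            yield if a { 7 } else { 13 }
        };
        let mut pinned = Pin::new(&mut gen);
        // First resume: `a` is bound to the argument (`false`), so 13 is yielded.
        assert_eq!(pinned.as_mut().resume(false), GeneratorState::Yielded(13));
        // Second resume: the suspended `yield` expression evaluates to the new
        // argument (`true`), which is also the generator's return value here.
        assert_eq!(pinned.as_mut().resume(true), GeneratorState::Complete(true));
    }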
diff --git a/src/test/mir-opt/inline/inline_compatibility.inlined_no_sanitize.Inline.diff b/src/test/mir-opt/inline/inline_compatibility.inlined_no_sanitize.Inline.diff
index 451ec39422f..c95cf476957 100644
--- a/src/test/mir-opt/inline/inline_compatibility.inlined_no_sanitize.Inline.diff
+++ b/src/test/mir-opt/inline/inline_compatibility.inlined_no_sanitize.Inline.diff
@@ -2,24 +2,24 @@
 + // MIR for `inlined_no_sanitize` after Inline
   
   fn inlined_no_sanitize() -> () {
-      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:24:37: 24:37
-      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:25:5: 25:18
-+     scope 1 (inlined no_sanitize) {      // at $DIR/inline-compatibility.rs:25:5: 25:18
+      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:23:37: 23:37
+      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:24:5: 24:18
++     scope 1 (inlined no_sanitize) {      // at $DIR/inline-compatibility.rs:24:5: 24:18
 +     }
   
       bb0: {
-          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:25:5: 25:18
--         _1 = no_sanitize() -> bb1;       // scope 0 at $DIR/inline-compatibility.rs:25:5: 25:18
+          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:24:5: 24:18
+-         _1 = no_sanitize() -> bb1;       // scope 0 at $DIR/inline-compatibility.rs:24:5: 24:18
 -                                          // mir::Constant
--                                          // + span: $DIR/inline-compatibility.rs:25:5: 25:16
+-                                          // + span: $DIR/inline-compatibility.rs:24:5: 24:16
 -                                          // + literal: Const { ty: unsafe fn() {no_sanitize}, val: Value(Scalar(<ZST>)) }
 -     }
 - 
 -     bb1: {
-+         _1 = const ();                   // scope 1 at $DIR/inline-compatibility.rs:25:5: 25:18
-          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:25:18: 25:19
-          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:24:37: 26:2
-          return;                          // scope 0 at $DIR/inline-compatibility.rs:26:2: 26:2
++         _1 = const ();                   // scope 1 at $DIR/inline-compatibility.rs:24:5: 24:18
+          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:24:18: 24:19
+          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:23:37: 25:2
+          return;                          // scope 0 at $DIR/inline-compatibility.rs:25:2: 25:2
       }
   }
   
diff --git a/src/test/mir-opt/inline/inline_compatibility.inlined_target_feature.Inline.diff b/src/test/mir-opt/inline/inline_compatibility.inlined_target_feature.Inline.diff
index a59ddd344cb..2bb92834322 100644
--- a/src/test/mir-opt/inline/inline_compatibility.inlined_target_feature.Inline.diff
+++ b/src/test/mir-opt/inline/inline_compatibility.inlined_target_feature.Inline.diff
@@ -2,24 +2,24 @@
 + // MIR for `inlined_target_feature` after Inline
   
   fn inlined_target_feature() -> () {
-      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:13:40: 13:40
-      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:14:5: 14:21
-+     scope 1 (inlined target_feature) {   // at $DIR/inline-compatibility.rs:14:5: 14:21
+      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:12:40: 12:40
+      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:13:5: 13:21
++     scope 1 (inlined target_feature) {   // at $DIR/inline-compatibility.rs:13:5: 13:21
 +     }
   
       bb0: {
-          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:14:5: 14:21
--         _1 = target_feature() -> bb1;    // scope 0 at $DIR/inline-compatibility.rs:14:5: 14:21
+          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:13:5: 13:21
+-         _1 = target_feature() -> bb1;    // scope 0 at $DIR/inline-compatibility.rs:13:5: 13:21
 -                                          // mir::Constant
--                                          // + span: $DIR/inline-compatibility.rs:14:5: 14:19
+-                                          // + span: $DIR/inline-compatibility.rs:13:5: 13:19
 -                                          // + literal: Const { ty: unsafe fn() {target_feature}, val: Value(Scalar(<ZST>)) }
 -     }
 - 
 -     bb1: {
-+         _1 = const ();                   // scope 1 at $DIR/inline-compatibility.rs:14:5: 14:21
-          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:14:21: 14:22
-          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:13:40: 15:2
-          return;                          // scope 0 at $DIR/inline-compatibility.rs:15:2: 15:2
++         _1 = const ();                   // scope 1 at $DIR/inline-compatibility.rs:13:5: 13:21
+          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:13:21: 13:22
+          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:12:40: 14:2
+          return;                          // scope 0 at $DIR/inline-compatibility.rs:14:2: 14:2
       }
   }
   
diff --git a/src/test/mir-opt/inline/inline_compatibility.not_inlined_c_variadic.Inline.diff b/src/test/mir-opt/inline/inline_compatibility.not_inlined_c_variadic.Inline.diff
new file mode 100644
index 00000000000..09bca903c80
--- /dev/null
+++ b/src/test/mir-opt/inline/inline_compatibility.not_inlined_c_variadic.Inline.diff
@@ -0,0 +1,25 @@
+- // MIR for `not_inlined_c_variadic` before Inline
++ // MIR for `not_inlined_c_variadic` after Inline
+  
+  fn not_inlined_c_variadic() -> () {
+      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:41:40: 41:40
+      let _1: u32;                         // in scope 0 at $DIR/inline-compatibility.rs:42:9: 42:10
+      scope 1 {
+          debug s => _1;                   // in scope 1 at $DIR/inline-compatibility.rs:42:9: 42:10
+      }
+  
+      bb0: {
+          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:42:9: 42:10
+          _1 = sum(const 4_u32, const 4_u32, const 30_u32, const 200_u32, const 1000_u32) -> bb1; // scope 0 at $DIR/inline-compatibility.rs:42:13: 42:52
+                                           // mir::Constant
+                                           // + span: $DIR/inline-compatibility.rs:42:13: 42:16
+                                           // + literal: Const { ty: unsafe extern "C" fn(u32, ...) -> u32 {sum}, val: Value(Scalar(<ZST>)) }
+      }
+  
+      bb1: {
+          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:41:40: 43:2
+          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:43:1: 43:2
+          return;                          // scope 0 at $DIR/inline-compatibility.rs:43:2: 43:2
+      }
+  }
+  
diff --git a/src/test/mir-opt/inline/inline_compatibility.not_inlined_no_sanitize.Inline.diff b/src/test/mir-opt/inline/inline_compatibility.not_inlined_no_sanitize.Inline.diff
index 651eadc1e84..5af3946f2e5 100644
--- a/src/test/mir-opt/inline/inline_compatibility.not_inlined_no_sanitize.Inline.diff
+++ b/src/test/mir-opt/inline/inline_compatibility.not_inlined_no_sanitize.Inline.diff
@@ -2,21 +2,21 @@
 + // MIR for `not_inlined_no_sanitize` after Inline
   
   fn not_inlined_no_sanitize() -> () {
-      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:29:41: 29:41
-      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:30:5: 30:18
+      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:28:41: 28:41
+      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:29:5: 29:18
   
       bb0: {
-          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:30:5: 30:18
-          _1 = no_sanitize() -> bb1;       // scope 0 at $DIR/inline-compatibility.rs:30:5: 30:18
+          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:29:5: 29:18
+          _1 = no_sanitize() -> bb1;       // scope 0 at $DIR/inline-compatibility.rs:29:5: 29:18
                                            // mir::Constant
-                                           // + span: $DIR/inline-compatibility.rs:30:5: 30:16
+                                           // + span: $DIR/inline-compatibility.rs:29:5: 29:16
                                            // + literal: Const { ty: unsafe fn() {no_sanitize}, val: Value(Scalar(<ZST>)) }
       }
   
       bb1: {
-          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:30:18: 30:19
-          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:29:41: 31:2
-          return;                          // scope 0 at $DIR/inline-compatibility.rs:31:2: 31:2
+          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:29:18: 29:19
+          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:28:41: 30:2
+          return;                          // scope 0 at $DIR/inline-compatibility.rs:30:2: 30:2
       }
   }
   
diff --git a/src/test/mir-opt/inline/inline_compatibility.not_inlined_target_feature.Inline.diff b/src/test/mir-opt/inline/inline_compatibility.not_inlined_target_feature.Inline.diff
index 55b9edf3adc..8c9fa573ce2 100644
--- a/src/test/mir-opt/inline/inline_compatibility.not_inlined_target_feature.Inline.diff
+++ b/src/test/mir-opt/inline/inline_compatibility.not_inlined_target_feature.Inline.diff
@@ -2,21 +2,21 @@
 + // MIR for `not_inlined_target_feature` after Inline
   
   fn not_inlined_target_feature() -> () {
-      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:18:44: 18:44
-      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:19:5: 19:21
+      let mut _0: ();                      // return place in scope 0 at $DIR/inline-compatibility.rs:17:44: 17:44
+      let _1: ();                          // in scope 0 at $DIR/inline-compatibility.rs:18:5: 18:21
   
       bb0: {
-          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:19:5: 19:21
-          _1 = target_feature() -> bb1;    // scope 0 at $DIR/inline-compatibility.rs:19:5: 19:21
+          StorageLive(_1);                 // scope 0 at $DIR/inline-compatibility.rs:18:5: 18:21
+          _1 = target_feature() -> bb1;    // scope 0 at $DIR/inline-compatibility.rs:18:5: 18:21
                                            // mir::Constant
-                                           // + span: $DIR/inline-compatibility.rs:19:5: 19:19
+                                           // + span: $DIR/inline-compatibility.rs:18:5: 18:19
                                            // + literal: Const { ty: unsafe fn() {target_feature}, val: Value(Scalar(<ZST>)) }
       }
   
       bb1: {
-          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:19:21: 19:22
-          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:18:44: 20:2
-          return;                          // scope 0 at $DIR/inline-compatibility.rs:20:2: 20:2
+          StorageDead(_1);                 // scope 0 at $DIR/inline-compatibility.rs:18:21: 18:22
+          _0 = const ();                   // scope 0 at $DIR/inline-compatibility.rs:17:44: 19:2
+          return;                          // scope 0 at $DIR/inline-compatibility.rs:19:2: 19:2
       }
   }
   
diff --git a/src/test/mir-opt/inline/inline_generator.main.Inline.diff b/src/test/mir-opt/inline/inline_generator.main.Inline.diff
new file mode 100644
index 00000000000..aa32daa82dd
--- /dev/null
+++ b/src/test/mir-opt/inline/inline_generator.main.Inline.diff
@@ -0,0 +1,123 @@
+- // MIR for `main` before Inline
++ // MIR for `main` after Inline
+  
+  fn main() -> () {
+      let mut _0: ();                      // return place in scope 0 at $DIR/inline-generator.rs:8:11: 8:11
+      let _1: std::ops::GeneratorState<<impl std::ops::Generator<bool> as std::ops::Generator<bool>>::Yield, <impl std::ops::Generator<bool> as std::ops::Generator<bool>>::Return>; // in scope 0 at $DIR/inline-generator.rs:9:9: 9:11
+      let mut _2: std::pin::Pin<&mut impl std::ops::Generator<bool>>; // in scope 0 at $DIR/inline-generator.rs:9:14: 9:32
+      let mut _3: &mut impl std::ops::Generator<bool>; // in scope 0 at $DIR/inline-generator.rs:9:23: 9:31
+      let mut _4: impl std::ops::Generator<bool>; // in scope 0 at $DIR/inline-generator.rs:9:28: 9:31
++     let mut _7: bool;                    // in scope 0 at $DIR/inline-generator.rs:9:14: 9:46
+      scope 1 {
+          debug _r => _1;                  // in scope 1 at $DIR/inline-generator.rs:9:9: 9:11
+      }
++     scope 2 (inlined g) {                // at $DIR/inline-generator.rs:9:28: 9:31
++     }
++     scope 3 (inlined Pin::<&mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]>::new) { // at $DIR/inline-generator.rs:9:14: 9:32
++         debug pointer => _3;             // in scope 3 at $DIR/inline-generator.rs:9:14: 9:32
++         let mut _5: &mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]; // in scope 3 at $DIR/inline-generator.rs:9:14: 9:32
++         scope 4 {
++             scope 5 (inlined Pin::<&mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]>::new_unchecked) { // at $DIR/inline-generator.rs:9:14: 9:32
++                 debug pointer => _5;     // in scope 5 at $DIR/inline-generator.rs:9:14: 9:32
++                 let mut _6: &mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]; // in scope 5 at $DIR/inline-generator.rs:9:14: 9:32
++             }
++         }
++     }
++     scope 6 (inlined g::{closure#0}) {   // at $DIR/inline-generator.rs:9:14: 9:46
++         debug a => _8;                   // in scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         let mut _8: bool;                // in scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         let mut _9: u32;                 // in scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++     }
+  
+      bb0: {
+          StorageLive(_1);                 // scope 0 at $DIR/inline-generator.rs:9:9: 9:11
+          StorageLive(_2);                 // scope 0 at $DIR/inline-generator.rs:9:14: 9:32
+          StorageLive(_3);                 // scope 0 at $DIR/inline-generator.rs:9:23: 9:31
+          StorageLive(_4);                 // scope 0 at $DIR/inline-generator.rs:9:28: 9:31
+-         _4 = g() -> bb1;                 // scope 0 at $DIR/inline-generator.rs:9:28: 9:31
+-                                          // mir::Constant
+-                                          // + span: $DIR/inline-generator.rs:9:28: 9:29
+-                                          // + literal: Const { ty: fn() -> impl std::ops::Generator<bool> {g}, val: Value(Scalar(<ZST>)) }
+-     }
+- 
+-     bb1: {
++         discriminant(_4) = 0;            // scope 2 at $DIR/inline-generator.rs:9:28: 9:31
+          _3 = &mut _4;                    // scope 0 at $DIR/inline-generator.rs:9:23: 9:31
+-         _2 = Pin::<&mut impl Generator<bool>>::new(move _3) -> [return: bb2, unwind: bb4]; // scope 0 at $DIR/inline-generator.rs:9:14: 9:32
+-                                          // mir::Constant
+-                                          // + span: $DIR/inline-generator.rs:9:14: 9:22
+-                                          // + user_ty: UserType(0)
+-                                          // + literal: Const { ty: fn(&mut impl std::ops::Generator<bool>) -> std::pin::Pin<&mut impl std::ops::Generator<bool>> {std::pin::Pin::<&mut impl std::ops::Generator<bool>>::new}, val: Value(Scalar(<ZST>)) }
+-     }
+- 
+-     bb2: {
++         StorageLive(_5);                 // scope 4 at $DIR/inline-generator.rs:9:14: 9:32
++         _5 = move _3;                    // scope 4 at $DIR/inline-generator.rs:9:14: 9:32
++         StorageLive(_6);                 // scope 5 at $DIR/inline-generator.rs:9:14: 9:32
++         _6 = move _5;                    // scope 5 at $DIR/inline-generator.rs:9:14: 9:32
++         (_2.0: &mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]) = move _6; // scope 5 at $DIR/inline-generator.rs:9:14: 9:32
++         StorageDead(_6);                 // scope 5 at $DIR/inline-generator.rs:9:14: 9:32
++         StorageDead(_5);                 // scope 4 at $DIR/inline-generator.rs:9:14: 9:32
+          StorageDead(_3);                 // scope 0 at $DIR/inline-generator.rs:9:31: 9:32
+-         _1 = <impl Generator<bool> as Generator<bool>>::resume(move _2, const false) -> [return: bb3, unwind: bb4]; // scope 0 at $DIR/inline-generator.rs:9:14: 9:46
+-                                          // mir::Constant
+-                                          // + span: $DIR/inline-generator.rs:9:33: 9:39
+-                                          // + literal: Const { ty: for<'r> fn(std::pin::Pin<&'r mut impl std::ops::Generator<bool>>, bool) -> std::ops::GeneratorState<<impl std::ops::Generator<bool> as std::ops::Generator<bool>>::Yield, <impl std::ops::Generator<bool> as std::ops::Generator<bool>>::Return> {<impl std::ops::Generator<bool> as std::ops::Generator<bool>>::resume}, val: Value(Scalar(<ZST>)) }
++         StorageLive(_7);                 // scope 0 at $DIR/inline-generator.rs:9:14: 9:46
++         _7 = const false;                // scope 0 at $DIR/inline-generator.rs:9:14: 9:46
++         _9 = discriminant((*(_2.0: &mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]))); // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         switchInt(move _9) -> [0_u32: bb3, 1_u32: bb8, 3_u32: bb7, otherwise: bb9]; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
+      }
+  
+-     bb3: {
++     bb1: {
++         StorageDead(_7);                 // scope 0 at $DIR/inline-generator.rs:9:14: 9:46
+          StorageDead(_2);                 // scope 0 at $DIR/inline-generator.rs:9:45: 9:46
+          StorageDead(_4);                 // scope 0 at $DIR/inline-generator.rs:9:46: 9:47
+          _0 = const ();                   // scope 0 at $DIR/inline-generator.rs:8:11: 10:2
+          StorageDead(_1);                 // scope 0 at $DIR/inline-generator.rs:10:1: 10:2
+          return;                          // scope 0 at $DIR/inline-generator.rs:10:2: 10:2
+      }
+  
+-     bb4 (cleanup): {
++     bb2 (cleanup): {
+          resume;                          // scope 0 at $DIR/inline-generator.rs:8:1: 10:2
++     }
++ 
++     bb3: {
++         _8 = move _7;                    // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         switchInt(_8) -> [false: bb4, otherwise: bb5]; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++     }
++ 
++     bb4: {
++         ((_1 as Yielded).0: i32) = const 13_i32; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         goto -> bb6;                     // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++     }
++ 
++     bb5: {
++         ((_1 as Yielded).0: i32) = const 7_i32; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         goto -> bb6;                     // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++     }
++ 
++     bb6: {
++         discriminant(_1) = 0;            // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         discriminant((*(_2.0: &mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]))) = 3; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         goto -> bb1;                     // scope 0 at $DIR/inline-generator.rs:15:11: 15:39
++     }
++ 
++     bb7: {
++         ((_1 as Complete).0: bool) = move _7; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         discriminant(_1) = 1;            // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         discriminant((*(_2.0: &mut [generator@$DIR/inline-generator.rs:15:5: 15:41 {bool, i32}]))) = 1; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++         goto -> bb1;                     // scope 0 at $DIR/inline-generator.rs:15:41: 15:41
++     }
++ 
++     bb8: {
++         assert(const false, "generator resumed after completion") -> [success: bb8, unwind: bb2]; // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
++     }
++ 
++     bb9: {
++         unreachable;                     // scope 6 at $DIR/inline-generator.rs:9:14: 9:46
+      }
+  }
+  
diff --git a/src/test/pretty/qpath-associated-type-bound.rs b/src/test/pretty/qpath-associated-type-bound.rs
new file mode 100644
index 00000000000..e06885e0388
--- /dev/null
+++ b/src/test/pretty/qpath-associated-type-bound.rs
@@ -0,0 +1,16 @@
+// pp-exact
+
+
+mod m {
+    pub trait Tr {
+        type Ts: super::Tu;
+    }
+}
+
+trait Tu {
+    fn dummy() { }
+}
+
+fn foo<T: m::Tr>() { <T as m::Tr>::Ts::dummy(); }
+
+fn main() { }
diff --git a/src/test/rustdoc/raw-ident-eliminate-r-hashtag.rs b/src/test/rustdoc/raw-ident-eliminate-r-hashtag.rs
new file mode 100644
index 00000000000..f895a4c2104
--- /dev/null
+++ b/src/test/rustdoc/raw-ident-eliminate-r-hashtag.rs
@@ -0,0 +1,22 @@
+// ignore-tidy-linelength
+
+#![crate_type="lib"]
+
+pub mod internal {
+    // @has 'raw_ident_eliminate_r_hashtag/internal/struct.mod.html'
+    pub struct r#mod;
+
+    /// See [name], [other name]
+    ///
+    /// [name]: mod
+    /// [other name]: crate::internal::mod
+    // @has 'raw_ident_eliminate_r_hashtag/internal/struct.B.html' '//*a[@href="../../raw_ident_eliminate_r_hashtag/internal/struct.mod.html"]' 'name'
+    // @has 'raw_ident_eliminate_r_hashtag/internal/struct.B.html' '//*a[@href="../../raw_ident_eliminate_r_hashtag/internal/struct.mod.html"]' 'other name'
+    pub struct B;
+}
+
+/// See [name].
+///
+/// [name]: internal::mod
+// @has 'raw_ident_eliminate_r_hashtag/struct.A.html' '//*a[@href="../raw_ident_eliminate_r_hashtag/internal/struct.mod.html"]' 'name'
+pub struct A;
diff --git a/src/test/ui/hygiene/panic-location.run.stderr b/src/test/ui/hygiene/panic-location.run.stderr
index a437a7b5012..216b31586da 100644
--- a/src/test/ui/hygiene/panic-location.run.stderr
+++ b/src/test/ui/hygiene/panic-location.run.stderr
@@ -1,2 +1,2 @@
-thread 'main' panicked at 'capacity overflow', $SRC_DIR/alloc/src/collections/vec_deque.rs:LL:COL
+thread 'main' panicked at 'capacity overflow', $SRC_DIR/alloc/src/collections/vec_deque/mod.rs:LL:COL
 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
diff --git a/src/test/ui/issues/issue-50865-private-impl-trait/auxiliary/lib.rs b/src/test/ui/issues/issue-50865-private-impl-trait/auxiliary/lib.rs
index fb4bf2b8b44..f3a51b415fa 100644
--- a/src/test/ui/issues/issue-50865-private-impl-trait/auxiliary/lib.rs
+++ b/src/test/ui/issues/issue-50865-private-impl-trait/auxiliary/lib.rs
@@ -1,7 +1,3 @@
-// revisions: default miropt
-//[miropt]compile-flags: -Z mir-opt-level=2
-// ~^ This flag is for #77668, it used to be ICE.
-
 #![crate_type = "lib"]
 
 pub fn bar<P>( // Error won't happen if "bar" is not generic
diff --git a/src/test/ui/match/issue-72680.rs b/src/test/ui/match/issue-72680.rs
new file mode 100644
index 00000000000..5b933edc820
--- /dev/null
+++ b/src/test/ui/match/issue-72680.rs
@@ -0,0 +1,65 @@
+// run-pass
+
+#![feature(or_patterns)]
+
+fn main() {
+    assert!(f("", 0));
+    assert!(f("a", 1));
+    assert!(f("b", 1));
+
+    assert!(!f("", 1));
+    assert!(!f("a", 0));
+    assert!(!f("b", 0));
+
+    assert!(!f("asdf", 32));
+
+    ////
+
+    assert!(!g(true, true, true));
+    assert!(!g(false, true, true));
+    assert!(!g(true, false, true));
+    assert!(!g(false, false, true));
+    assert!(!g(true, true, false));
+
+    assert!(g(false, true, false));
+    assert!(g(true, false, false));
+    assert!(g(false, false, false));
+
+    ////
+
+    assert!(!h(true, true, true));
+    assert!(!h(false, true, true));
+    assert!(!h(true, false, true));
+    assert!(!h(false, false, true));
+    assert!(!h(true, true, false));
+
+    assert!(h(false, true, false));
+    assert!(h(true, false, false));
+    assert!(h(false, false, false));
+}
+
+fn f(s: &str, num: usize) -> bool {
+    match (s, num) {
+        ("", 0) | ("a" | "b", 1) => true,
+
+        _ => false,
+    }
+}
+
+fn g(x: bool, y: bool, z: bool) -> bool {
+    match (x, y, x, z) {
+        (true | false, false, true, false) => true,
+        (false, true | false, true | false, false) => true,
+        (true | false, true | false, true | false, true) => false,
+        (true, true | false, true | false, false) => false,
+    }
+}
+
+fn h(x: bool, y: bool, z: bool) -> bool {
+    match (x, (y, (x, (z,)))) {
+        (true | false, (false, (true, (false,)))) => true,
+        (false, (true | false, (true | false, (false,)))) => true,
+        (true | false, (true | false, (true | false, (true,)))) => false,
+        (true, (true | false, (true | false, (false,)))) => false,
+    }
+}
diff --git a/src/test/ui/mir/mir-inlining/ice-issue-77306-1.rs b/src/test/ui/mir/mir-inlining/ice-issue-77306-1.rs
index 4d083bf2321..ccb279f7fa2 100644
--- a/src/test/ui/mir/mir-inlining/ice-issue-77306-1.rs
+++ b/src/test/ui/mir/mir-inlining/ice-issue-77306-1.rs
@@ -1,17 +1,27 @@
-// run-pass
+// Regression test for various issues related to normalization & inlining.
+// * #68347, #77306, #77668 - missed normalization during inlining.
+// * #78442 - missed normalization in validator after inlining.
+//
+// build-pass
 // compile-flags:-Zmir-opt-level=2
 
-// Previously ICEd because we did not normalize during inlining,
-// see https://github.com/rust-lang/rust/pull/77306 for more discussion.
-
 pub fn write() {
     create()()
 }
 
+pub fn write_generic<T>(_t: T) {
+    hide()();
+}
+
 pub fn create() -> impl FnOnce() {
    || ()
 }
 
+pub fn hide() -> impl Fn() {
+    write
+}
+
 fn main() {
     write();
+    write_generic(());
 }
diff --git a/src/test/ui/sanitize/new-llvm-pass-manager-thin-lto.rs b/src/test/ui/sanitize/new-llvm-pass-manager-thin-lto.rs
index 9439df266d5..1542c7f3118 100644
--- a/src/test/ui/sanitize/new-llvm-pass-manager-thin-lto.rs
+++ b/src/test/ui/sanitize/new-llvm-pass-manager-thin-lto.rs
@@ -2,7 +2,6 @@
 // being run when compiling with new LLVM pass manager and ThinLTO.
 // Note: The issue occurred only on non-zero opt-level.
 //
-// min-llvm-version: 9.0
 // needs-sanitizer-support
 // needs-sanitizer-address
 //
diff --git a/src/test/ui/weird-exprs.rs b/src/test/ui/weird-exprs.rs
index 916cabbfb8c..2d7ebbf1d5b 100644
--- a/src/test/ui/weird-exprs.rs
+++ b/src/test/ui/weird-exprs.rs
@@ -1,6 +1,7 @@
 // run-pass
 
 #![feature(generators)]
+#![feature(destructuring_assignment)]
 
 #![allow(non_camel_case_types)]
 #![allow(dead_code)]
@@ -159,6 +160,11 @@ fn match_nested_if() {
     assert!(val);
 }
 
+fn monkey_barrel() {
+    let val = ()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=()=();
+    assert_eq!(val, ());
+}
+
 pub fn main() {
     strange();
     funny();
@@ -177,4 +183,5 @@ pub fn main() {
     r#match();
     i_yield();
     match_nested_if();
+    monkey_barrel();
 }
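
The monkey_barrel addition above only parses with the then-unstable destructuring_assignment feature: the left-hand side of `=` may be a tuple (or the unit value) of existing places, and each assignment expression evaluates to `()`, so the unit assignments chain right-associatively. A minimal standalone sketch under that assumption; the swap example and variable names are illustrative, not part of the diff.

    #![feature(destructuring_assignment)]

    fn main() {
        let (mut a, mut b) = (1, 2);
        // The right-hand side is evaluated first, then destructured into the
        // existing places, so this swaps the two variables.
        (b, a) = (a, b);
        assert_eq!((a, b), (2, 1));

        // `()` is a valid destructuring target too; `=` is right-associative
        // and evaluates to `()`, so chains like the one in monkey_barrel type-check.
        let val = () = () = ();
        assert_eq!(val, ());
    }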