Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 5
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 12
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 66
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 59
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 71
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs | 89
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs | 348
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 82
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 6
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 3
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 20
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 13
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs | 2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 114
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 1
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 19
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 4
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 5
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 2
25 files changed, 817 insertions, 131 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 28be6d033f8..d221bad28ef 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -351,7 +351,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     continue;
                 }
                 PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
-                    let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
+                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
                     let ptr_layout = cx.layout_of(ptr_ty);
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
                     llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index ad0636894b7..a57508815d6 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -77,7 +77,7 @@ pub(crate) unsafe fn codegen(
                 llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
             llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
 
-            let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr().cast());
+            let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
 
             let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
             llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
@@ -129,7 +129,7 @@ pub(crate) unsafe fn codegen(
     attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
     llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
 
-    let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr().cast());
+    let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
 
     let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
     llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
index 6d00464e0a0..39275272e42 100644
--- a/compiler/rustc_codegen_llvm/src/attributes.rs
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -1,7 +1,6 @@
 //! Set and unset common attributes on LLVM values.
 
 use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::small_str::SmallStr;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::ty::{self, TyCtxt};
@@ -481,8 +480,8 @@ pub fn from_fn_attrs<'ll, 'tcx>(
 
     let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
     let function_features = function_features.iter().map(|s| s.as_str());
-    let target_features =
-        global_features.chain(function_features).intersperse(",").collect::<SmallStr<1024>>();
+    let target_features: String =
+        global_features.chain(function_features).intersperse(",").collect();
     if !target_features.is_empty() {
         to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
     }
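A standalone sketch of what the new collection in `from_fn_attrs` computes (illustrative only: the feature strings are made up, and `Iterator::intersperse` is unstable, so outside rustc this needs the `iter_intersperse` feature or the `itertools` crate):

    #![feature(iter_intersperse)]

    fn main() {
        // Join global and per-function target features with commas, as the
        // hunk above does when building the "target-features" attribute.
        let global_features = ["+crt-static"].into_iter();
        let function_features = ["+sse4.2", "+avx"].into_iter();
        let target_features: String =
            global_features.chain(function_features).intersperse(",").collect();
        assert_eq!(target_features, "+crt-static,+sse4.2,+avx");
    }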
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 8b05af7bed9..d7dd98d7938 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -1,4 +1,4 @@
-use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers};
+use crate::back::write::{self, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers};
 use crate::errors::{
     DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
 };
@@ -302,7 +302,13 @@ fn fat_lto(
         // The linking steps below may produce errors and diagnostics within LLVM
         // which we'd like to handle and print, so set up our diagnostic handlers
         // (which get unregistered when they go out of scope below).
-        let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+        let _handler = DiagnosticHandlers::new(
+            cgcx,
+            diag_handler,
+            llcx,
+            &module,
+            CodegenDiagnosticsStage::LTO,
+        );
 
         // For all other modules we codegened we'll need to link them into our own
         // bitcode. All modules were codegened in their own LLVM context, however,
@@ -595,7 +601,7 @@ pub(crate) fn run_pass_manager(
             llvm::LLVMRustAddModuleFlag(
                 module.module_llvm.llmod(),
                 llvm::LLVMModFlagBehavior::Error,
-                c"LTOPostLink".as_ptr().cast(),
+                "LTOPostLink\0".as_ptr().cast(),
                 1,
             );
         }
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 53b4296802e..0f5e975445f 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -268,6 +268,16 @@ pub(crate) fn save_temp_bitcode(
     }
 }
 
+/// In what context is a diagnostic handler being attached to a codegen unit?
+pub enum CodegenDiagnosticsStage {
+    /// Prelink optimization stage.
+    Opt,
+    /// LTO/ThinLTO postlink optimization stage.
+    LTO,
+    /// Code generation.
+    Codegen,
+}
+
 pub struct DiagnosticHandlers<'a> {
     data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
     llcx: &'a llvm::Context,
@@ -279,6 +289,8 @@ impl<'a> DiagnosticHandlers<'a> {
         cgcx: &'a CodegenContext<LlvmCodegenBackend>,
         handler: &'a Handler,
         llcx: &'a llvm::Context,
+        module: &ModuleCodegen<ModuleLlvm>,
+        stage: CodegenDiagnosticsStage,
     ) -> Self {
         let remark_passes_all: bool;
         let remark_passes: Vec<CString>;
@@ -295,6 +307,20 @@ impl<'a> DiagnosticHandlers<'a> {
         };
         let remark_passes: Vec<*const c_char> =
             remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+        let remark_file = cgcx
+            .remark_dir
+            .as_ref()
+            // Use the .opt.yaml file suffix, which is supported by LLVM's opt-viewer.
+            .map(|dir| {
+                let stage_suffix = match stage {
+                    CodegenDiagnosticsStage::Codegen => "codegen",
+                    CodegenDiagnosticsStage::Opt => "opt",
+                    CodegenDiagnosticsStage::LTO => "lto",
+                };
+                dir.join(format!("{}.{stage_suffix}.opt.yaml", module.name))
+            })
+            .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
+
         let data = Box::into_raw(Box::new((cgcx, handler)));
         unsafe {
             let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
@@ -305,6 +331,9 @@ impl<'a> DiagnosticHandlers<'a> {
                 remark_passes_all,
                 remark_passes.as_ptr(),
                 remark_passes.len(),
+                // The `as_ref()` is important here, otherwise the `CString` will be dropped
+                // too soon!
+                remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
             );
             DiagnosticHandlers { data, llcx, old_handler }
         }
@@ -523,7 +552,8 @@ pub(crate) unsafe fn optimize(
 
     let llmod = module.module_llvm.llmod();
     let llcx = &*module.module_llvm.llcx;
-    let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+    let _handlers =
+        DiagnosticHandlers::new(cgcx, diag_handler, llcx, module, CodegenDiagnosticsStage::Opt);
 
     let module_name = module.name.clone();
     let module_name = Some(&module_name[..]);
@@ -582,7 +612,13 @@ pub(crate) unsafe fn codegen(
         let tm = &*module.module_llvm.tm;
         let module_name = module.name.clone();
         let module_name = Some(&module_name[..]);
-        let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+        let _handlers = DiagnosticHandlers::new(
+            cgcx,
+            diag_handler,
+            llcx,
+            &module,
+            CodegenDiagnosticsStage::Codegen,
+        );
 
         if cgcx.msvc_imps_needed {
             create_msvc_imps(cgcx, llcx, llmod);
@@ -775,7 +811,6 @@ pub(crate) unsafe fn codegen(
         }
 
         record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
-        drop(handlers);
     }
 
     // `.dwo` files are only emitted if:
@@ -875,14 +910,19 @@ unsafe fn embed_bitcode(
     //   passed though then these sections will show up in the final output.
     //   Additionally the flag that we need to set here is `SHF_EXCLUDE`.
     //
+    // * XCOFF - AIX linker ignores content in .ipa and .info if no auxiliary
+    //   symbol associated with these sections.
+    //
     // Unfortunately, LLVM provides no way to set custom section flags. For ELF
     // and COFF we emit the sections using module level inline assembly for that
     // reason (see issue #90326 for historical background).
+    let is_aix = cgcx.opts.target_triple.triple().contains("-aix");
     let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
         || cgcx.opts.target_triple.triple().contains("-darwin")
         || cgcx.opts.target_triple.triple().contains("-tvos")
         || cgcx.opts.target_triple.triple().contains("-watchos");
     if is_apple
+        || is_aix
         || cgcx.opts.target_triple.triple().starts_with("wasm")
         || cgcx.opts.target_triple.triple().starts_with("asmjs")
     {
@@ -891,11 +931,17 @@ unsafe fn embed_bitcode(
         let llglobal = llvm::LLVMAddGlobal(
             llmod,
             common::val_ty(llconst),
-            c"rustc.embedded.module".as_ptr().cast(),
+            "rustc.embedded.module\0".as_ptr().cast(),
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
 
-        let section = if is_apple { c"__LLVM,__bitcode" } else { c".llvmbc" };
+        let section = if is_apple {
+            "__LLVM,__bitcode\0"
+        } else if is_aix {
+            ".ipa\0"
+        } else {
+            ".llvmbc\0"
+        };
         llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
         llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
         llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
@@ -904,10 +950,16 @@ unsafe fn embed_bitcode(
         let llglobal = llvm::LLVMAddGlobal(
             llmod,
             common::val_ty(llconst),
-            c"rustc.embedded.cmdline".as_ptr().cast(),
+            "rustc.embedded.cmdline\0".as_ptr().cast(),
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
-        let section = if is_apple { c"__LLVM,__cmdline" } else { c".llvmcmd" };
+        let section = if is_apple {
+            "__LLVM,__cmdline\0"
+        } else if is_aix {
+            ".info\0"
+        } else {
+            ".llvmcmd\0"
+        };
         llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
         llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
     } else {
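With the `CodegenDiagnosticsStage` parameter above, each stage writes its LLVM remarks to a per-module `<module>.<stage>.opt.yaml` file under the configured remark directory (`cgcx.remark_dir`). A minimal sketch of that naming scheme (the directory and module name below are hypothetical):

    use std::path::{Path, PathBuf};

    // Mirrors the `remark_file` computation in `DiagnosticHandlers::new`:
    // "<remark_dir>/<module>.<stage_suffix>.opt.yaml", a suffix understood
    // by LLVM's opt-viewer tooling.
    fn remark_path(remark_dir: &Path, module_name: &str, stage_suffix: &str) -> PathBuf {
        remark_dir.join(format!("{module_name}.{stage_suffix}.opt.yaml"))
    }

    fn main() {
        let dir = PathBuf::from("target/remarks"); // hypothetical remark directory
        let path = remark_path(&dir, "my_crate.0", "opt");
        assert_eq!(path, PathBuf::from("target/remarks/my_crate.0.opt.yaml"));
    }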
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 2f7eb08ad3d..5b2bbdb4bde 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -19,6 +19,8 @@ use crate::context::CodegenCx;
 use crate::llvm;
 use crate::value::Value;
 
+use cstr::cstr;
+
 use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
 use rustc_codegen_ssa::mono_item::MonoItemExt;
 use rustc_codegen_ssa::traits::*;
@@ -108,11 +110,11 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
 
             // Create the llvm.used and llvm.compiler.used variables.
             if !cx.used_statics.borrow().is_empty() {
-                cx.create_used_variable_impl(c"llvm.used", &*cx.used_statics.borrow());
+                cx.create_used_variable_impl(cstr!("llvm.used"), &*cx.used_statics.borrow());
             }
             if !cx.compiler_used_statics.borrow().is_empty() {
                 cx.create_used_variable_impl(
-                    c"llvm.compiler.used",
+                    cstr!("llvm.compiler.used"),
                     &*cx.compiler_used_statics.borrow(),
                 );
             }
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index b4aa001547c..d55992bf092 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -6,6 +6,7 @@ use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True}
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
+use cstr::cstr;
 use libc::{c_char, c_uint};
 use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
@@ -23,7 +24,9 @@ use rustc_span::Span;
 use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions};
 use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
+use smallvec::SmallVec;
 use std::borrow::Cow;
+use std::ffi::CStr;
 use std::iter;
 use std::ops::Deref;
 use std::ptr;
@@ -43,10 +46,13 @@ impl Drop for Builder<'_, '_, '_> {
     }
 }
 
+// FIXME(eddyb) use a checked constructor when they become `const fn`.
+const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };
+
 /// Empty string, to be used where LLVM expects an instruction name, indicating
 /// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
 // FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
-const UNNAMED: *const c_char = c"".as_ptr();
+const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
 
 impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
     type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
@@ -225,7 +231,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("invoke", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
         let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
-        let mut bundles = vec![funclet_bundle];
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
 
         // Emit CFI pointer type membership test
         self.cfi_type_test(fn_attrs, fn_abi, llfn);
@@ -233,9 +242,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
         let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        bundles.push(kcfi_bundle);
+        if let Some(kcfi_bundle) = kcfi_bundle {
+            bundles.push(kcfi_bundle);
+        }
 
-        bundles.retain(|bundle| bundle.is_some());
         let invoke = unsafe {
             llvm::LLVMRustBuildInvoke(
                 self.llbuilder,
@@ -572,8 +582,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(self, zero).llval;
-        let end = dest.project_index(self, count).llval;
 
         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -582,24 +590,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         self.br(header_bb);
 
         let mut header_bx = Self::build(self.cx, header_bb);
-        let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+        let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);
 
-        let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+        let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
         header_bx.cond_br(keep_going, body_bb, next_bb);
 
         let mut body_bx = Self::build(self.cx, body_bb);
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
-        cg_elem
-            .val
-            .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
-
-        let next = body_bx.inbounds_gep(
-            self.backend_type(cg_elem.layout),
-            current,
-            &[self.const_usize(1)],
-        );
+        let dest_elem = dest.project_index(&mut body_bx, i);
+        cg_elem.val.store(&mut body_bx, dest_elem);
+
+        let next = body_bx.unchecked_uadd(i, self.const_usize(1));
         body_bx.br(header_bb);
-        header_bx.add_incoming_to_phi(current, next, body_bb);
+        header_bx.add_incoming_to_phi(i, next, body_bb);
 
         *self = Self::build(self.cx, next_bb);
     }
@@ -1005,13 +1007,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
+        let name = cstr!("cleanuppad");
         let ret = unsafe {
             llvm::LLVMBuildCleanupPad(
                 self.llbuilder,
                 parent,
                 args.as_ptr(),
                 args.len() as c_uint,
-                c"cleanuppad".as_ptr(),
+                name.as_ptr(),
             )
         };
         Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
@@ -1025,13 +1028,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }
 
     fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
+        let name = cstr!("catchpad");
         let ret = unsafe {
             llvm::LLVMBuildCatchPad(
                 self.llbuilder,
                 parent,
                 args.as_ptr(),
                 args.len() as c_uint,
-                c"catchpad".as_ptr(),
+                name.as_ptr(),
             )
         };
         Funclet::new(ret.expect("LLVM does not have support for catchpad"))
@@ -1043,13 +1047,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unwind: Option<&'ll BasicBlock>,
         handlers: &[&'ll BasicBlock],
     ) -> &'ll Value {
+        let name = cstr!("catchswitch");
         let ret = unsafe {
             llvm::LLVMBuildCatchSwitch(
                 self.llbuilder,
                 parent,
                 unwind,
                 handlers.len() as c_uint,
-                c"catchswitch".as_ptr(),
+                name.as_ptr(),
             )
         };
         let ret = ret.expect("LLVM does not have support for catchswitch");
@@ -1189,7 +1194,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("call", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
         let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
-        let mut bundles = vec![funclet_bundle];
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
 
         // Emit CFI pointer type membership test
         self.cfi_type_test(fn_attrs, fn_abi, llfn);
@@ -1197,9 +1205,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
         let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        bundles.push(kcfi_bundle);
+        if let Some(kcfi_bundle) = kcfi_bundle {
+            bundles.push(kcfi_bundle);
+        }
 
-        bundles.retain(|bundle| bundle.is_some());
         let call = unsafe {
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 2087754c66b..df52f50f86f 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -8,6 +8,7 @@ use crate::llvm::{self, True};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
+use cstr::cstr;
 use rustc_codegen_ssa::traits::*;
 use rustc_hir::def_id::DefId;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
@@ -481,9 +482,9 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
                             .all(|&byte| byte == 0);
 
                     let sect_name = if all_bytes_are_zero {
-                        c"__DATA,__thread_bss"
+                        cstr!("__DATA,__thread_bss")
                     } else {
-                        c"__DATA,__thread_data"
+                        cstr!("__DATA,__thread_data")
                     };
                     llvm::LLVMSetSection(g, sect_name.as_ptr());
                 }
@@ -512,7 +513,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
                     let val = llvm::LLVMMetadataAsValue(self.llcx, meta);
                     llvm::LLVMAddNamedMetadataOperand(
                         self.llmod,
-                        c"wasm.custom_sections".as_ptr().cast(),
+                        "wasm.custom_sections\0".as_ptr().cast(),
                         val,
                     );
                 }
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index e8a7afcc632..e1e0a442845 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -8,7 +8,8 @@ use crate::llvm_util;
 use crate::type_::Type;
 use crate::value::Value;
 
-use rustc_codegen_ssa::base::wants_msvc_seh;
+use cstr::cstr;
+use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
@@ -223,42 +224,36 @@ pub unsafe fn create_module<'ll>(
     // If skipping the PLT is enabled, we need to add some module metadata
     // to ensure intrinsic calls don't use it.
     if !sess.needs_plt() {
-        llvm::LLVMRustAddModuleFlag(
-            llmod,
-            llvm::LLVMModFlagBehavior::Warning,
-            c"RtLibUseGOT".as_ptr().cast(),
-            1,
-        );
+        let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
     }
 
     // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
     if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
+        let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
         llvm::LLVMRustAddModuleFlag(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
-            c"CFI Canonical Jump Tables".as_ptr().cast(),
+            canonical_jump_tables,
             1,
         );
     }
 
     // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
     if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
+        let enable_split_lto_unit = "EnableSplitLTOUnit\0".as_ptr().cast();
         llvm::LLVMRustAddModuleFlag(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
-            c"EnableSplitLTOUnit".as_ptr().cast(),
+            enable_split_lto_unit,
             1,
         );
     }
 
     // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
     if sess.is_sanitizer_kcfi_enabled() {
-        llvm::LLVMRustAddModuleFlag(
-            llmod,
-            llvm::LLVMModFlagBehavior::Override,
-            c"kcfi".as_ptr().cast(),
-            1,
-        );
+        let kcfi = "kcfi\0".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
     }
 
     // Control Flow Guard is currently only supported by the MSVC linker on Windows.
@@ -270,7 +265,7 @@ pub unsafe fn create_module<'ll>(
                 llvm::LLVMRustAddModuleFlag(
                     llmod,
                     llvm::LLVMModFlagBehavior::Warning,
-                    c"cfguard".as_ptr() as *const _,
+                    "cfguard\0".as_ptr() as *const _,
                     1,
                 )
             }
@@ -279,7 +274,7 @@ pub unsafe fn create_module<'ll>(
                 llvm::LLVMRustAddModuleFlag(
                     llmod,
                     llvm::LLVMModFlagBehavior::Warning,
-                    c"cfguard".as_ptr() as *const _,
+                    "cfguard\0".as_ptr() as *const _,
                     2,
                 )
             }
@@ -297,26 +292,26 @@ pub unsafe fn create_module<'ll>(
             llvm::LLVMRustAddModuleFlag(
                 llmod,
                 behavior,
-                c"branch-target-enforcement".as_ptr().cast(),
+                "branch-target-enforcement\0".as_ptr().cast(),
                 bti.into(),
             );
             llvm::LLVMRustAddModuleFlag(
                 llmod,
                 behavior,
-                c"sign-return-address".as_ptr().cast(),
+                "sign-return-address\0".as_ptr().cast(),
                 pac_ret.is_some().into(),
             );
             let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
             llvm::LLVMRustAddModuleFlag(
                 llmod,
                 behavior,
-                c"sign-return-address-all".as_ptr().cast(),
+                "sign-return-address-all\0".as_ptr().cast(),
                 pac_opts.leaf.into(),
             );
             llvm::LLVMRustAddModuleFlag(
                 llmod,
                 behavior,
-                c"sign-return-address-with-bkey".as_ptr().cast(),
+                "sign-return-address-with-bkey\0".as_ptr().cast(),
                 u32::from(pac_opts.key == PAuthKey::B),
             );
         } else {
@@ -332,7 +327,7 @@ pub unsafe fn create_module<'ll>(
         llvm::LLVMRustAddModuleFlag(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
-            c"cf-protection-branch".as_ptr().cast(),
+            "cf-protection-branch\0".as_ptr().cast(),
             1,
         )
     }
@@ -340,7 +335,7 @@ pub unsafe fn create_module<'ll>(
         llvm::LLVMRustAddModuleFlag(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
-            c"cf-protection-return".as_ptr().cast(),
+            "cf-protection-return\0".as_ptr().cast(),
             1,
         )
     }
@@ -349,7 +344,7 @@ pub unsafe fn create_module<'ll>(
         llvm::LLVMRustAddModuleFlag(
             llmod,
             llvm::LLVMModFlagBehavior::Error,
-            c"Virtual Function Elim".as_ptr().cast(),
+            "Virtual Function Elim\0".as_ptr().cast(),
             1,
         );
     }
@@ -481,13 +476,14 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     }
 
     pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
+        let section = cstr!("llvm.metadata");
         let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
 
         unsafe {
             let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
             llvm::LLVMSetInitializer(g, array);
             llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
-            llvm::LLVMSetSection(g, c"llvm.metadata".as_ptr());
+            llvm::LLVMSetSection(g, section.as_ptr());
         }
     }
 }
@@ -532,19 +528,28 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         if let Some(llpersonality) = self.eh_personality.get() {
             return llpersonality;
         }
+
+        let name = if wants_msvc_seh(self.sess()) {
+            Some("__CxxFrameHandler3")
+        } else if wants_wasm_eh(self.sess()) {
+            // LLVM specifically tests for the name of the personality function.
+            // There is no need for this function to exist anywhere, it will
+            // not be called. However, its name has to be "__gxx_wasm_personality_v0"
+            // for native wasm exceptions.
+            Some("__gxx_wasm_personality_v0")
+        } else {
+            None
+        };
+
         let tcx = self.tcx;
         let llfn = match tcx.lang_items().eh_personality() {
-            Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+            Some(def_id) if name.is_none() => self.get_fn_addr(
                 ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty())
                     .unwrap()
                     .unwrap(),
             ),
             _ => {
-                let name = if wants_msvc_seh(self.sess()) {
-                    "__CxxFrameHandler3"
-                } else {
-                    "rust_eh_personality"
-                };
+                let name = name.unwrap_or("rust_eh_personality");
                 if let Some(llfn) = self.get_declared_value(name) {
                     llfn
                 } else {
@@ -662,6 +667,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let t_f32 = self.type_f32();
         let t_f64 = self.type_f64();
         let t_metadata = self.type_metadata();
+        let t_token = self.type_token();
+
+        ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
+        ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
 
         ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
         ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
new file mode 100644
index 00000000000..1791ce4b315
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -0,0 +1,89 @@
+use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+
+/// Must match the layout of `LLVMRustCounterKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum CounterKind {
+    Zero = 0,
+    CounterValueReference = 1,
+    Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+///   * For `CounterKind::Zero`, `id` is assumed to be `0`
+///   * For `CounterKind::CounterValueReference`,  `id` matches the `counter_id` of the injected
+///     instrumentation counter (the `index` argument to the LLVM intrinsic
+///     `instrprof.increment()`)
+///   * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+///     counter expressions.
+///
+/// Corresponds to struct `llvm::coverage::Counter`.
+///
+/// Must match the layout of `LLVMRustCounter`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+    // Important: The layout (order and types of fields) must match its C++ counterpart.
+    pub kind: CounterKind,
+    id: u32,
+}
+
+impl Counter {
+    /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
+    /// `id` is not used.
+    pub fn zero() -> Self {
+        Self { kind: CounterKind::Zero, id: 0 }
+    }
+
+    /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
+    /// the given 1-based counter_id to the required 0-based equivalent for
+    /// the `Counter` encoding.
+    pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
+        Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
+    }
+
+    /// Constructs a new `Counter` of kind `Expression`.
+    pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
+        Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
+    }
+
+    /// Returns true if the `Counter` kind is `Zero`.
+    pub fn is_zero(&self) -> bool {
+        matches!(self.kind, CounterKind::Zero)
+    }
+
+    /// An explicitly-named function to get the ID value, making it more obvious
+    /// that the stored value is now 0-based.
+    pub fn zero_based_id(&self) -> u32 {
+        debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
+        self.id
+    }
+}
+
+/// Corresponds to enum `llvm::coverage::CounterExpression::ExprKind`.
+///
+/// Must match the layout of `LLVMRustCounterExprKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+    Subtract = 0,
+    Add = 1,
+}
+
+/// Corresponds to struct `llvm::coverage::CounterExpression`.
+///
+/// Must match the layout of `LLVMRustCounterExpression`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+    pub kind: ExprKind,
+    pub lhs: Counter,
+    pub rhs: Counter,
+}
+
+impl CounterExpression {
+    pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
+        Self { kind, lhs, rhs }
+    }
+}
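To make the 1-based/0-based convention above concrete, here is a tiny self-contained model of `Counter::counter_value_reference` (the real `CounterValueReference` lives in `rustc_middle`; this stand-in only mimics its `zero_based_index()`):

    // Stand-in for rustc_middle's CounterValueReference: a 1-based counter id.
    #[derive(Copy, Clone)]
    struct CounterValueReference(u32);

    impl CounterValueReference {
        fn zero_based_index(self) -> u32 {
            self.0 - 1
        }
    }

    // Stand-in for the FFI `Counter` above: kind 1 = CounterValueReference.
    #[derive(Debug, PartialEq)]
    struct Counter {
        kind: u8,
        id: u32,
    }

    fn counter_value_reference(c: CounterValueReference) -> Counter {
        // The encoded id is 0-based, matching the `Counter` constructor above.
        Counter { kind: 1, id: c.zero_based_index() }
    }

    fn main() {
        // MIR counter id 3 (1-based) is encoded as id 2 in the coverage map.
        assert_eq!(counter_value_reference(CounterValueReference(3)), Counter { kind: 1, id: 2 });
    }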
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
new file mode 100644
index 00000000000..06844afd6b8
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -0,0 +1,348 @@
+pub use super::ffi::*;
+
+use rustc_index::{IndexSlice, IndexVec};
+use rustc_middle::bug;
+use rustc_middle::mir::coverage::{
+    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
+    InjectedExpressionIndex, MappedExpressionIndex, Op,
+};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Expression {
+    lhs: ExpressionOperandId,
+    op: Op,
+    rhs: ExpressionOperandId,
+    region: Option<CodeRegion>,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (additions or subtraction), and (c) unreachable regions (always counted as zero),
+/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
+/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// computed during instrumentation, and forwarded with counters.
+///
+/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
+/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
+/// or expression), but the line or lines in the gap region are not executable (such as lines with
+/// only whitespace or comments). According to LLVM Code Coverage Mapping documentation, "A count
+/// for a gap area is only used as the line execution count if there are no other regions on a
+/// line."
+#[derive(Debug)]
+pub struct FunctionCoverage<'tcx> {
+    instance: Instance<'tcx>,
+    source_hash: u64,
+    is_used: bool,
+    counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
+    expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
+    unreachable_regions: Vec<CodeRegion>,
+}
+
+impl<'tcx> FunctionCoverage<'tcx> {
+    /// Creates a new set of coverage data for a used (called) function.
+    pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        Self::create(tcx, instance, true)
+    }
+
+    /// Creates a new set of coverage data for an unused (never called) function.
+    pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+        Self::create(tcx, instance, false)
+    }
+
+    fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
+        let coverageinfo = tcx.coverageinfo(instance.def);
+        debug!(
+            "FunctionCoverage::create(instance={:?}) has coverageinfo={:?}. is_used={}",
+            instance, coverageinfo, is_used
+        );
+        Self {
+            instance,
+            source_hash: 0, // will be set with the first `add_counter()`
+            is_used,
+            counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
+            expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
+            unreachable_regions: Vec::new(),
+        }
+    }
+
+    /// Returns true for a used (called) function, and false for an unused function.
+    pub fn is_used(&self) -> bool {
+        self.is_used
+    }
+
+    /// Sets the function source hash value. If called multiple times for the same function, all
+    /// calls should have the same hash value.
+    pub fn set_function_source_hash(&mut self, source_hash: u64) {
+        if self.source_hash == 0 {
+            self.source_hash = source_hash;
+        } else {
+            debug_assert_eq!(source_hash, self.source_hash);
+        }
+    }
+
+    /// Adds a code region to be counted by an injected counter intrinsic.
+    pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
+        if let Some(previous_region) = self.counters[id].replace(region.clone()) {
+            assert_eq!(previous_region, region, "add_counter: code region for id changed");
+        }
+    }
+
+    /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+    /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
+    /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
+    /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
+    /// knowing what the last counter ID will be.
+    ///
+    /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
+    /// struct, its vector index is computed, from the given expression ID, by subtracting from
+    /// `u32::MAX`.
+    ///
+    /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
+    /// expressions, an operand that references an expression also uses its original ID, descending
+/// from `u32::MAX`. These operands are translated only during code generation, after all
+    /// counters and expressions have been added.
+    pub fn add_counter_expression(
+        &mut self,
+        expression_id: InjectedExpressionId,
+        lhs: ExpressionOperandId,
+        op: Op,
+        rhs: ExpressionOperandId,
+        region: Option<CodeRegion>,
+    ) {
+        debug!(
+            "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
+            expression_id, lhs, op, rhs, region
+        );
+        let expression_index = self.expression_index(u32::from(expression_id));
+        debug_assert!(
+            expression_index.as_usize() < self.expressions.len(),
+            "expression_index {} is out of range for expressions.len() = {}
+            for {:?}",
+            expression_index.as_usize(),
+            self.expressions.len(),
+            self,
+        );
+        if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+            lhs,
+            op,
+            rhs,
+            region: region.clone(),
+        }) {
+            assert_eq!(
+                previous_expression,
+                Expression { lhs, op, rhs, region },
+                "add_counter_expression: expression for id changed"
+            );
+        }
+    }
+
+    /// Add a region that will be marked as "unreachable", with a constant "zero counter".
+    pub fn add_unreachable_region(&mut self, region: CodeRegion) {
+        self.unreachable_regions.push(region)
+    }
+
+    /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+    /// or not the source code structure changed between different compilations.
+    pub fn source_hash(&self) -> u64 {
+        self.source_hash
+    }
+
+    /// Generate an array of CounterExpressions, and an iterator over all `Counter`s and their
+    /// associated `Regions` (from which the LLVM-specific `CoverageMapGenerator` will create
+    /// `CounterMappingRegion`s).
+    pub fn get_expressions_and_counter_regions(
+        &self,
+    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+        assert!(
+            self.source_hash != 0 || !self.is_used,
+            "No counters provided the source_hash for used function: {:?}",
+            self.instance
+        );
+
+        let counter_regions = self.counter_regions();
+        let (counter_expressions, expression_regions) = self.expressions_with_regions();
+        let unreachable_regions = self.unreachable_regions();
+
+        let counter_regions =
+            counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
+        (counter_expressions, counter_regions)
+    }
+
+    fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+        self.counters.iter_enumerated().filter_map(|(index, entry)| {
+            // Option::map() will return None to filter out missing counters. This may happen
+            // if, for example, a MIR-instrumented counter is removed during an optimization.
+            entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
+        })
+    }
+
+    fn expressions_with_regions(
+        &self,
+    ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+        let mut counter_expressions = Vec::with_capacity(self.expressions.len());
+        let mut expression_regions = Vec::with_capacity(self.expressions.len());
+        let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+
+        // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
+        // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
+        // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
+        // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
+        // matches the injected counter index); and any other value is converted into a
+        // `CounterKind::Expression` with the expression's `new_index`.
+        //
+        // Expressions will be returned from this function in a sequential vector (array) of
+        // `CounterExpression`, so the expression IDs must be mapped from their original,
+        // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
+        //
+        // An `Expression` as an operand will have already been encountered as an `Expression` with
+        // operands, so its new_index will already have been generated (as a 1-up index value).
+        // (If an `Expression` as an operand does not have a corresponding new_index, it was
+        // probably optimized out, after the expression was injected into the MIR, so it will
+        // get a `CounterKind::Zero` instead.)
+        //
+        // In other words, an `Expression` at any given index can include other expressions as
+        // operands, but expression operands can only come from the subset of expressions having
+        // `expression_index`s lower than the referencing `Expression`. Therefore, it is
+        // reasonable to look up the new index of an expression operand while the `new_indexes`
+        // vector is only complete up to the current `ExpressionIndex`.
+        let id_to_counter = |new_indexes: &IndexSlice<
+            InjectedExpressionIndex,
+            Option<MappedExpressionIndex>,
+        >,
+                             id: ExpressionOperandId| {
+            if id == ExpressionOperandId::ZERO {
+                Some(Counter::zero())
+            } else if id.index() < self.counters.len() {
+                debug_assert!(
+                    id.index() > 0,
+                    "ExpressionOperandId indexes for counters are 1-based, but this id={}",
+                    id.index()
+                );
+                // Note: Some codegen-injected Counters may be only referenced by `Expression`s,
+                // and may not have their own `CodeRegion`s.
+                let index = CounterValueReference::from(id.index());
+                // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
+                Some(Counter::counter_value_reference(index))
+            } else {
+                let index = self.expression_index(u32::from(id));
+                self.expressions
+                    .get(index)
+                    .expect("expression id is out of range")
+                    .as_ref()
+                    // If an expression was optimized out, assume it would have produced a count
+                    // of zero. This ensures that expressions dependent on optimized-out
+                    // expressions are still valid.
+                    .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression))
+            }
+        };
+
+        for (original_index, expression) in
+            self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
+                // Option::map() will return None to filter out missing expressions. This may happen
+                // if, for example, a MIR-instrumented expression is removed during an optimization.
+                entry.as_ref().map(|expression| (original_index, expression))
+            })
+        {
+            let optional_region = &expression.region;
+            let Expression { lhs, op, rhs, .. } = *expression;
+
+            if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
+                .map(|lhs_counter| {
+                    id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
+                })
+            {
+                if lhs_counter.is_zero() && op.is_subtract() {
+                    // The left side of a subtraction was probably optimized out. As an example,
+                    // a branch condition might be evaluated as a constant expression, and the
+                    // branch could be removed, dropping unused counters in the process.
+                    //
+                    // Since counters are unsigned, we must assume the result of the expression
+                    // can be no more and no less than zero. An expression known to evaluate to zero
+                    // does not need to be added to the coverage map.
+                    //
+                    // Coverage test `loops_branches.rs` includes multiple variations of branches
+                    // based on constant conditional (literal `true` or `false`), and demonstrates
+                    // that the expected counts are still correct.
+                    debug!(
+                        "Expression subtracts from zero (assume unreachable): \
+                        original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+                        original_index, lhs, op, rhs, optional_region,
+                    );
+                    rhs_counter = Counter::zero();
+                }
+                debug_assert!(
+                    lhs_counter.is_zero()
+                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+                        || ((lhs_counter.zero_based_id() as usize)
+                            <= usize::max(self.counters.len(), self.expressions.len())),
+                    "lhs id={} > both counters.len()={} and expressions.len()={}
+                    ({:?} {:?} {:?})",
+                    lhs_counter.zero_based_id(),
+                    self.counters.len(),
+                    self.expressions.len(),
+                    lhs_counter,
+                    op,
+                    rhs_counter,
+                );
+
+                debug_assert!(
+                    rhs_counter.is_zero()
+                        // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+                        || ((rhs_counter.zero_based_id() as usize)
+                            <= usize::max(self.counters.len(), self.expressions.len())),
+                    "rhs id={} > both counters.len()={} and expressions.len()={}
+                    ({:?} {:?} {:?})",
+                    rhs_counter.zero_based_id(),
+                    self.counters.len(),
+                    self.expressions.len(),
+                    lhs_counter,
+                    op,
+                    rhs_counter,
+                );
+
+                // Both operands exist. `Expression` operands exist in `self.expressions` and have
+                // been assigned a `new_index`.
+                let mapped_expression_index =
+                    MappedExpressionIndex::from(counter_expressions.len());
+                let expression = CounterExpression::new(
+                    lhs_counter,
+                    match op {
+                        Op::Add => ExprKind::Add,
+                        Op::Subtract => ExprKind::Subtract,
+                    },
+                    rhs_counter,
+                );
+                debug!(
+                    "Adding expression {:?} = {:?}, region: {:?}",
+                    mapped_expression_index, expression, optional_region
+                );
+                counter_expressions.push(expression);
+                new_indexes[original_index] = Some(mapped_expression_index);
+                if let Some(region) = optional_region {
+                    expression_regions.push((Counter::expression(mapped_expression_index), region));
+                }
+            } else {
+                bug!(
+                    "expression has one or more missing operands \
+                      original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+                    original_index,
+                    lhs,
+                    op,
+                    rhs,
+                    optional_region,
+                );
+            }
+        }
+        (counter_expressions, expression_regions.into_iter())
+    }
+
+    fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+        self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+    }
+
+    fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
+        debug_assert!(id_descending_from_max >= self.counters.len() as u32);
+        InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
+    }
+}
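A worked example of the descending-ID scheme documented in `add_counter_expression` above (the concrete ids are hypothetical):

    // Mirrors FunctionCoverage::expression_index: expression ids count down
    // from u32::MAX, and the vector index is recovered by subtraction.
    fn expression_index(id_descending_from_max: u32) -> u32 {
        u32::MAX - id_descending_from_max
    }

    fn main() {
        // The first injected expression gets id u32::MAX, the next u32::MAX - 1, ...
        assert_eq!(expression_index(u32::MAX), 0);
        assert_eq!(expression_index(u32::MAX - 1), 1);
        // Counter ids (1, 2, 3, ...) are small, so the two id ranges never
        // collide and both kinds can appear as expression operands.
        assert_eq!(expression_index(u32::MAX - 7), 7);
    }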
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 21a1ac34844..a1ff2aa6625 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -1,10 +1,10 @@
 use crate::common::CodegenCx;
 use crate::coverageinfo;
+use crate::coverageinfo::map_data::{Counter, CounterExpression};
 use crate::llvm;
 
 use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
-use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
+use rustc_codegen_ssa::traits::ConstMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::DefId;
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index cd261293e9b..42fdbd78618 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -3,13 +3,13 @@ use crate::llvm;
 use crate::abi::Abi;
 use crate::builder::Builder;
 use crate::common::CodegenCx;
+use crate::coverageinfo::map_data::{CounterExpression, FunctionCoverage};
 
 use libc::c_uint;
 use llvm::coverageinfo::CounterMappingRegion;
-use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
 use rustc_codegen_ssa::traits::{
-    BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, CoverageInfoMethods,
-    MiscMethods, StaticMethods,
+    BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods,
+    StaticMethods,
 };
 use rustc_data_structures::fx::FxHashMap;
 use rustc_hir as hir;
@@ -17,16 +17,20 @@ use rustc_hir::def_id::DefId;
 use rustc_llvm::RustString;
 use rustc_middle::bug;
 use rustc_middle::mir::coverage::{
-    CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
+    CodeRegion, CounterValueReference, CoverageKind, ExpressionOperandId, InjectedExpressionId, Op,
 };
+use rustc_middle::mir::Coverage;
 use rustc_middle::ty;
-use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
 use rustc_middle::ty::subst::InternalSubsts;
 use rustc_middle::ty::Instance;
+use rustc_middle::ty::Ty;
 
 use std::cell::RefCell;
 use std::ffi::CString;
 
+mod ffi;
+pub(crate) mod map_data;
 pub mod mapgen;
 
 const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
@@ -53,11 +57,17 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
     }
 }
 
-impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
-    fn coverageinfo_finalize(&self) {
+// These methods used to be part of trait `CoverageInfoMethods`, which no longer
+// exists after most coverage code was moved out of SSA.
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+    pub(crate) fn coverageinfo_finalize(&self) {
         mapgen::finalize(self)
     }
 
+    /// For LLVM codegen, returns a function-specific `Value` for a global
+    /// string, to hold the function name passed to LLVM intrinsic
+    /// `instrprof.increment()`. The `Value` is only created once per instance.
+    /// Multiple invocations with the same instance return the same `Value`.
     fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
         if let Some(coverage_context) = self.coverage_context() {
             debug!("getting pgo_func_name_var for instance={:?}", instance);
@@ -94,6 +104,54 @@ impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
 }
 
 impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+    fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
+        let bx = self;
+
+        let Coverage { kind, code_region } = coverage.clone();
+        match kind {
+            CoverageKind::Counter { function_source_hash, id } => {
+                if bx.set_function_source_hash(instance, function_source_hash) {
+                    // If `set_function_source_hash()` returned true, the coverage map is enabled,
+                    // so continue adding the counter.
+                    if let Some(code_region) = code_region {
+                        // Note: Some counters do not have code regions, but may still be referenced
+                        // from expressions. In that case, don't add the counter to the coverage map,
+                        // but do inject the counter intrinsic.
+                        bx.add_coverage_counter(instance, id, code_region);
+                    }
+
+                    let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+                    let fn_name = bx.get_pgo_func_name_var(instance);
+                    let hash = bx.const_u64(function_source_hash);
+                    let num_counters = bx.const_u32(coverageinfo.num_counters);
+                    let index = bx.const_u32(id.zero_based_index());
+                    debug!(
+                        "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                        fn_name, hash, num_counters, index,
+                    );
+                    bx.instrprof_increment(fn_name, hash, num_counters, index);
+                }
+            }
+            CoverageKind::Expression { id, lhs, op, rhs } => {
+                bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+            }
+            CoverageKind::Unreachable => {
+                bx.add_coverage_unreachable(
+                    instance,
+                    code_region.expect("unreachable regions always have code regions"),
+                );
+            }
+        }
+    }
+}
+
+// These methods used to be part of trait `CoverageInfoBuilderMethods`, but
+// became ordinary inherent methods after most coverage code moved out of `rustc_codegen_ssa`.
+impl<'tcx> Builder<'_, '_, 'tcx> {
+    /// Returns true if the function source hash was added to the coverage map (even if it had
+    /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
+    /// not enabled (a coverage map is not being generated).
     fn set_function_source_hash(
         &mut self,
         instance: Instance<'tcx>,
@@ -115,6 +173,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
         }
     }
 
+    /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
+    /// is not enabled (a coverage map is not being generated).
     fn add_coverage_counter(
         &mut self,
         instance: Instance<'tcx>,
@@ -137,6 +197,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
         }
     }
 
+    /// Returns true if the expression was added to the coverage map; false if
+    /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
     fn add_coverage_counter_expression(
         &mut self,
         instance: Instance<'tcx>,
@@ -163,6 +225,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
         }
     }
 
+    /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
+    /// is not enabled (a coverage map is not being generated).
     fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
         if let Some(coverage_context) = self.coverage_context() {
             debug!(
@@ -199,8 +263,8 @@ fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<
         tcx.symbol_name(instance).name,
         cx.fn_abi_of_fn_ptr(
             ty::Binder::dummy(tcx.mk_fn_sig(
-                [tcx.mk_unit()],
-                tcx.mk_unit(),
+                [Ty::new_unit(tcx)],
+                Ty::new_unit(tcx),
                 false,
                 hir::Unsafety::Unsafe,
                 Abi::Rust,
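
For orientation: the `add_coverage` entry point added above replaces the per-kind trait methods with a single match on `CoverageKind`, and the remaining helpers return `false` whenever `-C instrument-coverage` is off. A minimal standalone sketch of that shape (simplified stand-in types, not the rustc-internal ones):

    // Sketch only: stand-ins for the rustc types, showing the dispatch and
    // the "no coverage map, no work" gating used by the methods above.
    #[derive(Debug)]
    enum CoverageKind {
        Counter { id: u32 },
        Expression { id: u32, lhs: u32, rhs: u32 },
        Unreachable,
    }

    fn add_coverage(instrument_coverage: bool, kind: &CoverageKind) -> bool {
        if !instrument_coverage {
            return false; // mirrors the `coverage_context()` check
        }
        match kind {
            CoverageKind::Counter { id } => {
                println!("inject llvm.instrprof.increment for counter {id}");
            }
            CoverageKind::Expression { id, lhs, rhs } => {
                println!("record expression {id} over operands {lhs} and {rhs}");
            }
            CoverageKind::Unreachable => println!("record unreachable code region"),
        }
        true
    }

    fn main() {
        assert!(add_coverage(true, &CoverageKind::Counter { id: 0 }));
        assert!(!add_coverage(false, &CoverageKind::Unreachable));
    }
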
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
index 64961baf272..65cbd5edc59 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -65,10 +65,10 @@ fn make_mir_scope<'ll, 'tcx>(
         debug_context.scopes[parent]
     } else {
         // The root is the function itself.
-        let loc = cx.lookup_debug_loc(mir.span.lo());
+        let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
         debug_context.scopes[scope] = DebugScope {
-            file_start_pos: loc.file.start_pos,
-            file_end_pos: loc.file.end_pos,
+            file_start_pos: file.start_pos,
+            file_end_pos: file.end_pos,
             ..debug_context.scopes[scope]
         };
         instantiated.insert(scope);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 8be54b7eb71..37f30917609 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -38,6 +38,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '
         unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) };
 
     section_var.unwrap_or_else(|| {
+        let section_name = b".debug_gdb_scripts\0";
         let mut section_contents = Vec::new();
 
         // Add the pretty printers for the standard library first.
@@ -70,7 +71,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '
             let section_var = cx
                 .define_global(section_var_name, llvm_type)
                 .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
-            llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr().cast());
+            llvm::LLVMSetSection(section_var, section_name.as_ptr().cast());
             llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
             llvm::LLVMSetGlobalConstant(section_var, llvm::True);
             llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 4b88ab8a97a..d61400d3fa3 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -20,6 +20,7 @@ use crate::llvm::debuginfo::{
 };
 use crate::value::Value;
 
+use cstr::cstr;
 use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo;
 use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind;
 use rustc_codegen_ssa::traits::*;
@@ -167,7 +168,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
     debug_assert_eq!(
         cx.size_and_align_of(ptr_type),
-        cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
+        cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
     );
 
     let pointee_type_di_node = type_di_node(cx, pointee_type);
@@ -222,8 +223,11 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                     //        at all and instead emit regular struct debuginfo for it. We just
                     //        need to make sure that we don't break existing debuginfo consumers
                     //        by doing that (at least not without a warning period).
-                    let layout_type =
-                        if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
+                    let layout_type = if ptr_type.is_box() {
+                        Ty::new_mut_ptr(cx.tcx, pointee_type)
+                    } else {
+                        ptr_type
+                    };
 
                     let layout = cx.layout_of(layout_type);
                     let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
@@ -811,6 +815,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
 
     let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
     let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
+    let flags = "\0";
     let output_filenames = tcx.output_filenames(());
     let split_name = if tcx.sess.target_can_use_split_dwarf() {
         output_filenames
@@ -847,7 +852,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
             producer.as_ptr().cast(),
             producer.len(),
             tcx.sess.opts.optimize != config::OptLevel::No,
-            c"".as_ptr().cast(),
+            flags.as_ptr().cast(),
             0,
             // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
             // put the path supplied to `MCSplitDwarfFile` into the debug info of the final
@@ -876,7 +881,8 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
             );
             let val = llvm::LLVMMetadataAsValue(debug_context.llcontext, gcov_metadata);
 
-            llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, c"llvm.gcov".as_ptr(), val);
+            let llvm_gcov_ident = cstr!("llvm.gcov");
+            llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, llvm_gcov_ident.as_ptr(), val);
         }
 
         // Insert `llvm.ident` metadata on the wasm targets since that will
@@ -889,7 +895,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>(
             );
             llvm::LLVMAddNamedMetadataOperand(
                 debug_context.llmod,
-                c"llvm.ident".as_ptr(),
+                cstr!("llvm.ident").as_ptr(),
                 llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
             );
         }
@@ -1295,7 +1301,7 @@ fn build_vtable_type_di_node<'ll, 'tcx>(
 
     // All function pointers are described as opaque pointers. This could be improved in the future
     // by describing them as actual function pointers.
-    let void_pointer_ty = tcx.mk_imm_ptr(tcx.types.unit);
+    let void_pointer_ty = Ty::new_imm_ptr(tcx, tcx.types.unit);
     let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
     let usize_di_node = type_di_node(cx, tcx.types.usize);
     let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
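
The type-construction changes in this file follow the same mechanical pattern that repeats below in intrinsic.rs, type_of.rs and va_arg.rs: interner convenience methods on `TyCtxt` are replaced by associated constructors on `Ty` that take the `TyCtxt` explicitly. Schematically (lines lifted from the hunks in this diff, not standalone code):

    // before: convenience methods on the type context
    let ptr_ty = cx.tcx.mk_mut_ptr(pointee_type);
    let unit_ty = tcx.mk_unit();

    // after: associated constructors on `Ty`, with the context as first argument
    let ptr_ty = Ty::new_mut_ptr(cx.tcx, pointee_type);
    let unit_ty = Ty::new_unit(tcx);
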
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index aa7ae9355bc..b924c771af7 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -113,7 +113,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
                 llvm::LLVMRustAddModuleFlag(
                     self.llmod,
                     llvm::LLVMModFlagBehavior::Warning,
-                    c"Dwarf Version".as_ptr().cast(),
+                    "Dwarf Version\0".as_ptr().cast(),
                     dwarf_version,
                 );
             } else {
@@ -121,16 +121,17 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
                 llvm::LLVMRustAddModuleFlag(
                     self.llmod,
                     llvm::LLVMModFlagBehavior::Warning,
-                    c"CodeView".as_ptr().cast(),
+                    "CodeView\0".as_ptr().cast(),
                     1,
                 )
             }
 
             // Prevent bitcode readers from deleting the debug info.
+            let ptr = "Debug Info Version\0".as_ptr();
             llvm::LLVMRustAddModuleFlag(
                 self.llmod,
                 llvm::LLVMModFlagBehavior::Warning,
-                c"Debug Info Version".as_ptr().cast(),
+                ptr.cast(),
                 llvm::LLVMRustDebugMetadataVersion(),
             );
         }
@@ -262,7 +263,7 @@ impl CodegenCx<'_, '_> {
     pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
         let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
             Ok(SourceFileAndLine { sf: file, line }) => {
-                let line_pos = file.line_begin_pos(pos);
+                let line_pos = file.lines(|lines| lines[line]);
 
                 // Use 1-based indexing.
                 let line = (line + 1) as u32;
@@ -331,7 +332,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
         };
 
-        let mut name = String::new();
+        let mut name = String::with_capacity(64);
         type_names::push_item_name(tcx, def_id, false, &mut name);
 
         // Find the enclosing function, in case this is a closure.
@@ -453,7 +454,7 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         ty::Array(ct, _)
                             if (*ct == cx.tcx.types.u8) || cx.layout_of(*ct).is_zst() =>
                         {
-                            cx.tcx.mk_imm_ptr(*ct)
+                            Ty::new_imm_ptr(cx.tcx, *ct)
                         }
                         _ => t,
                     };
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
index d5ea48c311b..fa61c7dde18 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -28,7 +28,7 @@ pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DISco
         .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
 
     let namespace_name_string = {
-        let mut output = String::new();
+        let mut output = String::with_capacity(64);
         type_names::push_item_name(cx.tcx, def_id, false, &mut output);
         output
     };
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
index 6bcd3e5bf58..7be83638676 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -82,8 +82,8 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
         ty::Foreign(_) => {
             // Assert that pointers to foreign types really are thin:
             debug_assert_eq!(
-                cx.size_of(cx.tcx.mk_imm_ptr(pointee_tail_ty)),
-                cx.size_of(cx.tcx.mk_imm_ptr(cx.tcx.types.u8))
+                cx.size_of(Ty::new_imm_ptr(cx.tcx, pointee_tail_ty)),
+                cx.size_of(Ty::new_imm_ptr(cx.tcx, cx.tcx.types.u8))
             );
             None
         }
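
The `ty::Foreign` arm above encodes an invariant rather than computing anything: pointers to foreign (extern) types are thin. A user-level illustration of that invariant, on nightly with `feature(extern_types)` (not part of this change):

    #![feature(extern_types)]

    extern "C" {
        type Opaque; // foreign type: size and layout unknown to Rust
    }

    fn main() {
        // A pointer to an extern type carries no metadata, so it is the same
        // size as a pointer to a sized type such as u8.
        assert_eq!(
            std::mem::size_of::<*const Opaque>(),
            std::mem::size_of::<*const u8>()
        );
    }
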
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 4e28034a850..a254c86c291 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::va_arg::emit_va_arg;
 use crate::value::Value;
 
-use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
 use rustc_codegen_ssa::mir::operand::OperandRef;
@@ -452,6 +452,8 @@ fn try_intrinsic<'ll>(
         bx.store(bx.const_i32(0), dest, ret_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, try_func, data, catch_func, dest);
+    } else if wants_wasm_eh(bx.sess()) {
+        codegen_wasm_try(bx, try_func, data, catch_func, dest);
     } else if bx.sess().target.os == "emscripten" {
         codegen_emcc_try(bx, try_func, data, catch_func, dest);
     } else {
@@ -610,6 +612,80 @@ fn codegen_msvc_try<'ll>(
     bx.store(ret, dest, i32_align);
 }
 
+// WASM's definition of the `rust_try` function.
+fn codegen_wasm_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
+
+        let normal = bx.append_sibling_block("normal");
+        let catchswitch = bx.append_sibling_block("catchswitch");
+        let catchpad = bx.append_sibling_block("catchpad");
+        let caught = bx.append_sibling_block("caught");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //      define i32 @rust_try(%try_func, %data, %catch_func) {
+        //      %slot = alloca i8*
+        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad] unwind to caller
+        //
+        //   catchpad:
+        //      %tok = catchpad within %cs [null]
+        //      %ptr = call @llvm.wasm.get.exception(token %tok)
+        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
+        //      call %catch_func(%data, %ptr)
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+
+        bx.switch_to_block(normal);
+        bx.ret(bx.const_i32(0));
+
+        bx.switch_to_block(catchswitch);
+        let cs = bx.catch_switch(None, None, &[catchpad]);
+
+        bx.switch_to_block(catchpad);
+        let null = bx.const_null(bx.type_i8p());
+        let funclet = bx.catch_pad(cs, &[null]);
+
+        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
+        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
+
+        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.catch_ret(&funclet, caught);
+
+        bx.switch_to_block(caught);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
 // Definition of the standard `try` function for Rust using the GNU-like model
 // of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
 // instructions).
@@ -797,23 +873,29 @@ fn get_rust_try_fn<'ll, 'tcx>(
 
     // Define the type up front for the signature of the rust_try function.
     let tcx = cx.tcx;
-    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
     // `unsafe fn(*mut i8) -> ()`
-    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p],
-        tcx.mk_unit(),
-        false,
-        hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let try_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p],
+            Ty::new_unit(tcx),
+            false,
+            hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(*mut i8, *mut i8) -> ()`
-    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p, i8p],
-        tcx.mk_unit(),
-        false,
-        hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let catch_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p, i8p],
+            Ty::new_unit(tcx),
+            false,
+            hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
     let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
         [try_fn_ty, i8p, catch_fn_ty],
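
Two things happen in intrinsic.rs above: `try_intrinsic` gains a `wants_wasm_eh` branch whose `codegen_wasm_try` reuses the funclet-style `catchswitch`/`catchpad`/`catchret` lowering (the same shape as the MSVC path) together with the `llvm.wasm.get.exception`/`llvm.wasm.get.ehselector` intrinsics, and `get_rust_try_fn` is updated to the new `Ty::new_*` constructors. On the surface-language side, the code that ultimately exercises this `rust_try` shim is `std::panic::catch_unwind`; a trivial example of the behavior being lowered (ordinary stable Rust, not code from this change):

    use std::panic;

    fn main() {
        // catch_unwind is built on the `try` intrinsic, which codegen lowers
        // through a target-specific `rust_try` shim like the ones above:
        // run the closure, return Ok(..) if it finishes and Err(payload) if
        // it unwinds.
        let caught = panic::catch_unwind(|| panic!("boom"));
        assert!(caught.is_err());
    }
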
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index 24968e00cc8..24ba28bbc82 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -11,7 +11,6 @@
 #![feature(let_chains)]
 #![feature(never_type)]
 #![feature(impl_trait_in_assoc_type)]
-#![feature(c_str_literals)]
 #![recursion_limit = "256"]
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
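
Removing `#![feature(c_str_literals)]` here is what drives the literal rewrites earlier in the diff: `c"..."` C string literals are turned back into explicitly NUL-terminated string/byte literals (or the `cstr!` macro) before their pointers are handed to LLVM. The two spellings denote the same bytes; a small check for comparison (C string literals are stable Rust as of 1.77):

    fn main() {
        let a: &std::ffi::CStr = c"Dwarf Version";
        let b: &str = "Dwarf Version\0";
        // Same bytes, including the trailing NUL the C APIs expect.
        assert_eq!(a.to_bytes_with_nul(), b.as_bytes());
    }
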
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 6ef3418cc5f..3ad546b61e9 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1,7 +1,7 @@
 #![allow(non_camel_case_types)]
 #![allow(non_upper_case_globals)]
 
-use rustc_codegen_ssa::coverageinfo::map as coverage_map;
+use crate::coverageinfo::map_data as coverage_map;
 
 use super::debuginfo::{
     DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
@@ -585,6 +585,16 @@ pub enum ThreadLocalMode {
     LocalExec,
 }
 
+/// LLVMRustTailCallKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum TailCallKind {
+    None,
+    Tail,
+    MustTail,
+    NoTail,
+}
+
 /// LLVMRustChecksumKind
 #[derive(Copy, Clone)]
 #[repr(C)]
@@ -1071,6 +1081,7 @@ extern "C" {
 
     // Operations on other types
     pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMTokenTypeInContext(C: &Context) -> &Type;
     pub fn LLVMMetadataTypeInContext(C: &Context) -> &Type;
 
     // Operations on all values
@@ -1195,6 +1206,7 @@ extern "C" {
         NameLen: size_t,
     ) -> Option<&Value>;
     pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+    pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind);
 
     // Operations on attributes
     pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
@@ -1301,7 +1313,7 @@ extern "C" {
         NumArgs: c_uint,
         Then: &'a BasicBlock,
         Catch: &'a BasicBlock,
-        OpBundles: *const Option<&OperandBundleDef<'a>>,
+        OpBundles: *const &OperandBundleDef<'a>,
         NumOpBundles: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
@@ -1673,7 +1685,7 @@ extern "C" {
         Fn: &'a Value,
         Args: *const &'a Value,
         NumArgs: c_uint,
-        OpBundles: *const Option<&OperandBundleDef<'a>>,
+        OpBundles: *const &OperandBundleDef<'a>,
         NumOpBundles: c_uint,
     ) -> &'a Value;
     pub fn LLVMRustBuildMemCpy<'a>(
@@ -2512,6 +2524,7 @@ extern "C" {
         remark_all_passes: bool,
         remark_passes: *const *const c_char,
         remark_passes_len: usize,
+        remark_file: *const c_char,
     );
 
     #[allow(improper_ctypes)]
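
The new `TailCallKind` enum and `LLVMRustSetTailCallKind` binding correspond to LLVM's `CallInst::TailCallKind`, extending the existing boolean `LLVMSetTailCall` with the `musttail`/`notail` markers; the variant order must stay in sync with the C++ side. A hypothetical call site (illustration only, not code from this change), assuming `callsite` is the `&Value` returned by a builder `call`:

    // Mark a just-emitted call as `musttail`; like LLVMSetTailCall, this is
    // an unsafe FFI call operating on the raw LLVM call-instruction value.
    unsafe {
        llvm::LLVMRustSetTailCallKind(callsite, llvm::TailCallKind::MustTail);
    }
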
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 4ffa2b9c6a3..7e672a8dc33 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -52,6 +52,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
     }
 
+    pub(crate) fn type_token(&self) -> &'ll Type {
+        unsafe { llvm::LLVMTokenTypeInContext(self.llcx) }
+    }
+
     pub(crate) fn type_metadata(&self) -> &'ll Type {
         unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) }
     }
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 3339e4e07ed..58e97be34f2 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -337,12 +337,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             // only wide pointer boxes are handled as pointers
             // thin pointer boxes with scalar allocators are handled by the general logic below
             ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
-                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+                let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
                 return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
             }
             // `dyn* Trait` has the same ABI as `*mut dyn Trait`
             ty::Dynamic(bounds, region, ty::DynStar) => {
-                let ptr_ty = cx.tcx.mk_mut_ptr(cx.tcx.mk_dynamic(bounds, region, ty::Dyn));
+                let ptr_ty =
+                    Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
                 return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
             }
             _ => {}
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index b19398e68c2..8800caa71d6 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -73,7 +73,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     let layout = bx.cx.layout_of(target_ty);
     let (llty, size, align) = if indirect {
         (
-            bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+            bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
             bx.cx.data_layout().pointer_size,
             bx.cx.data_layout().pointer_align,
         )