Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 625
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 176
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 1146
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 503
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 476
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 856
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs | 103
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/profiling.rs | 58
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 1122
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs | 175
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 1675
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 190
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 424
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 579
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 1077
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs | 292
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs | 272
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 451
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 289
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 133
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/doc.md | 131
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 117
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 1591
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 931
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 465
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 445
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs | 278
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 642
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs | 48
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 98
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs | 231
-rw-r--r--  compiler/rustc_codegen_llvm/src/errors.rs | 256
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 2433
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 473
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs | 94
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs | 213
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 2421
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/mod.rs | 326
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 647
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 161
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 361
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 351
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 311
-rw-r--r--  compiler/rustc_codegen_llvm/src/value.rs | 32
44 files changed, 23678 insertions, 0 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
new file mode 100644
index 00000000000..e5f5146fac8
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -0,0 +1,625 @@
+use crate::attributes;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm::{self, Attribute, AttributePlace};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::LayoutOf;
+pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+use rustc_middle::ty::Ty;
+use rustc_session::config;
+pub use rustc_target::abi::call::*;
+use rustc_target::abi::{self, HasDataLayout, Int};
+pub use rustc_target::spec::abi::Abi;
+use rustc_target::spec::SanitizerSet;
+
+use libc::c_uint;
+use smallvec::SmallVec;
+
+pub trait ArgAttributesExt {
+    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
+    fn apply_attrs_to_callsite(
+        &self,
+        idx: AttributePlace,
+        cx: &CodegenCx<'_, '_>,
+        callsite: &Value,
+    );
+}
+
+const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
+    [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
+
+const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
+    (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
+    (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
+    (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
+    (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
+    (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
+];
+
+fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
+    let mut regular = this.regular;
+
+    let mut attrs = SmallVec::new();
+
+    // ABI-affecting attributes must always be applied
+    for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
+        if regular.contains(attr) {
+            attrs.push(llattr.create_attr(cx.llcx));
+        }
+    }
+    if let Some(align) = this.pointee_align {
+        attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
+    }
+    match this.arg_ext {
+        ArgExtension::None => {}
+        ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
+        ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
+    }
+
+    // Only apply remaining attributes when optimizing
+    if cx.sess().opts.optimize != config::OptLevel::No {
+        let deref = this.pointee_size.bytes();
+        if deref != 0 {
+            if regular.contains(ArgAttribute::NonNull) {
+                attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
+            } else {
+                attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
+            }
+            regular -= ArgAttribute::NonNull;
+        }
+        for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
+            if regular.contains(attr) {
+                attrs.push(llattr.create_attr(cx.llcx));
+            }
+        }
+    } else if cx.tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
+        // If we're not optimising, *but* memory sanitizer is on, emit noundef, since it affects
+        // memory sanitizer's behavior.
+
+        if regular.contains(ArgAttribute::NoUndef) {
+            attrs.push(llvm::AttributeKind::NoUndef.create_attr(cx.llcx));
+        }
+    }
+
+    attrs
+}
+
+impl ArgAttributesExt for ArgAttributes {
+    fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
+        let attrs = get_attrs(self, cx);
+        attributes::apply_to_llfn(llfn, idx, &attrs);
+    }
+
+    fn apply_attrs_to_callsite(
+        &self,
+        idx: AttributePlace,
+        cx: &CodegenCx<'_, '_>,
+        callsite: &Value,
+    ) {
+        let attrs = get_attrs(self, cx);
+        attributes::apply_to_callsite(callsite, idx, &attrs);
+    }
+}
+
+pub trait LlvmType {
+    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
+}
+
+impl LlvmType for Reg {
+    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+        match self.kind {
+            RegKind::Integer => cx.type_ix(self.size.bits()),
+            RegKind::Float => match self.size.bits() {
+                32 => cx.type_f32(),
+                64 => cx.type_f64(),
+                _ => bug!("unsupported float: {:?}", self),
+            },
+            RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
+        }
+    }
+}
+
+impl LlvmType for CastTarget {
+    fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+        let rest_ll_unit = self.rest.unit.llvm_type(cx);
+        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
+            (0, 0)
+        } else {
+            (
+                self.rest.total.bytes() / self.rest.unit.size.bytes(),
+                self.rest.total.bytes() % self.rest.unit.size.bytes(),
+            )
+        };
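+        // E.g. with `rest.unit` an 8-byte integer and `rest.total` of 20 bytes,
+        // this gives `rest_count = 2` full units and `rem_bytes = 4`, which is
+        // lowered below as the struct `{ i64, i64, i32 }`.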
+
+        if self.prefix.iter().all(|x| x.is_none()) {
+            // Simplify to a single unit when there is no prefix and size <= unit size
+            if self.rest.total <= self.rest.unit.size {
+                return rest_ll_unit;
+            }
+
+            // Simplify to array when all chunks are the same size and type
+            if rem_bytes == 0 {
+                return cx.type_array(rest_ll_unit, rest_count);
+            }
+        }
+
+        // Create list of fields in the main structure
+        let mut args: Vec<_> = self
+            .prefix
+            .iter()
+            .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
+            .chain((0..rest_count).map(|_| rest_ll_unit))
+            .collect();
+
+        // Append final integer
+        if rem_bytes != 0 {
+            // Only integers can be really split further.
+            assert_eq!(self.rest.unit.kind, RegKind::Integer);
+            args.push(cx.type_ix(rem_bytes * 8));
+        }
+
+        cx.type_struct(&args, false)
+    }
+}
+
+pub trait ArgAbiExt<'ll, 'tcx> {
+    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'll, 'tcx>,
+        val: &'ll Value,
+        dst: PlaceRef<'tcx, &'ll Value>,
+    );
+    fn store_fn_arg(
+        &self,
+        bx: &mut Builder<'_, 'll, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, &'ll Value>,
+    );
+}
+
+impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+    /// Gets the LLVM type for a place of the original Rust type of
+    /// this argument/return, i.e., the result of `type_of::type_of`.
+    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+        self.layout.llvm_type(cx)
+    }
+
+    /// Stores a direct/indirect value described by this ArgAbi into a
+    /// place for the original Rust type of this argument/return.
+    /// Can be used for both storing formal arguments into Rust variables
+    /// or results of call/invoke instructions into their destinations.
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'll, 'tcx>,
+        val: &'ll Value,
+        dst: PlaceRef<'tcx, &'ll Value>,
+    ) {
+        match &self.mode {
+            PassMode::Ignore => {}
+            // Sized indirect arguments
+            PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
+                let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
+                OperandValue::Ref(val, None, align).store(bx, dst);
+            }
+            // Unsized indirect arguments
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
+                bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+            }
+            PassMode::Cast { cast, pad_i32: _ } => {
+                // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+                // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+                let can_store_through_cast_ptr = false;
+                if can_store_through_cast_ptr {
+                    bx.store(val, dst.llval, self.layout.align.abi);
+                } else {
+                    // The actual return type is a struct, but the ABI
+                    // adaptation code has cast it into some scalar type. The
+                    // code that follows is the only reliable way I have
+                    // found to do a transform like i64 -> {i32,i32}.
+                    // Basically we dump the data onto the stack then memcpy it.
+                    //
+                    // Other approaches I tried:
+                    // - Casting rust ret pointer to the foreign type and using Store
+                    //   is (a) unsafe if size of foreign type > size of rust type and
+                    //   (b) runs afoul of strict aliasing rules, yielding invalid
+                    //   assembly under -O (specifically, the store gets removed).
+                    // - Truncating foreign type to correct integral type and then
+                    //   bitcasting to the struct type yields invalid cast errors.
+
+                    // We instead thus allocate some scratch space...
+                    let scratch_size = cast.size(bx);
+                    let scratch_align = cast.align(bx);
+                    let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+                    bx.lifetime_start(llscratch, scratch_size);
+
+                    // ... where we first store the value...
+                    bx.store(val, llscratch, scratch_align);
+
+                    // ... and then memcpy it to the intended destination.
+                    bx.memcpy(
+                        dst.llval,
+                        self.layout.align.abi,
+                        llscratch,
+                        scratch_align,
+                        bx.const_usize(self.layout.size.bytes()),
+                        MemFlags::empty(),
+                    );
+
+                    bx.lifetime_end(llscratch, scratch_size);
+                }
+            }
+            _ => {
+                OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
+            }
+        }
+    }
+
+    fn store_fn_arg(
+        &self,
+        bx: &mut Builder<'_, 'll, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, &'ll Value>,
+    ) {
+        let mut next = || {
+            let val = llvm::get_param(bx.llfn(), *idx as c_uint);
+            *idx += 1;
+            val
+        };
+        match self.mode {
+            PassMode::Ignore => {}
+            PassMode::Pair(..) => {
+                OperandValue::Pair(next(), next()).store(bx, dst);
+            }
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
+                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+            }
+            PassMode::Direct(_)
+            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
+            | PassMode::Cast { .. } => {
+                let next_arg = next();
+                self.store(bx, next_arg, dst);
+            }
+        }
+    }
+}
+
+impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+    fn store_fn_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, Self::Value>,
+    ) {
+        arg_abi.store_fn_arg(self, idx, dst)
+    }
+    fn store_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        val: &'ll Value,
+        dst: PlaceRef<'tcx, &'ll Value>,
+    ) {
+        arg_abi.store(self, val, dst)
+    }
+    fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+        arg_abi.memory_ty(self)
+    }
+}
+
+pub trait FnAbiLlvmExt<'ll, 'tcx> {
+    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+    fn llvm_cconv(&self) -> llvm::CallConv;
+    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
+    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
+}
+
+impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+        // Ignore "extra" args from the call site for C variadic functions.
+        // Only the "fixed" args are part of the LLVM function signature.
+        let args =
+            if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args };
+
+        // This capacity calculation is approximate.
+        let mut llargument_tys = Vec::with_capacity(
+            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
+        );
+
+        let llreturn_ty = match &self.ret.mode {
+            PassMode::Ignore => cx.type_void(),
+            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
+            PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx),
+            PassMode::Indirect { .. } => {
+                llargument_tys.push(cx.type_ptr());
+                cx.type_void()
+            }
+        };
+
+        for arg in args {
+            // Note that the exact number of arguments pushed here is carefully synchronized with
+            // code all over the place, both in the codegen_llvm and codegen_ssa crates. That's how
+            // other code then knows which LLVM argument(s) correspond to the n-th Rust argument.
+            let llarg_ty = match &arg.mode {
+                PassMode::Ignore => continue,
+                PassMode::Direct(_) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // and for Scalar ABIs the LLVM type is fully determined by `layout.abi`,
+                    // guaranteeing that we generate ABI-compatible LLVM IR.
+                    arg.layout.immediate_llvm_type(cx)
+                }
+                PassMode::Pair(..) => {
+                    // ABI-compatible Rust types have the same `layout.abi` (up to validity ranges),
+                    // so for ScalarPair we can easily be sure that we are generating ABI-compatible
+                    // LLVM IR.
+                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
+                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
+                    continue;
+                }
+                PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
+                    // Construct the type of a (wide) pointer to `ty`, and pass its two fields.
+                    // Any two ABI-compatible unsized types have the same metadata type and
+                    // moreover the same metadata value leads to the same dynamic size and
+                    // alignment, so this respects ABI compatibility.
+                    let ptr_ty = Ty::new_mut_ptr(cx.tcx, arg.layout.ty);
+                    let ptr_layout = cx.layout_of(ptr_ty);
+                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
+                    llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
+                    continue;
+                }
+                PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } => cx.type_ptr(),
+                PassMode::Cast { cast, pad_i32 } => {
+                    // add padding
+                    if *pad_i32 {
+                        llargument_tys.push(Reg::i32().llvm_type(cx));
+                    }
+                    // Compute the LLVM type we use for this function from the cast type.
+                    // We assume here that ABI-compatible Rust types have the same cast type.
+                    cast.llvm_type(cx)
+                }
+            };
+            llargument_tys.push(llarg_ty);
+        }
+
+        if self.c_variadic {
+            cx.type_variadic_func(&llargument_tys, llreturn_ty)
+        } else {
+            cx.type_func(&llargument_tys, llreturn_ty)
+        }
+    }
+
+    fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
+    }
+
+    fn llvm_cconv(&self) -> llvm::CallConv {
+        self.conv.into()
+    }
+
+    fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
+        let mut func_attrs = SmallVec::<[_; 3]>::new();
+        if self.ret.layout.abi.is_uninhabited() {
+            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
+        }
+        if !self.can_unwind {
+            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
+        }
+        if let Conv::RiscvInterrupt { kind } = self.conv {
+            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
+        }
+        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
+
+        let mut i = 0;
+        let mut apply = |attrs: &ArgAttributes| {
+            attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
+            i += 1;
+            i - 1
+        };
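+        // `apply` returns the LLVM argument index it attached the attributes to,
+        // so additional attributes (e.g. `sret`/`byval` below) can be applied to
+        // the same argument slot.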
+        match &self.ret.mode {
+            PassMode::Direct(attrs) => {
+                attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
+            }
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
+                assert!(!on_stack);
+                let i = apply(attrs);
+                let sret = llvm::CreateStructRetAttr(
+                    cx.llcx,
+                    cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()),
+                );
+                attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
+            }
+            PassMode::Cast { cast, pad_i32: _ } => {
+                cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
+            }
+            _ => {}
+        }
+        for arg in self.args.iter() {
+            match &arg.mode {
+                PassMode::Ignore => {}
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
+                    let i = apply(attrs);
+                    let byval = llvm::CreateByValAttr(
+                        cx.llcx,
+                        cx.type_array(cx.type_i8(), arg.layout.size.bytes()),
+                    );
+                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
+                }
+                PassMode::Direct(attrs)
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
+                    apply(attrs);
+                }
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
+                    assert!(!on_stack);
+                    apply(attrs);
+                    apply(meta_attrs);
+                }
+                PassMode::Pair(a, b) => {
+                    apply(a);
+                    apply(b);
+                }
+                PassMode::Cast { cast, pad_i32 } => {
+                    if *pad_i32 {
+                        apply(&ArgAttributes::new());
+                    }
+                    apply(&cast.attrs);
+                }
+            }
+        }
+    }
+
+    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
+        let mut func_attrs = SmallVec::<[_; 2]>::new();
+        if self.ret.layout.abi.is_uninhabited() {
+            func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
+        }
+        if !self.can_unwind {
+            func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
+        }
+        attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });
+
+        let mut i = 0;
+        let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
+            attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
+            i += 1;
+            i - 1
+        };
+        match &self.ret.mode {
+            PassMode::Direct(attrs) => {
+                attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
+            }
+            PassMode::Indirect { attrs, meta_attrs: _, on_stack } => {
+                assert!(!on_stack);
+                let i = apply(bx.cx, attrs);
+                let sret = llvm::CreateStructRetAttr(
+                    bx.cx.llcx,
+                    bx.cx.type_array(bx.cx.type_i8(), self.ret.layout.size.bytes()),
+                );
+                attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
+            }
+            PassMode::Cast { cast, pad_i32: _ } => {
+                cast.attrs.apply_attrs_to_callsite(
+                    llvm::AttributePlace::ReturnValue,
+                    bx.cx,
+                    callsite,
+                );
+            }
+            _ => {}
+        }
+        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+            // If the value is a boolean, the range is 0..2 and that ultimately
+            // becomes 0..0 when the type becomes i1, which would be rejected
+            // by the LLVM verifier.
+            if let Int(..) = scalar.primitive() {
+                if !scalar.is_bool() && !scalar.is_always_valid(bx) {
+                    bx.range_metadata(callsite, scalar.valid_range(bx));
+                }
+            }
+        }
+        for arg in self.args.iter() {
+            match &arg.mode {
+                PassMode::Ignore => {}
+                PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => {
+                    let i = apply(bx.cx, attrs);
+                    let byval = llvm::CreateByValAttr(
+                        bx.cx.llcx,
+                        bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()),
+                    );
+                    attributes::apply_to_callsite(
+                        callsite,
+                        llvm::AttributePlace::Argument(i),
+                        &[byval],
+                    );
+                }
+                PassMode::Direct(attrs)
+                | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
+                    apply(bx.cx, attrs);
+                }
+                PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack: _ } => {
+                    apply(bx.cx, attrs);
+                    apply(bx.cx, meta_attrs);
+                }
+                PassMode::Pair(a, b) => {
+                    apply(bx.cx, a);
+                    apply(bx.cx, b);
+                }
+                PassMode::Cast { cast, pad_i32 } => {
+                    if *pad_i32 {
+                        apply(bx.cx, &ArgAttributes::new());
+                    }
+                    apply(bx.cx, &cast.attrs);
+                }
+            }
+        }
+
+        let cconv = self.llvm_cconv();
+        if cconv != llvm::CCallConv {
+            llvm::SetInstructionCallConv(callsite, cconv);
+        }
+
+        if self.conv == Conv::CCmseNonSecureCall {
+            // This will probably get ignored on all targets but those supporting the TrustZone-M
+            // extension (thumbv8m targets).
+            let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
+            attributes::apply_to_callsite(
+                callsite,
+                llvm::AttributePlace::Function,
+                &[cmse_nonsecure_call],
+            );
+        }
+
+        // Some intrinsics require that an elementtype attribute (with the pointee type of a
+        // pointer argument) is added to the callsite.
+        let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
+        if element_type_index >= 0 {
+            let arg_ty = self.args[element_type_index as usize].layout.ty;
+            let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument").ty;
+            let element_type_attr = unsafe {
+                llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
+            };
+            attributes::apply_to_callsite(
+                callsite,
+                llvm::AttributePlace::Argument(element_type_index as u32),
+                &[element_type_attr],
+            );
+        }
+    }
+}
+
+impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+    fn get_param(&mut self, index: usize) -> Self::Value {
+        llvm::get_param(self.llfn(), index as c_uint)
+    }
+}
+
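+// Maps a Rust-level calling convention (`Conv`) to the LLVM calling convention
+// used for it. Note that both `Conv::C` and `Conv::Rust` lower to LLVM's
+// default C calling convention.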
+impl From<Conv> for llvm::CallConv {
+    fn from(conv: Conv) -> Self {
+        match conv {
+            Conv::C | Conv::Rust | Conv::CCmseNonSecureCall | Conv::RiscvInterrupt { .. } => {
+                llvm::CCallConv
+            }
+            Conv::Cold => llvm::ColdCallConv,
+            Conv::PreserveMost => llvm::PreserveMost,
+            Conv::PreserveAll => llvm::PreserveAll,
+            Conv::AvrInterrupt => llvm::AvrInterrupt,
+            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
+            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
+            Conv::Msp430Intr => llvm::Msp430Intr,
+            Conv::PtxKernel => llvm::PtxKernel,
+            Conv::X86Fastcall => llvm::X86FastcallCallConv,
+            Conv::X86Intr => llvm::X86_Intr,
+            Conv::X86Stdcall => llvm::X86StdcallCallConv,
+            Conv::X86ThisCall => llvm::X86_ThisCall,
+            Conv::X86VectorCall => llvm::X86_VectorCall,
+            Conv::X86_64SysV => llvm::X86_64_SysV,
+            Conv::X86_64Win64 => llvm::X86_64_Win64,
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
new file mode 100644
index 00000000000..ca376029735
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -0,0 +1,176 @@
+use crate::attributes;
+use libc::c_uint;
+use rustc_ast::expand::allocator::{
+    alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy,
+    ALLOCATOR_METHODS, NO_ALLOC_SHIM_IS_UNSTABLE,
+};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{DebugInfo, OomStrategy};
+
+use crate::debuginfo;
+use crate::llvm::{self, Context, False, Module, True, Type};
+use crate::ModuleLlvm;
+
+pub(crate) unsafe fn codegen(
+    tcx: TyCtxt<'_>,
+    module_llvm: &mut ModuleLlvm,
+    module_name: &str,
+    kind: AllocatorKind,
+    alloc_error_handler_kind: AllocatorKind,
+) {
+    let llcx = &*module_llvm.llcx;
+    let llmod = module_llvm.llmod();
+    let usize = match tcx.sess.target.pointer_width {
+        16 => llvm::LLVMInt16TypeInContext(llcx),
+        32 => llvm::LLVMInt32TypeInContext(llcx),
+        64 => llvm::LLVMInt64TypeInContext(llcx),
+        tws => bug!("Unsupported target word size for int: {}", tws),
+    };
+    let i8 = llvm::LLVMInt8TypeInContext(llcx);
+    let i8p = llvm::LLVMPointerTypeInContext(llcx, 0);
+
+    if kind == AllocatorKind::Default {
+        for method in ALLOCATOR_METHODS {
+            let mut args = Vec::with_capacity(method.inputs.len());
+            for input in method.inputs.iter() {
+                match input.ty {
+                    AllocatorTy::Layout => {
+                        args.push(usize); // size
+                        args.push(usize); // align
+                    }
+                    AllocatorTy::Ptr => args.push(i8p),
+                    AllocatorTy::Usize => args.push(usize),
+
+                    AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+                }
+            }
+            let output = match method.output {
+                AllocatorTy::ResultPtr => Some(i8p),
+                AllocatorTy::Unit => None,
+
+                AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+                    panic!("invalid allocator output")
+                }
+            };
+
+            let from_name = global_fn_name(method.name);
+            let to_name = default_fn_name(method.name);
+
+            create_wrapper_function(tcx, llcx, llmod, &from_name, &to_name, &args, output, false);
+        }
+    }
+
+    // rust alloc error handler
+    create_wrapper_function(
+        tcx,
+        llcx,
+        llmod,
+        "__rust_alloc_error_handler",
+        alloc_error_handler_name(alloc_error_handler_kind),
+        &[usize, usize], // size, align
+        None,
+        true,
+    );
+
+    // __rust_alloc_error_handler_should_panic
+    let name = OomStrategy::SYMBOL;
+    let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+    if tcx.sess.default_hidden_visibility() {
+        llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+    }
+    let val = tcx.sess.opts.unstable_opts.oom.should_panic();
+    let llval = llvm::LLVMConstInt(i8, val as u64, False);
+    llvm::LLVMSetInitializer(ll_g, llval);
+
+    let name = NO_ALLOC_SHIM_IS_UNSTABLE;
+    let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+    if tcx.sess.default_hidden_visibility() {
+        llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+    }
+    let llval = llvm::LLVMConstInt(i8, 0, False);
+    llvm::LLVMSetInitializer(ll_g, llval);
+
+    if tcx.sess.opts.debuginfo != DebugInfo::None {
+        let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod);
+        debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx);
+        dbg_cx.finalize(tcx.sess);
+    }
+}
+
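+/// Builds a small forwarding function named `from_name` with the given
+/// signature whose body simply tail-calls `to_name` with the same arguments.
+/// This is how the `__rust_alloc*` shims are wired up to the selected
+/// allocator implementation.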
+fn create_wrapper_function(
+    tcx: TyCtxt<'_>,
+    llcx: &Context,
+    llmod: &Module,
+    from_name: &str,
+    to_name: &str,
+    args: &[&Type],
+    output: Option<&Type>,
+    no_return: bool,
+) {
+    unsafe {
+        let ty = llvm::LLVMFunctionType(
+            output.unwrap_or_else(|| llvm::LLVMVoidTypeInContext(llcx)),
+            args.as_ptr(),
+            args.len() as c_uint,
+            False,
+        );
+        let llfn = llvm::LLVMRustGetOrInsertFunction(
+            llmod,
+            from_name.as_ptr().cast(),
+            from_name.len(),
+            ty,
+        );
+        let no_return = if no_return {
+            // -> ! DIFlagNoReturn
+            let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
+            Some(no_return)
+        } else {
+            None
+        };
+
+        if tcx.sess.default_hidden_visibility() {
+            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+        }
+        if tcx.sess.must_emit_unwind_tables() {
+            let uwtable =
+                attributes::uwtable_attr(llcx, tcx.sess.opts.unstable_opts.use_sync_unwind);
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+        }
+
+        let callee =
+            llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty);
+        if let Some(no_return) = no_return {
+            // -> ! DIFlagNoReturn
+            attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
+        }
+        llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+        let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr().cast());
+
+        let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+        llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+        let args = args
+            .iter()
+            .enumerate()
+            .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+            .collect::<Vec<_>>();
+        let ret = llvm::LLVMRustBuildCall(
+            llbuilder,
+            ty,
+            callee,
+            args.as_ptr(),
+            args.len() as c_uint,
+            [].as_ptr(),
+            0 as c_uint,
+        );
+        llvm::LLVMSetTailCall(ret, True);
+        if output.is_some() {
+            llvm::LLVMBuildRet(llbuilder, ret);
+        } else {
+            llvm::LLVMBuildRetVoid(llbuilder);
+        }
+        llvm::LLVMDisposeBuilder(llbuilder);
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
new file mode 100644
index 00000000000..74539d4d495
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -0,0 +1,1146 @@
+use crate::attributes;
+use crate::builder::Builder;
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug, ty::Instance};
+use rustc_span::{Pos, Span};
+use rustc_target::abi::*;
+use rustc_target::asm::*;
+
+use libc::{c_char, c_uint};
+use smallvec::SmallVec;
+
+impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+    fn codegen_inline_asm(
+        &mut self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[InlineAsmOperandRef<'tcx, Self>],
+        options: InlineAsmOptions,
+        line_spans: &[Span],
+        instance: Instance<'_>,
+        dest: Option<Self::BasicBlock>,
+        catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>,
+    ) {
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+        // Collect the types of output operands
+        let mut constraints = vec![];
+        let mut clobbers = vec![];
+        let mut output_types = vec![];
+        let mut op_idx = FxHashMap::default();
+        let mut clobbered_x87 = false;
+        for (idx, op) in operands.iter().enumerate() {
+            match *op {
+                InlineAsmOperandRef::Out { reg, late, place } => {
+                    let is_target_supported = |reg_class: InlineAsmRegClass| {
+                        for &(_, feature) in reg_class.supported_types(asm_arch) {
+                            if let Some(feature) = feature {
+                                if self
+                                    .tcx
+                                    .asm_target_features(instance.def_id())
+                                    .contains(&feature)
+                                {
+                                    return true;
+                                }
+                            } else {
+                                // Register class is unconditionally supported
+                                return true;
+                            }
+                        }
+                        false
+                    };
+
+                    let mut layout = None;
+                    let ty = if let Some(ref place) = place {
+                        layout = Some(&place.layout);
+                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
+                    } else if matches!(
+                        reg.reg_class(),
+                        InlineAsmRegClass::X86(
+                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
+                        )
+                    ) {
+                        // Special handling for x87/mmx registers: we always
+                        // clobber the whole set if one register is marked as
+                        // clobbered. This is due to the way LLVM handles the
+                        // FP stack in inline assembly.
+                        if !clobbered_x87 {
+                            clobbered_x87 = true;
+                            clobbers.push("~{st}".to_string());
+                            for i in 1..=7 {
+                                clobbers.push(format!("~{{st({})}}", i));
+                            }
+                        }
+                        continue;
+                    } else if !is_target_supported(reg.reg_class())
+                        || reg.reg_class().is_clobber_only(asm_arch)
+                    {
+                        // We turn discarded outputs into clobber constraints
+                        // if the target feature needed by the register class is
+                        // disabled. This is necessary otherwise LLVM will try
+                        // to actually allocate a register for the dummy output.
+                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
+                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
+                        continue;
+                    } else {
+                        // If the output is discarded, we don't really care what
+                        // type is used. We're just using this to tell LLVM to
+                        // reserve the register.
+                        dummy_output_type(self.cx, reg.reg_class())
+                    };
+                    output_types.push(ty);
+                    op_idx.insert(idx, constraints.len());
+                    let prefix = if late { "=" } else { "=&" };
+                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
+                }
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+                    let layout = if let Some(ref out_place) = out_place {
+                        &out_place.layout
+                    } else {
+                        // LLVM requires tied operands to have the same type,
+                        // so we just use the type of the input.
+                        &in_value.layout
+                    };
+                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
+                    output_types.push(ty);
+                    op_idx.insert(idx, constraints.len());
+                    let prefix = if late { "=" } else { "=&" };
+                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
+                }
+                _ => {}
+            }
+        }
+
+        // Collect input operands
+        let mut inputs = vec![];
+        for (idx, op) in operands.iter().enumerate() {
+            match *op {
+                InlineAsmOperandRef::In { reg, value } => {
+                    let llval =
+                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
+                    inputs.push(llval);
+                    op_idx.insert(idx, constraints.len());
+                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
+                }
+                InlineAsmOperandRef::InOut { reg, late, in_value, out_place: _ } => {
+                    let value = llvm_fixup_input(
+                        self,
+                        in_value.immediate(),
+                        reg.reg_class(),
+                        &in_value.layout,
+                    );
+                    inputs.push(value);
+
+                    // In the case of fixed registers, we have the choice of
+                    // either using a tied operand or duplicating the constraint.
+                    // We prefer the latter because it matches the behavior of
+                    // Clang.
+                    if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) {
+                        constraints.push(reg_to_llvm(reg, Some(&in_value.layout)).to_string());
+                    } else {
+                        constraints.push(format!("{}", op_idx[&idx]));
+                    }
+                }
+                InlineAsmOperandRef::SymFn { instance } => {
+                    inputs.push(self.cx.get_fn(instance));
+                    op_idx.insert(idx, constraints.len());
+                    constraints.push("s".to_string());
+                }
+                InlineAsmOperandRef::SymStatic { def_id } => {
+                    inputs.push(self.cx.get_static(def_id));
+                    op_idx.insert(idx, constraints.len());
+                    constraints.push("s".to_string());
+                }
+                _ => {}
+            }
+        }
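+        // After the two loops above, `op_idx` maps each register and symbol
+        // operand to its index in `constraints`; the template expansion below
+        // uses it to emit `${N}` placeholder references.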
+
+        // Build the template string
+        let mut labels = vec![];
+        let mut template_str = String::new();
+        for piece in template {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref s) => {
+                    if s.contains('$') {
+                        for c in s.chars() {
+                            if c == '$' {
+                                template_str.push_str("$$");
+                            } else {
+                                template_str.push(c);
+                            }
+                        }
+                    } else {
+                        template_str.push_str(s)
+                    }
+                }
+                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+                    match operands[operand_idx] {
+                        InlineAsmOperandRef::In { reg, .. }
+                        | InlineAsmOperandRef::Out { reg, .. }
+                        | InlineAsmOperandRef::InOut { reg, .. } => {
+                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
+                            if let Some(modifier) = modifier {
+                                template_str.push_str(&format!(
+                                    "${{{}:{}}}",
+                                    op_idx[&operand_idx], modifier
+                                ));
+                            } else {
+                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
+                            }
+                        }
+                        InlineAsmOperandRef::Const { ref string } => {
+                            // Const operands get injected directly into the template
+                            template_str.push_str(string);
+                        }
+                        InlineAsmOperandRef::SymFn { .. }
+                        | InlineAsmOperandRef::SymStatic { .. } => {
+                            // Only emit the raw symbol name
+                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
+                        }
+                        InlineAsmOperandRef::Label { label } => {
+                            template_str.push_str(&format!("${{{}:l}}", constraints.len()));
+                            constraints.push("!i".to_owned());
+                            labels.push(label);
+                        }
+                    }
+                }
+            }
+        }
+
+        constraints.append(&mut clobbers);
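+        // Unless the asm is declared to preserve flags, conservatively clobber
+        // the architecture's condition/status flag registers.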
+        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+            match asm_arch {
+                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
+                    constraints.push("~{cc}".to_string());
+                }
+                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+                    constraints.extend_from_slice(&[
+                        "~{dirflag}".to_string(),
+                        "~{fpsr}".to_string(),
+                        "~{flags}".to_string(),
+                    ]);
+                }
+                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+                    constraints.extend_from_slice(&[
+                        "~{vtype}".to_string(),
+                        "~{vl}".to_string(),
+                        "~{vxsat}".to_string(),
+                        "~{vxrm}".to_string(),
+                    ]);
+                }
+                InlineAsmArch::Avr => {
+                    constraints.push("~{sreg}".to_string());
+                }
+                InlineAsmArch::Nvptx64 => {}
+                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
+                InlineAsmArch::Hexagon => {}
+                InlineAsmArch::LoongArch64 => {
+                    constraints.extend_from_slice(&[
+                        "~{$fcc0}".to_string(),
+                        "~{$fcc1}".to_string(),
+                        "~{$fcc2}".to_string(),
+                        "~{$fcc3}".to_string(),
+                        "~{$fcc4}".to_string(),
+                        "~{$fcc5}".to_string(),
+                        "~{$fcc6}".to_string(),
+                        "~{$fcc7}".to_string(),
+                    ]);
+                }
+                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+                InlineAsmArch::S390x => {
+                    constraints.push("~{cc}".to_string());
+                }
+                InlineAsmArch::SpirV => {}
+                InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
+                InlineAsmArch::Bpf => {}
+                InlineAsmArch::Msp430 => {
+                    constraints.push("~{sr}".to_string());
+                }
+                InlineAsmArch::M68k => {
+                    constraints.push("~{ccr}".to_string());
+                }
+                InlineAsmArch::CSKY => {}
+            }
+        }
+        if !options.contains(InlineAsmOptions::NOMEM) {
+            // This is actually ignored by LLVM, but it's probably best to keep
+            // it just in case. LLVM instead uses the ReadOnly/ReadNone
+            // attributes on the call instruction to optimize.
+            constraints.push("~{memory}".to_string());
+        }
+        let volatile = !options.contains(InlineAsmOptions::PURE);
+        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
+        let output_type = match &output_types[..] {
+            [] => self.type_void(),
+            [ty] => ty,
+            tys => self.type_struct(tys, false),
+        };
+        let dialect = match asm_arch {
+            InlineAsmArch::X86 | InlineAsmArch::X86_64
+                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
+            {
+                llvm::AsmDialect::Intel
+            }
+            _ => llvm::AsmDialect::Att,
+        };
+        let result = inline_asm_call(
+            self,
+            &template_str,
+            &constraints.join(","),
+            &inputs,
+            output_type,
+            &labels,
+            volatile,
+            alignstack,
+            dialect,
+            line_spans,
+            options.contains(InlineAsmOptions::MAY_UNWIND),
+            dest,
+            catch_funclet,
+        )
+        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
+
+        let mut attrs = SmallVec::<[_; 2]>::new();
+        if options.contains(InlineAsmOptions::PURE) {
+            if options.contains(InlineAsmOptions::NOMEM) {
+                attrs.push(llvm::MemoryEffects::None.create_attr(self.cx.llcx));
+            } else if options.contains(InlineAsmOptions::READONLY) {
+                attrs.push(llvm::MemoryEffects::ReadOnly.create_attr(self.cx.llcx));
+            }
+            attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
+        } else if options.contains(InlineAsmOptions::NOMEM) {
+            attrs.push(llvm::MemoryEffects::InaccessibleMemOnly.create_attr(self.cx.llcx));
+        } else {
+            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
+        }
+        attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
+
+        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
+        if let Some(dest) = dest {
+            self.switch_to_block(dest);
+        }
+
+        // Write results to outputs
+        for (idx, op) in operands.iter().enumerate() {
+            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
+            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
+            {
+                let value = if output_types.len() == 1 {
+                    result
+                } else {
+                    self.extract_value(result, op_idx[&idx] as u64)
+                };
+                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
+                OperandValue::Immediate(value).store(self, place);
+            }
+        }
+    }
+}
+
+impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
+    fn codegen_global_asm(
+        &self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[GlobalAsmOperandRef<'tcx>],
+        options: InlineAsmOptions,
+        _line_spans: &[Span],
+    ) {
+        let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+        // Default to Intel syntax on x86
+        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+            && !options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+        // Build the template string
+        let mut template_str = String::new();
+        if intel_syntax {
+            template_str.push_str(".intel_syntax\n");
+        }
+        for piece in template {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
+                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+                    match operands[operand_idx] {
+                        GlobalAsmOperandRef::Const { ref string } => {
+                            // Const operands get injected directly into the
+                            // template. Note that we don't need to escape $
+                            // here unlike normal inline assembly.
+                            template_str.push_str(string);
+                        }
+                        GlobalAsmOperandRef::SymFn { instance } => {
+                            let llval = self.get_fn(instance);
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
+                        GlobalAsmOperandRef::SymStatic { def_id } => {
+                            let llval = self
+                                .renamed_statics
+                                .borrow()
+                                .get(&def_id)
+                                .copied()
+                                .unwrap_or_else(|| self.get_static(def_id));
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
+                    }
+                }
+            }
+        }
+        if intel_syntax {
+            template_str.push_str("\n.att_syntax\n");
+        }
+
+        unsafe {
+            llvm::LLVMAppendModuleInlineAsm(
+                self.llmod,
+                template_str.as_ptr().cast(),
+                template_str.len(),
+            );
+        }
+    }
+}
+
+pub(crate) fn inline_asm_call<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    asm: &str,
+    cons: &str,
+    inputs: &[&'ll Value],
+    output: &'ll llvm::Type,
+    labels: &[&'ll llvm::BasicBlock],
+    volatile: bool,
+    alignstack: bool,
+    dia: llvm::AsmDialect,
+    line_spans: &[Span],
+    unwind: bool,
+    dest: Option<&'ll llvm::BasicBlock>,
+    catch_funclet: Option<(&'ll llvm::BasicBlock, Option<&Funclet<'ll>>)>,
+) -> Option<&'ll Value> {
+    let volatile = if volatile { llvm::True } else { llvm::False };
+    let alignstack = if alignstack { llvm::True } else { llvm::False };
+    let can_throw = if unwind { llvm::True } else { llvm::False };
+
+    let argtys = inputs
+        .iter()
+        .map(|v| {
+            debug!("Asm Input Type: {:?}", *v);
+            bx.cx.val_ty(*v)
+        })
+        .collect::<Vec<_>>();
+
+    debug!("Asm Output Type: {:?}", output);
+    let fty = bx.cx.type_func(&argtys, output);
+    unsafe {
+        // Ask LLVM to verify that the constraints are well-formed.
+        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
+        debug!("constraint verification result: {:?}", constraints_ok);
+        if constraints_ok {
+            let v = llvm::LLVMRustInlineAsm(
+                fty,
+                asm.as_ptr().cast(),
+                asm.len(),
+                cons.as_ptr().cast(),
+                cons.len(),
+                volatile,
+                alignstack,
+                dia,
+                can_throw,
+            );
+
+            let call = if !labels.is_empty() {
+                assert!(catch_funclet.is_none());
+                bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None)
+            } else if let Some((catch, funclet)) = catch_funclet {
+                bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet)
+            } else {
+                bx.call(fty, None, None, v, inputs, None)
+            };
+
+            // Store mark in a metadata node so we can map LLVM errors
+            // back to source locations. See #17552.
+            let key = "srcloc";
+            let kind = llvm::LLVMGetMDKindIDInContext(
+                bx.llcx,
+                key.as_ptr() as *const c_char,
+                key.len() as c_uint,
+            );
+
+            // srcloc contains one integer for each line of assembly code.
+            // Unfortunately this isn't enough to encode a full span so instead
+            // we just encode the start position of each line.
+            // FIXME: Figure out a way to pass the entire line spans.
+            let mut srcloc = vec![];
+            if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
+                // LLVM inserts an extra line to add the ".intel_syntax", so add
+                // a dummy srcloc entry for it.
+                //
+                // Don't do this if we only have 1 line span since that may be
+                // due to the asm template string coming from a macro. LLVM will
+                // default to the first srcloc for lines that don't have an
+                // associated srcloc.
+                srcloc.push(bx.const_i32(0));
+            }
+            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
+            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
+            llvm::LLVMSetMetadata(call, kind, md);
+
+            Some(call)
+        } else {
+            // LLVM has detected an issue with our constraints, bail out
+            None
+        }
+    }
+}
+
+/// If the register is an xmm/ymm/zmm register then return its index.
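+/// This relies on the `xmm0..=xmm15`, `ymm0..=ymm15` and `zmm0..=zmm31`
+/// variants of `X86InlineAsmReg` having contiguous discriminants, so the
+/// index is the distance from the first register of the group.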
+fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
+    match reg {
+        InlineAsmReg::X86(reg)
+            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
+                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
+        {
+            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
+        }
+        InlineAsmReg::X86(reg)
+            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
+                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
+        {
+            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
+        }
+        InlineAsmReg::X86(reg)
+            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
+                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
+        {
+            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
+        }
+        _ => None,
+    }
+}
+
+/// If the register is an AArch64 integer register then return its index.
+fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
+    match reg {
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x0) => Some(0),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x1) => Some(1),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x2) => Some(2),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x3) => Some(3),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x4) => Some(4),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x5) => Some(5),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x6) => Some(6),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x7) => Some(7),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x8) => Some(8),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x9) => Some(9),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x10) => Some(10),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x11) => Some(11),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x12) => Some(12),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x13) => Some(13),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x14) => Some(14),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x15) => Some(15),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x16) => Some(16),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x17) => Some(17),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x18) => Some(18),
+        // x19 is reserved
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x20) => Some(20),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x21) => Some(21),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x22) => Some(22),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x23) => Some(23),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x24) => Some(24),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x25) => Some(25),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x26) => Some(26),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x27) => Some(27),
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x28) => Some(28),
+        // x29 is reserved
+        InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) => Some(30),
+        _ => None,
+    }
+}
+
+/// If the register is an AArch64 vector register then return its index.
+fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
+    match reg {
+        InlineAsmReg::AArch64(reg)
+            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
+                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
+        {
+            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
+        }
+        _ => None,
+    }
+}
+
+/// Converts an explicit register or a register class to an LLVM constraint code.
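+///
+/// For example, the explicit register `xmm0` with a 32-byte operand becomes
+/// the constraint `{ymm0}`, while the generic x86 `reg` class becomes `r`.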
+fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
+    match reg {
+        // For vector registers LLVM wants the register name to match the type size.
+        InlineAsmRegOrRegClass::Reg(reg) => {
+            if let Some(idx) = xmm_reg_index(reg) {
+                let class = if let Some(layout) = layout {
+                    match layout.size.bytes() {
+                        64 => 'z',
+                        32 => 'y',
+                        _ => 'x',
+                    }
+                } else {
+                    // We use f32 as the type for discarded outputs
+                    'x'
+                };
+                format!("{{{}mm{}}}", class, idx)
+            } else if let Some(idx) = a64_reg_index(reg) {
+                let class = if let Some(layout) = layout {
+                    match layout.size.bytes() {
+                        8 => 'x',
+                        _ => 'w',
+                    }
+                } else {
+                    // We use i32 as the type for discarded outputs
+                    'w'
+                };
+                if class == 'x' && reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
+                    // LLVM doesn't recognize x30; use lr instead.
+                    "{lr}".to_string()
+                } else {
+                    format!("{{{}{}}}", class, idx)
+                }
+            } else if let Some(idx) = a64_vreg_index(reg) {
+                let class = if let Some(layout) = layout {
+                    match layout.size.bytes() {
+                        16 => 'q',
+                        8 => 'd',
+                        4 => 's',
+                        2 => 'h',
+                        1 => 'd', // We fixup i8 to i8x8
+                        _ => unreachable!(),
+                    }
+                } else {
+                    // We use i64x2 as the type for discarded outputs
+                    'q'
+                };
+                format!("{{{}{}}}", class, idx)
+            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
+                // LLVM doesn't recognize r14
+                "{lr}".to_string()
+            } else {
+                format!("{{{}}}", reg.name())
+            }
+        }
+        // The constraints can be retrieved from
+        // https://llvm.org/docs/LangRef.html#supported-constraint-code-list
+        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+                unreachable!("clobber-only")
+            }
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
+            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+                unreachable!("clobber-only")
+            }
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+                unreachable!("clobber-only")
+            }
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+            InlineAsmRegClass::X86(
+                X86InlineAsmRegClass::x87_reg
+                | X86InlineAsmRegClass::mmx_reg
+                | X86InlineAsmRegClass::kreg0
+                | X86InlineAsmRegClass::tmm_reg,
+            ) => unreachable!("clobber-only"),
+            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
+            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
+            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
+            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
+            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+                bug!("LLVM backend does not support SPIR-V")
+            }
+            InlineAsmRegClass::Err => unreachable!(),
+        }
+        .to_string(),
+    }
+}
+
+/// Converts a modifier into LLVM's equivalent modifier.
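+///
+/// For example, with no explicit modifier an x86-64 `reg` operand gets
+/// LLVM's `q` modifier, while the Rust modifier `e` is translated to
+/// LLVM's `k`.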
+fn modifier_to_llvm(
+    arch: InlineAsmArch,
+    reg: InlineAsmRegClass,
+    modifier: Option<char>,
+) -> Option<char> {
+    // The modifiers can be retrieved from
+    // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
+    match reg {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            if modifier == Some('v') { None } else { modifier }
+        }
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+            if modifier.is_none() {
+                Some('q')
+            } else {
+                modifier
+            }
+        }
+        InlineAsmRegClass::Hexagon(_) => None,
+        InlineAsmRegClass::LoongArch(_) => None,
+        InlineAsmRegClass::Mips(_) => None,
+        InlineAsmRegClass::Nvptx(_) => None,
+        InlineAsmRegClass::PowerPC(_) => None,
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+            None if arch == InlineAsmArch::X86_64 => Some('q'),
+            None => Some('k'),
+            Some('l') => Some('b'),
+            Some('h') => Some('h'),
+            Some('x') => Some('w'),
+            Some('e') => Some('k'),
+            Some('r') => Some('q'),
+            _ => unreachable!(),
+        },
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+            (_, Some('x')) => Some('x'),
+            (_, Some('y')) => Some('t'),
+            (_, Some('z')) => Some('g'),
+            _ => unreachable!(),
+        },
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::kreg0
+            | X86InlineAsmRegClass::tmm_reg,
+        ) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
+        InlineAsmRegClass::Bpf(_) => None,
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
+        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
+        | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
+            Some('h') => Some('B'),
+            Some('l') => Some('A'),
+            _ => None,
+        },
+        InlineAsmRegClass::Avr(_) => None,
+        InlineAsmRegClass::S390x(_) => None,
+        InlineAsmRegClass::Msp430(_) => None,
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        }
+        InlineAsmRegClass::M68k(_) => None,
+        InlineAsmRegClass::CSKY(_) => None,
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
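+/// For example, a discarded output in an AArch64 vector register is given
+/// the type `<2 x i64>`, matching the "i64x2" assumption that `reg_to_llvm`
+/// makes for discarded outputs.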
+fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
+    match reg {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+            cx.type_vector(cx.type_i64(), 2)
+        }
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+            cx.type_vector(cx.type_i64(), 2)
+        }
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::kreg0
+            | X86InlineAsmRegClass::tmm_reg,
+        ) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
+        InlineAsmRegClass::S390x(
+            S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
+        ) => cx.type_i32(),
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("LLVM backend does not support SPIR-V")
+        }
+        InlineAsmRegClass::Err => unreachable!(),
+    }
+}
+
+/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
+/// the equivalent integer type.
+fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
+    let dl = &cx.tcx.data_layout;
+    match scalar.primitive() {
+        Primitive::Int(Integer::I8, _) => cx.type_i8(),
+        Primitive::Int(Integer::I16, _) => cx.type_i16(),
+        Primitive::Int(Integer::I32, _) => cx.type_i32(),
+        Primitive::Int(Integer::I64, _) => cx.type_i64(),
+        Primitive::F32 => cx.type_f32(),
+        Primitive::F64 => cx.type_f64(),
+        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+        Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
+        _ => unreachable!(),
+    }
+}
+
+/// Fix up an input value to work around LLVM bugs.
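+///
+/// For example, an `i8` passed in an AArch64 vector register is inserted
+/// into an `<8 x i8>` vector, matching the `d` register name chosen for
+/// 1-byte operands in `reg_to_llvm` ("we fixup i8 to i8x8").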
+fn llvm_fixup_input<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    mut value: &'ll Value,
+    reg: InlineAsmRegClass,
+    layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+    let dl = &bx.tcx.data_layout;
+    match (reg, layout.abi) {
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
+                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
+                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+            } else {
+                value
+            }
+        }
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
+            let count = 16 / layout.size.bytes();
+            let vec_ty = bx.cx.type_vector(elem_ty, count);
+            // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+            if let Primitive::Pointer(_) = s.primitive() {
+                let t = bx.type_from_integer(dl.ptr_sized_integer());
+                value = bx.ptrtoint(value, t);
+            }
+            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+        }
+        (
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            Abi::Vector { element, count },
+        ) if layout.size.bytes() == 8 => {
+            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+            let vec_ty = bx.cx.type_vector(elem_ty, count);
+            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
+            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+        }
+        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+            if s.primitive() == Primitive::F64 =>
+        {
+            bx.bitcast(value, bx.cx.type_i64())
+        }
+        (
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            Abi::Vector { .. },
+        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
+        (
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            Abi::Scalar(s),
+        ) => {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
+                bx.bitcast(value, bx.cx.type_f32())
+            } else {
+                value
+            }
+        }
+        (
+            InlineAsmRegClass::Arm(
+                ArmInlineAsmRegClass::dreg
+                | ArmInlineAsmRegClass::dreg_low8
+                | ArmInlineAsmRegClass::dreg_low16,
+            ),
+            Abi::Scalar(s),
+        ) => {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
+                bx.bitcast(value, bx.cx.type_f64())
+            } else {
+                value
+            }
+        }
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetic.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+                _ => value,
+            }
+        }
+        _ => value,
+    }
+}
+
+/// Fix up an output value to work around LLVM bugs.
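+/// This undoes the transformations applied by `llvm_fixup_input`, e.g. by
+/// extracting the scalar element back out of the vector used for AArch64
+/// vector registers.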
+fn llvm_fixup_output<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    mut value: &'ll Value,
+    reg: InlineAsmRegClass,
+    layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+    match (reg, layout.abi) {
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
+                bx.extract_element(value, bx.const_i32(0))
+            } else {
+                value
+            }
+        }
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+            value = bx.extract_element(value, bx.const_i32(0));
+            if let Primitive::Pointer(_) = s.primitive() {
+                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
+            }
+            value
+        }
+        (
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            Abi::Vector { element, count },
+        ) if layout.size.bytes() == 8 => {
+            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
+            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+        }
+        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+            if s.primitive() == Primitive::F64 =>
+        {
+            bx.bitcast(value, bx.cx.type_f64())
+        }
+        (
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            Abi::Vector { .. },
+        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
+        (
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            Abi::Scalar(s),
+        ) => {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
+                bx.bitcast(value, bx.cx.type_i32())
+            } else {
+                value
+            }
+        }
+        (
+            InlineAsmRegClass::Arm(
+                ArmInlineAsmRegClass::dreg
+                | ArmInlineAsmRegClass::dreg_low8
+                | ArmInlineAsmRegClass::dreg_low16,
+            ),
+            Abi::Scalar(s),
+        ) => {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
+                bx.bitcast(value, bx.cx.type_i64())
+            } else {
+                value
+            }
+        }
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetic.
+                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+                _ => value,
+            }
+        }
+        _ => value,
+    }
+}
+
+/// Output type to use for llvm_fixup_output.
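+/// This mirrors the types produced by `llvm_fixup_input`; `llvm_fixup_output`
+/// then converts a value of this type back to the layout's own LLVM type.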
+fn llvm_fixup_output_type<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    reg: InlineAsmRegClass,
+    layout: &TyAndLayout<'tcx>,
+) -> &'ll Type {
+    match (reg, layout.abi) {
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
+                cx.type_vector(cx.type_i8(), 8)
+            } else {
+                layout.llvm_type(cx)
+            }
+        }
+        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+            let elem_ty = llvm_asm_scalar_type(cx, s);
+            let count = 16 / layout.size.bytes();
+            cx.type_vector(elem_ty, count)
+        }
+        (
+            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+            Abi::Vector { element, count },
+        ) if layout.size.bytes() == 8 => {
+            let elem_ty = llvm_asm_scalar_type(cx, element);
+            cx.type_vector(elem_ty, count * 2)
+        }
+        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+            if s.primitive() == Primitive::F64 =>
+        {
+            cx.type_i64()
+        }
+        (
+            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+            Abi::Vector { .. },
+        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
+        (
+            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            Abi::Scalar(s),
+        ) => {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
+                cx.type_f32()
+            } else {
+                layout.llvm_type(cx)
+            }
+        }
+        (
+            InlineAsmRegClass::Arm(
+                ArmInlineAsmRegClass::dreg
+                | ArmInlineAsmRegClass::dreg_low8
+                | ArmInlineAsmRegClass::dreg_low16,
+            ),
+            Abi::Scalar(s),
+        ) => {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
+                cx.type_f64()
+            } else {
+                layout.llvm_type(cx)
+            }
+        }
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetic.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+                Primitive::F32 => cx.type_i32(),
+                Primitive::F64 => cx.type_i64(),
+                _ => layout.llvm_type(cx),
+            }
+        }
+        _ => layout.llvm_type(cx),
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
new file mode 100644
index 00000000000..f9eaa0d94cb
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -0,0 +1,503 @@
+//! Set and unset common attributes on LLVM values.
+
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::config::{FunctionReturn, OptLevel};
+use rustc_span::symbol::sym;
+use rustc_target::spec::abi::Abi;
+use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
+use smallvec::SmallVec;
+
+use crate::attributes;
+use crate::errors::{MissingFeatures, SanitizerMemtagRequiresMte, TargetFeatureDisableOrEnable};
+use crate::llvm::AttributePlace::Function;
+use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects};
+use crate::llvm_util;
+pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
+
+use crate::context::CodegenCx;
+use crate::value::Value;
+
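+/// Applies the given attributes to `llfn` at the given attribute place
+/// (the function itself, its return value, or one of its arguments).
+/// Does nothing when the attribute list is empty.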
+pub fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+    if !attrs.is_empty() {
+        llvm::AddFunctionAttributes(llfn, idx, attrs);
+    }
+}
+
+pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+    if !attrs.is_empty() {
+        llvm::AddCallSiteAttributes(callsite, idx, attrs);
+    }
+}
+
+/// Get LLVM attribute for the provided inline heuristic.
+#[inline]
+fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
+    if !cx.tcx.sess.opts.unstable_opts.inline_llvm {
+        // disable LLVM inlining
+        return Some(AttributeKind::NoInline.create_attr(cx.llcx));
+    }
+    match inline {
+        InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
+        InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
+        InlineAttr::Never => {
+            if cx.sess().target.arch != "amdgpu" {
+                Some(AttributeKind::NoInline.create_attr(cx.llcx))
+            } else {
+                None
+            }
+        }
+        InlineAttr::None => None,
+    }
+}
+
+/// Get LLVM sanitize attributes.
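+///
+/// The returned attributes cover every sanitizer that is enabled for the
+/// session and not opted out of via `no_sanitize`.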
+#[inline]
+pub fn sanitize_attrs<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    no_sanitize: SanitizerSet,
+) -> SmallVec<[&'ll Attribute; 4]> {
+    let mut attrs = SmallVec::new();
+    let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
+    if enabled.contains(SanitizerSet::ADDRESS) || enabled.contains(SanitizerSet::KERNELADDRESS) {
+        attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
+    }
+    if enabled.contains(SanitizerSet::MEMORY) {
+        attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
+    }
+    if enabled.contains(SanitizerSet::THREAD) {
+        attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
+    }
+    if enabled.contains(SanitizerSet::HWADDRESS) {
+        attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
+    }
+    if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
+        attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
+    }
+    if enabled.contains(SanitizerSet::MEMTAG) {
+        // Check to make sure the mte target feature is actually enabled.
+        let features = cx.tcx.global_backend_features(());
+        let mte_feature =
+            features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
+        if let None | Some("-mte") = mte_feature {
+            cx.tcx.dcx().emit_err(SanitizerMemtagRequiresMte);
+        }
+
+        attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
+    }
+    if enabled.contains(SanitizerSet::SAFESTACK) {
+        attrs.push(llvm::AttributeKind::SanitizeSafeStack.create_attr(cx.llcx));
+    }
+    attrs
+}
+
+/// Tell LLVM whether or not to emit the information necessary to unwind the stack for the function.
+#[inline]
+pub fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute {
+    // NOTE: We should determine if we even need async unwind tables, as they
+    // have more overhead, and if we can use sync unwind tables we probably
+    // should.
+    let async_unwind = !use_sync_unwind.unwrap_or(false);
+    llvm::CreateUWTableAttr(llcx, async_unwind)
+}
+
+pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    let mut fp = cx.sess().target.frame_pointer;
+    let opts = &cx.sess().opts;
+    // "mcount" function relies on stack pointer.
+    // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
+    if opts.unstable_opts.instrument_mcount || matches!(opts.cg.force_frame_pointers, Some(true)) {
+        fp = FramePointer::Always;
+    }
+    let attr_value = match fp {
+        FramePointer::Always => "all",
+        FramePointer::NonLeaf => "non-leaf",
+        FramePointer::MayOmit => return None,
+    };
+    Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
+}
+
+fn function_return_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    let function_return_attr = match cx.sess().opts.unstable_opts.function_return {
+        FunctionReturn::Keep => return None,
+        FunctionReturn::ThunkExtern => AttributeKind::FnRetThunkExtern,
+    };
+
+    Some(function_return_attr.create_attr(cx.llcx))
+}
+
+/// Tell LLVM what instrumentation to insert into each function.
+#[inline]
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 4]> {
+    let mut attrs = SmallVec::new();
+    if cx.sess().opts.unstable_opts.instrument_mcount {
+        // Similar to `clang -pg` behavior. Handled by the
+        // `post-inline-ee-instrument` LLVM pass.
+
+        // The function name varies on platforms.
+        // See test/CodeGen/mcount.c in clang.
+        let mcount_name = match &cx.sess().target.llvm_mcount_intrinsic {
+            Some(llvm_mcount_intrinsic) => llvm_mcount_intrinsic.as_ref(),
+            None => cx.sess().target.mcount.as_ref(),
+        };
+
+        attrs.push(llvm::CreateAttrStringValue(
+            cx.llcx,
+            "instrument-function-entry-inlined",
+            mcount_name,
+        ));
+    }
+    if let Some(options) = &cx.sess().opts.unstable_opts.instrument_xray {
+        // XRay instrumentation is similar to __cyg_profile_func_{enter,exit}.
+        // Function prologue and epilogue are instrumented with NOP sleds,
+        // a runtime library later replaces them with detours into tracing code.
+        if options.always {
+            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-always"));
+        }
+        if options.never {
+            attrs.push(llvm::CreateAttrStringValue(cx.llcx, "function-instrument", "xray-never"));
+        }
+        if options.ignore_loops {
+            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-ignore-loops"));
+        }
+        // LLVM will not choose the default for us, but rather requires a specific
+        // threshold in the absence of "xray-always". Use the same default as Clang.
+        let threshold = options.instruction_threshold.unwrap_or(200);
+        attrs.push(llvm::CreateAttrStringValue(
+            cx.llcx,
+            "xray-instruction-threshold",
+            &threshold.to_string(),
+        ));
+        if options.skip_entry {
+            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-entry"));
+        }
+        if options.skip_exit {
+            attrs.push(llvm::CreateAttrString(cx.llcx, "xray-skip-exit"));
+        }
+    }
+    attrs
+}
+
+fn nojumptables_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    if !cx.sess().opts.unstable_opts.no_jump_tables {
+        return None;
+    }
+
+    Some(llvm::CreateAttrStringValue(cx.llcx, "no-jump-tables", "true"))
+}
+
+fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    // Currently stack probes seem somewhat incompatible with the address
+    // sanitizer and thread sanitizer. With asan we're already protected from
+    // stack overflow anyway so we don't really need stack probes regardless.
+    if cx
+        .sess()
+        .opts
+        .unstable_opts
+        .sanitizer
+        .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
+    {
+        return None;
+    }
+
+    // probestack doesn't play nice either with `-C profile-generate`.
+    if cx.sess().opts.cg.profile_generate.enabled() {
+        return None;
+    }
+
+    // probestack doesn't play nice either with gcov profiling.
+    if cx.sess().opts.unstable_opts.profile {
+        return None;
+    }
+
+    let attr_value = match cx.sess().target.stack_probes {
+        StackProbeType::None => return None,
+        // Request LLVM to generate the probes inline. If the given LLVM version does not support
+        // this, no probe is generated at all (even if the attribute is specified).
+        StackProbeType::Inline => "inline-asm",
+        // Flag our internal `__rust_probestack` function as the stack probe symbol.
+        // This is defined in the `compiler-builtins` crate for each architecture.
+        StackProbeType::Call => "__rust_probestack",
+        // Pick from the two above based on the LLVM version.
+        StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
+            if llvm_util::get_version() < min_llvm_version_for_inline {
+                "__rust_probestack"
+            } else {
+                "inline-asm"
+            }
+        }
+    };
+    Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
+}
+
+fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    let sspattr = match cx.sess().stack_protector() {
+        StackProtector::None => return None,
+        StackProtector::All => AttributeKind::StackProtectReq,
+        StackProtector::Strong => AttributeKind::StackProtectStrong,
+        StackProtector::Basic => AttributeKind::StackProtect,
+    };
+
+    Some(sspattr.create_attr(cx.llcx))
+}
+
+pub fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
+    let target_cpu = llvm_util::target_cpu(cx.tcx.sess);
+    llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
+}
+
+pub fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    llvm_util::tune_cpu(cx.tcx.sess)
+        .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
+}
+
+/// Get the `NonLazyBind` LLVM attribute,
+/// if the codegen options allow skipping the PLT.
+pub fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+    // Don't generate calls through PLT if it's not necessary
+    if !cx.sess().needs_plt() {
+        Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
+    } else {
+        None
+    }
+}
+
+/// Get the default optimization attributes for a function.
+#[inline]
+pub(crate) fn default_optimisation_attrs<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+) -> SmallVec<[&'ll Attribute; 2]> {
+    let mut attrs = SmallVec::new();
+    match cx.sess().opts.optimize {
+        OptLevel::Size => {
+            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+        }
+        OptLevel::SizeMin => {
+            attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+            attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+        }
+        _ => {}
+    }
+    attrs
+}
+
+fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
+    llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
+}
+
+/// Composite function which sets LLVM attributes for a function depending on its AST (`#[attribute]`)
+/// attributes.
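+///
+/// Among other things this covers inline hints, unwind tables, sanitizers,
+/// allocator-related attributes, and the `target-cpu`/`target-features`
+/// strings.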
+pub fn from_fn_attrs<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    llfn: &'ll Value,
+    instance: ty::Instance<'tcx>,
+) {
+    let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
+
+    let mut to_add = SmallVec::<[_; 16]>::new();
+
+    match codegen_fn_attrs.optimize {
+        OptimizeAttr::None => {
+            to_add.extend(default_optimisation_attrs(cx));
+        }
+        OptimizeAttr::Size => {
+            to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+            to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+        }
+        OptimizeAttr::Speed => {}
+    }
+
+    let inline =
+        if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
+            InlineAttr::Hint
+        } else {
+            codegen_fn_attrs.inline
+        };
+    to_add.extend(inline_attr(cx, inline));
+
+    // The `uwtable` attribute according to LLVM is:
+    //
+    //     This attribute indicates that the ABI being targeted requires that an
+    //     unwind table entry be produced for this function even if we can show
+    //     that no exceptions passes by it. This is normally the case for the
+    //     ELF x86-64 abi, but it can be disabled for some compilation units.
+    //
+    // Typically when we're compiling with `-C panic=abort` (which implies this
+    // `no_landing_pads` check) we don't need `uwtable` because we can't
+    // generate any exceptions! On Windows, however, exceptions include other
+    // events such as illegal instructions, segfaults, etc. This means that on
+    // Windows we end up still needing the `uwtable` attribute even if the `-C
+    // panic=abort` flag is passed.
+    //
+    // You can also find more info on why Windows always requires uwtables here:
+    //      https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
+    if cx.sess().must_emit_unwind_tables() {
+        to_add.push(uwtable_attr(cx.llcx, cx.sess().opts.unstable_opts.use_sync_unwind));
+    }
+
+    if cx.sess().opts.unstable_opts.profile_sample_use.is_some() {
+        to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
+    }
+
+    // FIXME: none of these functions interact with source level attributes.
+    to_add.extend(frame_pointer_type_attr(cx));
+    to_add.extend(function_return_attr(cx));
+    to_add.extend(instrument_function_attr(cx));
+    to_add.extend(nojumptables_attr(cx));
+    to_add.extend(probestack_attr(cx));
+    to_add.extend(stackprotector_attr(cx));
+
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_BUILTINS) {
+        to_add.push(llvm::CreateAttrString(cx.llcx, "no-builtins"));
+    }
+
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+        to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
+        to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx));
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
+        to_add.push(MemoryEffects::None.create_attr(cx.llcx));
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+        to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
+        // HACK(jubilee): "indirect branch tracking" works by attaching prologues to functions.
+        // And it is a module-level attribute, so the alternative is pulling naked functions into new LLVM modules.
+        // Otherwise LLVM's "naked" functions come with endbr prefixes per https://github.com/rust-lang/rust/issues/98768
+        to_add.push(AttributeKind::NoCfCheck.create_attr(cx.llcx));
+        // Need this for AArch64.
+        to_add.push(llvm::CreateAttrStringValue(cx.llcx, "branch-target-enforcement", "false"));
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
+        || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
+    {
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        // apply to argument place instead of function
+        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
+        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
+        let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+        if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+            flags |= AllocKindFlags::Uninitialized;
+        } else {
+            flags |= AllocKindFlags::Zeroed;
+        }
+        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
+        // apply to return place instead of function (unlike all other attributes applied in this function)
+        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        to_add.push(llvm::CreateAllocKindAttr(
+            cx.llcx,
+            AllocKindFlags::Realloc | AllocKindFlags::Aligned,
+        ));
+        // applies to argument place instead of function place
+        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+        // apply to argument place instead of function
+        let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
+        to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
+        let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
+        to_add.push(create_alloc_family_attr(cx.llcx));
+        to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
+        // applies to argument place instead of function place
+        let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+        attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+    }
+    if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
+        to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
+    }
+    if let Some(align) = codegen_fn_attrs.alignment {
+        llvm::set_alignment(llfn, align as usize);
+    }
+    to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));
+
+    // Always annotate functions with the target-cpu they are compiled for.
+    // Without this, ThinLTO won't inline Rust functions into Clang generated
+    // functions (because Clang annotates functions this way too).
+    to_add.push(target_cpu_attr(cx));
+    // tune-cpu is only conveyed through the attribute for our purpose.
+    // The target doesn't care; the subtarget reads our attribute.
+    to_add.extend(tune_cpu_attr(cx));
+
+    let function_features =
+        codegen_fn_attrs.target_features.iter().map(|f| f.as_str()).collect::<Vec<&str>>();
+
+    if let Some(f) = llvm_util::check_tied_features(
+        cx.tcx.sess,
+        &function_features.iter().map(|f| (*f, true)).collect(),
+    ) {
+        let span = cx
+            .tcx
+            .get_attrs(instance.def_id(), sym::target_feature)
+            .next()
+            .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
+        cx.tcx
+            .dcx()
+            .create_err(TargetFeatureDisableOrEnable {
+                features: f,
+                span: Some(span),
+                missing_features: Some(MissingFeatures),
+            })
+            .emit();
+        return;
+    }
+
+    let mut function_features = function_features
+        .iter()
+        .flat_map(|feat| {
+            llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{f}"))
+        })
+        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+            InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
+            InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
+        }))
+        .collect::<Vec<String>>();
+
+    if cx.tcx.sess.target.is_like_wasm {
+        // If this function is an import from the environment but the wasm
+        // import has a specific module/name, apply them here.
+        if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
+            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module));
+
+            let name =
+                codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
+            let name = name.as_str();
+            to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
+        }
+
+        // The `"wasm"` abi on wasm targets automatically enables the
+        // `+multivalue` feature because the purpose of the wasm abi is to match
+        // the WebAssembly specification, which has this feature. This won't be
+        // needed when LLVM enables this `multivalue` feature by default.
+        if !cx.tcx.is_closure_like(instance.def_id()) {
+            let abi = cx.tcx.fn_sig(instance.def_id()).skip_binder().abi();
+            if abi == Abi::Wasm {
+                function_features.push("+multivalue".to_string());
+            }
+        }
+    }
+
+    let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
+    let function_features = function_features.iter().map(|s| s.as_str());
+    let target_features: String =
+        global_features.chain(function_features).intersperse(",").collect();
+    if !target_features.is_empty() {
+        to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
+    }
+
+    attributes::apply_to_llfn(llfn, Function, &to_add);
+}
+
+fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
+    tcx.wasm_import_module_map(id.krate).get(&id)
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
new file mode 100644
index 00000000000..0619000364b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -0,0 +1,476 @@
+//! Helpers for dealing with static archives
+
+use std::env;
+use std::ffi::{c_char, c_void, CStr, CString, OsString};
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use std::str;
+
+use crate::common;
+use crate::errors::{
+    DlltoolFailImportLibrary, ErrorCallingDllTool, ErrorCreatingImportLibrary, ErrorWritingDEFFile,
+};
+use crate::llvm::archive_ro::{ArchiveRO, Child};
+use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
+use rustc_codegen_ssa::back::archive::{
+    get_native_object_symbols, try_extract_macho_fat_archive, ArArchiveBuilder,
+    ArchiveBuildFailure, ArchiveBuilder, ArchiveBuilderBuilder, UnknownArchiveKind,
+};
+
+use rustc_session::cstore::DllImport;
+use rustc_session::Session;
+
+/// Helper for adding many files to an archive.
+#[must_use = "must call build() to finish building the archive"]
+pub(crate) struct LlvmArchiveBuilder<'a> {
+    sess: &'a Session,
+    additions: Vec<Addition>,
+}
+
+enum Addition {
+    File { path: PathBuf, name_in_archive: String },
+    Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> },
+}
+
+impl Addition {
+    fn path(&self) -> &Path {
+        match self {
+            Addition::File { path, .. } | Addition::Archive { path, .. } => path,
+        }
+    }
+}
+
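+/// Archive members whose name contains `SYMDEF` hold the archive's symbol
+/// table rather than object code, so they are skipped when copying members
+/// out of an existing archive.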
+fn is_relevant_child(c: &Child<'_>) -> bool {
+    match c.name() {
+        Some(name) => !name.contains("SYMDEF"),
+        None => false,
+    }
+}
+
+/// Map machine type strings to values of LLVM's MachineTypes enum.
+fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
+    match cpu {
+        "x86_64" => LLVMMachineType::AMD64,
+        "x86" => LLVMMachineType::I386,
+        "aarch64" => LLVMMachineType::ARM64,
+        "arm64ec" => LLVMMachineType::ARM64EC,
+        "arm" => LLVMMachineType::ARM,
+        _ => panic!("unsupported cpu type {cpu}"),
+    }
+}
+
+impl<'a> ArchiveBuilder for LlvmArchiveBuilder<'a> {
+    fn add_archive(
+        &mut self,
+        archive: &Path,
+        skip: Box<dyn FnMut(&str) -> bool + 'static>,
+    ) -> io::Result<()> {
+        let mut archive = archive.to_path_buf();
+        if self.sess.target.llvm_target.contains("-apple-macosx") {
+            if let Some(new_archive) = try_extract_macho_fat_archive(self.sess, &archive)? {
+                archive = new_archive
+            }
+        }
+        let archive_ro = match ArchiveRO::open(&archive) {
+            Ok(ar) => ar,
+            Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+        };
+        if self.additions.iter().any(|ar| ar.path() == archive) {
+            return Ok(());
+        }
+        self.additions.push(Addition::Archive {
+            path: archive,
+            archive: archive_ro,
+            skip: Box::new(skip),
+        });
+        Ok(())
+    }
+
+    /// Adds an arbitrary file to this archive
+    fn add_file(&mut self, file: &Path) {
+        let name = file.file_name().unwrap().to_str().unwrap();
+        self.additions
+            .push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() });
+    }
+
+    /// Combine the provided files, rlibs, and native libraries into a single
+    /// `Archive`.
+    fn build(mut self: Box<Self>, output: &Path) -> bool {
+        match self.build_with_llvm(output) {
+            Ok(any_members) => any_members,
+            Err(e) => self.sess.dcx().emit_fatal(ArchiveBuildFailure { error: e }),
+        }
+    }
+}
+
+pub struct LlvmArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
+    fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder + 'a> {
+        // FIXME use ArArchiveBuilder on most targets again once reading thin archives is
+        // implemented
+        if true {
+            Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() })
+        } else {
+            Box::new(ArArchiveBuilder::new(sess, get_llvm_object_symbols))
+        }
+    }
+
+    fn create_dll_import_lib(
+        &self,
+        sess: &Session,
+        lib_name: &str,
+        dll_imports: &[DllImport],
+        tmpdir: &Path,
+        is_direct_dependency: bool,
+    ) -> PathBuf {
+        let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" };
+        let output_path = {
+            let mut output_path: PathBuf = tmpdir.to_path_buf();
+            output_path.push(format!("{lib_name}{name_suffix}"));
+            output_path.with_extension("lib")
+        };
+
+        let target = &sess.target;
+        let mingw_gnu_toolchain = common::is_mingw_gnu_toolchain(target);
+
+        let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
+            .iter()
+            .map(|import: &DllImport| {
+                if sess.target.arch == "x86" {
+                    (
+                        common::i686_decorated_name(import, mingw_gnu_toolchain, false),
+                        import.ordinal(),
+                    )
+                } else {
+                    (import.name.to_string(), import.ordinal())
+                }
+            })
+            .collect();
+
+        if mingw_gnu_toolchain {
+            // The binutils linker used on -windows-gnu targets cannot read the import
+            // libraries generated by LLVM: in our attempts, the linker produced an .EXE
+            // that loaded but crashed with an AV upon calling one of the imported
+            // functions. Therefore, use binutils to create the import library instead,
+            // by writing a .DEF file to the temp dir and calling binutils's dlltool.
+            let def_file_path =
+                tmpdir.join(format!("{lib_name}{name_suffix}")).with_extension("def");
+
+            let def_file_content = format!(
+                "EXPORTS\n{}",
+                import_name_and_ordinal_vector
+                    .into_iter()
+                    .map(|(name, ordinal)| {
+                        match ordinal {
+                            Some(n) => format!("{name} @{n} NONAME"),
+                            None => name,
+                        }
+                    })
+                    .collect::<Vec<String>>()
+                    .join("\n")
+            );
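+            // For example, two imports `foo` (ordinal 1) and `bar` (no ordinal)
+            // produce a .DEF file along the lines of:
+            //
+            //     EXPORTS
+            //     foo @1 NONAME
+            //     bar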
+
+            match std::fs::write(&def_file_path, def_file_content) {
+                Ok(_) => {}
+                Err(e) => {
+                    sess.dcx().emit_fatal(ErrorWritingDEFFile { error: e });
+                }
+            };
+
+            // --no-leading-underscore: For the `import_name_type` feature to work, we need to be
+            // able to control the *exact* spelling of each of the symbols that are being imported:
+            // hence we don't want `dlltool` adding leading underscores automatically.
+            let dlltool = find_binutils_dlltool(sess);
+            let temp_prefix = {
+                let mut path = PathBuf::from(&output_path);
+                path.pop();
+                path.push(lib_name);
+                path
+            };
+            // dlltool target architecture args from:
+            // https://github.com/llvm/llvm-project-release-prs/blob/llvmorg-15.0.6/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp#L69
+            let (dlltool_target_arch, dlltool_target_bitness) = match sess.target.arch.as_ref() {
+                "x86_64" => ("i386:x86-64", "--64"),
+                "x86" => ("i386", "--32"),
+                "aarch64" => ("arm64", "--64"),
+                "arm" => ("arm", "--32"),
+                _ => panic!("unsupported arch {}", sess.target.arch),
+            };
+            let mut dlltool_cmd = std::process::Command::new(&dlltool);
+            dlltool_cmd.args([
+                "-d",
+                def_file_path.to_str().unwrap(),
+                "-D",
+                lib_name,
+                "-l",
+                output_path.to_str().unwrap(),
+                "-m",
+                dlltool_target_arch,
+                "-f",
+                dlltool_target_bitness,
+                "--no-leading-underscore",
+                "--temp-prefix",
+                temp_prefix.to_str().unwrap(),
+            ]);
+
+            match dlltool_cmd.output() {
+                Err(e) => {
+                    sess.dcx().emit_fatal(ErrorCallingDllTool {
+                        dlltool_path: dlltool.to_string_lossy(),
+                        error: e,
+                    });
+                }
+                // dlltool returns '0' on failure, so check for error output instead.
+                Ok(output) if !output.stderr.is_empty() => {
+                    sess.dcx().emit_fatal(DlltoolFailImportLibrary {
+                        dlltool_path: dlltool.to_string_lossy(),
+                        dlltool_args: dlltool_cmd
+                            .get_args()
+                            .map(|arg| arg.to_string_lossy())
+                            .collect::<Vec<_>>()
+                            .join(" "),
+                        stdout: String::from_utf8_lossy(&output.stdout),
+                        stderr: String::from_utf8_lossy(&output.stderr),
+                    })
+                }
+                _ => {}
+            }
+        } else {
+            // we've checked for \0 characters in the library name already
+            let dll_name_z = CString::new(lib_name).unwrap();
+
+            let output_path_z = rustc_fs_util::path_to_c_string(&output_path);
+
+            trace!("invoking LLVMRustWriteImportLibrary");
+            trace!("  dll_name {:#?}", dll_name_z);
+            trace!("  output_path {}", output_path.display());
+            trace!(
+                "  import names: {}",
+                dll_imports
+                    .iter()
+                    .map(|import| import.name.to_string())
+                    .collect::<Vec<_>>()
+                    .join(", "),
+            );
+
+            // All import names are Rust identifiers and therefore cannot contain \0 characters.
+            // FIXME: when support for #[link_name] is implemented, ensure that the import names
+            // still don't contain any \0 characters. Also need to check that the names don't
+            // contain substrings like " @" or "NONAME" that are keywords or otherwise reserved
+            // in definition files.
+            let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> =
+                import_name_and_ordinal_vector
+                    .into_iter()
+                    .map(|(name, ordinal)| (CString::new(name).unwrap(), ordinal))
+                    .collect();
+
+            let ffi_exports: Vec<LLVMRustCOFFShortExport> = cstring_import_name_and_ordinal_vector
+                .iter()
+                .map(|(name_z, ordinal)| LLVMRustCOFFShortExport::new(name_z.as_ptr(), *ordinal))
+                .collect();
+            let result = unsafe {
+                crate::llvm::LLVMRustWriteImportLibrary(
+                    dll_name_z.as_ptr(),
+                    output_path_z.as_ptr(),
+                    ffi_exports.as_ptr(),
+                    ffi_exports.len(),
+                    llvm_machine_type(&sess.target.arch) as u16,
+                    !sess.target.is_like_msvc,
+                )
+            };
+
+            if result == crate::llvm::LLVMRustResult::Failure {
+                sess.dcx().emit_fatal(ErrorCreatingImportLibrary {
+                    lib_name,
+                    error: llvm::last_error().unwrap_or("unknown LLVM error".to_string()),
+                });
+            }
+        };
+
+        output_path
+    }
+}
+
+// The object crate doesn't know how to get symbols for LLVM bitcode and COFF bigobj files.
+// As such we need to use LLVM for them.
+#[deny(unsafe_op_in_unsafe_fn)]
+fn get_llvm_object_symbols(
+    buf: &[u8],
+    f: &mut dyn FnMut(&[u8]) -> io::Result<()>,
+) -> io::Result<bool> {
+    let is_bitcode = unsafe { llvm::LLVMRustIsBitcode(buf.as_ptr(), buf.len()) };
+
+    // COFF bigobj file, msvc LTO file or import library. See
+    // https://github.com/llvm/llvm-project/blob/453f27bc9/llvm/lib/BinaryFormat/Magic.cpp#L38-L51
+    let is_unsupported_windows_obj_file = buf.get(0..4) == Some(b"\0\0\xFF\xFF");
+
+    if is_bitcode || is_unsupported_windows_obj_file {
+        let mut state = Box::new(f);
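+        // `f` is a wide (fat) pointer, so it can't be passed through the C ABI
+        // directly. Box it and hand LLVM a thin pointer to the boxed wide pointer
+        // as opaque state; `callback` below reborrows it for every symbol reported.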
+
+        let err = unsafe {
+            llvm::LLVMRustGetSymbols(
+                buf.as_ptr(),
+                buf.len(),
+                std::ptr::addr_of_mut!(*state) as *mut c_void,
+                callback,
+                error_callback,
+            )
+        };
+
+        if err.is_null() {
+            return Ok(true);
+        } else {
+            return Err(unsafe { *Box::from_raw(err as *mut io::Error) });
+        }
+
+        unsafe extern "C" fn callback(
+            state: *mut c_void,
+            symbol_name: *const c_char,
+        ) -> *mut c_void {
+            let f = unsafe { &mut *(state as *mut &mut dyn FnMut(&[u8]) -> io::Result<()>) };
+            match f(unsafe { CStr::from_ptr(symbol_name) }.to_bytes()) {
+                Ok(()) => std::ptr::null_mut(),
+                Err(err) => Box::into_raw(Box::new(err)) as *mut c_void,
+            }
+        }
+
+        unsafe extern "C" fn error_callback(error: *const c_char) -> *mut c_void {
+            let error = unsafe { CStr::from_ptr(error) };
+            Box::into_raw(Box::new(io::Error::new(
+                io::ErrorKind::Other,
+                format!("LLVM error: {}", error.to_string_lossy()),
+            ))) as *mut c_void
+        }
+    } else {
+        get_native_object_symbols(buf, f)
+    }
+}
+
+impl<'a> LlvmArchiveBuilder<'a> {
+    fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> {
+        let kind = &*self.sess.target.archive_format;
+        let kind = kind
+            .parse::<ArchiveKind>()
+            .map_err(|_| kind)
+            .unwrap_or_else(|kind| self.sess.dcx().emit_fatal(UnknownArchiveKind { kind }));
+
+        let mut additions = mem::take(&mut self.additions);
+        let mut strings = Vec::new();
+        let mut members = Vec::new();
+
+        let dst = CString::new(output.to_str().unwrap())?;
+
+        unsafe {
+            for addition in &mut additions {
+                match addition {
+                    Addition::File { path, name_in_archive } => {
+                        let path = CString::new(path.to_str().unwrap())?;
+                        let name = CString::new(name_in_archive.as_bytes())?;
+                        members.push(llvm::LLVMRustArchiveMemberNew(
+                            path.as_ptr(),
+                            name.as_ptr(),
+                            None,
+                        ));
+                        strings.push(path);
+                        strings.push(name);
+                    }
+                    Addition::Archive { archive, skip, .. } => {
+                        for child in archive.iter() {
+                            let child = child.map_err(string_to_io_error)?;
+                            if !is_relevant_child(&child) {
+                                continue;
+                            }
+                            let child_name = child.name().unwrap();
+                            if skip(child_name) {
+                                continue;
+                            }
+
+                            // It appears that LLVM's archive writer is a little
+                            // buggy if the name we pass down isn't just the
+                            // filename component, so chop that off here and
+                            // pass it in.
+                            //
+                            // See LLVM bug 25877 for more info.
+                            let child_name =
+                                Path::new(child_name).file_name().unwrap().to_str().unwrap();
+                            let name = CString::new(child_name)?;
+                            let m = llvm::LLVMRustArchiveMemberNew(
+                                ptr::null(),
+                                name.as_ptr(),
+                                Some(child.raw),
+                            );
+                            members.push(m);
+                            strings.push(name);
+                        }
+                    }
+                }
+            }
+
+            let r = llvm::LLVMRustWriteArchive(
+                dst.as_ptr(),
+                members.len() as libc::size_t,
+                members.as_ptr() as *const &_,
+                true,
+                kind,
+            );
+            let ret = if r.into_result().is_err() {
+                let err = llvm::LLVMRustGetLastError();
+                let msg = if err.is_null() {
+                    "failed to write archive".into()
+                } else {
+                    String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+                };
+                Err(io::Error::new(io::ErrorKind::Other, msg))
+            } else {
+                Ok(!members.is_empty())
+            };
+            for member in members {
+                llvm::LLVMRustArchiveMemberFree(member);
+            }
+            ret
+        }
+    }
+}
+
+fn string_to_io_error(s: String) -> io::Error {
+    io::Error::new(io::ErrorKind::Other, format!("bad archive: {s}"))
+}
+
+fn find_binutils_dlltool(sess: &Session) -> OsString {
+    assert!(sess.target.options.is_like_windows && !sess.target.options.is_like_msvc);
+    if let Some(dlltool_path) = &sess.opts.cg.dlltool {
+        return dlltool_path.clone().into_os_string();
+    }
+
+    let tool_name: OsString = if sess.host.options.is_like_windows {
+        // If we're compiling on Windows, always use "dlltool.exe".
+        "dlltool.exe"
+    } else {
+        // On other platforms, use the architecture-specific name.
+        match sess.target.arch.as_ref() {
+            "x86_64" => "x86_64-w64-mingw32-dlltool",
+            "x86" => "i686-w64-mingw32-dlltool",
+            "aarch64" => "aarch64-w64-mingw32-dlltool",
+
+            // For non-standard architectures (e.g., aarch32) fallback to "dlltool".
+            _ => "dlltool",
+        }
+    }
+    .into();
+
+    // NOTE: it's not clear how useful it is to explicitly search PATH.
+    for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
+        let full_path = dir.join(&tool_name);
+        if full_path.is_file() {
+            return full_path.into_os_string();
+        }
+    }
+
+    // The user didn't specify the location of the dlltool binary, and we weren't able
+    // to find the appropriate one on the PATH. Just return the name of the tool
+    // and let the invocation fail with a hopefully useful error message.
+    tool_name
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
new file mode 100644
index 00000000000..06a681c24e6
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -0,0 +1,856 @@
+use crate::back::write::{
+    self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers,
+};
+use crate::errors::{
+    DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro,
+};
+use crate::llvm::{self, build_string};
+use crate::{LlvmCodegenBackend, ModuleLlvm};
+use object::read::archive::ArchiveFile;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
+use rustc_errors::{DiagCtxt, FatalError};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_session::config::{self, CrateType, Lto};
+
+use std::collections::BTreeMap;
+use std::ffi::{CStr, CString};
+use std::fs::File;
+use std::io;
+use std::iter;
+use std::mem::ManuallyDrop;
+use std::path::Path;
+use std::slice;
+use std::sync::Arc;
+
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+    match crate_type {
+        CrateType::Executable
+        | CrateType::Dylib
+        | CrateType::Staticlib
+        | CrateType::Cdylib
+        | CrateType::ProcMacro => true,
+        CrateType::Rlib => false,
+    }
+}
+
+fn prepare_lto(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
+    let export_threshold = match cgcx.lto {
+        // We're just doing LTO for our one crate
+        Lto::ThinLocal => SymbolExportLevel::Rust,
+
+        // We're doing LTO for the entire crate graph
+        Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+        Lto::No => panic!("didn't request LTO but we're doing LTO"),
+    };
+
+    let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+        if info.level.is_below_threshold(export_threshold) || info.used {
+            Some(CString::new(name.as_str()).unwrap())
+        } else {
+            None
+        }
+    };
+    let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+    let mut symbols_below_threshold = {
+        let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+        exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
+    };
+    info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+    // If we're performing LTO for the entire crate graph, then for each of our
+    // upstream dependencies, find the corresponding rlib and load the bitcode
+    // from the archive.
+    //
+    // We save off all the bytecode and LLVM module ids for later processing
+    // with either fat or thin LTO
+    let mut upstream_modules = Vec::new();
+    if cgcx.lto != Lto::ThinLocal {
+        // Make sure we actually can run LTO
+        for crate_type in cgcx.crate_types.iter() {
+            if !crate_type_allows_lto(*crate_type) {
+                dcx.emit_err(LtoDisallowed);
+                return Err(FatalError);
+            } else if *crate_type == CrateType::Dylib {
+                if !cgcx.opts.unstable_opts.dylib_lto {
+                    dcx.emit_err(LtoDylib);
+                    return Err(FatalError);
+                }
+            } else if *crate_type == CrateType::ProcMacro {
+                if !cgcx.opts.unstable_opts.dylib_lto {
+                    dcx.emit_err(LtoProcMacro);
+                    return Err(FatalError);
+                }
+            }
+        }
+
+        if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto {
+            dcx.emit_err(DynamicLinkingWithLTO);
+            return Err(FatalError);
+        }
+
+        for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+            let exported_symbols =
+                cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+            {
+                let _timer =
+                    cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+                symbols_below_threshold
+                    .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+            }
+
+            let archive_data = unsafe {
+                Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib"))
+                    .expect("couldn't map rlib")
+            };
+            let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
+            let obj_files = archive
+                .members()
+                .filter_map(|child| {
+                    child.ok().and_then(|c| {
+                        std::str::from_utf8(c.name()).ok().map(|name| (name.trim(), c))
+                    })
+                })
+                .filter(|&(name, _)| looks_like_rust_object_file(name));
+            for (name, child) in obj_files {
+                info!("adding bitcode from {}", name);
+                match get_bitcode_slice_from_object_data(
+                    child.data(&*archive_data).expect("corrupt rlib"),
+                    cgcx,
+                ) {
+                    Ok(data) => {
+                        let module = SerializedModule::FromRlib(data.to_vec());
+                        upstream_modules.push((module, CString::new(name).unwrap()));
+                    }
+                    Err(e) => {
+                        dcx.emit_err(e);
+                        return Err(FatalError);
+                    }
+                }
+            }
+        }
+    }
+
+    // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to
+    // __llvm_profile_runtime, so we won't know until link time whether this symbol
+    // should have default visibility.
+    symbols_below_threshold.push(CString::new("__llvm_profile_counter_bias").unwrap());
+    Ok((symbols_below_threshold, upstream_modules))
+}
+
+fn get_bitcode_slice_from_object_data<'a>(
+    obj: &'a [u8],
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<&'a [u8], LtoBitcodeFromRlib> {
+    // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
+    // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
+    // the relevant magic strings here and return.
+    if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
+        return Ok(obj);
+    }
+    // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
+    // which in the public API for sections gets treated as part of the section name, but internally
+    // in MachOObjectFile.cpp gets treated separately.
+    let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
+    let mut len = 0;
+    let data = unsafe {
+        llvm::LLVMRustGetSliceFromObjectDataByName(
+            obj.as_ptr(),
+            obj.len(),
+            section_name.as_ptr(),
+            &mut len,
+        )
+    };
+    if !data.is_null() {
+        assert!(len != 0);
+        let bc = unsafe { slice::from_raw_parts(data, len) };
+
+        // `bc` must be a sub-slice of `obj`.
+        assert!(obj.as_ptr() <= bc.as_ptr());
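+        // `bc[bc.len()..bc.len()]` is an empty slice whose pointer is one past the
+        // end of `bc`, so the next assert compares end pointers without forming an
+        // out-of-bounds reference.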
+        assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());
+
+        Ok(bc)
+    } else {
+        assert!(len == 0);
+        Err(LtoBitcodeFromRlib {
+            llvm_err: llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string()),
+        })
+    }
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+    let dcx = cgcx.create_dcx();
+    let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &dcx)?;
+    let symbols_below_threshold =
+        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+    fat_lto(cgcx, &dcx, modules, cached_modules, upstream_modules, &symbols_below_threshold)
+}
+
+/// Performs thin LTO by performing necessary global analysis and returning two
+/// lists, one of the modules that need optimization and another for modules that
+/// can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    modules: Vec<(String, ThinBuffer)>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+    let dcx = cgcx.create_dcx();
+    let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &dcx)?;
+    let symbols_below_threshold =
+        symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+    if cgcx.opts.cg.linker_plugin_lto.enabled() {
+        unreachable!(
+            "We should never reach this case if the LTO step \
+                      is deferred to the linker"
+        );
+    }
+    thin_lto(cgcx, &dcx, modules, upstream_modules, cached_modules, &symbols_below_threshold)
+}
+
+pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
+    let name = module.name;
+    let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
+    (name, buffer)
+}
+
+fn fat_lto(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    modules: Vec<FatLtoInput<LlvmCodegenBackend>>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+    mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+    symbols_below_threshold: &[*const libc::c_char],
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+    let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
+    info!("going for a fat lto");
+
+    // Sort out all our lists of incoming modules into two lists.
+    //
+    // * `serialized_modules` (also an argument to this function) contains all
+    //   modules that are serialized in-memory.
+    // * `in_memory` contains modules which are already parsed and in-memory,
+    //   such as from multi-CGU builds.
+    //
+    // All of `cached_modules` (cached from previous incremental builds) can
+    // immediately go onto the `serialized_modules` modules list and then we can
+    // split the `modules` array into these two lists.
+    let mut in_memory = Vec::new();
+    serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+        info!("pushing cached module {:?}", wp.cgu_name);
+        (buffer, CString::new(wp.cgu_name).unwrap())
+    }));
+    for module in modules {
+        match module {
+            FatLtoInput::InMemory(m) => in_memory.push(m),
+            FatLtoInput::Serialized { name, buffer } => {
+                info!("pushing serialized module {:?}", name);
+                let buffer = SerializedModule::Local(buffer);
+                serialized_modules.push((buffer, CString::new(name).unwrap()));
+            }
+        }
+    }
+
+    // Find the "costliest" module and merge everything into that codegen unit.
+    // All the other modules will be serialized and reparsed into the new
+    // context, so this hopefully avoids serializing and parsing the largest
+    // codegen unit.
+    //
+    // Additionally use a regular module as the base here to ensure that various
+    // file copy operations in the backend work correctly. The only other kind
+    // of module here should be an allocator one, and if your crate is smaller
+    // than the allocator module then the size doesn't really matter anyway.
+    let costliest_module = in_memory
+        .iter()
+        .enumerate()
+        .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+        .map(|(i, module)| {
+            let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+            (cost, i)
+        })
+        .max();
+
+    // If we found a costliest module, we're good to go. Otherwise all our
+    // inputs were serialized, which could happen if, for example, all our
+    // inputs were incrementally reread from the cache and we're just
+    // re-executing the LTO passes. If that's the case, deserialize the first
+    // module and create a linker with it.
+    let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
+        Some((_cost, i)) => in_memory.remove(i),
+        None => {
+            assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+            let (buffer, name) = serialized_modules.remove(0);
+            info!("no in-memory regular modules to choose from, parsing {:?}", name);
+            ModuleCodegen {
+                module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?,
+                name: name.into_string().unwrap(),
+                kind: ModuleKind::Regular,
+            }
+        }
+    };
+    let mut serialized_bitcode = Vec::new();
+    {
+        let (llcx, llmod) = {
+            let llvm = &module.module_llvm;
+            (&llvm.llcx, llvm.llmod())
+        };
+        info!("using {:?} as a base module", module.name);
+
+        // The linking steps below may produce errors and diagnostics within LLVM
+        // which we'd like to handle and print, so set up our diagnostic handlers
+        // (which get unregistered when they go out of scope below).
+        let _handler =
+            DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::LTO);
+
+        // For all other modules we codegened we'll need to link them into our own
+        // bitcode. All modules were codegened in their own LLVM context, however,
+        // and we want to move everything to the same LLVM context. Currently the
+        // way we know of to do that is to serialize them to a string and then parse
+        // them later. Not great but hey, that's why it's "fat" LTO, right?
+        for module in in_memory {
+            let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+            let llmod_id = CString::new(&module.name[..]).unwrap();
+            serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+        }
+        // Sort the modules to ensure we produce deterministic results.
+        serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+        // For all serialized bitcode files we parse them and link them in as we did
+        // above; this is all mostly handled in C++. Like above, though, we don't
+        // know much about the memory management here, so we err on the side of being
+        // safe and persist everything with the original module.
+        let mut linker = Linker::new(llmod);
+        for (bc_decoded, name) in serialized_modules {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
+                    recorder.record_arg(format!("{name:?}"))
+                });
+            info!("linking {:?}", name);
+            let data = bc_decoded.data();
+            linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?;
+            serialized_bitcode.push(bc_decoded);
+        }
+        drop(linker);
+        save_temp_bitcode(cgcx, &module, "lto.input");
+
+        // Internalize everything below threshold to help strip out more modules and such.
+        unsafe {
+            let ptr = symbols_below_threshold.as_ptr();
+            llvm::LLVMRustRunRestrictionPass(
+                llmod,
+                ptr as *const *const libc::c_char,
+                symbols_below_threshold.len() as libc::size_t,
+            );
+            save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+        }
+    }
+
+    Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+}
+
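+/// Thin RAII wrapper around an `llvm::Linker`, used to link serialized modules
+/// into the base module during fat LTO; the underlying linker is freed on drop.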
+pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);
+
+impl<'a> Linker<'a> {
+    pub(crate) fn new(llmod: &'a llvm::Module) -> Self {
+        unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
+    }
+
+    pub(crate) fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
+        unsafe {
+            if llvm::LLVMRustLinkerAdd(
+                self.0,
+                bytecode.as_ptr() as *const libc::c_char,
+                bytecode.len(),
+            ) {
+                Ok(())
+            } else {
+                Err(())
+            }
+        }
+    }
+}
+
+impl Drop for Linker<'_> {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoids this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+///    1. Prepare a "summary" of each LLVM module in question which describes
+///       the values inside, cost of the values, etc.
+///    2. Merge the summaries of all modules in question into one "index"
+///    3. Perform some global analysis on this index
+///    4. For each module, use the index and analysis calculated previously to
+///       perform local transformations on the module, for example inlining
+///       small functions from other modules.
+///    5. Run thin-specific optimization passes over each module, and then code
+///       generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, the function here is designed specifically to
+/// calculate just the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
+fn thin_lto(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    modules: Vec<(String, ThinBuffer)>,
+    serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+    symbols_below_threshold: &[*const libc::c_char],
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+    let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
+    unsafe {
+        info!("going for that thin, thin LTO");
+
+        let green_modules: FxHashMap<_, _> =
+            cached_modules.iter().map(|(_, wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+
+        let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+        let mut thin_buffers = Vec::with_capacity(modules.len());
+        let mut module_names = Vec::with_capacity(full_scope_len);
+        let mut thin_modules = Vec::with_capacity(full_scope_len);
+
+        for (i, (name, buffer)) in modules.into_iter().enumerate() {
+            info!("local module: {} - {}", i, name);
+            let cname = CString::new(name.as_bytes()).unwrap();
+            thin_modules.push(llvm::ThinLTOModule {
+                identifier: cname.as_ptr(),
+                data: buffer.data().as_ptr(),
+                len: buffer.data().len(),
+            });
+            thin_buffers.push(buffer);
+            module_names.push(cname);
+        }
+
+        // FIXME: All upstream crates are deserialized internally in the
+        //        function below to extract their summary and modules. Note that
+        //        unlike the loop above we *must* decode and/or read something
+        //        here as these are all just serialized files on disk. An
+        //        improvement, however, to make here would be to store the
+        //        module summary separately from the actual module itself. Right
+        //        now this is stored in one large bitcode file, and the entire
+        //        file is deflate-compressed. We could try to bypass some of the
+        //        decompression by storing the index uncompressed and only
+        //        lazily decompressing the bytecode if necessary.
+        //
+        //        Note that truly taking advantage of this optimization will
+        //        likely be further down the road. We'd have to implement
+        //        incremental ThinLTO first where we could actually avoid
+        //        looking at upstream modules' contents entirely sometimes (we must
+        //        always unconditionally look at the index).
+        let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
+
+        let cached_modules =
+            cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
+
+        for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
+            info!("upstream or cached module {:?}", name);
+            thin_modules.push(llvm::ThinLTOModule {
+                identifier: name.as_ptr(),
+                data: module.data().as_ptr(),
+                len: module.data().len(),
+            });
+            serialized.push(module);
+            module_names.push(name);
+        }
+
+        // Sanity check
+        assert_eq!(thin_modules.len(), module_names.len());
+
+        // Delegate to the C++ bindings to create some data here. Once this is a
+        // tried-and-true interface we may wish to try to upstream some of this
+        // to LLVM itself; right now we reimplement a lot of what they do
+        // upstream...
+        let data = llvm::LLVMRustCreateThinLTOData(
+            thin_modules.as_ptr(),
+            thin_modules.len() as u32,
+            symbols_below_threshold.as_ptr(),
+            symbols_below_threshold.len() as u32,
+        )
+        .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?;
+
+        let data = ThinData(data);
+
+        info!("thin LTO data created");
+
+        let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+            cgcx.incr_comp_session_dir
+        {
+            let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+            // If the previous file was deleted, or we get an IO error
+            // reading the file, then we'll just use `None` as the
+            // prev_key_map, which will force the code to be recompiled.
+            let prev =
+                if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+            let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+            (Some(path), prev, curr)
+        } else {
+            // If we don't compile incrementally, we don't need to load the
+            // import data from LLVM.
+            assert!(green_modules.is_empty());
+            let curr = ThinLTOKeysMap::default();
+            (None, None, curr)
+        };
+        info!("thin LTO cache key map loaded");
+        info!("prev_key_map: {:#?}", prev_key_map);
+        info!("curr_key_map: {:#?}", curr_key_map);
+
+        // Throw our data in an `Arc` as we'll be sharing it across threads. We
+        // also put all memory referenced by the C++ data (buffers, ids, etc)
+        // into the arc as well. After this we'll create a thin module
+        // codegen per module in this data.
+        let shared = Arc::new(ThinShared {
+            data,
+            thin_buffers,
+            serialized_modules: serialized,
+            module_names,
+        });
+
+        let mut copy_jobs = vec![];
+        let mut opt_jobs = vec![];
+
+        info!("checking which modules can be-reused and which have to be re-optimized.");
+        for (module_index, module_name) in shared.module_names.iter().enumerate() {
+            let module_name = module_name_to_str(module_name);
+            if let (Some(prev_key_map), true) =
+                (prev_key_map.as_ref(), green_modules.contains_key(module_name))
+            {
+                assert!(cgcx.incr_comp_session_dir.is_some());
+
+                // If a module exists in both the current and the previous session,
+                // and has the same LTO cache key in both sessions, then we can re-use it
+                if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
+                    let work_product = green_modules[module_name].clone();
+                    copy_jobs.push(work_product);
+                    info!(" - {}: re-used", module_name);
+                    assert!(cgcx.incr_comp_session_dir.is_some());
+                    continue;
+                }
+            }
+
+            info!(" - {}: re-compiled", module_name);
+            opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
+                shared: shared.clone(),
+                idx: module_index,
+            }));
+        }
+
+        // Save the current ThinLTO import information for the next compilation
+        // session, overwriting the previous serialized data (if any).
+        if let Some(path) = key_map_path {
+            if let Err(err) = curr_key_map.save_to_file(&path) {
+                return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }));
+            }
+        }
+
+        Ok((opt_jobs, copy_jobs))
+    }
+}
+
+pub(crate) fn run_pass_manager(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    module: &mut ModuleCodegen<ModuleLlvm>,
+    thin: bool,
+) -> Result<(), FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name);
+    let config = cgcx.config(module.kind);
+
+    // Now we have one massive module inside of llmod. Time to run the
+    // LTO-specific optimization passes that LLVM provides.
+    //
+    // This code is based off the code found in llvm's LTO code generator:
+    //      llvm/lib/LTO/LTOCodeGenerator.cpp
+    debug!("running the pass manager");
+    unsafe {
+        if !llvm::LLVMRustHasModuleFlag(
+            module.module_llvm.llmod(),
+            "LTOPostLink".as_ptr().cast(),
+            11,
+        ) {
+            llvm::LLVMRustAddModuleFlag(
+                module.module_llvm.llmod(),
+                llvm::LLVMModFlagBehavior::Error,
+                c"LTOPostLink".as_ptr().cast(),
+                1,
+            );
+        }
+        let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+        let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+        write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage)?;
+    }
+    debug!("lto done");
+    Ok(())
+}
+
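+/// An owned, serialized copy of an LLVM module (see `LLVMRustModuleBufferCreate`),
+/// freed when dropped.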
+pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
+
+unsafe impl Send for ModuleBuffer {}
+unsafe impl Sync for ModuleBuffer {}
+
+impl ModuleBuffer {
+    pub fn new(m: &llvm::Module) -> ModuleBuffer {
+        ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
+    }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
+    fn data(&self) -> &[u8] {
+        unsafe {
+            let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
+            let len = llvm::LLVMRustModuleBufferLen(self.0);
+            slice::from_raw_parts(ptr, len)
+        }
+    }
+}
+
+impl Drop for ModuleBuffer {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+pub struct ThinData(&'static mut llvm::ThinLTOData);
+
+unsafe impl Send for ThinData {}
+unsafe impl Sync for ThinData {}
+
+impl Drop for ThinData {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);
+
+unsafe impl Send for ThinBuffer {}
+unsafe impl Sync for ThinBuffer {}
+
+impl ThinBuffer {
+    pub fn new(m: &llvm::Module, is_thin: bool) -> ThinBuffer {
+        unsafe {
+            let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin);
+            ThinBuffer(buffer)
+        }
+    }
+}
+
+impl ThinBufferMethods for ThinBuffer {
+    fn data(&self) -> &[u8] {
+        unsafe {
+            let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
+            let len = llvm::LLVMRustThinLTOBufferLen(self.0);
+            slice::from_raw_parts(ptr, len)
+        }
+    }
+}
+
+impl Drop for ThinBuffer {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
+        }
+    }
+}
+
+pub unsafe fn optimize_thin_module(
+    thin_module: ThinModule<LlvmCodegenBackend>,
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+    let dcx = cgcx.create_dcx();
+
+    let module_name = &thin_module.shared.module_names[thin_module.idx];
+    let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
+    let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;
+
+    // Right now the implementation we've got only works over serialized
+    // modules, so we create a fresh new LLVM context and parse the module
+    // into that context. One day, however, we may still do this for upstream
+    // crates, but for locally codegened modules we may be able to reuse
+    // that LLVM Context and Module.
+    let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+    let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &dcx)? as *const _;
+    let mut module = ModuleCodegen {
+        module_llvm: ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) },
+        name: thin_module.name().to_string(),
+        kind: ModuleKind::Regular,
+    };
+    {
+        let target = &*module.module_llvm.tm;
+        let llmod = module.module_llvm.llmod();
+        save_temp_bitcode(cgcx, &module, "thin-lto-input");
+
+        // Up next comes the per-module local analyses that we do for Thin LTO.
+        // Each of these functions is basically copied from the LLVM
+        // implementation and then tailored to suit this implementation. Ideally
+        // each of these would be supported by upstream LLVM but that's perhaps
+        // a patch for another day!
+        //
+        // You can find some more comments about these functions in the LLVM
+        // bindings we've got (currently `PassWrapper.cpp`)
+        {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
+        }
+
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
+        }
+
+        {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
+        }
+
+        {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
+            if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
+                return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule));
+            }
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
+        }
+
+        // Alright now that we've done everything related to the ThinLTO
+        // analysis it's time to run some optimizations! Here we use the same
+        // `run_pass_manager` as the "fat" LTO above except that we tell it to
+        // populate a thin-specific pass manager, which presumably LLVM treats a
+        // little differently.
+        {
+            info!("running thin lto passes over {}", module.name);
+            run_pass_manager(cgcx, &dcx, &mut module, true)?;
+            save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
+        }
+    }
+    Ok(module)
+}
+
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
+#[derive(Debug, Default)]
+pub struct ThinLTOKeysMap {
+    // key = llvm name of importing module, value = LLVM cache key
+    keys: BTreeMap<String, String>,
+}
+
+impl ThinLTOKeysMap {
+    fn save_to_file(&self, path: &Path) -> io::Result<()> {
+        use std::io::Write;
+        let file = File::create(path)?;
+        let mut writer = io::BufWriter::new(file);
+        // The entries are loaded back into a hash map in `load_from_file()`, so
+        // the order in which we write them to file here does not matter.
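+        // Each line has the form `<module name> <cache key>`, which is exactly what
+        // `load_from_file()` parses back by splitting on a single space.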
+        for (module, key) in &self.keys {
+            writeln!(writer, "{module} {key}")?;
+        }
+        Ok(())
+    }
+
+    fn load_from_file(path: &Path) -> io::Result<Self> {
+        use std::io::BufRead;
+        let mut keys = BTreeMap::default();
+        let file = File::open(path)?;
+        for line in io::BufReader::new(file).lines() {
+            let line = line?;
+            let mut split = line.split(' ');
+            let module = split.next().unwrap();
+            let key = split.next().unwrap();
+            assert_eq!(split.next(), None, "Expected two space-separated values, found {line:?}");
+            keys.insert(module.to_string(), key.to_string());
+        }
+        Ok(Self { keys })
+    }
+
+    fn from_thin_lto_modules(
+        data: &ThinData,
+        modules: &[llvm::ThinLTOModule],
+        names: &[CString],
+    ) -> Self {
+        let keys = iter::zip(modules, names)
+            .map(|(module, name)| {
+                let key = build_string(|rust_str| unsafe {
+                    llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+                })
+                .expect("Invalid ThinLTO module key");
+                (name.clone().into_string().unwrap(), key)
+            })
+            .collect();
+        Self { keys }
+    }
+}
+
+fn module_name_to_str(c_str: &CStr) -> &str {
+    c_str.to_str().unwrap_or_else(|e| {
+        bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
+    })
+}
+
+pub fn parse_module<'a>(
+    cx: &'a llvm::Context,
+    name: &CStr,
+    data: &[u8],
+    dcx: &DiagCtxt,
+) -> Result<&'a llvm::Module, FatalError> {
+    unsafe {
+        llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr())
+            .ok_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode))
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
new file mode 100644
index 00000000000..28a88dd2efe
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs
@@ -0,0 +1,103 @@
+use std::{
+    ffi::{c_char, CStr},
+    marker::PhantomData,
+    ops::Deref,
+    ptr::NonNull,
+};
+
+use rustc_data_structures::small_c_str::SmallCStr;
+
+use crate::{errors::LlvmError, llvm};
+
+/// Responsible for safely creating and disposing of an `llvm::TargetMachine` via FFI functions.
+/// Not cloneable as there is no clone function for llvm::TargetMachine.
+#[repr(transparent)]
+pub struct OwnedTargetMachine {
+    tm_unique: NonNull<llvm::TargetMachine>,
+    phantom: PhantomData<llvm::TargetMachine>,
+}
+
+impl OwnedTargetMachine {
+    pub fn new(
+        triple: &CStr,
+        cpu: &CStr,
+        features: &CStr,
+        abi: &CStr,
+        model: llvm::CodeModel,
+        reloc: llvm::RelocModel,
+        level: llvm::CodeGenOptLevel,
+        use_soft_fp: bool,
+        function_sections: bool,
+        data_sections: bool,
+        unique_section_names: bool,
+        trap_unreachable: bool,
+        singletree: bool,
+        asm_comments: bool,
+        emit_stack_size_section: bool,
+        relax_elf_relocations: bool,
+        use_init_array: bool,
+        split_dwarf_file: &CStr,
+        output_obj_file: &CStr,
+        debug_info_compression: &CStr,
+        use_emulated_tls: bool,
+        args_cstr_buff: &[u8],
+    ) -> Result<Self, LlvmError<'static>> {
+        assert!(args_cstr_buff.len() > 0);
+        assert!(
+            *args_cstr_buff.last().unwrap() == 0,
+            "The last character must be a null terminator."
+        );
+
+        // SAFETY: llvm::LLVMRustCreateTargetMachine copies pointed to data
+        let tm_ptr = unsafe {
+            llvm::LLVMRustCreateTargetMachine(
+                triple.as_ptr(),
+                cpu.as_ptr(),
+                features.as_ptr(),
+                abi.as_ptr(),
+                model,
+                reloc,
+                level,
+                use_soft_fp,
+                function_sections,
+                data_sections,
+                unique_section_names,
+                trap_unreachable,
+                singletree,
+                asm_comments,
+                emit_stack_size_section,
+                relax_elf_relocations,
+                use_init_array,
+                split_dwarf_file.as_ptr(),
+                output_obj_file.as_ptr(),
+                debug_info_compression.as_ptr(),
+                use_emulated_tls,
+                args_cstr_buff.as_ptr() as *const c_char,
+                args_cstr_buff.len(),
+            )
+        };
+
+        NonNull::new(tm_ptr)
+            .map(|tm_unique| Self { tm_unique, phantom: PhantomData })
+            .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) })
+    }
+}
+
+impl Deref for OwnedTargetMachine {
+    type Target = llvm::TargetMachine;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        unsafe { self.tm_unique.as_ref() }
+    }
+}
+
+impl Drop for OwnedTargetMachine {
+    fn drop(&mut self) {
+        // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
+        // OwnedTargetMachine is not copyable so there is no double free or use after free
+        unsafe {
+            llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs
new file mode 100644
index 00000000000..2741f7d848e
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs
@@ -0,0 +1,58 @@
+use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId};
+use rustc_data_structures::profiling::{SelfProfiler, TimingGuard};
+use std::ffi::{c_void, CStr};
+use std::os::raw::c_char;
+use std::sync::Arc;
+
+fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId {
+    let pass_name = profiler.get_or_alloc_cached_string(pass_name);
+    let mut components = vec![StringComponent::Ref(pass_name)];
+    // Handle the fact that LazyCallGraph::SCC is a comma-separated list within parentheses.
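+    // For example (illustrative): an ir_name of "(foo, bar)" is trimmed to "foo, bar" and each
+    // part is demangled and pushed below as a separator-prefixed component.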
+    let parentheses: &[_] = &['(', ')'];
+    let trimmed = ir_name.trim_matches(parentheses);
+    for part in trimmed.split(", ") {
+        let demangled_ir_name = rustc_demangle::demangle(part).to_string();
+        let ir_name = profiler.get_or_alloc_cached_string(demangled_ir_name);
+        components.push(StringComponent::Value(SEPARATOR_BYTE));
+        components.push(StringComponent::Ref(ir_name));
+    }
+    EventId::from_label(profiler.alloc_string(components.as_slice()))
+}
+
+pub struct LlvmSelfProfiler<'a> {
+    profiler: Arc<SelfProfiler>,
+    stack: Vec<TimingGuard<'a>>,
+    llvm_pass_event_kind: StringId,
+}
+
+impl<'a> LlvmSelfProfiler<'a> {
+    pub fn new(profiler: Arc<SelfProfiler>) -> Self {
+        let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass");
+        Self { profiler, stack: Vec::default(), llvm_pass_event_kind }
+    }
+
+    fn before_pass_callback(&'a mut self, pass_name: &str, ir_name: &str) {
+        let event_id = llvm_args_to_string_id(&self.profiler, pass_name, ir_name);
+
+        self.stack.push(TimingGuard::start(&self.profiler, self.llvm_pass_event_kind, event_id));
+    }
+    fn after_pass_callback(&mut self) {
+        self.stack.pop();
+    }
+}
+
+pub unsafe extern "C" fn selfprofile_before_pass_callback(
+    llvm_self_profiler: *mut c_void,
+    pass_name: *const c_char,
+    ir_name: *const c_char,
+) {
+    let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+    let pass_name = CStr::from_ptr(pass_name).to_str().expect("valid UTF-8");
+    let ir_name = CStr::from_ptr(ir_name).to_str().expect("valid UTF-8");
+    llvm_self_profiler.before_pass_callback(pass_name, ir_name);
+}
+
+pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) {
+    let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+    llvm_self_profiler.after_pass_callback();
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
new file mode 100644
index 00000000000..031bbd63361
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -0,0 +1,1122 @@
+use crate::back::lto::ThinBuffer;
+use crate::back::owned_target_machine::OwnedTargetMachine;
+use crate::back::profiling::{
+    selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
+};
+use crate::base;
+use crate::common;
+use crate::errors::{
+    CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
+    WithLlvmError, WriteBytecode,
+};
+use crate::llvm::{self, DiagnosticInfo, PassManager};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::LlvmCodegenBackend;
+use crate::ModuleLlvm;
+use llvm::{
+    LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols,
+};
+use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::write::{
+    BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
+    TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{DiagCtxt, FatalError, Level};
+use rustc_fs_util::{link_or_copy, path_to_c_string};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::InnerSpan;
+use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo, TlsModel};
+
+use crate::llvm::diagnostic::OptimizationDiagnosticKind;
+use libc::{c_char, c_int, c_void, size_t};
+use std::ffi::CString;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::slice;
+use std::str;
+use std::sync::Arc;
+
+pub fn llvm_err<'a>(dcx: &rustc_errors::DiagCtxt, err: LlvmError<'a>) -> FatalError {
+    match llvm::last_error() {
+        Some(llvm_err) => dcx.emit_almost_fatal(WithLlvmError(err, llvm_err)),
+        None => dcx.emit_almost_fatal(err),
+    }
+}
+
+pub fn write_output_file<'ll>(
+    dcx: &rustc_errors::DiagCtxt,
+    target: &'ll llvm::TargetMachine,
+    pm: &llvm::PassManager<'ll>,
+    m: &'ll llvm::Module,
+    output: &Path,
+    dwo_output: Option<&Path>,
+    file_type: llvm::FileType,
+    self_profiler_ref: &SelfProfilerRef,
+) -> Result<(), FatalError> {
+    debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
+    unsafe {
+        let output_c = path_to_c_string(output);
+        let dwo_output_c;
+        let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
+            dwo_output_c = path_to_c_string(dwo_output);
+            dwo_output_c.as_ptr()
+        } else {
+            std::ptr::null()
+        };
+        let result = llvm::LLVMRustWriteOutputFile(
+            target,
+            pm,
+            m,
+            output_c.as_ptr(),
+            dwo_output_ptr,
+            file_type,
+        );
+
+        // Record artifact sizes for self-profiling
+        if result == llvm::LLVMRustResult::Success {
+            let artifact_kind = match file_type {
+                llvm::FileType::ObjectFile => "object_file",
+                llvm::FileType::AssemblyFile => "assembly_file",
+            };
+            record_artifact_size(self_profiler_ref, artifact_kind, output);
+            if let Some(dwo_file) = dwo_output {
+                record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
+            }
+        }
+
+        result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output }))
+    }
+}
+
+pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine {
+    let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None };
+    // Can't use query system here quite yet because this function is invoked before the query
+    // system/tcx is set up.
+    let features = llvm_util::global_llvm_features(sess, false);
+    target_machine_factory(sess, config::OptLevel::No, &features)(config)
+        .unwrap_or_else(|err| llvm_err(sess.dcx(), err).raise())
+}
+
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine {
+    let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
+        tcx.output_filenames(()).split_dwarf_path(
+            tcx.sess.split_debuginfo(),
+            tcx.sess.opts.unstable_opts.split_dwarf_kind,
+            Some(mod_name),
+        )
+    } else {
+        None
+    };
+
+    let output_obj_file =
+        Some(tcx.output_filenames(()).temp_path(OutputType::Object, Some(mod_name)));
+    let config = TargetMachineFactoryConfig { split_dwarf_file, output_obj_file };
+
+    target_machine_factory(
+        tcx.sess,
+        tcx.backend_optimization_level(()),
+        tcx.global_backend_features(()),
+    )(config)
+    .unwrap_or_else(|err| llvm_err(tcx.dcx(), err).raise())
+}
+
+pub fn to_llvm_opt_settings(
+    cfg: config::OptLevel,
+) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
+    use self::config::OptLevel::*;
+    match cfg {
+        No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
+        Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
+        Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
+        Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
+        Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
+        SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
+    }
+}
+
+fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
+    use config::OptLevel::*;
+    match cfg {
+        No => llvm::PassBuilderOptLevel::O0,
+        Less => llvm::PassBuilderOptLevel::O1,
+        Default => llvm::PassBuilderOptLevel::O2,
+        Aggressive => llvm::PassBuilderOptLevel::O3,
+        Size => llvm::PassBuilderOptLevel::Os,
+        SizeMin => llvm::PassBuilderOptLevel::Oz,
+    }
+}
+
+fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
+    match relocation_model {
+        RelocModel::Static => llvm::RelocModel::Static,
+        // LLVM doesn't have a PIE relocation model; it represents PIE as PIC with an extra attribute.
+        RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
+        RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
+        RelocModel::Ropi => llvm::RelocModel::ROPI,
+        RelocModel::Rwpi => llvm::RelocModel::RWPI,
+        RelocModel::RopiRwpi => llvm::RelocModel::ROPI_RWPI,
+    }
+}
+
+pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
+    match code_model {
+        Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
+        Some(CodeModel::Small) => llvm::CodeModel::Small,
+        Some(CodeModel::Kernel) => llvm::CodeModel::Kernel,
+        Some(CodeModel::Medium) => llvm::CodeModel::Medium,
+        Some(CodeModel::Large) => llvm::CodeModel::Large,
+        None => llvm::CodeModel::None,
+    }
+}
+
+pub fn target_machine_factory(
+    sess: &Session,
+    optlvl: config::OptLevel,
+    target_features: &[String],
+) -> TargetMachineFactoryFn<LlvmCodegenBackend> {
+    let reloc_model = to_llvm_relocation_model(sess.relocation_model());
+
+    let (opt_level, _) = to_llvm_opt_settings(optlvl);
+    let use_softfp = sess.opts.cg.soft_float;
+
+    let ffunction_sections =
+        sess.opts.unstable_opts.function_sections.unwrap_or(sess.target.function_sections);
+    let fdata_sections = ffunction_sections;
+    let funique_section_names = !sess.opts.unstable_opts.no_unique_section_names;
+
+    let code_model = to_llvm_code_model(sess.code_model());
+
+    let mut singlethread = sess.target.singlethread;
+
+    // On the wasm target, once the `atomics` feature is enabled we are no
+    // longer single-threaded, so we don't want LLVM to lower atomic
+    // operations to single-threaded operations.
+    if singlethread && sess.target.is_like_wasm && sess.target_features.contains(&sym::atomics) {
+        singlethread = false;
+    }
+
+    let triple = SmallCStr::new(&sess.target.llvm_target);
+    let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
+    let features = CString::new(target_features.join(",")).unwrap();
+    let abi = SmallCStr::new(&sess.target.llvm_abiname);
+    let trap_unreachable =
+        sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
+    let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
+
+    let asm_comments = sess.opts.unstable_opts.asm_comments;
+    let relax_elf_relocations =
+        sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
+
+    let use_init_array =
+        !sess.opts.unstable_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
+
+    let path_mapping = sess.source_map().path_mapping().clone();
+
+    let use_emulated_tls = matches!(sess.tls_model(), TlsModel::Emulated);
+
+    // Copy the exe path, followed by the expanded args, all into one buffer,
+    // null-terminating each so we can use them as null-terminated strings.
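+    // For example (illustrative): an exe path of "/usr/bin/rustc" and args ["--crate-name", "foo"]
+    // become b"/usr/bin/rustc\0--crate-name\0foo\0".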
+    let args_cstr_buff = {
+        let mut args_cstr_buff: Vec<u8> = Vec::new();
+        let exe_path = std::env::current_exe().unwrap_or_default();
+        let exe_path_str = exe_path.into_os_string().into_string().unwrap_or_default();
+
+        args_cstr_buff.extend_from_slice(exe_path_str.as_bytes());
+        args_cstr_buff.push(0);
+
+        for arg in sess.expanded_args.iter() {
+            args_cstr_buff.extend_from_slice(arg.as_bytes());
+            args_cstr_buff.push(0);
+        }
+
+        args_cstr_buff
+    };
+
+    let debuginfo_compression = sess.opts.debuginfo_compression.to_string();
+    match sess.opts.debuginfo_compression {
+        rustc_session::config::DebugInfoCompression::Zlib => {
+            if !unsafe { LLVMRustLLVMHasZlibCompressionForDebugSymbols() } {
+                sess.dcx().emit_warn(UnknownCompression { algorithm: "zlib" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::Zstd => {
+            if !unsafe { LLVMRustLLVMHasZstdCompressionForDebugSymbols() } {
+                sess.dcx().emit_warn(UnknownCompression { algorithm: "zstd" });
+            }
+        }
+        rustc_session::config::DebugInfoCompression::None => {}
+    };
+    let debuginfo_compression = SmallCStr::new(&debuginfo_compression);
+
+    let should_prefer_remapped_for_split_debuginfo_paths =
+        sess.should_prefer_remapped_for_split_debuginfo_paths();
+
+    Arc::new(move |config: TargetMachineFactoryConfig| {
+        let path_to_cstring_helper = |path: Option<PathBuf>| -> CString {
+            let path = path.unwrap_or_default();
+            let path = if should_prefer_remapped_for_split_debuginfo_paths {
+                path_mapping.map_prefix(path).0
+            } else {
+                path.into()
+            };
+            CString::new(path.to_str().unwrap()).unwrap()
+        };
+
+        let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file);
+        let output_obj_file = path_to_cstring_helper(config.output_obj_file);
+
+        OwnedTargetMachine::new(
+            &triple,
+            &cpu,
+            &features,
+            &abi,
+            code_model,
+            reloc_model,
+            opt_level,
+            use_softfp,
+            ffunction_sections,
+            fdata_sections,
+            funique_section_names,
+            trap_unreachable,
+            singlethread,
+            asm_comments,
+            emit_stack_size_section,
+            relax_elf_relocations,
+            use_init_array,
+            &split_dwarf_file,
+            &output_obj_file,
+            &debuginfo_compression,
+            use_emulated_tls,
+            &args_cstr_buff,
+        )
+    })
+}
+
+pub(crate) fn save_temp_bitcode(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    module: &ModuleCodegen<ModuleLlvm>,
+    name: &str,
+) {
+    if !cgcx.save_temps {
+        return;
+    }
+    unsafe {
+        let ext = format!("{name}.bc");
+        let cgu = Some(&module.name[..]);
+        let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+        let cstr = path_to_c_string(&path);
+        let llmod = module.module_llvm.llmod();
+        llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+    }
+}
+
+/// In what context is a diagnostic handler being attached to a codegen unit?
+pub enum CodegenDiagnosticsStage {
+    /// Prelink optimization stage.
+    Opt,
+    /// LTO/ThinLTO postlink optimization stage.
+    LTO,
+    /// Code generation.
+    Codegen,
+}
+
+pub struct DiagnosticHandlers<'a> {
+    data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a DiagCtxt),
+    llcx: &'a llvm::Context,
+    old_handler: Option<&'a llvm::DiagnosticHandler>,
+}
+
+impl<'a> DiagnosticHandlers<'a> {
+    pub fn new(
+        cgcx: &'a CodegenContext<LlvmCodegenBackend>,
+        dcx: &'a DiagCtxt,
+        llcx: &'a llvm::Context,
+        module: &ModuleCodegen<ModuleLlvm>,
+        stage: CodegenDiagnosticsStage,
+    ) -> Self {
+        let remark_passes_all: bool;
+        let remark_passes: Vec<CString>;
+        match &cgcx.remark {
+            Passes::All => {
+                remark_passes_all = true;
+                remark_passes = Vec::new();
+            }
+            Passes::Some(passes) => {
+                remark_passes_all = false;
+                remark_passes =
+                    passes.iter().map(|name| CString::new(name.as_str()).unwrap()).collect();
+            }
+        };
+        let remark_passes: Vec<*const c_char> =
+            remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+        let remark_file = cgcx
+            .remark_dir
+            .as_ref()
+            // Use the .opt.yaml file suffix, which is supported by LLVM's opt-viewer.
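+            // e.g. (illustrative): `<remark_dir>/my_module.lto.opt.yaml` for the LTO stage.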
+            .map(|dir| {
+                let stage_suffix = match stage {
+                    CodegenDiagnosticsStage::Codegen => "codegen",
+                    CodegenDiagnosticsStage::Opt => "opt",
+                    CodegenDiagnosticsStage::LTO => "lto",
+                };
+                dir.join(format!("{}.{stage_suffix}.opt.yaml", module.name))
+            })
+            .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok()));
+
+        let pgo_available = cgcx.opts.cg.profile_use.is_some();
+        let data = Box::into_raw(Box::new((cgcx, dcx)));
+        unsafe {
+            let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
+            llvm::LLVMRustContextConfigureDiagnosticHandler(
+                llcx,
+                diagnostic_handler,
+                data.cast(),
+                remark_passes_all,
+                remark_passes.as_ptr(),
+                remark_passes.len(),
+                // The `as_ref()` is important here, otherwise the `CString` will be dropped
+                // too soon!
+                remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()),
+                pgo_available,
+            );
+            DiagnosticHandlers { data, llcx, old_handler }
+        }
+    }
+}
+
+impl<'a> Drop for DiagnosticHandlers<'a> {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
+            drop(Box::from_raw(self.data));
+        }
+    }
+}
+
+fn report_inline_asm(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    msg: String,
+    level: llvm::DiagnosticLevel,
+    mut cookie: u64,
+    source: Option<(String, Vec<InnerSpan>)>,
+) {
+    // During LTO we may get srcloc values from other crates, which are invalid
+    // since they use a different source map. To be safe we just suppress these
+    // in LTO builds.
+    if matches!(cgcx.lto, Lto::Fat | Lto::Thin) {
+        cookie = 0;
+    }
+    let level = match level {
+        llvm::DiagnosticLevel::Error => Level::Error,
+        llvm::DiagnosticLevel::Warning => Level::Warning,
+        llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
+    };
+    cgcx.diag_emitter.inline_asm_error(cookie.try_into().unwrap(), msg, level, source);
+}
+
+unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
+    if user.is_null() {
+        return;
+    }
+    let (cgcx, dcx) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &DiagCtxt));
+
+    match llvm::diagnostic::Diagnostic::unpack(info) {
+        llvm::diagnostic::InlineAsm(inline) => {
+            report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source);
+        }
+
+        llvm::diagnostic::Optimization(opt) => {
+            dcx.emit_note(FromLlvmOptimizationDiag {
+                filename: &opt.filename,
+                line: opt.line,
+                column: opt.column,
+                pass_name: &opt.pass_name,
+                kind: match opt.kind {
+                    OptimizationDiagnosticKind::OptimizationRemark => "success",
+                    OptimizationDiagnosticKind::OptimizationMissed
+                    | OptimizationDiagnosticKind::OptimizationFailure => "missed",
+                    OptimizationDiagnosticKind::OptimizationAnalysis
+                    | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
+                    | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
+                    OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
+                },
+                message: &opt.message,
+            });
+        }
+        llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
+            let message = llvm::build_string(|s| {
+                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+            })
+            .expect("non-UTF8 diagnostic");
+            dcx.emit_warn(FromLlvmDiag { message });
+        }
+        llvm::diagnostic::Unsupported(diagnostic_ref) => {
+            let message = llvm::build_string(|s| {
+                llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+            })
+            .expect("non-UTF8 diagnostic");
+            dcx.emit_err(FromLlvmDiag { message });
+        }
+        llvm::diagnostic::UnknownDiagnostic(..) => {}
+    }
+}
+
+fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
+    match config.pgo_gen {
+        SwitchWithOptPath::Enabled(ref opt_dir_path) => {
+            let path = if let Some(dir_path) = opt_dir_path {
+                dir_path.join("default_%m.profraw")
+            } else {
+                PathBuf::from("default_%m.profraw")
+            };
+
+            Some(CString::new(format!("{}", path.display())).unwrap())
+        }
+        SwitchWithOptPath::Disabled => None,
+    }
+}
+
+fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
+    config
+        .pgo_use
+        .as_ref()
+        .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
+    config
+        .pgo_sample_use
+        .as_ref()
+        .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+fn get_instr_profile_output_path(config: &ModuleConfig) -> Option<CString> {
+    config.instrument_coverage.then(|| CString::new("default_%m_%p.profraw").unwrap())
+}
+
+pub(crate) unsafe fn llvm_optimize(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    module: &ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+    opt_level: config::OptLevel,
+    opt_stage: llvm::OptStage,
+) -> Result<(), FatalError> {
+    let unroll_loops =
+        opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
+    let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
+    let pgo_gen_path = get_pgo_gen_path(config);
+    let pgo_use_path = get_pgo_use_path(config);
+    let pgo_sample_use_path = get_pgo_sample_use_path(config);
+    let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+    let instr_profile_output_path = get_instr_profile_output_path(config);
+    let sanitize_dataflow_abilist: Vec<_> = config
+        .sanitizer_dataflow_abilist
+        .iter()
+        .map(|file| CString::new(file.as_str()).unwrap())
+        .collect();
+    let sanitize_dataflow_abilist_ptrs: Vec<_> =
+        sanitize_dataflow_abilist.iter().map(|file| file.as_ptr()).collect();
+    // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
+    let sanitizer_options = if !is_lto {
+        Some(llvm::SanitizerOptions {
+            sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
+            sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+            sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI),
+            sanitize_dataflow: config.sanitizer.contains(SanitizerSet::DATAFLOW),
+            sanitize_dataflow_abilist: sanitize_dataflow_abilist_ptrs.as_ptr(),
+            sanitize_dataflow_abilist_len: sanitize_dataflow_abilist_ptrs.len(),
+            sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI),
+            sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
+            sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
+            sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
+            sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
+            sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
+            sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
+            sanitize_kernel_address: config.sanitizer.contains(SanitizerSet::KERNELADDRESS),
+            sanitize_kernel_address_recover: config
+                .sanitizer_recover
+                .contains(SanitizerSet::KERNELADDRESS),
+        })
+    } else {
+        None
+    };
+
+    let mut llvm_profiler = cgcx
+        .prof
+        .llvm_recording_enabled()
+        .then(|| LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()));
+
+    let llvm_selfprofiler =
+        llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());
+
+    let extra_passes = if !is_lto { config.passes.join(",") } else { "".to_string() };
+
+    let llvm_plugins = config.llvm_plugins.join(",");
+
+    // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
+    // We would have to add upstream support for this first, before we can support
+    // config.inline_threshold and our more aggressive default thresholds.
+    let result = llvm::LLVMRustOptimize(
+        module.module_llvm.llmod(),
+        &*module.module_llvm.tm,
+        to_pass_builder_opt_level(opt_level),
+        opt_stage,
+        cgcx.opts.cg.linker_plugin_lto.enabled(),
+        config.no_prepopulate_passes,
+        config.verify_llvm_ir,
+        using_thin_buffers,
+        config.merge_functions,
+        unroll_loops,
+        config.vectorize_slp,
+        config.vectorize_loop,
+        config.no_builtins,
+        config.emit_lifetime_markers,
+        sanitizer_options.as_ref(),
+        pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        config.instrument_coverage,
+        instr_profile_output_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        config.instrument_gcov,
+        pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+        config.debug_info_for_profiling,
+        llvm_selfprofiler,
+        selfprofile_before_pass_callback,
+        selfprofile_after_pass_callback,
+        extra_passes.as_ptr().cast(),
+        extra_passes.len(),
+        llvm_plugins.as_ptr().cast(),
+        llvm_plugins.len(),
+    );
+    result.into_result().map_err(|()| llvm_err(dcx, LlvmError::RunLlvmPasses))
+}
+
+// Unsafe due to LLVM calls.
+pub(crate) unsafe fn optimize(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    module: &ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+) -> Result<(), FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
+
+    let llmod = module.module_llvm.llmod();
+    let llcx = &*module.module_llvm.llcx;
+    let _handlers = DiagnosticHandlers::new(cgcx, dcx, llcx, module, CodegenDiagnosticsStage::Opt);
+
+    let module_name = module.name.clone();
+    let module_name = Some(&module_name[..]);
+
+    if config.emit_no_opt_bc {
+        let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
+        let out = path_to_c_string(&out);
+        llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+    }
+
+    if let Some(opt_level) = config.opt_level {
+        let opt_stage = match cgcx.lto {
+            Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+            Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+            _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+            _ => llvm::OptStage::PreLinkNoLTO,
+        };
+        return llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage);
+    }
+    Ok(())
+}
+
+pub(crate) fn link(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+    use super::lto::{Linker, ModuleBuffer};
+    // Sort the modules by name to ensure deterministic behavior.
+    modules.sort_by(|a, b| a.name.cmp(&b.name));
+    let (first, elements) =
+        modules.split_first().expect("Bug! modules must contain at least one module.");
+
+    let mut linker = Linker::new(first.module_llvm.llmod());
+    for module in elements {
+        let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
+        let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+        linker
+            .add(buffer.data())
+            .map_err(|()| llvm_err(dcx, LlvmError::SerializeModule { name: &module.name }))?;
+    }
+    drop(linker);
+    Ok(modules.remove(0))
+}
+
+pub(crate) unsafe fn codegen(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    dcx: &DiagCtxt,
+    module: ModuleCodegen<ModuleLlvm>,
+    config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+    let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+    {
+        let llmod = module.module_llvm.llmod();
+        let llcx = &*module.module_llvm.llcx;
+        let tm = &*module.module_llvm.tm;
+        let module_name = module.name.clone();
+        let module_name = Some(&module_name[..]);
+        let _handlers =
+            DiagnosticHandlers::new(cgcx, dcx, llcx, &module, CodegenDiagnosticsStage::Codegen);
+
+        if cgcx.msvc_imps_needed {
+            create_msvc_imps(cgcx, llcx, llmod);
+        }
+
+        // A codegen-specific pass manager is used to generate object
+        // files for an LLVM module.
+        //
+        // Apparently each of these pass managers is a one-shot kind of
+        // thing, so we create a new one for each type of output. The
+        // pass manager passed to the closure must not escape the closure
+        // itself, and the manager should only be used once.
+        unsafe fn with_codegen<'ll, F, R>(
+            tm: &'ll llvm::TargetMachine,
+            llmod: &'ll llvm::Module,
+            no_builtins: bool,
+            f: F,
+        ) -> R
+        where
+            F: FnOnce(&'ll mut PassManager<'ll>) -> R,
+        {
+            let cpm = llvm::LLVMCreatePassManager();
+            llvm::LLVMAddAnalysisPasses(tm, cpm);
+            llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+            f(cpm)
+        }
+
+        // Two things to note:
+        // - If object files are just LLVM bitcode we write bitcode, copy it to
+        //   the .o file, and delete the bitcode if it wasn't otherwise
+        //   requested.
+        // - If we don't have the integrated assembler then we need to emit
+        //   asm from LLVM and use `gcc` to create the object file.
+
+        let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+        if config.bitcode_needed() {
+            let _timer = cgcx
+                .prof
+                .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name);
+            let thin = ThinBuffer::new(llmod, config.emit_thin_lto);
+            let data = thin.data();
+
+            if let Some(bitcode_filename) = bc_out.file_name() {
+                cgcx.prof.artifact_size(
+                    "llvm_bitcode",
+                    bitcode_filename.to_string_lossy(),
+                    data.len() as u64,
+                );
+            }
+
+            if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
+                if let Err(err) = fs::write(&bc_out, data) {
+                    dcx.emit_err(WriteBytecode { path: &bc_out, err });
+                }
+            }
+
+            if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name);
+                embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+            }
+        }
+
+        if config.emit_ir {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name);
+            let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+            let out_c = path_to_c_string(&out);
+
+            extern "C" fn demangle_callback(
+                input_ptr: *const c_char,
+                input_len: size_t,
+                output_ptr: *mut c_char,
+                output_len: size_t,
+            ) -> size_t {
+                let input =
+                    unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
+
+                let Ok(input) = str::from_utf8(input) else { return 0 };
+
+                let output = unsafe {
+                    slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
+                };
+                let mut cursor = io::Cursor::new(output);
+
+                let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
+
+                if write!(cursor, "{demangled:#}").is_err() {
+                    // This is possible only if the provided buffer is not big enough.
+                    return 0;
+                }
+
+                cursor.position() as size_t
+            }
+
+            let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
+
+            if result == llvm::LLVMRustResult::Success {
+                record_artifact_size(&cgcx.prof, "llvm_ir", &out);
+            }
+
+            result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }))?;
+        }
+
+        if config.emit_asm {
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+            let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+
+            // We can't use the same module for asm and object code output,
+            // because that triggers various errors like invalid IR or broken
+            // binaries. So we must clone the module to produce the asm output
+            // if we are also producing object code.
+            let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj {
+                llvm::LLVMCloneModule(llmod)
+            } else {
+                llmod
+            };
+            with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                write_output_file(
+                    dcx,
+                    tm,
+                    cpm,
+                    llmod,
+                    &path,
+                    None,
+                    llvm::FileType::AssemblyFile,
+                    &cgcx.prof,
+                )
+            })?;
+        }
+
+        match config.emit_obj {
+            EmitObj::ObjectCode(_) => {
+                let _timer = cgcx
+                    .prof
+                    .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+
+                let dwo_out = cgcx.output_filenames.temp_path_dwo(module_name);
+                let dwo_out = match (cgcx.split_debuginfo, cgcx.split_dwarf_kind) {
+                    // Don't change how DWARF is emitted when disabled.
+                    (SplitDebuginfo::Off, _) => None,
+                    // Don't provide a DWARF object path if split debuginfo is enabled but this is
+                    // a platform that doesn't support Split DWARF.
+                    _ if !cgcx.target_can_use_split_dwarf => None,
+                    // Don't provide a DWARF object path in single mode, sections will be written
+                    // into the object as normal but ignored by linker.
+                    (_, SplitDwarfKind::Single) => None,
+                    // Emit (a subset of the) DWARF into a separate dwarf object file in split
+                    // mode.
+                    (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
+                };
+
+                with_codegen(tm, llmod, config.no_builtins, |cpm| {
+                    write_output_file(
+                        dcx,
+                        tm,
+                        cpm,
+                        llmod,
+                        &obj_out,
+                        dwo_out,
+                        llvm::FileType::ObjectFile,
+                        &cgcx.prof,
+                    )
+                })?;
+            }
+
+            EmitObj::Bitcode => {
+                debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+                if let Err(err) = link_or_copy(&bc_out, &obj_out) {
+                    dcx.emit_err(CopyBitcode { err });
+                }
+
+                if !config.emit_bc {
+                    debug!("removing_bitcode {:?}", bc_out);
+                    ensure_removed(dcx, &bc_out);
+                }
+            }
+
+            EmitObj::None => {}
+        }
+
+        record_llvm_cgu_instructions_stats(&cgcx.prof, llmod);
+    }
+
+    // `.dwo` files are only emitted if:
+    //
+    // - Object files are being emitted (i.e. bitcode only or metadata only compilations will not
+    //   produce dwarf objects, even if otherwise enabled)
+    // - Target supports Split DWARF
+    // - Split debuginfo is enabled
+    // - Split DWARF kind is `split` (i.e. debuginfo is split into `.dwo` files, not different
+    //   sections in the `.o` files).
+    let dwarf_object_emitted = matches!(config.emit_obj, EmitObj::ObjectCode(_))
+        && cgcx.target_can_use_split_dwarf
+        && cgcx.split_debuginfo != SplitDebuginfo::Off
+        && cgcx.split_dwarf_kind == SplitDwarfKind::Split;
+    Ok(module.into_compiled_module(
+        config.emit_obj != EmitObj::None,
+        dwarf_object_emitted,
+        config.emit_bc,
+        &cgcx.output_filenames,
+    ))
+}
+
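+/// For example (illustrative):
+/// `create_section_with_flags_asm(".llvmbc", "e", b"BC\xc0")` produces roughly
+/// `.section .llvmbc,"e"` followed by `.ascii "BC\300"`, with the 0xC0 byte octal-escaped.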
+fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
+    let mut asm = format!(".section {section_name},\"{section_flags}\"\n").into_bytes();
+    asm.extend_from_slice(b".ascii \"");
+    asm.reserve(data.len());
+    for &byte in data {
+        if byte == b'\\' || byte == b'"' {
+            asm.push(b'\\');
+            asm.push(byte);
+        } else if byte < 0x20 || byte >= 0x80 {
+            // Avoid non-UTF-8 inline assembly. Use octal escape sequences, because they are
+            // fixed width, while hex escapes would consume following characters.
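+            // For example (illustrative): 0x0A becomes "\012", since
+            // (0x0A >> 6) & 7 == 0, (0x0A >> 3) & 7 == 1, and 0x0A & 7 == 2.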
+            asm.push(b'\\');
+            asm.push(b'0' + ((byte >> 6) & 0x7));
+            asm.push(b'0' + ((byte >> 3) & 0x7));
+            asm.push(b'0' + ((byte >> 0) & 0x7));
+        } else {
+            asm.push(byte);
+        }
+    }
+    asm.extend_from_slice(b"\"\n");
+    asm
+}
+
+fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-ios")
+        || cgcx.opts.target_triple.triple().contains("-darwin")
+        || cgcx.opts.target_triple.triple().contains("-tvos")
+        || cgcx.opts.target_triple.triple().contains("-watchos")
+}
+
+fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
+    cgcx.opts.target_triple.triple().contains("-aix")
+}
+
+// FIXME: use C string literals here too.
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+    if target_is_apple(cgcx) {
+        "__LLVM,__bitcode\0"
+    } else if target_is_aix(cgcx) {
+        ".ipa\0"
+    } else {
+        ".llvmbc\0"
+    }
+}
+
+/// Embed the bitcode of an LLVM module in the LLVM module itself.
+///
+/// This is done primarily for iOS where it appears to be standard to compile C
+/// code at least with `-fembed-bitcode` which creates two sections in the
+/// executable:
+///
+/// * __LLVM,__bitcode
+/// * __LLVM,__cmdline
+///
+/// It appears *both* of these sections are necessary to get the linker to
+/// recognize what's going on. A suitable cmdline value is taken from the
+/// target spec.
+///
+/// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
+/// embed an empty section.
+///
+/// Basically all of this is us attempting to follow in the footsteps of clang
+/// on iOS. See #35968 for lots more info.
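+///
+/// For example (illustrative): on Apple targets this ends up as two private globals,
+/// `rustc.embedded.module` in `__LLVM,__bitcode` and `rustc.embedded.cmdline` in
+/// `__LLVM,__cmdline`.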
+unsafe fn embed_bitcode(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    llcx: &llvm::Context,
+    llmod: &llvm::Module,
+    cmdline: &str,
+    bitcode: &[u8],
+) {
+    // We're adding custom sections to the output object file, but we definitely
+    // do not want these custom sections to make their way into the final linked
+    // executable. The purpose of these custom sections is for tooling
+    // surrounding object files to work with the LLVM IR, if necessary. For
+    // example rustc's own LTO will look for LLVM IR inside of the object file
+    // in these sections by default.
+    //
+    // To handle this is a bit different depending on the object file format
+    // used by the backend, broken down into a few different categories:
+    //
+    // * Mach-O - this is for macOS. Inspecting the source code for the native
+    //   linker here shows that the `.llvmbc` and `.llvmcmd` sections are
+    //   automatically skipped by the linker. In that case there's nothing extra
+    //   that we need to do here.
+    //
+    // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
+    //   `.llvmcmd` sections, so there's nothing extra we need to do.
+    //
+    // * COFF - if we don't do anything the linker will by default copy all
+    //   these sections to the output artifact, not what we want! To subvert
+    //   this we want to flag the sections we inserted here as
+    //   `IMAGE_SCN_LNK_REMOVE`.
+    //
+    // * ELF - this is very similar to COFF above. One difference is that these
+    //   sections are removed from the output linked artifact when
+    //   `--gc-sections` is passed, which we pass by default. If that flag isn't
+    //   passed though then these sections will show up in the final output.
+    //   Additionally the flag that we need to set here is `SHF_EXCLUDE`.
+    //
+    // * XCOFF - the AIX linker ignores content in .ipa and .info if no auxiliary
+    //   symbol is associated with these sections.
+    //
+    // Unfortunately, LLVM provides no way to set custom section flags. For ELF
+    // and COFF we emit the sections using module level inline assembly for that
+    // reason (see issue #90326 for historical background).
+    let is_aix = target_is_aix(cgcx);
+    let is_apple = target_is_apple(cgcx);
+    if is_apple || is_aix || cgcx.opts.target_triple.triple().starts_with("wasm") {
+        // We don't need custom section flags, create LLVM globals.
+        let llconst = common::bytes_in_context(llcx, bitcode);
+        let llglobal = llvm::LLVMAddGlobal(
+            llmod,
+            common::val_ty(llconst),
+            c"rustc.embedded.module".as_ptr().cast(),
+        );
+        llvm::LLVMSetInitializer(llglobal, llconst);
+
+        let section = bitcode_section_name(cgcx);
+        llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+        llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+        llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+        let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
+        let llglobal = llvm::LLVMAddGlobal(
+            llmod,
+            common::val_ty(llconst),
+            c"rustc.embedded.cmdline".as_ptr().cast(),
+        );
+        llvm::LLVMSetInitializer(llglobal, llconst);
+        let section = if is_apple {
+            c"__LLVM,__cmdline"
+        } else if is_aix {
+            c".info"
+        } else {
+            c".llvmcmd"
+        };
+        llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+        llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+    } else {
+        // We need custom section flags, so emit module-level inline assembly.
+        let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
+        let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
+        llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+        let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
+        llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+    }
+}
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using MSVC linker. We do this only for data, as linker can fix up
+// code references on its own.
+// See #26591, #27438
+fn create_msvc_imps(
+    cgcx: &CodegenContext<LlvmCodegenBackend>,
+    llcx: &llvm::Context,
+    llmod: &llvm::Module,
+) {
+    if !cgcx.msvc_imps_needed {
+        return;
+    }
+    // The x86 ABI seems to require that leading underscores are added to symbol
+    // names, so we need an extra underscore on x86. There's also a leading
+    // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
+    // underscores added in front).
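+    // For example (illustrative): a public static `FOO` gets a companion global named
+    // `__imp_FOO` (`__imp__FOO` on x86) whose initializer is `FOO`'s address.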
+    let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
+
+    unsafe {
+        let ptr_ty = Type::ptr_llcx(llcx);
+        let globals = base::iter_globals(llmod)
+            .filter(|&val| {
+                llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
+                    && llvm::LLVMIsDeclaration(val) == 0
+            })
+            .filter_map(|val| {
+                // Exclude some symbols that we know are not Rust symbols.
+                let name = llvm::get_value_name(val);
+                if ignored(name) { None } else { Some((val, name)) }
+            })
+            .map(move |(val, name)| {
+                let mut imp_name = prefix.as_bytes().to_vec();
+                imp_name.extend(name);
+                let imp_name = CString::new(imp_name).unwrap();
+                (imp_name, val)
+            })
+            .collect::<Vec<_>>();
+
+        for (imp_name, val) in globals {
+            let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast());
+            llvm::LLVMSetInitializer(imp, val);
+            llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+        }
+    }
+
+    // Use this function to exclude certain symbols from `__imp` generation.
+    fn ignored(symbol_name: &[u8]) -> bool {
+        // These are symbols generated by LLVM's profiling instrumentation
+        symbol_name.starts_with(b"__llvm_profile_")
+    }
+}
+
+fn record_artifact_size(
+    self_profiler_ref: &SelfProfilerRef,
+    artifact_kind: &'static str,
+    path: &Path,
+) {
+    // Don't stat the file if we are not going to record its size.
+    if !self_profiler_ref.enabled() {
+        return;
+    }
+
+    if let Some(artifact_name) = path.file_name() {
+        let file_size = std::fs::metadata(path).map(|m| m.len()).unwrap_or(0);
+        self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
+    }
+}
+
+fn record_llvm_cgu_instructions_stats(prof: &SelfProfilerRef, llmod: &llvm::Module) {
+    if !prof.enabled() {
+        return;
+    }
+
+    let raw_stats =
+        llvm::build_string(|s| unsafe { llvm::LLVMRustModuleInstructionStats(llmod, s) })
+            .expect("cannot get module instruction stats");
+
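+    // The raw stats are expected to be a small JSON object matching the struct below,
+    // e.g. (illustrative): {"module": "my_crate.cgu-0", "total": 42}.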
+    #[derive(serde::Deserialize)]
+    struct InstructionsStats {
+        module: String,
+        total: u64,
+    }
+
+    let InstructionsStats { module, total } =
+        serde_json::from_str(&raw_stats).expect("cannot parse llvm cgu instructions stats");
+    prof.artifact_size("cgu_instructions", module, total);
+}
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
new file mode 100644
index 00000000000..5dc271ccddb
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -0,0 +1,175 @@
+//! Codegen the MIR to the LLVM IR.
+//!
+//! Hopefully useful general knowledge about codegen:
+//!
+//! * There's no way to find out the [`Ty`] type of a [`Value`]. Doing so
+//!   would be "trying to get the eggs out of an omelette" (credit:
+//!   pcwalton). You can, instead, find out its [`llvm::Type`] by calling [`val_ty`],
+//!   but one [`llvm::Type`] corresponds to many [`Ty`]s; for instance, `tup(int, int,
+//!   int)` and `rec(x=int, y=int, z=int)` will have the same [`llvm::Type`].
+//!
+//! [`Ty`]: rustc_middle::ty::Ty
+//! [`val_ty`]: crate::common::val_ty
+
+use super::ModuleLlvm;
+
+use crate::attributes;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::dep_graph;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::SanitizerSet;
+
+use std::time::Instant;
+
+pub struct ValueIter<'ll> {
+    cur: Option<&'ll Value>,
+    step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
+}
+
+impl<'ll> Iterator for ValueIter<'ll> {
+    type Item = &'ll Value;
+
+    fn next(&mut self) -> Option<&'ll Value> {
+        let old = self.cur;
+        if let Some(old) = old {
+            self.cur = unsafe { (self.step)(old) };
+        }
+        old
+    }
+}
+
+pub fn iter_globals(llmod: &llvm::Module) -> ValueIter<'_> {
+    unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } }
+}
+
+pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<ModuleLlvm>, u64) {
+    let start_time = Instant::now();
+
+    let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+    let (module, _) = tcx.dep_graph.with_task(
+        dep_node,
+        tcx,
+        cgu_name,
+        module_codegen,
+        Some(dep_graph::hash_result),
+    );
+    let time_to_codegen = start_time.elapsed();
+
+    // We assume that the cost of running LLVM on a CGU is proportional to
+    // the time we needed to codegen it.
+    let cost = time_to_codegen.as_nanos() as u64;
+
+    fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
+        let cgu = tcx.codegen_unit(cgu_name);
+        let _prof_timer =
+            tcx.prof.generic_activity_with_arg_recorder("codegen_module", |recorder| {
+                recorder.record_arg(cgu_name.to_string());
+                recorder.record_arg(cgu.size_estimate().to_string());
+            });
+        // Instantiate monomorphizations without filling out definitions yet...
+        let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
+        {
+            let cx = CodegenCx::new(tcx, cgu, &llvm_module);
+            let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
+            for &(mono_item, data) in &mono_items {
+                mono_item.predefine::<Builder<'_, '_, '_>>(&cx, data.linkage, data.visibility);
+            }
+
+            // ... and now that we have everything pre-defined, fill out those definitions.
+            for &(mono_item, _) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            }
+
+            // If this codegen unit contains the main function, also create the
+            // wrapper here
+            if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
+                let attrs = attributes::sanitize_attrs(&cx, SanitizerSet::empty());
+                attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
+            }
+
+            // Finalize code coverage by injecting the coverage map. Note, the coverage map will
+            // also be added to the `llvm.compiler.used` variable, created next.
+            if cx.sess().instrument_coverage() {
+                cx.coverageinfo_finalize();
+            }
+
+            // Create the llvm.used and llvm.compiler.used variables.
+            if !cx.used_statics.borrow().is_empty() {
+                cx.create_used_variable_impl(c"llvm.used", &*cx.used_statics.borrow());
+            }
+            if !cx.compiler_used_statics.borrow().is_empty() {
+                cx.create_used_variable_impl(
+                    c"llvm.compiler.used",
+                    &*cx.compiler_used_statics.borrow(),
+                );
+            }
+
+            // Run replace-all-uses-with for statics that need it. This must
+            // happen after the llvm.used variables are created.
+            for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
+                unsafe {
+                    llvm::LLVMReplaceAllUsesWith(old_g, new_g);
+                    llvm::LLVMDeleteGlobal(old_g);
+                }
+            }
+
+            // Finalize debuginfo
+            if cx.sess().opts.debuginfo != DebugInfo::None {
+                cx.debuginfo_finalize();
+            }
+        }
+
+        ModuleCodegen {
+            name: cgu_name.to_string(),
+            module_llvm: llvm_module,
+            kind: ModuleKind::Regular,
+        }
+    }
+
+    (module, cost)
+}
+
+pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
+    let Some(sect) = attrs.link_section else { return };
+    unsafe {
+        let buf = SmallCStr::new(sect.as_str());
+        llvm::LLVMSetSection(llval, buf.as_ptr());
+    }
+}
+
+pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
+    match linkage {
+        Linkage::External => llvm::Linkage::ExternalLinkage,
+        Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
+        Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
+        Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
+        Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
+        Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
+        Linkage::Appending => llvm::Linkage::AppendingLinkage,
+        Linkage::Internal => llvm::Linkage::InternalLinkage,
+        Linkage::Private => llvm::Linkage::PrivateLinkage,
+        Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
+        Linkage::Common => llvm::Linkage::CommonLinkage,
+    }
+}
+
+pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
+    match linkage {
+        Visibility::Default => llvm::Visibility::Default,
+        Visibility::Hidden => llvm::Visibility::Hidden,
+        Visibility::Protected => llvm::Visibility::Protected,
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
new file mode 100644
index 00000000000..63e59ea13fc
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -0,0 +1,1675 @@
+use crate::abi::FnAbiLlvmExt;
+use crate::attributes;
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use libc::{c_char, c_uint};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
+};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::Span;
+use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions};
+use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
+use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
+use smallvec::SmallVec;
+use std::borrow::Cow;
+use std::iter;
+use std::ops::Deref;
+use std::ptr;
+
+// All Builders must have an llfn associated with them
+#[must_use]
+pub struct Builder<'a, 'll, 'tcx> {
+    pub llbuilder: &'ll mut llvm::Builder<'ll>,
+    pub cx: &'a CodegenCx<'ll, 'tcx>,
+}
+
+impl Drop for Builder<'_, '_, '_> {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
+        }
+    }
+}
+
+/// Empty string, to be used where LLVM expects an instruction name, indicating
+/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
+// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
+const UNNAMED: *const c_char = c"".as_ptr();
+
+impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
+    type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
+    type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
+    type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
+    type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
+    type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
+
+    type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
+    type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
+    type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl abi::HasDataLayout for Builder<'_, '_, '_> {
+    fn data_layout(&self) -> &abi::TargetDataLayout {
+        self.cx.data_layout()
+    }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.cx.tcx
+    }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        self.cx.param_env()
+    }
+}
+
+impl HasTargetSpec for Builder<'_, '_, '_> {
+    #[inline]
+    fn target_spec(&self) -> &Target {
+        self.cx.target_spec()
+    }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
+
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        self.cx.handle_layout_err(err, span, ty)
+    }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+    }
+}
+
+impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
+    type Target = CodegenCx<'ll, 'tcx>;
+
+    #[inline]
+    fn deref(&self) -> &Self::Target {
+        self.cx
+    }
+}
+
+impl<'ll, 'tcx> HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
+    type CodegenCx = CodegenCx<'ll, 'tcx>;
+}
+
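+/// Generates thin wrapper methods for simple value instructions: each method
+/// forwards its operands to the named `LLVMBuild*` C API function and leaves the
+/// resulting instruction unnamed.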
+macro_rules! builder_methods_for_value_instructions {
+    ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
+        $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
+            unsafe {
+                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
+            }
+        })+
+    }
+}
+
+impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
+    fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
+        let bx = Builder::with_cx(cx);
+        unsafe {
+            llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
+        }
+        bx
+    }
+
+    fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
+        self.cx
+    }
+
+    fn llbb(&self) -> &'ll BasicBlock {
+        unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
+    }
+
+    fn set_span(&mut self, _span: Span) {}
+
+    fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
+        unsafe {
+            let name = SmallCStr::new(name);
+            llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
+        }
+    }
+
+    fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
+        Self::append_block(self.cx, self.llfn(), name)
+    }
+
+    fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
+        *self = Self::build(self.cx, llbb)
+    }
+
+    fn ret_void(&mut self) {
+        unsafe {
+            llvm::LLVMBuildRetVoid(self.llbuilder);
+        }
+    }
+
+    fn ret(&mut self, v: &'ll Value) {
+        unsafe {
+            llvm::LLVMBuildRet(self.llbuilder, v);
+        }
+    }
+
+    fn br(&mut self, dest: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMBuildBr(self.llbuilder, dest);
+        }
+    }
+
+    fn cond_br(
+        &mut self,
+        cond: &'ll Value,
+        then_llbb: &'ll BasicBlock,
+        else_llbb: &'ll BasicBlock,
+    ) {
+        unsafe {
+            llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
+        }
+    }
+
+    fn switch(
+        &mut self,
+        v: &'ll Value,
+        else_llbb: &'ll BasicBlock,
+        cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
+    ) {
+        let switch =
+            unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
+        for (on_val, dest) in cases {
+            let on_val = self.const_uint_big(self.val_ty(v), on_val);
+            unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
+        }
+    }
+
+    fn invoke(
+        &mut self,
+        llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+        args: &[&'ll Value],
+        then: &'ll BasicBlock,
+        catch: &'ll BasicBlock,
+        funclet: Option<&Funclet<'ll>>,
+    ) -> &'ll Value {
+        debug!("invoke {:?} with args ({:?})", llfn, args);
+
+        let args = self.check_call("invoke", llty, llfn, args);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
+
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        if let Some(kcfi_bundle) = kcfi_bundle {
+            bundles.push(kcfi_bundle);
+        }
+
+        let invoke = unsafe {
+            llvm::LLVMRustBuildInvoke(
+                self.llbuilder,
+                llty,
+                llfn,
+                args.as_ptr(),
+                args.len() as c_uint,
+                then,
+                catch,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
+                UNNAMED,
+            )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, invoke);
+        }
+        invoke
+    }
+
+    fn unreachable(&mut self) {
+        unsafe {
+            llvm::LLVMBuildUnreachable(self.llbuilder);
+        }
+    }
+
+    builder_methods_for_value_instructions! {
+        add(a, b) => LLVMBuildAdd,
+        fadd(a, b) => LLVMBuildFAdd,
+        sub(a, b) => LLVMBuildSub,
+        fsub(a, b) => LLVMBuildFSub,
+        mul(a, b) => LLVMBuildMul,
+        fmul(a, b) => LLVMBuildFMul,
+        udiv(a, b) => LLVMBuildUDiv,
+        exactudiv(a, b) => LLVMBuildExactUDiv,
+        sdiv(a, b) => LLVMBuildSDiv,
+        exactsdiv(a, b) => LLVMBuildExactSDiv,
+        fdiv(a, b) => LLVMBuildFDiv,
+        urem(a, b) => LLVMBuildURem,
+        srem(a, b) => LLVMBuildSRem,
+        frem(a, b) => LLVMBuildFRem,
+        shl(a, b) => LLVMBuildShl,
+        lshr(a, b) => LLVMBuildLShr,
+        ashr(a, b) => LLVMBuildAShr,
+        and(a, b) => LLVMBuildAnd,
+        or(a, b) => LLVMBuildOr,
+        xor(a, b) => LLVMBuildXor,
+        neg(x) => LLVMBuildNeg,
+        fneg(x) => LLVMBuildFNeg,
+        not(x) => LLVMBuildNot,
+        unchecked_sadd(x, y) => LLVMBuildNSWAdd,
+        unchecked_uadd(x, y) => LLVMBuildNUWAdd,
+        unchecked_ssub(x, y) => LLVMBuildNSWSub,
+        unchecked_usub(x, y) => LLVMBuildNUWSub,
+        unchecked_smul(x, y) => LLVMBuildNSWMul,
+        unchecked_umul(x, y) => LLVMBuildNUWMul,
+    }
+
+    fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetFastMath(instr);
+            instr
+        }
+    }
+
+    fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetFastMath(instr);
+            instr
+        }
+    }
+
+    fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetFastMath(instr);
+            instr
+        }
+    }
+
+    fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetFastMath(instr);
+            instr
+        }
+    }
+
+    fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetFastMath(instr);
+            instr
+        }
+    }
+
+    fn fadd_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetAlgebraicMath(instr);
+            instr
+        }
+    }
+
+    fn fsub_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetAlgebraicMath(instr);
+            instr
+        }
+    }
+
+    fn fmul_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetAlgebraicMath(instr);
+            instr
+        }
+    }
+
+    fn fdiv_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetAlgebraicMath(instr);
+            instr
+        }
+    }
+
+    fn frem_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
+            llvm::LLVMRustSetAlgebraicMath(instr);
+            instr
+        }
+    }
+
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        ty: Ty<'_>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value) {
+        use rustc_middle::ty::{Int, Uint};
+        use rustc_middle::ty::{IntTy::*, UintTy::*};
+
+        let new_kind = match ty.kind() {
+            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+            t @ (Uint(_) | Int(_)) => *t,
+            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+        };
+
+        let name = match oop {
+            OverflowOp::Add => match new_kind {
+                Int(I8) => "llvm.sadd.with.overflow.i8",
+                Int(I16) => "llvm.sadd.with.overflow.i16",
+                Int(I32) => "llvm.sadd.with.overflow.i32",
+                Int(I64) => "llvm.sadd.with.overflow.i64",
+                Int(I128) => "llvm.sadd.with.overflow.i128",
+
+                Uint(U8) => "llvm.uadd.with.overflow.i8",
+                Uint(U16) => "llvm.uadd.with.overflow.i16",
+                Uint(U32) => "llvm.uadd.with.overflow.i32",
+                Uint(U64) => "llvm.uadd.with.overflow.i64",
+                Uint(U128) => "llvm.uadd.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+            OverflowOp::Sub => match new_kind {
+                Int(I8) => "llvm.ssub.with.overflow.i8",
+                Int(I16) => "llvm.ssub.with.overflow.i16",
+                Int(I32) => "llvm.ssub.with.overflow.i32",
+                Int(I64) => "llvm.ssub.with.overflow.i64",
+                Int(I128) => "llvm.ssub.with.overflow.i128",
+
+                Uint(_) => {
+                    // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
+                    // to be the canonical form. It will attempt to reform llvm.usub.with.overflow
+                    // in the backend if profitable.
+                    let sub = self.sub(lhs, rhs);
+                    let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
+                    return (sub, cmp);
+                }
+
+                _ => unreachable!(),
+            },
+            OverflowOp::Mul => match new_kind {
+                Int(I8) => "llvm.smul.with.overflow.i8",
+                Int(I16) => "llvm.smul.with.overflow.i16",
+                Int(I32) => "llvm.smul.with.overflow.i32",
+                Int(I64) => "llvm.smul.with.overflow.i64",
+                Int(I128) => "llvm.smul.with.overflow.i128",
+
+                Uint(U8) => "llvm.umul.with.overflow.i8",
+                Uint(U16) => "llvm.umul.with.overflow.i16",
+                Uint(U32) => "llvm.umul.with.overflow.i32",
+                Uint(U64) => "llvm.umul.with.overflow.i64",
+                Uint(U128) => "llvm.umul.with.overflow.i128",
+
+                _ => unreachable!(),
+            },
+        };
+
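+        // The `*.with.overflow` intrinsics return a `{ iN, i1 }` pair: the
+        // wrapped result and an overflow flag.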
+        let res = self.call_intrinsic(name, &[lhs, rhs]);
+        (self.extract_value(res, 0), self.extract_value(res, 1))
+    }
+
+    fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+        if self.cx().val_ty(val) == self.cx().type_i1() {
+            self.zext(val, self.cx().type_i8())
+        } else {
+            val
+        }
+    }
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+        if scalar.is_bool() {
+            return self.trunc(val, self.cx().type_i1());
+        }
+        val
+    }
+
+    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
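+        // Place all allocas at the start of the function's entry block so they
+        // are static allocas that LLVM's mem2reg/SROA passes can promote.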
+        let mut bx = Builder::with_cx(self.cx);
+        bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+        unsafe {
+            let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
+            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+            alloca
+        }
+    }
+
+    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
+        unsafe {
+            let alloca =
+                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
+            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+            alloca
+        }
+    }
+
+    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
+        unsafe {
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
+            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
+            load
+        }
+    }
+
+    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
+        unsafe {
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
+            llvm::LLVMSetVolatile(load, llvm::True);
+            load
+        }
+    }
+
+    fn atomic_load(
+        &mut self,
+        ty: &'ll Type,
+        ptr: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        size: Size,
+    ) -> &'ll Value {
+        unsafe {
+            let load = llvm::LLVMRustBuildAtomicLoad(
+                self.llbuilder,
+                ty,
+                ptr,
+                UNNAMED,
+                AtomicOrdering::from_generic(order),
+            );
+            // LLVM requires the alignment of atomic loads to be at least the size of the type.
+            llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
+            load
+        }
+    }
+
+    #[instrument(level = "trace", skip(self))]
+    fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
+        if place.layout.is_unsized() {
+            let tail = self.tcx.struct_tail_with_normalize(place.layout.ty, |ty| ty, || {});
+            if matches!(tail.kind(), ty::Foreign(..)) {
+                // Unsized locals and, at least conceptually, even unsized arguments must be copied
+                // around, which requires dynamically determining their size. Therefore, we cannot
+                // allow `extern` types here. Consult t-opsem before removing this check.
+                panic!("unsized locals must not be `extern` types");
+            }
+        }
+        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+        if place.layout.is_zst() {
+            return OperandRef::zero_sized(place.layout);
+        }
+
+        #[instrument(level = "trace", skip(bx))]
+        fn scalar_load_metadata<'a, 'll, 'tcx>(
+            bx: &mut Builder<'a, 'll, 'tcx>,
+            load: &'ll Value,
+            scalar: abi::Scalar,
+            layout: TyAndLayout<'tcx>,
+            offset: Size,
+        ) {
+            if !scalar.is_uninit_valid() {
+                bx.noundef_metadata(load);
+            }
+
+            match scalar.primitive() {
+                abi::Int(..) => {
+                    if !scalar.is_always_valid(bx) {
+                        bx.range_metadata(load, scalar.valid_range(bx));
+                    }
+                }
+                abi::Pointer(_) => {
+                    if !scalar.valid_range(bx).contains(0) {
+                        bx.nonnull_metadata(load);
+                    }
+
+                    if let Some(pointee) = layout.pointee_info_at(bx, offset) {
+                        if let Some(_) = pointee.safe {
+                            bx.align_metadata(load, pointee.align);
+                        }
+                    }
+                }
+                abi::F16 | abi::F32 | abi::F64 | abi::F128 => {}
+            }
+        }
+
+        let val = if let Some(llextra) = place.llextra {
+            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        } else if place.layout.is_llvm_immediate() {
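+            // When loading from a constant global whose initializer already has
+            // the expected type, reuse the initializer directly instead of
+            // emitting a load.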
+            let mut const_llval = None;
+            let llty = place.layout.llvm_type(self);
+            unsafe {
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
+                        if let Some(init) = llvm::LLVMGetInitializer(global) {
+                            if self.val_ty(init) == llty {
+                                const_llval = Some(init);
+                            }
+                        }
+                    }
+                }
+            }
+            let llval = const_llval.unwrap_or_else(|| {
+                let load = self.load(llty, place.llval, place.align);
+                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                    scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
+                }
+                load
+            });
+            OperandValue::Immediate(self.to_immediate(llval, place.layout))
+        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+            let b_offset = a.size(self).align_to(b.align(self).abi);
+
+            let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                };
+                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+                let load = self.load(llty, llptr, align);
+                scalar_load_metadata(self, load, scalar, layout, offset);
+                self.to_immediate_scalar(load, scalar)
+            };
+
+            OperandValue::Pair(
+                load(0, a, place.layout, place.align, Size::ZERO),
+                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+            )
+        } else {
+            OperandValue::Ref(place.llval, None, place.align)
+        };
+
+        OperandRef { val, layout: place.layout }
+    }
+
+    fn write_operand_repeatedly(
+        &mut self,
+        cg_elem: OperandRef<'tcx, &'ll Value>,
+        count: u64,
+        dest: PlaceRef<'tcx, &'ll Value>,
+    ) {
+        let zero = self.const_usize(0);
+        let count = self.const_usize(count);
+
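+        // Emit a simple counted loop:
+        //   header: i = phi [0, pred], [i + 1, body]; branch to body while i < count
+        //   body:   store the element into dest[i], then jump back to header
+        //   next:   code after the loop continues here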
+        let header_bb = self.append_sibling_block("repeat_loop_header");
+        let body_bb = self.append_sibling_block("repeat_loop_body");
+        let next_bb = self.append_sibling_block("repeat_loop_next");
+
+        self.br(header_bb);
+
+        let mut header_bx = Self::build(self.cx, header_bb);
+        let i = header_bx.phi(self.val_ty(zero), &[zero], &[self.llbb()]);
+
+        let keep_going = header_bx.icmp(IntPredicate::IntULT, i, count);
+        header_bx.cond_br(keep_going, body_bb, next_bb);
+
+        let mut body_bx = Self::build(self.cx, body_bb);
+        let dest_elem = dest.project_index(&mut body_bx, i);
+        cg_elem.val.store(&mut body_bx, dest_elem);
+
+        let next = body_bx.unchecked_uadd(i, self.const_usize(1));
+        body_bx.br(header_bb);
+        header_bx.add_incoming_to_phi(i, next, body_bb);
+
+        *self = Self::build(self.cx, next_bb);
+    }
+
+    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
+        if self.sess().target.arch == "amdgpu" {
+            // amdgpu/LLVM does something weird and thinks an i64 value is
+            // split into a v2i32, halving the bitwidth LLVM expects,
+            // tripping an assertion. So, for now, just disable this
+            // optimization.
+            return;
+        }
+
+        unsafe {
+            let llty = self.cx.val_ty(load);
+            let v = [
+                self.cx.const_uint_big(llty, range.start),
+                self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
+            ];
+
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_range as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+            );
+        }
+    }
+
+    fn nonnull_metadata(&mut self, load: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_nonnull as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+            );
+        }
+    }
+
+    fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
+        self.store_with_flags(val, ptr, align, MemFlags::empty())
+    }
+
+    fn store_with_flags(
+        &mut self,
+        val: &'ll Value,
+        ptr: &'ll Value,
+        align: Align,
+        flags: MemFlags,
+    ) -> &'ll Value {
+        debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
+        unsafe {
+            let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+            let align =
+                if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
+            llvm::LLVMSetAlignment(store, align);
+            if flags.contains(MemFlags::VOLATILE) {
+                llvm::LLVMSetVolatile(store, llvm::True);
+            }
+            if flags.contains(MemFlags::NONTEMPORAL) {
+                // According to LLVM [1], the `!nontemporal` metadata attached to
+                // a store must *always* be a single metadata node holding the
+                // integer 1.
+                //
+                // [1]: https://llvm.org/docs/LangRef.html#store-instruction
+                let one = self.cx.const_i32(1);
+                let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
+                llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
+            }
+            store
+        }
+    }
+
+    fn atomic_store(
+        &mut self,
+        val: &'ll Value,
+        ptr: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        size: Size,
+    ) {
+        debug!("Store {:?} -> {:?}", val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
+        unsafe {
+            let store = llvm::LLVMRustBuildAtomicStore(
+                self.llbuilder,
+                val,
+                ptr,
+                AtomicOrdering::from_generic(order),
+            );
+            // LLVM requires the alignment of atomic stores to be at least the size of the type.
+            llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
+        }
+    }
+
+    fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildGEP2(
+                self.llbuilder,
+                ty,
+                ptr,
+                indices.as_ptr(),
+                indices.len() as c_uint,
+                UNNAMED,
+            )
+        }
+    }
+
+    fn inbounds_gep(
+        &mut self,
+        ty: &'ll Type,
+        ptr: &'ll Value,
+        indices: &[&'ll Value],
+    ) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildInBoundsGEP2(
+                self.llbuilder,
+                ty,
+                ptr,
+                indices.as_ptr(),
+                indices.len() as c_uint,
+                UNNAMED,
+            )
+        }
+    }
+
+    /* Casts */
+    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        self.fptoint_sat(false, val, dest_ty)
+    }
+
+    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        self.fptoint_sat(true, val, dest_ty)
+    }
+
+    fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        // On WebAssembly the `fptoui` and `fptosi` instructions currently have
+        // poor codegen. The reason for this is that the corresponding wasm
+        // instructions, `i32.trunc_f32_s` for example, will trap when the float
+        // is out-of-bounds, infinity, or nan. This means that LLVM
+        // automatically inserts control flow around `fptoui` and `fptosi`
+        // because the LLVM instruction `fptoui` is defined as producing a
+        // poison value, not having UB on out-of-bounds values.
+        //
+        // This method, however, is only used with non-saturating casts that
+        // have UB on out-of-bounds values. This means that it's ok if we use
+        // the raw wasm instruction since out-of-bounds values can do whatever
+        // we like. To ensure that LLVM picks the right instruction we choose
+        // the raw wasm intrinsic functions which avoid LLVM inserting all the
+        // other control flow automatically.
+        if self.sess().target.is_like_wasm {
+            let src_ty = self.cx.val_ty(val);
+            if self.cx.type_kind(src_ty) != TypeKind::Vector {
+                let float_width = self.cx.float_width(src_ty);
+                let int_width = self.cx.int_width(dest_ty);
+                let name = match (int_width, float_width) {
+                    (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+                    (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+                    (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+                    (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+                    _ => None,
+                };
+                if let Some(name) = name {
+                    return self.call_intrinsic(name, &[val]);
+                }
+            }
+        }
+        unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        // see `fptoui` above for why wasm is different here
+        if self.sess().target.is_like_wasm {
+            let src_ty = self.cx.val_ty(val);
+            if self.cx.type_kind(src_ty) != TypeKind::Vector {
+                let float_width = self.cx.float_width(src_ty);
+                let int_width = self.cx.int_width(dest_ty);
+                let name = match (int_width, float_width) {
+                    (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
+                    (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
+                    (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
+                    (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
+                    _ => None,
+                };
+                if let Some(name) = name {
+                    return self.call_intrinsic(name, &[val]);
+                }
+            }
+        }
+        unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
+        unsafe {
+            llvm::LLVMBuildIntCast2(
+                self.llbuilder,
+                val,
+                dest_ty,
+                if is_signed { True } else { False },
+                UNNAMED,
+            )
+        }
+    }
+
+    fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    /* Comparisons */
+    fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        let op = llvm::IntPredicate::from_generic(op);
+        unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+    }
+
+    fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        let op = llvm::RealPredicate::from_generic(op);
+        unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+    }
+
+    /* Miscellaneous instructions */
+    fn memcpy(
+        &mut self,
+        dst: &'ll Value,
+        dst_align: Align,
+        src: &'ll Value,
+        src_align: Align,
+        size: &'ll Value,
+        flags: MemFlags,
+    ) {
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
+        let size = self.intcast(size, self.type_isize(), false);
+        let is_volatile = flags.contains(MemFlags::VOLATILE);
+        unsafe {
+            llvm::LLVMRustBuildMemCpy(
+                self.llbuilder,
+                dst,
+                dst_align.bytes() as c_uint,
+                src,
+                src_align.bytes() as c_uint,
+                size,
+                is_volatile,
+            );
+        }
+    }
+
+    fn memmove(
+        &mut self,
+        dst: &'ll Value,
+        dst_align: Align,
+        src: &'ll Value,
+        src_align: Align,
+        size: &'ll Value,
+        flags: MemFlags,
+    ) {
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
+        let size = self.intcast(size, self.type_isize(), false);
+        let is_volatile = flags.contains(MemFlags::VOLATILE);
+        unsafe {
+            llvm::LLVMRustBuildMemMove(
+                self.llbuilder,
+                dst,
+                dst_align.bytes() as c_uint,
+                src,
+                src_align.bytes() as c_uint,
+                size,
+                is_volatile,
+            );
+        }
+    }
+
+    fn memset(
+        &mut self,
+        ptr: &'ll Value,
+        fill_byte: &'ll Value,
+        size: &'ll Value,
+        align: Align,
+        flags: MemFlags,
+    ) {
+        let is_volatile = flags.contains(MemFlags::VOLATILE);
+        unsafe {
+            llvm::LLVMRustBuildMemSet(
+                self.llbuilder,
+                ptr,
+                align.bytes() as c_uint,
+                fill_byte,
+                size,
+                is_volatile,
+            );
+        }
+    }
+
+    fn select(
+        &mut self,
+        cond: &'ll Value,
+        then_val: &'ll Value,
+        else_val: &'ll Value,
+    ) -> &'ll Value {
+        unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
+    }
+
+    fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+    }
+
+    fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
+    }
+
+    fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
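+        // Splat by inserting `elt` into lane 0 of an undef vector, then shuffling
+        // it with an all-zeros mask, which broadcasts lane 0 into every lane.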
+        unsafe {
+            let elt_ty = self.cx.val_ty(elt);
+            let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
+            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
+            let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+            self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
+        }
+    }
+
+    fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
+        assert_eq!(idx as c_uint as u64, idx);
+        unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
+    }
+
+    fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
+        assert_eq!(idx as c_uint as u64, idx);
+        unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
+    }
+
+    fn set_personality_fn(&mut self, personality: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetPersonalityFn(self.llfn(), personality);
+        }
+    }
+
+    fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
+        let landing_pad = self.landing_pad(ty, pers_fn, 0);
+        unsafe {
+            llvm::LLVMSetCleanup(landing_pad, llvm::True);
+        }
+        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
+    }
+
+    fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
+        let landing_pad = self.landing_pad(ty, pers_fn, 1);
+        self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[]));
+        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
+    }
+
+    fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
+        let mut exn = self.const_poison(ty);
+        exn = self.insert_value(exn, exn0, 0);
+        exn = self.insert_value(exn, exn1, 1);
+        unsafe {
+            llvm::LLVMBuildResume(self.llbuilder, exn);
+        }
+    }
+
+    fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
+        let ret = unsafe {
+            llvm::LLVMBuildCleanupPad(
+                self.llbuilder,
+                parent,
+                args.as_ptr(),
+                args.len() as c_uint,
+                c"cleanuppad".as_ptr(),
+            )
+        };
+        Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
+    }
+
+    fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
+        unsafe {
+            llvm::LLVMBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
+                .expect("LLVM does not have support for cleanupret");
+        }
+    }
+
+    fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
+        let ret = unsafe {
+            llvm::LLVMBuildCatchPad(
+                self.llbuilder,
+                parent,
+                args.as_ptr(),
+                args.len() as c_uint,
+                c"catchpad".as_ptr(),
+            )
+        };
+        Funclet::new(ret.expect("LLVM does not have support for catchpad"))
+    }
+
+    fn catch_switch(
+        &mut self,
+        parent: Option<&'ll Value>,
+        unwind: Option<&'ll BasicBlock>,
+        handlers: &[&'ll BasicBlock],
+    ) -> &'ll Value {
+        let ret = unsafe {
+            llvm::LLVMBuildCatchSwitch(
+                self.llbuilder,
+                parent,
+                unwind,
+                handlers.len() as c_uint,
+                c"catchswitch".as_ptr(),
+            )
+        };
+        let ret = ret.expect("LLVM does not have support for catchswitch");
+        for handler in handlers {
+            unsafe {
+                llvm::LLVMAddHandler(ret, handler);
+            }
+        }
+        ret
+    }
+
+    // Atomic Operations
+    fn atomic_cmpxchg(
+        &mut self,
+        dst: &'ll Value,
+        cmp: &'ll Value,
+        src: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        failure_order: rustc_codegen_ssa::common::AtomicOrdering,
+        weak: bool,
+    ) -> (&'ll Value, &'ll Value) {
+        let weak = if weak { llvm::True } else { llvm::False };
+        unsafe {
+            let value = llvm::LLVMBuildAtomicCmpXchg(
+                self.llbuilder,
+                dst,
+                cmp,
+                src,
+                AtomicOrdering::from_generic(order),
+                AtomicOrdering::from_generic(failure_order),
+                llvm::False, // SingleThreaded
+            );
+            llvm::LLVMSetWeak(value, weak);
+            let val = self.extract_value(value, 0);
+            let success = self.extract_value(value, 1);
+            (val, success)
+        }
+    }
+    fn atomic_rmw(
+        &mut self,
+        op: rustc_codegen_ssa::common::AtomicRmwBinOp,
+        dst: &'ll Value,
+        mut src: &'ll Value,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+    ) -> &'ll Value {
+        // The only atomic RMW operation that LLVM supports directly on pointer
+        // operands is `xchg`; for all other operations the pointer value must
+        // first be cast to an integer.
+        if self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
+        {
+            src = self.ptrtoint(src, self.type_isize());
+        }
+        unsafe {
+            llvm::LLVMBuildAtomicRMW(
+                self.llbuilder,
+                AtomicRmwBinOp::from_generic(op),
+                dst,
+                src,
+                AtomicOrdering::from_generic(order),
+                llvm::False, // SingleThreaded
+            )
+        }
+    }
+
+    fn atomic_fence(
+        &mut self,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
+        scope: SynchronizationScope,
+    ) {
+        let single_threaded = match scope {
+            SynchronizationScope::SingleThread => llvm::True,
+            SynchronizationScope::CrossThread => llvm::False,
+        };
+        unsafe {
+            llvm::LLVMBuildFence(
+                self.llbuilder,
+                AtomicOrdering::from_generic(order),
+                single_threaded,
+                UNNAMED,
+            );
+        }
+    }
+
+    fn set_invariant_load(&mut self, load: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_invariant_load as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+            );
+        }
+    }
+
+    fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
+        self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
+    }
+
+    fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
+        self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
+    }
+
+    fn instrprof_increment(
+        &mut self,
+        fn_name: &'ll Value,
+        hash: &'ll Value,
+        num_counters: &'ll Value,
+        index: &'ll Value,
+    ) {
+        debug!(
+            "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
+            fn_name, hash, num_counters, index
+        );
+
+        let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
+        let llty = self.cx.type_func(
+            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+            self.cx.type_void(),
+        );
+        let args = &[fn_name, hash, num_counters, index];
+        let args = self.check_call("call", llty, llfn, args);
+
+        unsafe {
+            let _ = llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llty,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                [].as_ptr(),
+                0 as c_uint,
+            );
+        }
+    }
+
+    fn call(
+        &mut self,
+        llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+        args: &[&'ll Value],
+        funclet: Option<&Funclet<'ll>>,
+    ) -> &'ll Value {
+        debug!("call {:?} with args ({:?})", llfn, args);
+
+        let args = self.check_call("call", llty, llfn, args);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
+
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        if let Some(kcfi_bundle) = kcfi_bundle {
+            bundles.push(kcfi_bundle);
+        }
+
+        let call = unsafe {
+            llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llty,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
+            )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, call);
+        }
+        call
+    }
+
+    fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
+    }
+
+    fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) {
+        if llvm_util::get_version() < (17, 0, 2) {
+            // Work around https://github.com/llvm/llvm-project/issues/66984.
+            let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+        } else {
+            // Cleanup is always the cold path.
+            let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx);
+            attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]);
+        }
+    }
+}
+
+impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
+    fn get_static(&mut self, def_id: DefId) -> &'ll Value {
+        // Forward to the `get_static` method of `CodegenCx`
+        self.cx().get_static(def_id)
+    }
+}
+
+impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
+    fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
+        // Create a fresh builder from the crate context.
+        let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
+        Builder { llbuilder, cx }
+    }
+
+    pub fn llfn(&self) -> &'ll Value {
+        unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
+    }
+
+    fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
+        }
+    }
+
+    fn align_metadata(&mut self, load: &'ll Value, align: Align) {
+        unsafe {
+            let v = [self.cx.const_u64(align.bytes())];
+
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_align as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+            );
+        }
+    }
+
+    fn noundef_metadata(&mut self, load: &'ll Value) {
+        unsafe {
+            llvm::LLVMSetMetadata(
+                load,
+                llvm::MD_noundef as c_uint,
+                llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+            );
+        }
+    }
+
+    pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
+    }
+
+    pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
+    }
+
+    pub fn insert_element(
+        &mut self,
+        vec: &'ll Value,
+        elt: &'ll Value,
+        idx: &'ll Value,
+    ) -> &'ll Value {
+        unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
+    }
+
+    pub fn shuffle_vector(
+        &mut self,
+        v1: &'ll Value,
+        v2: &'ll Value,
+        mask: &'ll Value,
+    ) -> &'ll Value {
+        unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
+    }
+
+    pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
+    }
+    pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
+    }
+    pub fn vector_reduce_fadd_reassoc(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
+            llvm::LLVMRustSetAllowReassoc(instr);
+            instr
+        }
+    }
+    pub fn vector_reduce_fmul_reassoc(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
+            llvm::LLVMRustSetAllowReassoc(instr);
+            instr
+        }
+    }
+    pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
+    }
+    pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
+    }
+    pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
+    }
+    pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
+    }
+    pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
+    }
+    pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
+        }
+    }
+    pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
+        unsafe {
+            llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
+        }
+    }
+    pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
+    }
+    pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+        unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
+    }
+
+    pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
+        unsafe {
+            llvm::LLVMAddClause(landing_pad, clause);
+        }
+    }
+
+    pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
+        let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+        ret.expect("LLVM does not have support for catchret")
+    }
+
+    fn check_call<'b>(
+        &mut self,
+        typ: &str,
+        fn_ty: &'ll Type,
+        llfn: &'ll Value,
+        args: &'b [&'ll Value],
+    ) -> Cow<'b, [&'ll Value]> {
+        assert!(
+            self.cx.type_kind(fn_ty) == TypeKind::Function,
+            "builder::{typ} not passed a function, but {fn_ty:?}"
+        );
+
+        let param_tys = self.cx.func_params_types(fn_ty);
+
+        let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
+            .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
+
+        if all_args_match {
+            return Cow::Borrowed(args);
+        }
+
+        let casted_args: Vec<_> = iter::zip(param_tys, args)
+            .enumerate()
+            .map(|(i, (expected_ty, &actual_val))| {
+                let actual_ty = self.val_ty(actual_val);
+                if expected_ty != actual_ty {
+                    debug!(
+                        "type mismatch in function call of {:?}. \
+                            Expected {:?} for param {}, got {:?}; injecting bitcast",
+                        llfn, expected_ty, i, actual_ty
+                    );
+                    self.bitcast(actual_val, expected_ty)
+                } else {
+                    actual_val
+                }
+            })
+            .collect();
+
+        Cow::Owned(casted_args)
+    }
+
+    pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+    }
+
+    pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
+        let (ty, f) = self.cx.get_intrinsic(intrinsic);
+        self.call(ty, None, None, f, args, None)
+    }
+
+    fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
+        let size = size.bytes();
+        if size == 0 {
+            return;
+        }
+
+        if !self.cx().sess().emit_lifetime_markers() {
+            return;
+        }
+
+        self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
+    }
+
+    pub(crate) fn phi(
+        &mut self,
+        ty: &'ll Type,
+        vals: &[&'ll Value],
+        bbs: &[&'ll BasicBlock],
+    ) -> &'ll Value {
+        assert_eq!(vals.len(), bbs.len());
+        let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
+        unsafe {
+            llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
+            phi
+        }
+    }
+
+    fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
+        unsafe {
+            llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+        }
+    }
+
+    fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        let src_ty = self.cx.val_ty(val);
+        let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
+            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+            (
+                self.cx.element_type(src_ty),
+                self.cx.element_type(dest_ty),
+                Some(self.cx.vector_length(src_ty)),
+            )
+        } else {
+            (src_ty, dest_ty, None)
+        };
+        let float_width = self.cx.float_width(float_ty);
+        let int_width = self.cx.int_width(int_ty);
+
+        let instr = if signed { "fptosi" } else { "fptoui" };
+        let name = if let Some(vector_length) = vector_length {
+            format!("llvm.{instr}.sat.v{vector_length}i{int_width}.v{vector_length}f{float_width}")
+        } else {
+            format!("llvm.{instr}.sat.i{int_width}.f{float_width}")
+        };
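+        // For example, a signed scalar f64 -> i32 saturating cast resolves to
+        // `llvm.fptosi.sat.i32.f64`.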
+        let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
+        self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None)
+    }
+
+    pub(crate) fn landing_pad(
+        &mut self,
+        ty: &'ll Type,
+        pers_fn: &'ll Value,
+        num_clauses: usize,
+    ) -> &'ll Value {
+        // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary
+        // Consts, while LLVMBuildLandingPad requires the argument to be a Function
+        // (as of LLVM 12). The personality lives on the parent function anyway.
+        self.set_personality_fn(pers_fn);
+        unsafe {
+            llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
+        }
+    }
+
+    pub(crate) fn callbr(
+        &mut self,
+        llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+        args: &[&'ll Value],
+        default_dest: &'ll BasicBlock,
+        indirect_dest: &[&'ll BasicBlock],
+        funclet: Option<&Funclet<'ll>>,
+    ) -> &'ll Value {
+        debug!("invoke {:?} with args ({:?})", llfn, args);
+
+        let args = self.check_call("callbr", llty, llfn, args);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
+        if let Some(funclet_bundle) = funclet_bundle {
+            bundles.push(funclet_bundle);
+        }
+
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        if let Some(kcfi_bundle) = kcfi_bundle {
+            bundles.push(kcfi_bundle);
+        }
+
+        let callbr = unsafe {
+            llvm::LLVMRustBuildCallBr(
+                self.llbuilder,
+                llty,
+                llfn,
+                default_dest,
+                indirect_dest.as_ptr(),
+                indirect_dest.len() as c_uint,
+                args.as_ptr(),
+                args.len() as c_uint,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
+                UNNAMED,
+            )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, callbr);
+        }
+        callbr
+    }
+
+    // Emits CFI pointer type membership tests.
+    fn cfi_type_test(
+        &mut self,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+    ) {
+        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+        if self.tcx.sess.is_sanitizer_cfi_enabled()
+            && let Some(fn_abi) = fn_abi
+            && is_indirect_call
+        {
+            if let Some(fn_attrs) = fn_attrs
+                && fn_attrs.no_sanitize.contains(SanitizerSet::CFI)
+            {
+                return;
+            }
+
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            let typeid = typeid_for_fnabi(self.tcx, fn_abi, options);
+            let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
+
+            // Test whether the function pointer is associated with the type identifier.
+            let cond = self.type_test(llfn, typeid_metadata);
+            let bb_pass = self.append_sibling_block("type_test.pass");
+            let bb_fail = self.append_sibling_block("type_test.fail");
+            self.cond_br(cond, bb_pass, bb_fail);
+
+            self.switch_to_block(bb_fail);
+            self.abort();
+            self.unreachable();
+
+            self.switch_to_block(bb_pass);
+        }
+    }
+
+    // Emits KCFI operand bundles.
+    fn kcfi_operand_bundle(
+        &mut self,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+    ) -> Option<llvm::OperandBundleDef<'ll>> {
+        let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
+        let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
+            && let Some(fn_abi) = fn_abi
+            && is_indirect_call
+        {
+            if let Some(fn_attrs) = fn_attrs
+                && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI)
+            {
+                return None;
+            }
+
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
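+            // The resulting call site carries an operand bundle of the form
+            // `[ "kcfi"(i32 <type-hash>) ]`, which backends lower to a check of the
+            // type hash emitted just before the callee's entry (per the KCFI scheme).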
+            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+        } else {
+            None
+        };
+        kcfi_bundle
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
new file mode 100644
index 00000000000..e675362ac33
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -0,0 +1,190 @@
+//! Handles codegen of callees as well as other call-related
+//! things. Callees are a superset of normal rust values and sometimes
+//! have different representations. In particular, top-level fn items
+//! and methods are represented as just a fn ptr and not a full
+//! closure.
+
+use crate::attributes;
+use crate::common;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
+    let tcx = cx.tcx();
+
+    debug!("get_fn(instance={:?})", instance);
+
+    assert!(!instance.args.has_infer());
+    assert!(!instance.args.has_escaping_bound_vars());
+
+    if let Some(&llfn) = cx.instances.borrow().get(&instance) {
+        return llfn;
+    }
+
+    let sym = tcx.symbol_name(instance).name;
+    debug!(
+        "get_fn({:?}: {:?}) => {}",
+        instance,
+        instance.ty(cx.tcx(), ty::ParamEnv::reveal_all()),
+        sym
+    );
+
+    let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+    let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
+        llfn
+    } else {
+        let instance_def_id = instance.def_id();
+        let llfn = if tcx.sess.target.arch == "x86"
+            && let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym)
+        {
+            // Fix for https://github.com/rust-lang/rust/issues/104453
+            // On x86 Windows, LLVM uses 'L' as the prefix for any private
+            // global symbols, so when we create an undecorated function symbol
+            // that begins with an 'L', LLVM misinterprets that as a private
+            // global symbol that it created and so fails the compilation at a
+            // later stage since such a symbol must have a definition.
+            //
+            // To avoid this, we set the Storage Class to "DllImport" so that
+            // LLVM will prefix the name with `__imp_`. Ideally, we'd like the
+            // existing logic below to set the Storage Class, but it has an
+            // exemption for MinGW for backwards compatibility.
+            let llfn = cx.declare_fn(
+                &common::i686_decorated_name(
+                    dllimport,
+                    common::is_mingw_gnu_toolchain(&tcx.sess.target),
+                    true,
+                ),
+                fn_abi,
+                Some(instance),
+            );
+            unsafe {
+                llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+            }
+            llfn
+        } else {
+            cx.declare_fn(sym, fn_abi, Some(instance))
+        };
+        debug!("get_fn: not casting pointer!");
+
+        attributes::from_fn_attrs(cx, llfn, instance);
+
+        // Apply an appropriate linkage/visibility value to our item that we
+        // just declared.
+        //
+        // This is sort of subtle. Inside our codegen unit we started off
+        // compilation by predefining all our own `MonoItem` instances. That
+        // is, everything we're codegenning ourselves is already defined. That
+        // means that anything we're actually codegenning in this codegen unit
+        // will have hit the above branch in `get_declared_value`. As a result,
+        // we're guaranteed here that we're declaring a symbol that won't get
+        // defined, or in other words we're referencing a value from another
+        // codegen unit or even another crate.
+        //
+        // So because this is a foreign value we blanket apply an external
+        // linkage directive because it's coming from a different object file.
+        // The visibility here is where it gets tricky. This symbol could be
+        // referencing some foreign crate or foreign library (an `extern`
+        // block) in which case we want to leave the default visibility. We may
+        // also, though, have multiple codegen units. It could be a
+        // monomorphization, in which case its expected visibility depends on
+        // whether we are sharing generics or not. The important thing here is
+        // that the visibility we apply to the declaration is the same one that
+        // has been applied to the definition (wherever that definition may be).
+        unsafe {
+            llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
+
+            let is_generic =
+                instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
+
+            if is_generic {
+                // This is a monomorphization. Its expected visibility depends
+                // on whether we are in share-generics mode.
+
+                if cx.tcx.sess.opts.share_generics() {
+                    // We are in share_generics mode.
+
+                    if let Some(instance_def_id) = instance_def_id.as_local() {
+                        // This is a definition from the current crate. If the
+                        // definition is unreachable for downstream crates or
+                        // the current crate does not re-export generics, the
+                        // definition of the instance will have been declared
+                        // as `hidden`.
+                        if cx.tcx.is_unreachable_local_definition(instance_def_id)
+                            || !cx.tcx.local_crate_exports_generics()
+                        {
+                            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                        }
+                    } else {
+                        // This is a monomorphization of a generic function
+                        // defined in an upstream crate.
+                        if instance.upstream_monomorphization(tcx).is_some() {
+                            // This is instantiated in another crate. It cannot
+                            // be `hidden`.
+                        } else {
+                            // This is a local instantiation of an upstream definition.
+                            // If the current crate does not re-export it
+                            // (because it is a C library or an executable), it
+                            // will have been declared `hidden`.
+                            if !cx.tcx.local_crate_exports_generics() {
+                                llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                            }
+                        }
+                    }
+                } else {
+                    // When not sharing generics, all instances are in the same
+                    // crate and have hidden visibility
+                    llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                }
+            } else {
+                // This is a non-generic function
+                if cx.tcx.is_codegened_item(instance_def_id) {
+                    // This is a function that is instantiated in the local crate
+
+                    if instance_def_id.is_local() {
+                        // This is a function that is defined in the local crate.
+                        // If it is not reachable, it is hidden.
+                        if !cx.tcx.is_reachable_non_generic(instance_def_id) {
+                            llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                        }
+                    } else {
+                        // This is a function from an upstream crate that has
+                        // been instantiated here. These are always hidden.
+                        llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+                    }
+                }
+            }
+
+            // MinGW: For backward compatibility we rely on the linker to decide whether it
+            // should use dllimport for functions.
+            if cx.use_dll_storage_attrs
+                && let Some(library) = tcx.native_library(instance_def_id)
+                && library.kind.is_dllimport()
+                && !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc")
+            {
+                llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+            }
+
+            if cx.should_assume_dso_local(llfn, true) {
+                llvm::LLVMRustSetDSOLocal(llfn, true);
+            }
+        }
+
+        llfn
+    };
+
+    cx.instances.borrow_mut().insert(instance, llfn);
+
+    llfn
+}
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
new file mode 100644
index 00000000000..25cbd90460f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -0,0 +1,424 @@
+//! Code that is useful in various codegen modules.
+
+use crate::consts::const_alloc_to_llvm;
+pub use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
+use crate::type_::Type;
+use crate::value::Value;
+
+use rustc_ast::Mutability;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
+use rustc_hir::def_id::DefId;
+use rustc_middle::bug;
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
+use rustc_target::spec::Target;
+
+use libc::{c_char, c_uint};
+use std::fmt::Write;
+
+/*
+* A note on nomenclature of linking: "extern", "foreign", and "upcall".
+*
+* An "extern" is an LLVM symbol we wind up emitting an undefined external
+* reference to. This means "we don't have the thing in this compilation unit,
+* please make sure you link it in at runtime". This could be a reference to
+* C code found in a C library, or rust code found in a rust crate.
+*
+* Most "externs" are implicitly declared (automatically) as a result of a
+* user declaring an extern _module_ dependency; this causes the rust driver
+* to locate an extern crate, scan its compilation metadata, and emit extern
+* declarations for any symbols used by the declaring crate.
+*
+* A "foreign" is an extern that references C (or other non-rust ABI) code.
+* There is no metadata to scan for extern references so in these cases either
+* a header-digester like bindgen, or manual function prototypes, have to
+* serve as declarators. So these are usually given explicitly as prototype
+* declarations, in rust code, with ABI attributes on them noting which ABI to
+* link via.
+*
+* An "upcall" is a foreign call generated by the compiler (not corresponding
+* to any user-written call in the code) into the runtime library, to perform
+* some helper task such as bringing a task to life, allocating memory, etc.
+*
+*/
+
+/// A structure representing an active landing pad for the duration of a basic
+/// block.
+///
+/// Each `Block` may contain an instance of this, indicating whether the block
+/// is part of a landing pad or not. This is used to make decisions about whether
+/// to emit `invoke` instructions (e.g., in a landing pad we don't continue to
+/// use `invoke`) and also about various function call metadata.
+///
+/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
+/// just a bunch of `None` instances (not too interesting), but for MSVC
+/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
+/// When inside of a landing pad, each function call in LLVM IR needs to be
+/// annotated with which landing pad it's a part of. This is accomplished via
+/// the `OperandBundleDef` value created for MSVC landing pads.
+pub struct Funclet<'ll> {
+    cleanuppad: &'ll Value,
+    operand: OperandBundleDef<'ll>,
+}
+
+impl<'ll> Funclet<'ll> {
+    pub fn new(cleanuppad: &'ll Value) -> Self {
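+        // Calls made inside this funclet end up annotated with an operand bundle of
+        // the form `[ "funclet"(token %cleanuppad) ]` in the emitted IR.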
+        Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
+    }
+
+    pub fn cleanuppad(&self) -> &'ll Value {
+        self.cleanuppad
+    }
+
+    pub fn bundle(&self) -> &OperandBundleDef<'ll> {
+        &self.operand
+    }
+}
+
+impl<'ll> BackendTypes for CodegenCx<'ll, '_> {
+    type Value = &'ll Value;
+    // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
+    type Function = &'ll Value;
+
+    type BasicBlock = &'ll BasicBlock;
+    type Type = &'ll Type;
+    type Funclet = Funclet<'ll>;
+
+    type DIScope = &'ll llvm::debuginfo::DIScope;
+    type DILocation = &'ll llvm::debuginfo::DILocation;
+    type DIVariable = &'ll llvm::debuginfo::DIVariable;
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+    pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
+        let len = u64::try_from(elts.len()).expect("LLVMConstArray2 elements len overflow");
+        unsafe { llvm::LLVMConstArray2(ty, elts.as_ptr(), len) }
+    }
+
+    pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+        let len = c_uint::try_from(elts.len()).expect("LLVMConstVector elements len overflow");
+        unsafe { llvm::LLVMConstVector(elts.as_ptr(), len) }
+    }
+
+    pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+        bytes_in_context(self.llcx, bytes)
+    }
+
+    pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
+        unsafe {
+            let idx = c_uint::try_from(idx).expect("LLVMGetAggregateElement index overflow");
+            let r = llvm::LLVMGetAggregateElement(v, idx).unwrap();
+
+            debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);
+
+            r
+        }
+    }
+}
+
+impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn const_null(&self, t: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMConstNull(t) }
+    }
+
+    fn const_undef(&self, t: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMGetUndef(t) }
+    }
+
+    fn const_poison(&self, t: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMGetPoison(t) }
+    }
+
+    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+        unsafe { llvm::LLVMConstInt(t, i as u64, True) }
+    }
+
+    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+        unsafe { llvm::LLVMConstInt(t, i, False) }
+    }
+
+    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+        unsafe {
+            let words = [u as u64, (u >> 64) as u64];
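+            // `words` is the value split into little-endian 64-bit limbs; e.g.
+            // (1u128 << 64) + 2 becomes [2, 1].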
+            llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+        }
+    }
+
+    fn const_bool(&self, val: bool) -> &'ll Value {
+        self.const_uint(self.type_i1(), val as u64)
+    }
+
+    fn const_i16(&self, i: i16) -> &'ll Value {
+        self.const_int(self.type_i16(), i as i64)
+    }
+
+    fn const_i32(&self, i: i32) -> &'ll Value {
+        self.const_int(self.type_i32(), i as i64)
+    }
+
+    fn const_u32(&self, i: u32) -> &'ll Value {
+        self.const_uint(self.type_i32(), i as u64)
+    }
+
+    fn const_u64(&self, i: u64) -> &'ll Value {
+        self.const_uint(self.type_i64(), i)
+    }
+
+    fn const_u128(&self, i: u128) -> &'ll Value {
+        self.const_uint_big(self.type_i128(), i)
+    }
+
+    fn const_usize(&self, i: u64) -> &'ll Value {
+        let bit_size = self.data_layout().pointer_size.bits();
+        if bit_size < 64 {
+            // make sure it doesn't overflow
+            assert!(i < (1 << bit_size));
+        }
+
+        self.const_uint(self.isize_ty, i)
+    }
+
+    fn const_u8(&self, i: u8) -> &'ll Value {
+        self.const_uint(self.type_i8(), i as u64)
+    }
+
+    fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
+        unsafe { llvm::LLVMConstReal(t, val) }
+    }
+
+    fn const_str(&self, s: &str) -> (&'ll Value, &'ll Value) {
+        let str_global = *self
+            .const_str_cache
+            .borrow_mut()
+            .raw_entry_mut()
+            .from_key(s)
+            .or_insert_with(|| {
+                let sc = self.const_bytes(s.as_bytes());
+                let sym = self.generate_local_symbol_name("str");
+                let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| {
+                    bug!("symbol `{}` is already defined", sym);
+                });
+                unsafe {
+                    llvm::LLVMSetInitializer(g, sc);
+                    llvm::LLVMSetGlobalConstant(g, True);
+                    llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global);
+                    llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
+                }
+                (s.to_owned(), g)
+            })
+            .1;
+        let len = s.len();
+        (str_global, self.const_usize(len as u64))
+    }
+
+    fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
+        struct_in_context(self.llcx, elts, packed)
+    }
+
+    fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
+        try_as_const_integral(v).and_then(|v| unsafe {
+            let mut i = 0u64;
+            let success = llvm::LLVMRustConstIntGetZExtValue(v, &mut i);
+            success.then_some(i)
+        })
+    }
+
+    fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
+        try_as_const_integral(v).and_then(|v| unsafe {
+            let (mut lo, mut hi) = (0u64, 0u64);
+            let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
+            success.then_some(hi_lo_to_u128(lo, hi))
+        })
+    }
+
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
+        let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
+        match cv {
+            Scalar::Int(int) => {
+                let data = int.assert_bits(layout.size(self));
+                let llval = self.const_uint_big(self.type_ix(bitsize), data);
+                if matches!(layout.primitive(), Pointer(_)) {
+                    unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+                } else {
+                    self.const_bitcast(llval, llty)
+                }
+            }
+            Scalar::Ptr(ptr, _size) => {
+                let (prov, offset) = ptr.into_parts();
+                let (base_addr, base_addr_space) = match self.tcx.global_alloc(prov.alloc_id()) {
+                    GlobalAlloc::Memory(alloc) => {
+                        let init = const_alloc_to_llvm(self, alloc);
+                        let alloc = alloc.inner();
+                        let value = match alloc.mutability {
+                            Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                            _ => self.static_addr_of(init, alloc.align, None),
+                        };
+                        if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() {
+                            let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+                                let mut hasher = StableHasher::new();
+                                alloc.hash_stable(&mut hcx, &mut hasher);
+                                hasher.finish::<Hash128>()
+                            });
+                            llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes());
+                        }
+                        (value, AddressSpace::DATA)
+                    }
+                    GlobalAlloc::Function(fn_instance) => (
+                        self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
+                        self.data_layout().instruction_address_space,
+                    ),
+                    GlobalAlloc::VTable(ty, trait_ref) => {
+                        let alloc = self
+                            .tcx
+                            .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
+                            .unwrap_memory();
+                        let init = const_alloc_to_llvm(self, alloc);
+                        let value = self.static_addr_of(init, alloc.inner().align, None);
+                        (value, AddressSpace::DATA)
+                    }
+                    GlobalAlloc::Static(def_id) => {
+                        assert!(self.tcx.is_static(def_id));
+                        assert!(!self.tcx.is_thread_local_static(def_id));
+                        (self.get_static(def_id), AddressSpace::DATA)
+                    }
+                };
+                let llval = unsafe {
+                    llvm::LLVMConstInBoundsGEP2(
+                        self.type_i8(),
+                        self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)),
+                        &self.const_usize(offset.bytes()),
+                        1,
+                    )
+                };
+                if !matches!(layout.primitive(), Pointer(_)) {
+                    unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
+                } else {
+                    self.const_bitcast(llval, llty)
+                }
+            }
+        }
+    }
+
+    fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
+        const_alloc_to_llvm(self, alloc)
+    }
+
+    fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        self.const_bitcast(val, ty)
+    }
+
+    fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
+        unsafe {
+            llvm::LLVMConstInBoundsGEP2(
+                self.type_i8(),
+                base_addr,
+                &self.const_usize(offset.bytes()),
+                1,
+            )
+        }
+    }
+}
+
+/// Get the [LLVM type][Type] of a [`Value`].
+pub fn val_ty(v: &Value) -> &Type {
+    unsafe { llvm::LLVMTypeOf(v) }
+}
+
+pub fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+    unsafe {
+        let ptr = bytes.as_ptr() as *const c_char;
+        llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), True)
+    }
+}
+
+pub fn struct_in_context<'ll>(
+    llcx: &'ll llvm::Context,
+    elts: &[&'ll Value],
+    packed: bool,
+) -> &'ll Value {
+    let len = c_uint::try_from(elts.len()).expect("LLVMConstStructInContext elements len overflow");
+    unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), len, packed as Bool) }
+}
+
+#[inline]
+fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
+    ((hi as u128) << 64) | (lo as u128)
+}
+
+fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
+    unsafe { llvm::LLVMIsAConstantInt(v) }
+}
+
+pub(crate) fn get_dllimport<'tcx>(
+    tcx: TyCtxt<'tcx>,
+    id: DefId,
+    name: &str,
+) -> Option<&'tcx DllImport> {
+    tcx.native_library(id)
+        .and_then(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name))
+}
+
+pub(crate) fn is_mingw_gnu_toolchain(target: &Target) -> bool {
+    target.vendor == "pc" && target.os == "windows" && target.env == "gnu" && target.abi.is_empty()
+}
+
+pub(crate) fn i686_decorated_name(
+    dll_import: &DllImport,
+    mingw: bool,
+    disable_name_mangling: bool,
+) -> String {
+    let name = dll_import.name.as_str();
+
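+    // Examples of the decoration applied below (MSVC-style, i.e. `mingw == false`):
+    // a stdcall import `foo` taking 4 bytes of arguments becomes `_foo@4`, a
+    // fastcall one `@foo@4`, a vectorcall one `foo@@4`; static variables get a
+    // plain `_` prefix.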
+    let (add_prefix, add_suffix) = match dll_import.import_name_type {
+        Some(PeImportNameType::NoPrefix) => (false, true),
+        Some(PeImportNameType::Undecorated) => (false, false),
+        _ => (true, true),
+    };
+
+    // Worst case: +1 for disable name mangling, +1 for prefix, +4 for suffix (@@__).
+    let mut decorated_name = String::with_capacity(name.len() + 6);
+
+    if disable_name_mangling {
+        // LLVM uses a binary 1 ('\x01') prefix to a name to indicate that mangling needs to be disabled.
+        decorated_name.push('\x01');
+    }
+
+    let prefix = if add_prefix && dll_import.is_fn {
+        match dll_import.calling_convention {
+            DllCallingConvention::C | DllCallingConvention::Vectorcall(_) => None,
+            DllCallingConvention::Stdcall(_) => (!mingw
+                || dll_import.import_name_type == Some(PeImportNameType::Decorated))
+            .then_some('_'),
+            DllCallingConvention::Fastcall(_) => Some('@'),
+        }
+    } else if !dll_import.is_fn && !mingw {
+        // For static variables, prefix with '_' on MSVC.
+        Some('_')
+    } else {
+        None
+    };
+    if let Some(prefix) = prefix {
+        decorated_name.push(prefix);
+    }
+
+    decorated_name.push_str(name);
+
+    if add_suffix && dll_import.is_fn {
+        match dll_import.calling_convention {
+            DllCallingConvention::C => {}
+            DllCallingConvention::Stdcall(arg_list_size)
+            | DllCallingConvention::Fastcall(arg_list_size) => {
+                write!(&mut decorated_name, "@{arg_list_size}").unwrap();
+            }
+            DllCallingConvention::Vectorcall(arg_list_size) => {
+                write!(&mut decorated_name, "@@{arg_list_size}").unwrap();
+            }
+        }
+    }
+
+    decorated_name
+}
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
new file mode 100644
index 00000000000..4afa230e598
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -0,0 +1,579 @@
+use crate::base;
+use crate::common::{self, CodegenCx};
+use crate::debuginfo;
+use crate::errors::{
+    InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined,
+};
+use crate::llvm::{self, True};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::interpret::{
+    read_target_uint, Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer,
+    Scalar as InterpScalar,
+};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Instance};
+use rustc_middle::{bug, span_bug};
+use rustc_session::config::Lto;
+use rustc_target::abi::{
+    Align, AlignFromBytesError, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
+use std::ops::Range;
+
+pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
+    let alloc = alloc.inner();
+    let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
+    let dl = cx.data_layout();
+    let pointer_size = dl.pointer_size.bytes() as usize;
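+    // Rough shape of the output: a packed LLVM constant struct that interleaves runs
+    // of plain bytes with relocated pointers, e.g. roughly `<{ [3 x i8], ptr, [5 x i8] }>`
+    // for an allocation containing a single pointer at offset 3.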
+
+    // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range`
+    // must be within the bounds of `alloc` and not contain or overlap a pointer provenance.
+    fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
+        llvals: &mut Vec<&'ll Value>,
+        cx: &'a CodegenCx<'ll, 'b>,
+        alloc: &'a Allocation,
+        range: Range<usize>,
+    ) {
+        let chunks = alloc.init_mask().range_as_init_chunks(range.clone().into());
+
+        let chunk_to_llval = move |chunk| match chunk {
+            InitChunk::Init(range) => {
+                let range = (range.start.bytes() as usize)..(range.end.bytes() as usize);
+                let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+                cx.const_bytes(bytes)
+            }
+            InitChunk::Uninit(range) => {
+                let len = range.end.bytes() - range.start.bytes();
+                cx.const_undef(cx.type_array(cx.type_i8(), len))
+            }
+        };
+
+        // Generating partially-uninit consts is limited to small numbers of chunks,
+        // to avoid the cost of generating large complex const expressions.
+        // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
+        // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
+        let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold;
+        let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
+
+        if allow_uninit_chunks {
+            llvals.extend(chunks.map(chunk_to_llval));
+        } else {
+            // If this allocation contains any uninit bytes, codegen as if it was initialized
+            // (using some arbitrary value for uninit bytes).
+            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+            llvals.push(cx.const_bytes(bytes));
+        }
+    }
+
+    let mut next_offset = 0;
+    for &(offset, prov) in alloc.provenance().ptrs().iter() {
+        let offset = offset.bytes();
+        assert_eq!(offset as usize as u64, offset);
+        let offset = offset as usize;
+        if offset > next_offset {
+            // This `inspect` is okay since we have checked that there is no provenance, it
+            // is within the bounds of the allocation, and it doesn't affect interpreter execution
+            // (we inspect the result after interpreter execution).
+            append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
+        }
+        let ptr_offset = read_target_uint(
+            dl.endian,
+            // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+            // affect interpreter execution (we inspect the result after interpreter execution),
+            // and we properly interpret the provenance as a relocation pointer offset.
+            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+        )
+        .expect("const_alloc_to_llvm: could not read relocation pointer")
+            as u64;
+
+        let address_space = cx.tcx.global_alloc(prov.alloc_id()).address_space(cx);
+
+        llvals.push(cx.scalar_to_backend(
+            InterpScalar::from_pointer(Pointer::new(prov, Size::from_bytes(ptr_offset)), &cx.tcx),
+            Scalar::Initialized {
+                value: Primitive::Pointer(address_space),
+                valid_range: WrappingRange::full(dl.pointer_size),
+            },
+            cx.type_ptr_ext(address_space),
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.len() >= next_offset {
+        let range = next_offset..alloc.len();
+        // This `inspect` is okay since we have checked that it is after all provenance, it is
+        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+        // inspect the result after interpreter execution).
+        append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
+    }
+
+    cx.const_struct(&llvals, true)
+}
+
+fn codegen_static_initializer<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    def_id: DefId,
+) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
+    let alloc = cx.tcx.eval_static_initializer(def_id)?;
+    Ok((const_alloc_to_llvm(cx, alloc), alloc))
+}
+
+fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
+    // The target may require greater alignment for globals than the type does.
+    // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
+    // which can force it to be smaller. Rust doesn't support this yet.
+    if let Some(min) = cx.sess().target.min_global_align {
+        match Align::from_bits(min) {
+            Ok(min) => align = align.max(min),
+            Err(err) => match err {
+                AlignFromBytesError::NotPowerOfTwo(align) => {
+                    cx.sess().dcx().emit_err(InvalidMinimumAlignmentNotPowerOfTwo { align });
+                }
+                AlignFromBytesError::TooLarge(align) => {
+                    cx.sess().dcx().emit_err(InvalidMinimumAlignmentTooLarge { align });
+                }
+            },
+        }
+    }
+    unsafe {
+        llvm::LLVMSetAlignment(gv, align.bytes() as u32);
+    }
+}
+
+fn check_and_apply_linkage<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    attrs: &CodegenFnAttrs,
+    llty: &'ll Type,
+    sym: &str,
+    def_id: DefId,
+) -> &'ll Value {
+    if let Some(linkage) = attrs.import_linkage {
+        debug!("get_static: sym={} linkage={:?}", sym, linkage);
+
+        unsafe {
+            // Declare a symbol `foo` with the desired linkage.
+            let g1 = cx.declare_global(sym, cx.type_i8());
+            llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
+
+            // Declare an internal global `extern_with_linkage_foo` which
+            // is initialized with the address of `foo`. If `foo` is
+            // discarded during linking (for example, if `foo` has weak
+            // linkage and there are no definitions), then
+            // `extern_with_linkage_foo` will instead be initialized to
+            // zero.
+            let mut real_name = "_rust_extern_with_linkage_".to_string();
+            real_name.push_str(sym);
+            let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
+                cx.sess().dcx().emit_fatal(SymbolAlreadyDefined {
+                    span: cx.tcx.def_span(def_id),
+                    symbol_name: sym,
+                })
+            });
+            llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
+            llvm::LLVMSetInitializer(g2, g1);
+            g2
+        }
+    } else if cx.tcx.sess.target.arch == "x86"
+        && let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
+    {
+        cx.declare_global(
+            &common::i686_decorated_name(
+                dllimport,
+                common::is_mingw_gnu_toolchain(&cx.tcx.sess.target),
+                true,
+            ),
+            llty,
+        )
+    } else {
+        // Generate an external declaration.
+        // FIXME(nagisa): investigate whether it can be changed into define_global
+        cx.declare_global(sym, llty)
+    }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+    pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMConstBitCast(val, ty) }
+    }
+
+    pub(crate) fn static_addr_of_mut(
+        &self,
+        cv: &'ll Value,
+        align: Align,
+        kind: Option<&str>,
+    ) -> &'ll Value {
+        unsafe {
+            let gv = match kind {
+                Some(kind) if !self.tcx.sess.fewer_names() => {
+                    let name = self.generate_local_symbol_name(kind);
+                    let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
+                        bug!("symbol `{}` is already defined", name);
+                    });
+                    llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
+                    gv
+                }
+                _ => self.define_private_global(self.val_ty(cv)),
+            };
+            llvm::LLVMSetInitializer(gv, cv);
+            set_global_alignment(self, gv, align);
+            llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
+            gv
+        }
+    }
+
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn get_static(&self, def_id: DefId) -> &'ll Value {
+        let instance = Instance::mono(self.tcx, def_id);
+        trace!(?instance);
+
+        let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
+        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
+        // the llvm type from the actual evaluated initializer.
+        let llty = if nested {
+            self.type_i8()
+        } else {
+            let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+            trace!(?ty);
+            self.layout_of(ty).llvm_type(self)
+        };
+        self.get_static_inner(def_id, llty)
+    }
+
+    #[instrument(level = "debug", skip(self, llty))]
+    pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value {
+        if let Some(&g) = self.instances.borrow().get(&Instance::mono(self.tcx, def_id)) {
+            trace!("used cached value");
+            return g;
+        }
+
+        let defined_in_current_codegen_unit =
+            self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+        assert!(
+            !defined_in_current_codegen_unit,
+            "consts::get_static() should always hit the cache for \
+                 statics defined in the same CGU, but did not for `{def_id:?}`"
+        );
+
+        let sym = self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name;
+        let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+
+        debug!(?sym, ?fn_attrs);
+
+        let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
+            if let Some(g) = self.get_declared_value(sym) {
+                if self.val_ty(g) != self.type_ptr() {
+                    span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
+                }
+            }
+
+            let g = self.declare_global(sym, llty);
+
+            if !self.tcx.is_reachable_non_generic(def_id) {
+                unsafe {
+                    llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
+                }
+            }
+
+            g
+        } else {
+            check_and_apply_linkage(self, fn_attrs, llty, sym, def_id)
+        };
+
+        // Thread-local statics in some other crate need to *always* be linked
+        // against in a thread-local fashion, so we need to be sure to apply the
+        // thread-local attribute locally if it was present remotely. If we
+        // don't do this, then linker errors can be generated where the linker
+        // complains that one object file has a thread-local version of the
+        // symbol and another one doesn't.
+        if fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+            llvm::set_thread_local_mode(g, self.tls_model);
+        }
+
+        let dso_local = unsafe { self.should_assume_dso_local(g, true) };
+        if dso_local {
+            unsafe {
+                llvm::LLVMRustSetDSOLocal(g, true);
+            }
+        }
+
+        if !def_id.is_local() {
+            let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
+                // Local definitions can never be imported, so we must not apply
+                // the DLLImport annotation.
+                !dso_local &&
+                // ThinLTO can't handle this workaround in all cases, so we don't
+                // emit the attrs. Instead we make them unnecessary by disallowing
+                // dynamic linking when linker plugin based LTO is enabled.
+                !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
+                self.tcx.sess.lto() != Lto::Thin;
+
+            // If this assertion triggers, there's something wrong with commandline
+            // argument validation.
+            debug_assert!(
+                !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+                    && self.tcx.sess.target.is_like_windows
+                    && self.tcx.sess.opts.cg.prefer_dynamic)
+            );
+
+            if needs_dll_storage_attr {
+                // This item is external but not foreign, i.e., it originates from an external Rust
+                // crate. Since we don't know whether this crate will be linked dynamically or
+                // statically in the final application, we always mark such symbols as 'dllimport'.
+                // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+                // to make things work.
+                //
+                // However, in some scenarios we defer emission of statics to downstream
+                // crates, so there are cases where a static with an upstream DefId
+                // is actually present in the current crate. We can find out via the
+                // is_codegened_item query.
+                if !self.tcx.is_codegened_item(def_id) {
+                    unsafe {
+                        llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+                    }
+                }
+            }
+        }
+
+        if self.use_dll_storage_attrs
+            && let Some(library) = self.tcx.native_library(def_id)
+            && library.kind.is_dllimport()
+        {
+            // For foreign (native) libs we know the exact storage type to use.
+            unsafe {
+                llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+            }
+        }
+
+        self.instances.borrow_mut().insert(Instance::mono(self.tcx, def_id), g);
+        g
+    }
+
+    fn codegen_static_item(&self, def_id: DefId) {
+        unsafe {
+            assert!(
+                llvm::LLVMGetInitializer(
+                    self.instances.borrow().get(&Instance::mono(self.tcx, def_id)).unwrap()
+                )
+                .is_none()
+            );
+            let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+            let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else {
+                // Error has already been reported
+                return;
+            };
+            let alloc = alloc.inner();
+
+            let val_llty = self.val_ty(v);
+
+            let g = self.get_static_inner(def_id, val_llty);
+            let llty = self.val_ty(g);
+
+            let g = if val_llty == llty {
+                g
+            } else {
+                // If we created the global with the wrong type,
+                // correct the type.
+                let name = llvm::get_value_name(g).to_vec();
+                llvm::set_value_name(g, b"");
+
+                let linkage = llvm::LLVMRustGetLinkage(g);
+                let visibility = llvm::LLVMRustGetVisibility(g);
+
+                let new_g = llvm::LLVMRustGetOrInsertGlobal(
+                    self.llmod,
+                    name.as_ptr().cast(),
+                    name.len(),
+                    val_llty,
+                );
+
+                llvm::LLVMRustSetLinkage(new_g, linkage);
+                llvm::LLVMRustSetVisibility(new_g, visibility);
+
+                // The old global has had its name removed but is returned by
+                // get_static since it is in the instance cache. Provide an
+                // alternative lookup that points to the new global so that
+                // global_asm! can compute the correct mangled symbol name
+                // for the global.
+                self.renamed_statics.borrow_mut().insert(def_id, new_g);
+
+                // To avoid breaking any invariants, we leave around the old
+                // global for the moment; we'll replace all references to it
+                // with the new global later. (See base::codegen_backend.)
+                self.statics_to_rauw.borrow_mut().push((g, new_g));
+                new_g
+            };
+            set_global_alignment(self, g, alloc.align);
+            llvm::LLVMSetInitializer(g, v);
+
+            if self.should_assume_dso_local(g, true) {
+                llvm::LLVMRustSetDSOLocal(g, true);
+            }
+
+            // Forward the allocation's mutability (picked by the const interner) to LLVM.
+            if alloc.mutability.is_not() {
+                llvm::LLVMSetGlobalConstant(g, llvm::True);
+            }
+
+            debuginfo::build_global_var_di_node(self, def_id, g);
+
+            if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+                llvm::set_thread_local_mode(g, self.tls_model);
+
+                // Do not allow LLVM to change the alignment of a TLS on macOS.
+                //
+                // By default a global's alignment can be freely increased.
+                // This allows LLVM to generate more performant instructions
+                // e.g., using load-aligned into a SIMD register.
+                //
+                // However, on macOS 10.10 or below, the dynamic linker does not
+                // respect any alignment given on the TLS (radar 24221680).
+                // This violates the alignment assumption and causes segfaults at runtime.
+                //
+                // This bug is very easy to trigger. In `println!` and `panic!`,
+                // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+                // whose values are `mem::replace`d on initialization.
+                // The implementation of `mem::replace` will use SIMD
+                // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+                // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+                // which macOS's dyld disregards, causing crashes
+                // (see issues #51794, #51758, #50867, #48866 and #44056).
+                //
+                // To work around the bug, we trick LLVM into not increasing
+                // the global's alignment by explicitly assigning a section to it
+                // (equivalent to automatically generating a `#[link_section]` attribute).
+                // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+                // of `lib/IR/Globals.cpp` for why this works.
+                //
+                // When the alignment is not increased, the optimized `mem::replace`
+                // will use load-unaligned instructions instead, and thus avoiding the crash.
+                //
+                // We could remove this hack whenever we decide to drop macOS 10.10 support.
+                if self.tcx.sess.target.is_like_osx {
+                    // The `inspect` method is okay here because we checked for provenance, and
+                    // because we are doing this access to inspect the final interpreter state
+                    // (not as part of the interpreter execution).
+                    //
+                    // FIXME: This check requires that the (arbitrary) value of undefined bytes
+                    // happens to be zero. Instead, we should only check the value of defined bytes
+                    // and set all undefined bytes to zero if this allocation is headed for the
+                    // BSS.
+                    let all_bytes_are_zero = alloc.provenance().ptrs().is_empty()
+                        && alloc
+                            .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+                            .iter()
+                            .all(|&byte| byte == 0);
+
+                    let sect_name = if all_bytes_are_zero {
+                        c"__DATA,__thread_bss"
+                    } else {
+                        c"__DATA,__thread_data"
+                    };
+                    llvm::LLVMSetSection(g, sect_name.as_ptr());
+                }
+            }
+
+            // Wasm statics with custom link sections get special treatment as they
+            // go into custom sections of the wasm executable.
+            if self.tcx.sess.target.is_like_wasm {
+                if let Some(section) = attrs.link_section {
+                    let section = llvm::LLVMMDStringInContext2(
+                        self.llcx,
+                        section.as_str().as_ptr().cast(),
+                        section.as_str().len(),
+                    );
+                    assert!(alloc.provenance().ptrs().is_empty());
+
+                    // The `inspect` method is okay here because we checked for provenance, and
+                    // because we are doing this access to inspect the final interpreter state (not
+                    // as part of the interpreter execution).
+                    let bytes =
+                        alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
+                    let alloc =
+                        llvm::LLVMMDStringInContext2(self.llcx, bytes.as_ptr().cast(), bytes.len());
+                    let data = [section, alloc];
+                    let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len());
+                    let val = llvm::LLVMMetadataAsValue(self.llcx, meta);
+                    llvm::LLVMAddNamedMetadataOperand(
+                        self.llmod,
+                        c"wasm.custom_sections".as_ptr().cast(),
+                        val,
+                    );
+                }
+            } else {
+                base::set_link_section(g, attrs);
+            }
+
+            if attrs.flags.contains(CodegenFnAttrFlags::USED) {
+                // `USED` and `USED_LINKER` can't be used together.
+                assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER));
+
+                // The semantics of #[used] in Rust only require the symbol to make it into the
+                // object file. It is explicitly allowed for the linker to strip the symbol if it
+                // is dead, which means we are allowed to use `llvm.compiler.used` instead of
+                // `llvm.used` here.
+                //
+                // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
+                // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs
+                // in the handling of `.init_array` (the static constructor list) in versions of
+                // the gold linker (prior to the one released with binutils 2.36).
+                //
+                // That said, we only ever emit these when compiling for ELF targets, unless
+                // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
+                // on other targets, in particular MachO targets have *their* static constructor
+                // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
+                // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
+                // so we don't need to take care of it here.
+                self.add_compiler_used_global(g);
+            }
+            if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+                // `USED` and `USED_LINKER` can't be used together.
+                assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED));
+
+                self.add_used_global(g);
+            }
+        }
+    }
+}
+
+impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
+    fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
+        if let Some(&gv) = self.const_globals.borrow().get(&cv) {
+            unsafe {
+                // Upgrade the alignment in cases where the same constant is used with different
+                // alignment requirements
+                let llalign = align.bytes() as u32;
+                if llalign > llvm::LLVMGetAlignment(gv) {
+                    llvm::LLVMSetAlignment(gv, llalign);
+                }
+            }
+            return gv;
+        }
+        let gv = self.static_addr_of_mut(cv, align, kind);
+        unsafe {
+            llvm::LLVMSetGlobalConstant(gv, True);
+        }
+        self.const_globals.borrow_mut().insert(cv, gv);
+        gv
+    }
+
+    fn codegen_static(&self, def_id: DefId) {
+        self.codegen_static_item(def_id)
+    }
+
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr.
+    fn add_used_global(&self, global: &'ll Value) {
+        self.used_statics.borrow_mut().push(global);
+    }
+
+    /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
+    /// an array of ptr.
+    fn add_compiler_used_global(&self, global: &'ll Value) {
+        self.compiler_used_statics.borrow_mut().push(global);
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
new file mode 100644
index 00000000000..649ff9df2cc
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -0,0 +1,1077 @@
+use crate::attributes;
+use crate::back::write::to_llvm_code_model;
+use crate::callee::get_fn;
+use crate::coverageinfo;
+use crate::debuginfo;
+use crate::llvm;
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::errors as ssa_errors;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
+    TyAndLayout,
+};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
+use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
+use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
+use rustc_session::Session;
+use rustc_span::source_map::Spanned;
+use rustc_span::Span;
+use rustc_target::abi::{
+    call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
+};
+use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
+use smallvec::SmallVec;
+
+use libc::c_uint;
+use std::borrow::Borrow;
+use std::cell::{Cell, RefCell};
+use std::ffi::CStr;
+use std::str;
+
+/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
+/// `llvm::Context` so that several compilation units may be optimized in parallel.
+/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
+pub struct CodegenCx<'ll, 'tcx> {
+    pub tcx: TyCtxt<'tcx>,
+    pub check_overflow: bool,
+    pub use_dll_storage_attrs: bool,
+    pub tls_model: llvm::ThreadLocalMode,
+
+    pub llmod: &'ll llvm::Module,
+    pub llcx: &'ll llvm::Context,
+    pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+
+    /// Cache instances of monomorphic and polymorphic items
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
+    /// Cache generated vtables
+    pub vtables:
+        RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
+    /// Cache of constant strings.
+    pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,
+
+    /// Cache of emitted const globals (value -> global)
+    pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+    /// List of globals for static variables which need to be passed to the
+    /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
+    /// (We have to make sure we don't invalidate any Values referring
+    /// to constants.)
+    pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
+
+    /// Statics that will be placed in the llvm.used variable
+    /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
+    pub used_statics: RefCell<Vec<&'ll Value>>,
+
+    /// Statics that will be placed in the llvm.compiler.used variable
+    /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
+    pub compiler_used_statics: RefCell<Vec<&'ll Value>>,
+
+    /// Mapping of non-scalar types to llvm types.
+    pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,
+
+    /// Mapping of scalar types to llvm types.
+    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
+
+    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+    pub isize_ty: &'ll Type,
+
+    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
+    pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,
+
+    eh_personality: Cell<Option<&'ll Value>>,
+    eh_catch_typeinfo: Cell<Option<&'ll Value>>,
+    pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
+
+    intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
+
+    /// A counter that is used for generating local symbol names
+    local_gen_sym_counter: Cell<usize>,
+
+    /// `codegen_static` will sometimes create a second global variable with a
+    /// different type and clear the symbol name of the original global.
+    /// `global_asm!` needs to be able to find this new global so that it can
+    /// compute the correct mangled symbol name to insert into the asm.
+    pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
+}
+
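+/// Maps rustc's TLS model to the corresponding LLVM thread-local mode. Note
+/// that `Emulated` has no separate mode at this level and is lowered as
+/// general-dynamic here.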
+fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
+    match tls_model {
+        TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
+        TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
+        TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
+        TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
+        TlsModel::Emulated => llvm::ThreadLocalMode::GeneralDynamic,
+    }
+}
+
+pub unsafe fn create_module<'ll>(
+    tcx: TyCtxt<'_>,
+    llcx: &'ll llvm::Context,
+    mod_name: &str,
+) -> &'ll llvm::Module {
+    let sess = tcx.sess;
+    let mod_name = SmallCStr::new(mod_name);
+    let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
+
+    let mut target_data_layout = sess.target.data_layout.to_string();
+    let llvm_version = llvm_util::get_version();
+    if llvm_version < (18, 0, 0) {
+        if sess.target.arch == "x86" || sess.target.arch == "x86_64" {
+            // LLVM 18 adjusts i128 to be 128-bit aligned on x86 variants.
+            // Earlier LLVMs leave this as default alignment, so remove it.
+            // See https://reviews.llvm.org/D86310
+            target_data_layout = target_data_layout.replace("-i128:128", "");
+        }
+    }
+
+    // Ensure that the hardcoded data-layout values remain the defaults.
+    {
+        let tm = crate::back::write::create_informational_target_machine(tcx.sess);
+        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm);
+
+        let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
+        let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
+            .expect("got a non-UTF8 data-layout from LLVM");
+
+        if target_data_layout != llvm_data_layout {
+            tcx.dcx().emit_err(crate::errors::MismatchedDataLayout {
+                rustc_target: sess.opts.target_triple.to_string().as_str(),
+                rustc_layout: target_data_layout.as_str(),
+                llvm_target: sess.target.llvm_target.borrow(),
+                llvm_layout: llvm_data_layout,
+            });
+        }
+    }
+
+    let data_layout = SmallCStr::new(&target_data_layout);
+    llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+
+    let llvm_target = SmallCStr::new(&sess.target.llvm_target);
+    llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
+
+    let reloc_model = sess.relocation_model();
+    if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) {
+        llvm::LLVMRustSetModulePICLevel(llmod);
+        // PIE is potentially more effective than PIC, but can only be used in executables.
+        // If all our outputs are executables, then we can relax PIC to PIE.
+        if reloc_model == RelocModel::Pie
+            || tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable)
+        {
+            llvm::LLVMRustSetModulePIELevel(llmod);
+        }
+    }
+
+    // Linking object files with different code models is undefined behavior
+    // because the compiler would have to generate additional code (to span
+    // longer jumps) if a larger code model is used with a smaller one.
+    //
+    // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
+    llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));
+
+    // If skipping the PLT is enabled, we need to add some module metadata
+    // to ensure intrinsic calls don't use it.
+    if !sess.needs_plt() {
+        let avoid_plt = c"RtLibUseGOT".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
+    }
+
+    // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
+    if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
+        let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Override,
+            canonical_jump_tables,
+            1,
+        );
+    }
+
+    // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
+    if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
+        let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Override,
+            enable_split_lto_unit,
+            1,
+        );
+    }
+
+    // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
+    if sess.is_sanitizer_kcfi_enabled() {
+        let kcfi = c"kcfi".as_ptr().cast();
+        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
+    }
+
+    // Control Flow Guard is currently only supported by the MSVC linker on Windows.
+    if sess.target.is_like_msvc {
+        match sess.opts.cg.control_flow_guard {
+            CFGuard::Disabled => {}
+            CFGuard::NoChecks => {
+                // Set `cfguard=1` module flag to emit metadata only.
+                llvm::LLVMRustAddModuleFlag(
+                    llmod,
+                    llvm::LLVMModFlagBehavior::Warning,
+                    c"cfguard".as_ptr() as *const _,
+                    1,
+                )
+            }
+            CFGuard::Checks => {
+                // Set `cfguard=2` module flag to emit metadata and checks.
+                llvm::LLVMRustAddModuleFlag(
+                    llmod,
+                    llvm::LLVMModFlagBehavior::Warning,
+                    c"cfguard".as_ptr() as *const _,
+                    2,
+                )
+            }
+        }
+    }
+
+    if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
+        if sess.target.arch == "aarch64" {
+            llvm::LLVMRustAddModuleFlag(
+                llmod,
+                llvm::LLVMModFlagBehavior::Min,
+                c"branch-target-enforcement".as_ptr().cast(),
+                bti.into(),
+            );
+            llvm::LLVMRustAddModuleFlag(
+                llmod,
+                llvm::LLVMModFlagBehavior::Min,
+                c"sign-return-address".as_ptr().cast(),
+                pac_ret.is_some().into(),
+            );
+            let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
+            llvm::LLVMRustAddModuleFlag(
+                llmod,
+                llvm::LLVMModFlagBehavior::Min,
+                c"sign-return-address-all".as_ptr().cast(),
+                pac_opts.leaf.into(),
+            );
+            llvm::LLVMRustAddModuleFlag(
+                llmod,
+                llvm::LLVMModFlagBehavior::Min,
+                c"sign-return-address-with-bkey".as_ptr().cast(),
+                u32::from(pac_opts.key == PAuthKey::B),
+            );
+        } else {
+            bug!(
+                "branch-protection used on non-AArch64 target; \
+                  this should be checked in rustc_session."
+            );
+        }
+    }
+
+    // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
+    if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Override,
+            c"cf-protection-branch".as_ptr().cast(),
+            1,
+        )
+    }
+    if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Override,
+            c"cf-protection-return".as_ptr().cast(),
+            1,
+        )
+    }
+
+    if sess.opts.unstable_opts.virtual_function_elimination {
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Error,
+            c"Virtual Function Elim".as_ptr().cast(),
+            1,
+        );
+    }
+
+    // Set module flag to enable Windows EHCont Guard (/guard:ehcont).
+    if sess.opts.unstable_opts.ehcont_guard {
+        llvm::LLVMRustAddModuleFlag(
+            llmod,
+            llvm::LLVMModFlagBehavior::Warning,
+            c"ehcontguard".as_ptr() as *const _,
+            1,
+        )
+    }
+
+    // Insert `llvm.ident` metadata.
+    //
+    // On the wasm targets it will get hooked up to the "producers" section's
+    // `processed-by` information.
+    #[allow(clippy::option_env_unwrap)]
+    let rustc_producer =
+        format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"));
+    let name_metadata = llvm::LLVMMDStringInContext(
+        llcx,
+        rustc_producer.as_ptr().cast(),
+        rustc_producer.as_bytes().len() as c_uint,
+    );
+    llvm::LLVMAddNamedMetadataOperand(
+        llmod,
+        c"llvm.ident".as_ptr(),
+        llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1),
+    );
+
+    // Add module flags specified via -Z llvm_module_flag
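+    // (Each entry is a parsed `(key, value, behavior)` triple; for example, an
+    // invocation along the lines of `-Zllvm-module-flag=<key>:<value>:warning`
+    // would add `key` with the `Warning` merge behavior. The exact CLI syntax
+    // is defined by the option parser in `rustc_session`.)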
+    for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag {
+        let key = format!("{key}\0");
+        let behavior = match behavior.as_str() {
+            "error" => llvm::LLVMModFlagBehavior::Error,
+            "warning" => llvm::LLVMModFlagBehavior::Warning,
+            "require" => llvm::LLVMModFlagBehavior::Require,
+            "override" => llvm::LLVMModFlagBehavior::Override,
+            "append" => llvm::LLVMModFlagBehavior::Append,
+            "appendunique" => llvm::LLVMModFlagBehavior::AppendUnique,
+            "max" => llvm::LLVMModFlagBehavior::Max,
+            "min" => llvm::LLVMModFlagBehavior::Min,
+            // We already checked this during option parsing
+            _ => unreachable!(),
+        };
+        llvm::LLVMRustAddModuleFlag(llmod, behavior, key.as_ptr().cast(), *value)
+    }
+
+    llmod
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+    pub(crate) fn new(
+        tcx: TyCtxt<'tcx>,
+        codegen_unit: &'tcx CodegenUnit<'tcx>,
+        llvm_module: &'ll crate::ModuleLlvm,
+    ) -> Self {
+        // An interesting part of Windows which MSVC forces our hand on (and
+        // apparently MinGW doesn't) is the usage of `dllimport` and `dllexport`
+        // attributes in LLVM IR as well as native dependencies (in C these
+        // correspond to `__declspec(dllimport)`).
+        //
+        // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
+        // relying on that can result in issues like #50176.
+        // LLD won't support that and expects symbols with proper attributes.
+        // Because of that we make the MinGW target emit dllexport just like MSVC.
+        // When it comes to dllimport we use it for constants, but for functions
+        // we rely on the linker to do the right thing. As opposed to dllexport,
+        // this task is easy for them (both LD and LLD) and allows us to easily
+        // use symbols from static libraries in shared libraries.
+        //
+        // Whenever a dynamic library is built on Windows it must have its public
+        // interface specified by functions tagged with `dllexport` or otherwise
+        // they're not available to be linked against. This poses a few problems
+        // for the compiler, some of which are somewhat fundamental, but we use
+        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
+        // attribute to all LLVM functions that are exported (e.g., they're
+        // already tagged with external linkage). This is suboptimal for a few
+        // reasons:
+        //
+        // * If an object file will never be included in a dynamic library,
+        //   there's no need to attach the dllexport attribute. Most object
+        //   files in Rust are not destined to become part of a dll as binaries
+        //   are statically linked by default.
+        // * If the compiler is emitting both an rlib and a dylib, the same
+        //   source object file is currently used but with MSVC this may be less
+        //   feasible. The compiler may be able to get around this, but it may
+        //   involve some invasive changes to deal with this.
+        //
+        // The flip side of this situation is that whenever you link to a dll and
+        // you import a function from it, the import should be tagged with
+        // `dllimport`. At this time, however, the compiler does not emit
+        // `dllimport` for any declarations other than constants (where it is
+        // required), which is again suboptimal for even more reasons!
+        //
+        // * Calling a function imported from another dll without using
+        //   `dllimport` causes the linker/compiler to have extra overhead (one
+        //   `jmp` instruction on x86) when calling the function.
+        // * The same object file may be used in different circumstances, so a
+        //   function may be imported from a dll if the object is linked into a
+        //   dll, but it may be just linked against if linked into an rlib.
+        // * The compiler has no knowledge about whether native functions should
+        //   be tagged dllimport or not.
+        //
+        // For now the compiler takes the perf hit (I do not have any numbers to
+        // this effect) by marking very little as `dllimport` and praying the
+        // linker will take care of everything. Fixing this problem will likely
+        // require adding a few attributes to Rust itself (feature gated at the
+        // start) and then strongly recommending static linkage on Windows!
+        let use_dll_storage_attrs = tcx.sess.target.is_like_windows;
+
+        let check_overflow = tcx.sess.overflow_checks();
+
+        let tls_model = to_llvm_tls_model(tcx.sess.tls_model());
+
+        let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
+
+        let coverage_cx =
+            tcx.sess.instrument_coverage().then(coverageinfo::CrateCoverageContext::new);
+
+        let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
+            let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
+            debuginfo::metadata::build_compile_unit_di_node(
+                tcx,
+                codegen_unit.name().as_str(),
+                &dctx,
+            );
+            Some(dctx)
+        } else {
+            None
+        };
+
+        let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
+
+        CodegenCx {
+            tcx,
+            check_overflow,
+            use_dll_storage_attrs,
+            tls_model,
+            llmod,
+            llcx,
+            codegen_unit,
+            instances: Default::default(),
+            vtables: Default::default(),
+            const_str_cache: Default::default(),
+            const_globals: Default::default(),
+            statics_to_rauw: RefCell::new(Vec::new()),
+            used_statics: RefCell::new(Vec::new()),
+            compiler_used_statics: RefCell::new(Vec::new()),
+            type_lowering: Default::default(),
+            scalar_lltypes: Default::default(),
+            pointee_infos: Default::default(),
+            isize_ty,
+            coverage_cx,
+            dbg_cx,
+            eh_personality: Cell::new(None),
+            eh_catch_typeinfo: Cell::new(None),
+            rust_try_fn: Cell::new(None),
+            intrinsics: Default::default(),
+            local_gen_sym_counter: Cell::new(0),
+            renamed_statics: Default::default(),
+        }
+    }
+
+    pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
+        &self.statics_to_rauw
+    }
+
+    #[inline]
+    pub fn coverage_context(&self) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
+        self.coverage_cx.as_ref()
+    }
+
+    pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
+        let array = self.const_array(self.type_ptr(), values);
+
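+        // Emit the metadata global itself; the resulting IR is roughly
+        // `@llvm.used = appending global [N x ptr] [...], section "llvm.metadata"`
+        // (or `@llvm.compiler.used`, depending on the `name` passed in).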
+        unsafe {
+            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
+            llvm::LLVMSetInitializer(g, array);
+            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+            llvm::LLVMSetSection(g, c"llvm.metadata".as_ptr());
+        }
+    }
+}
+
+impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn vtables(
+        &self,
+    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
+    {
+        &self.vtables
+    }
+
+    fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
+        get_fn(self, instance)
+    }
+
+    fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
+        get_fn(self, instance)
+    }
+
+    fn eh_personality(&self) -> &'ll Value {
+        // The exception handling personality function.
+        //
+        // If our compilation unit has the `eh_personality` lang item somewhere
+        // within it, then we just need to codegen that. Otherwise, we're
+        // building an rlib which will depend on some upstream implementation of
+        // this function, so we just codegen a generic reference to it. We don't
+        // specify any of the types for the function; we just make it a symbol
+        // that LLVM can later use.
+        //
+        // Note that MSVC is a little special here in that we don't use the
+        // `eh_personality` lang item at all. Currently LLVM has support for
+        // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+        // *name of the personality function* to decide what kind of unwind side
+        // tables/landing pads to emit. It looks like Dwarf is used by default,
+        // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+        // an "exception", but for MSVC we want to force SEH. This means that we
+        // can't actually have the personality function be our standard
+        // `rust_eh_personality` function, but rather we wire it up to the
+        // CRT's custom personality function, which forces LLVM to consider
+        // landing pads as "landing pads for SEH".
+        if let Some(llpersonality) = self.eh_personality.get() {
+            return llpersonality;
+        }
+
+        let name = if wants_msvc_seh(self.sess()) {
+            Some("__CxxFrameHandler3")
+        } else if wants_wasm_eh(self.sess()) {
+            // LLVM specifically tests for the name of the personality function.
+            // There is no need for this function to exist anywhere; it will
+            // not be called. However, its name has to be "__gxx_wasm_personality_v0"
+            // for native wasm exceptions.
+            Some("__gxx_wasm_personality_v0")
+        } else {
+            None
+        };
+
+        let tcx = self.tcx;
+        let llfn = match tcx.lang_items().eh_personality() {
+            Some(def_id) if name.is_none() => self.get_fn_addr(ty::Instance::expect_resolve(
+                tcx,
+                ty::ParamEnv::reveal_all(),
+                def_id,
+                ty::List::empty(),
+            )),
+            _ => {
+                let name = name.unwrap_or("rust_eh_personality");
+                if let Some(llfn) = self.get_declared_value(name) {
+                    llfn
+                } else {
+                    let fty = self.type_variadic_func(&[], self.type_i32());
+                    let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
+                    let target_cpu = attributes::target_cpu_attr(self);
+                    attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
+                    llfn
+                }
+            }
+        };
+        self.eh_personality.set(Some(llfn));
+        llfn
+    }
+
+    fn sess(&self) -> &Session {
+        self.tcx.sess
+    }
+
+    fn check_overflow(&self) -> bool {
+        self.check_overflow
+    }
+
+    fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+        self.codegen_unit
+    }
+
+    fn set_frame_pointer_type(&self, llfn: &'ll Value) {
+        if let Some(attr) = attributes::frame_pointer_type_attr(self) {
+            attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
+        }
+    }
+
+    fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
+        let mut attrs = SmallVec::<[_; 2]>::new();
+        attrs.push(attributes::target_cpu_attr(self));
+        attrs.extend(attributes::tune_cpu_attr(self));
+        attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
+    }
+
+    fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+        let entry_name = self.sess().target.entry_name.as_ref();
+        if self.get_declared_value(entry_name).is_none() {
+            Some(self.declare_entry_fn(
+                entry_name,
+                self.sess().target.entry_abi.into(),
+                llvm::UnnamedAddr::Global,
+                fn_type,
+            ))
+        } else {
+            // If the symbol already exists, it is an error: for example, the user wrote
+            // #[no_mangle] extern "C" fn main(..) {..}
+            // instead of #[start].
+            None
+        }
+    }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+    pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
+        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
+            return v;
+        }
+
+        self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
+    }
+
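+    /// Declares the named LLVM intrinsic with the given signature (a variadic
+    /// signature when `args` is `None`) and caches the declaration so that
+    /// later `get_intrinsic` calls can reuse it.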
+    fn insert_intrinsic(
+        &self,
+        name: &'static str,
+        args: Option<&[&'ll llvm::Type]>,
+        ret: &'ll llvm::Type,
+    ) -> (&'ll llvm::Type, &'ll llvm::Value) {
+        let fn_ty = if let Some(args) = args {
+            self.type_func(args, ret)
+        } else {
+            self.type_variadic_func(&[], ret)
+        };
+        let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
+        self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
+        (fn_ty, f)
+    }
+
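+    /// Lazily declares one of the fixed set of LLVM intrinsics known to this
+    /// backend, returning `None` if `key` is not among them.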
+    fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
+        macro_rules! ifn {
+            ($name:expr, fn() -> $ret:expr) => (
+                if key == $name {
+                    return Some(self.insert_intrinsic($name, Some(&[]), $ret));
+                }
+            );
+            ($name:expr, fn(...) -> $ret:expr) => (
+                if key == $name {
+                    return Some(self.insert_intrinsic($name, None, $ret));
+                }
+            );
+            ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
+                if key == $name {
+                    return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
+                }
+            );
+        }
+        macro_rules! mk_struct {
+            ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
+        }
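+        // As a sketch of what `ifn!` expands to: `ifn!("llvm.trap", fn() -> void)`
+        // becomes
+        //     if key == "llvm.trap" {
+        //         return Some(self.insert_intrinsic("llvm.trap", Some(&[]), void));
+        //     }
+        // so the first arm whose name matches `key` declares that intrinsic and
+        // returns its `(type, value)` pair.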
+
+        let ptr = self.type_ptr();
+        let void = self.type_void();
+        let i1 = self.type_i1();
+        let t_i8 = self.type_i8();
+        let t_i16 = self.type_i16();
+        let t_i32 = self.type_i32();
+        let t_i64 = self.type_i64();
+        let t_i128 = self.type_i128();
+        let t_isize = self.type_isize();
+        let t_f16 = self.type_f16();
+        let t_f32 = self.type_f32();
+        let t_f64 = self.type_f64();
+        let t_f128 = self.type_f128();
+        let t_metadata = self.type_metadata();
+        let t_token = self.type_token();
+
+        ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
+        ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);
+
+        ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
+
+        ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
+        ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
+        ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
+        ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
+        ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
+        ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);
+
+        ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
+        ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
+        ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
+        ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
+        ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
+        ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
+        ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
+        ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
+        ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
+        ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);
+
+        ifn!("llvm.trap", fn() -> void);
+        ifn!("llvm.debugtrap", fn() -> void);
+        ifn!("llvm.frameaddress", fn(t_i32) -> ptr);
+
+        ifn!("llvm.powi.f16", fn(t_f16, t_i32) -> t_f16);
+        ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
+        ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+        ifn!("llvm.powi.f128", fn(t_f128, t_i32) -> t_f128);
+
+        ifn!("llvm.pow.f16", fn(t_f16, t_f16) -> t_f16);
+        ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.pow.f128", fn(t_f128, t_f128) -> t_f128);
+
+        ifn!("llvm.sqrt.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.sqrt.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.sin.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.sin.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.cos.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.cos.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.exp.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.exp.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.exp2.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.exp2.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.log.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.log10.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log10.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.log2.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log2.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.fma.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
+        ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+        ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+        ifn!("llvm.fma.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
+
+        ifn!("llvm.fabs.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.fabs.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.minnum.f16", fn(t_f16, t_f16) -> t_f16);
+        ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.minnum.f128", fn(t_f128, t_f128) -> t_f128);
+
+        ifn!("llvm.maxnum.f16", fn(t_f16, t_f16) -> t_f16);
+        ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.maxnum.f128", fn(t_f128, t_f128) -> t_f128);
+
+        ifn!("llvm.floor.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.floor.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.ceil.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.ceil.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.trunc.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.trunc.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.copysign.f16", fn(t_f16, t_f16) -> t_f16);
+        ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
+        ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.copysign.f128", fn(t_f128, t_f128) -> t_f128);
+
+        ifn!("llvm.round.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.round.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.roundeven.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.roundeven.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.roundeven.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.roundeven.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.rint.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.rint.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.nearbyint.f16", fn(t_f16) -> t_f16);
+        ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
+        ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.nearbyint.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
+        ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
+        ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
+        ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
+        ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
+
+        ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
+        ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
+        ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
+        ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
+        ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
+
+        ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
+        ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
+        ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
+        ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
+        ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
+
+        ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
+        ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
+        ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
+        ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
+
+        ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
+        ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
+        ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
+        ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
+        ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
+
+        ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+        ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+        ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+        ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+        ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+        ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+        ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+        ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+        ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+        ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+        ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+        ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+        ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+        ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+        ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+        ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+        ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+        ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
+        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);
+
+        // FIXME: This is an infinitesimally small portion of the types you can
+        // pass to this intrinsic. If we can ever lazily register intrinsics, we
+        // should register these when they're used; that way any type can be
+        // passed.
+        ifn!("llvm.is.constant.i1", fn(i1) -> i1);
+        ifn!("llvm.is.constant.i8", fn(t_i8) -> i1);
+        ifn!("llvm.is.constant.i16", fn(t_i16) -> i1);
+        ifn!("llvm.is.constant.i32", fn(t_i32) -> i1);
+        ifn!("llvm.is.constant.i64", fn(t_i64) -> i1);
+        ifn!("llvm.is.constant.i128", fn(t_i128) -> i1);
+        ifn!("llvm.is.constant.isize", fn(t_isize) -> i1);
+        ifn!("llvm.is.constant.f32", fn(t_f32) -> i1);
+        ifn!("llvm.is.constant.f64", fn(t_f64) -> i1);
+        ifn!("llvm.is.constant.ptr", fn(ptr) -> i1);
+
+        ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
+        ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
+        ifn!("llvm.localescape", fn(...) -> void);
+        ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
+        ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);
+
+        ifn!("llvm.assume", fn(i1) -> void);
+        ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);
+
+        // This isn't an "LLVM intrinsic", but LLVM's optimization passes
+        // recognize it like one (including turning it into `bcmp` sometimes),
+        // and we use it to implement intrinsics like `raw_eq` and `compare_bytes`.
+        match self.sess().target.arch.as_ref() {
+            "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
+            _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
+        }
+
+        // variadic intrinsics
+        ifn!("llvm.va_start", fn(ptr) -> void);
+        ifn!("llvm.va_end", fn(ptr) -> void);
+        ifn!("llvm.va_copy", fn(ptr, ptr) -> void);
+
+        if self.sess().instrument_coverage() {
+            ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
+        }
+
+        ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
+        ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});
+
+        if self.sess().opts.debuginfo != DebugInfo::None {
+            ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
+            ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
+        }
+
+        ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
+
+        None
+    }
+
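+    /// Returns the typeinfo global used to catch Rust panics, declaring it on
+    /// first use. This is only meaningful when targeting Emscripten (see the
+    /// assertion below).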
+    pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {
+        if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
+            return eh_catch_typeinfo;
+        }
+        let tcx = self.tcx;
+        assert!(self.sess().target.os == "emscripten");
+        let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
+            Some(def_id) => self.get_static(def_id),
+            _ => {
+                let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false);
+                self.declare_global("rust_eh_catch_typeinfo", ty)
+            }
+        };
+        self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
+        eh_catch_typeinfo
+    }
+}
+
+impl CodegenCx<'_, '_> {
+    /// Generates a new symbol name with the given prefix. This symbol name must
+    /// only be used for definitions with `internal` or `private` linkage.
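+    ///
+    /// For example (illustrative), successive calls with the prefix `"alloc"`
+    /// would produce `"alloc.0"`, `"alloc.1"`, and so on, using a base-62
+    /// alphanumeric encoding of the per-CGU counter.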
+    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+        let idx = self.local_gen_sym_counter.get();
+        self.local_gen_sym_counter.set(idx + 1);
+        // Include a '.' character, so there can be no accidental conflicts with
+        // user-defined names.
+        let mut name = String::with_capacity(prefix.len() + 6);
+        name.push_str(prefix);
+        name.push('.');
+        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+        name
+    }
+}
+
+impl HasDataLayout for CodegenCx<'_, '_> {
+    #[inline]
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl HasTargetSpec for CodegenCx<'_, '_> {
+    #[inline]
+    fn target_spec(&self) -> &Target {
+        &self.tcx.sess.target
+    }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for CodegenCx<'_, 'tcx> {
+    #[inline]
+    fn tcx(&self) -> TyCtxt<'tcx> {
+        self.tcx
+    }
+}
+
+impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn param_env(&self) -> ty::ParamEnv<'tcx> {
+        ty::ParamEnv::reveal_all()
+    }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
+    type LayoutOfResult = TyAndLayout<'tcx>;
+
+    #[inline]
+    fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+        if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
+            self.tcx.dcx().emit_fatal(Spanned { span, node: err.into_diagnostic() })
+        } else {
+            self.tcx.dcx().emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
+        }
+    }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
+    type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+    #[inline]
+    fn handle_fn_abi_err(
+        &self,
+        err: FnAbiError<'tcx>,
+        span: Span,
+        fn_abi_request: FnAbiRequest<'tcx>,
+    ) -> ! {
+        if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+            self.tcx.dcx().emit_fatal(Spanned { span, node: err })
+        } else {
+            match fn_abi_request {
+                FnAbiRequest::OfFnPtr { sig, extra_args } => {
+                    span_bug!(span, "`fn_abi_of_fn_ptr({sig}, {extra_args:?})` failed: {err:?}",);
+                }
+                FnAbiRequest::OfInstance { instance, extra_args } => {
+                    span_bug!(
+                        span,
+                        "`fn_abi_of_instance({instance}, {extra_args:?})` failed: {err:?}",
+                    );
+                }
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
new file mode 100644
index 00000000000..2af28146a51
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -0,0 +1,292 @@
+use rustc_middle::mir::coverage::{CodeRegion, CounterId, CovTerm, ExpressionId, MappingKind};
+
+/// Must match the layout of `LLVMRustCounterKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum CounterKind {
+    Zero = 0,
+    CounterValueReference = 1,
+    Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+///   * For `CounterKind::Zero`, `id` is assumed to be `0`
+///   * For `CounterKind::CounterValueReference`, `id` matches the `counter_id` of the injected
+///     instrumentation counter (the `index` argument to the LLVM intrinsic
+///     `instrprof.increment()`)
+///   * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+///     counter expressions.
+///
+/// Corresponds to struct `llvm::coverage::Counter`.
+///
+/// Must match the layout of `LLVMRustCounter`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+    // Important: The layout (order and types of fields) must match its C++ counterpart.
+    pub kind: CounterKind,
+    id: u32,
+}
+
+impl Counter {
+    /// A `Counter` of kind `Zero`. For this counter kind, the `id` is not used.
+    pub(crate) const ZERO: Self = Self { kind: CounterKind::Zero, id: 0 };
+
+    /// Constructs a new `Counter` of kind `CounterValueReference`.
+    pub fn counter_value_reference(counter_id: CounterId) -> Self {
+        Self { kind: CounterKind::CounterValueReference, id: counter_id.as_u32() }
+    }
+
+    /// Constructs a new `Counter` of kind `Expression`.
+    pub(crate) fn expression(expression_id: ExpressionId) -> Self {
+        Self { kind: CounterKind::Expression, id: expression_id.as_u32() }
+    }
+
+    pub(crate) fn from_term(term: CovTerm) -> Self {
+        match term {
+            CovTerm::Zero => Self::ZERO,
+            CovTerm::Counter(id) => Self::counter_value_reference(id),
+            CovTerm::Expression(id) => Self::expression(id),
+        }
+    }
+}
+
+/// Corresponds to enum `llvm::coverage::CounterExpression::ExprKind`.
+///
+/// Must match the layout of `LLVMRustCounterExprKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+    Subtract = 0,
+    Add = 1,
+}
+
+/// Corresponds to struct `llvm::coverage::CounterExpression`.
+///
+/// Must match the layout of `LLVMRustCounterExpression`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+    pub kind: ExprKind,
+    pub lhs: Counter,
+    pub rhs: Counter,
+}
+
+/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegionKind`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum RegionKind {
+    /// A CodeRegion associates some code with a counter.
+    CodeRegion = 0,
+
+    /// An ExpansionRegion represents a file expansion region that associates
+    /// a source range with the expansion of a virtual source file, such as
+    /// for a macro instantiation or #include file.
+    ExpansionRegion = 1,
+
+    /// A SkippedRegion represents a source range with code that was skipped
+    /// by a preprocessor or similar means.
+    SkippedRegion = 2,
+
+    /// A GapRegion is like a CodeRegion, but its count is only set as the
+    /// line execution count when it's the only region in the line.
+    GapRegion = 3,
+
+    /// A BranchRegion represents leaf-level boolean expressions and is
+    /// associated with two counters, each representing the number of times the
+    /// expression evaluates to true or false.
+    BranchRegion = 4,
+}
+
+/// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
+/// coverage map, in accordance with the
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// The struct composes fields representing the `Counter` type and value(s) (injected counter
+/// ID, or expression type and operands), the source file (an indirect index into a "filenames
+/// array", encoded separately), and source location (start and end positions of the represented
+/// code region).
+///
+/// Corresponds to struct `llvm::coverage::CounterMappingRegion`.
+///
+/// Must match the layout of `LLVMRustCounterMappingRegion`.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterMappingRegion {
+    /// The counter type and type-dependent counter data, if any.
+    counter: Counter,
+
+    /// If the `RegionKind` is a `BranchRegion`, this represents the counter
+    /// for the false branch of the region.
+    false_counter: Counter,
+
+    /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+    /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+    /// that, in turn, are used to look up the filename for this region.
+    file_id: u32,
+
+    /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+    /// the mapping regions created as a result of macro expansion, by checking if their file id
+    /// matches the expanded file id.
+    expanded_file_id: u32,
+
+    /// 1-based starting line of the mapping region.
+    start_line: u32,
+
+    /// 1-based starting column of the mapping region.
+    start_col: u32,
+
+    /// 1-based ending line of the mapping region.
+    end_line: u32,
+
+    /// 1-based ending column of the mapping region. If the high bit is set, the current
+    /// mapping region is a gap area.
+    end_col: u32,
+
+    kind: RegionKind,
+}
+
+impl CounterMappingRegion {
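+    /// Converts a MIR-level mapping (together with its local file index and
+    /// code region) into the FFI representation expected by LLVM's coverage
+    /// mapping writer.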
+    pub(crate) fn from_mapping(
+        mapping_kind: &MappingKind,
+        local_file_id: u32,
+        code_region: &CodeRegion,
+    ) -> Self {
+        let &CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = code_region;
+        match *mapping_kind {
+            MappingKind::Code(term) => Self::code_region(
+                Counter::from_term(term),
+                local_file_id,
+                start_line,
+                start_col,
+                end_line,
+                end_col,
+            ),
+            MappingKind::Branch { true_term, false_term } => Self::branch_region(
+                Counter::from_term(true_term),
+                Counter::from_term(false_term),
+                local_file_id,
+                start_line,
+                start_col,
+                end_line,
+                end_col,
+            ),
+        }
+    }
+
+    pub(crate) fn code_region(
+        counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::CodeRegion,
+        }
+    }
+
+    pub(crate) fn branch_region(
+        counter: Counter,
+        false_counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::BranchRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn expansion_region(
+        file_id: u32,
+        expanded_file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::ExpansionRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn skipped_region(
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::SkippedRegion,
+        }
+    }
+
+    // This function might be used in the future; the LLVM API is still evolving, as is coverage
+    // support.
+    #[allow(dead_code)]
+    pub(crate) fn gap_region(
+        counter: Counter,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter: Counter::ZERO,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
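+            // Setting the high bit of `end_col` marks this region as a gap area.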
+            end_col: (1_u32 << 31) | end_col,
+            kind: RegionKind::GapRegion,
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
new file mode 100644
index 00000000000..d85d9411f03
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -0,0 +1,272 @@
+use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind};
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::coverage::{
+    CodeRegion, CounterId, CovTerm, Expression, ExpressionId, FunctionCoverageInfo, Mapping,
+    MappingKind, Op,
+};
+use rustc_middle::ty::Instance;
+use rustc_span::Symbol;
+
+/// Holds all of the coverage mapping data associated with a function instance,
+/// collected during traversal of `Coverage` statements in the function's MIR.
+#[derive(Debug)]
+pub struct FunctionCoverageCollector<'tcx> {
+    /// Coverage info that was attached to this function by the instrumentor.
+    function_coverage_info: &'tcx FunctionCoverageInfo,
+    is_used: bool,
+
+    /// Tracks which counters have been seen, so that we can identify mappings
+    /// to counters that were optimized out, and set them to zero.
+    counters_seen: BitSet<CounterId>,
+    /// Contains all expression IDs that have been seen in an `ExpressionUsed`
+    /// coverage statement, plus all expression IDs that aren't directly used
+    /// by any mappings (and therefore do not have expression-used statements).
+    /// After MIR traversal is finished, we can conclude that any IDs missing
+    /// from this set must have had their statements deleted by MIR opts.
+    expressions_seen: BitSet<ExpressionId>,
+}
+
+impl<'tcx> FunctionCoverageCollector<'tcx> {
+    /// Creates a new set of coverage data for a used (called) function.
+    pub fn new(
+        instance: Instance<'tcx>,
+        function_coverage_info: &'tcx FunctionCoverageInfo,
+    ) -> Self {
+        Self::create(instance, function_coverage_info, true)
+    }
+
+    /// Creates a new set of coverage data for an unused (never called) function.
+    pub fn unused(
+        instance: Instance<'tcx>,
+        function_coverage_info: &'tcx FunctionCoverageInfo,
+    ) -> Self {
+        Self::create(instance, function_coverage_info, false)
+    }
+
+    fn create(
+        instance: Instance<'tcx>,
+        function_coverage_info: &'tcx FunctionCoverageInfo,
+        is_used: bool,
+    ) -> Self {
+        let num_counters = function_coverage_info.num_counters;
+        let num_expressions = function_coverage_info.expressions.len();
+        debug!(
+            "FunctionCoverage::create(instance={instance:?}) has \
+            num_counters={num_counters}, num_expressions={num_expressions}, is_used={is_used}"
+        );
+
+        // Create a filled set of expression IDs, so that expressions not
+        // directly used by mappings will be treated as "seen".
+        // (If they end up being unused, LLVM will delete them for us.)
+        let mut expressions_seen = BitSet::new_filled(num_expressions);
+        // For each expression ID that is directly used by one or more mappings,
+        // mark it as not-yet-seen. This indicates that we expect to see a
+        // corresponding `ExpressionUsed` statement during MIR traversal.
+        for term in function_coverage_info.mappings.iter().flat_map(|m| m.kind.terms()) {
+            if let CovTerm::Expression(id) = term {
+                expressions_seen.remove(id);
+            }
+        }
+
+        Self {
+            function_coverage_info,
+            is_used,
+            counters_seen: BitSet::new_empty(num_counters),
+            expressions_seen,
+        }
+    }
+
+    /// Marks a counter ID as having been seen in a counter-increment statement.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn mark_counter_id_seen(&mut self, id: CounterId) {
+        self.counters_seen.insert(id);
+    }
+
+    /// Marks an expression ID as having been seen in an expression-used statement.
+    #[instrument(level = "debug", skip(self))]
+    pub(crate) fn mark_expression_id_seen(&mut self, id: ExpressionId) {
+        self.expressions_seen.insert(id);
+    }
+
+    /// Identify expressions that will always have a value of zero, and note
+    /// their IDs in [`ZeroExpressions`]. Mappings that refer to a zero expression
+    /// can instead become mappings to a constant zero value.
+    ///
+    /// This method mainly exists to preserve the simplifications that were
+    /// already being performed by the Rust-side expression renumbering, so that
+    /// the resulting coverage mappings don't get worse.
+    fn identify_zero_expressions(&self) -> ZeroExpressions {
+        // The set of expressions that either were optimized out entirely, or
+        // have zero as both of their operands, and will therefore always have
+        // a value of zero. Other expressions that refer to these as operands
+        // can have those operands replaced with `CovTerm::Zero`.
+        let mut zero_expressions = ZeroExpressions::default();
+
+        // Simplify a copy of each expression based on lower-numbered expressions,
+        // and then update the set of always-zero expressions if necessary.
+        // (By construction, expressions can only refer to other expressions
+        // that have lower IDs, so one pass is sufficient.)
+        for (id, expression) in self.function_coverage_info.expressions.iter_enumerated() {
+            if !self.expressions_seen.contains(id) {
+                // If an expression was not seen, it must have been optimized away,
+                // so any operand that refers to it can be replaced with zero.
+                zero_expressions.insert(id);
+                continue;
+            }
+
+            // We don't need to simplify the actual expression data in the
+            // expressions list; we can just simplify a temporary copy and then
+            // use that to update the set of always-zero expressions.
+            let Expression { mut lhs, op, mut rhs } = *expression;
+
+            // If an expression has an operand that is also an expression, the
+            // operand's ID must be strictly lower. This is what lets us find
+            // all zero expressions in one pass.
+            let assert_operand_expression_is_lower = |operand_id: ExpressionId| {
+                assert!(
+                    operand_id < id,
+                    "Operand {operand_id:?} should be less than {id:?} in {expression:?}",
+                )
+            };
+
+            // If an operand refers to a counter or expression that is always
+            // zero, then that operand can be replaced with `CovTerm::Zero`.
+            let maybe_set_operand_to_zero = |operand: &mut CovTerm| {
+                if let CovTerm::Expression(id) = *operand {
+                    assert_operand_expression_is_lower(id);
+                }
+
+                if is_zero_term(&self.counters_seen, &zero_expressions, *operand) {
+                    *operand = CovTerm::Zero;
+                }
+            };
+            maybe_set_operand_to_zero(&mut lhs);
+            maybe_set_operand_to_zero(&mut rhs);
+
+            // Coverage counter values cannot be negative, so if an expression
+            // involves subtraction from zero, assume that its RHS must also be zero.
+            // (Do this after simplifications that could set the LHS to zero.)
+            if lhs == CovTerm::Zero && op == Op::Subtract {
+                rhs = CovTerm::Zero;
+            }
+
+            // After the above simplifications, if both operands are zero, then
+            // we know that this expression is always zero too.
+            if lhs == CovTerm::Zero && rhs == CovTerm::Zero {
+                zero_expressions.insert(id);
+            }
+        }
+
+        zero_expressions
+    }
+
+    pub(crate) fn into_finished(self) -> FunctionCoverage<'tcx> {
+        let zero_expressions = self.identify_zero_expressions();
+        let FunctionCoverageCollector { function_coverage_info, is_used, counters_seen, .. } = self;
+
+        FunctionCoverage { function_coverage_info, is_used, counters_seen, zero_expressions }
+    }
+}
+
+pub(crate) struct FunctionCoverage<'tcx> {
+    function_coverage_info: &'tcx FunctionCoverageInfo,
+    is_used: bool,
+
+    counters_seen: BitSet<CounterId>,
+    zero_expressions: ZeroExpressions,
+}
+
+impl<'tcx> FunctionCoverage<'tcx> {
+    /// Returns true for a used (called) function, and false for an unused function.
+    pub(crate) fn is_used(&self) -> bool {
+        self.is_used
+    }
+
+    /// Returns the source hash, which is generated from the HIR node structure and used to indicate
+    /// whether the source code structure changed between different compilations.
+    pub fn source_hash(&self) -> u64 {
+        if self.is_used { self.function_coverage_info.function_source_hash } else { 0 }
+    }
+
+    /// Returns an iterator over all filenames used by this function's mappings.
+    pub(crate) fn all_file_names(&self) -> impl Iterator<Item = Symbol> + Captures<'_> {
+        self.function_coverage_info.mappings.iter().map(|mapping| mapping.code_region.file_name)
+    }
+
+    /// Convert this function's coverage expression data into a form that can be
+    /// passed through FFI to LLVM.
+    pub(crate) fn counter_expressions(
+        &self,
+    ) -> impl Iterator<Item = CounterExpression> + ExactSizeIterator + Captures<'_> {
+        // We know that LLVM will optimize out any unused expressions before
+        // producing the final coverage map, so there's no need to do the same
+        // thing on the Rust side unless we're confident we can do much better.
+        // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)
+
+        self.function_coverage_info.expressions.iter().map(move |&Expression { lhs, op, rhs }| {
+            CounterExpression {
+                lhs: self.counter_for_term(lhs),
+                kind: match op {
+                    Op::Add => ExprKind::Add,
+                    Op::Subtract => ExprKind::Subtract,
+                },
+                rhs: self.counter_for_term(rhs),
+            }
+        })
+    }
+
+    /// Converts this function's coverage mappings into an intermediate form
+    /// that will be used by `mapgen` when preparing for FFI.
+    pub(crate) fn counter_regions(
+        &self,
+    ) -> impl Iterator<Item = (MappingKind, &CodeRegion)> + ExactSizeIterator {
+        self.function_coverage_info.mappings.iter().map(move |mapping| {
+            let Mapping { kind, code_region } = mapping;
+            let kind =
+                kind.map_terms(|term| if self.is_zero_term(term) { CovTerm::Zero } else { term });
+            (kind, code_region)
+        })
+    }
+
+    fn counter_for_term(&self, term: CovTerm) -> Counter {
+        if self.is_zero_term(term) { Counter::ZERO } else { Counter::from_term(term) }
+    }
+
+    fn is_zero_term(&self, term: CovTerm) -> bool {
+        is_zero_term(&self.counters_seen, &self.zero_expressions, term)
+    }
+}
+
+/// Set of expression IDs that are known to always evaluate to zero.
+/// Any mapping or expression operand that refers to these expressions can have
+/// that reference replaced with a constant zero value.
+#[derive(Default)]
+struct ZeroExpressions(FxIndexSet<ExpressionId>);
+
+impl ZeroExpressions {
+    fn insert(&mut self, id: ExpressionId) {
+        self.0.insert(id);
+    }
+
+    fn contains(&self, id: ExpressionId) -> bool {
+        self.0.contains(&id)
+    }
+}
+
+/// Returns `true` if the given term is known to have a value of zero, taking
+/// into account knowledge of which counters are unused and which expressions
+/// are always zero.
+fn is_zero_term(
+    counters_seen: &BitSet<CounterId>,
+    zero_expressions: &ZeroExpressions,
+    term: CovTerm,
+) -> bool {
+    match term {
+        CovTerm::Zero => true,
+        CovTerm::Counter(id) => !counters_seen.contains(id),
+        CovTerm::Expression(id) => zero_expressions.contains(id),
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
new file mode 100644
index 00000000000..ee7ea342301
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -0,0 +1,451 @@
+use crate::common::CodegenCx;
+use crate::coverageinfo;
+use crate::coverageinfo::ffi::CounterMappingRegion;
+use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector};
+use crate::llvm;
+
+use itertools::Itertools as _;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods};
+use rustc_data_structures::fx::{FxIndexMap, FxIndexSet};
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_index::IndexVec;
+use rustc_middle::bug;
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefIdSet;
+use rustc_span::Symbol;
+
+/// Generates and exports the Coverage Map.
+///
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format version
+/// 6 (zero-based encoded as 5), as defined at
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// This version is supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
+/// bundled with Rust's fork of LLVM.
+///
+/// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
+/// the same version. Clang's implementation of Coverage Map generation was referenced when
+/// implementing this Rust version, and though the format documentation is very explicit and
+/// detailed, some undocumented details in Clang's implementation (that may or may not be important)
+/// were also replicated for Rust's Coverage Map.
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+    let tcx = cx.tcx;
+
+    // Ensure the installed version of LLVM supports Coverage Map Version 6
+    // (encoded as a zero-based value: 5), which was introduced with LLVM 13.
+    let version = coverageinfo::mapping_version();
+    assert_eq!(version, 5, "The `CoverageMappingVersion` exposed by `llvm-wrapper` is out of sync");
+
+    debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
+
+    // In order to show that unused functions have coverage counts of zero (0), LLVM requires the
+    // functions exist. Generate synthetic functions with a (required) single counter, and add the
+    // MIR `Coverage` code regions to the `function_coverage_map`, before calling
+    // `ctx.take_function_coverage_map()`.
+    if cx.codegen_unit.is_code_coverage_dead_code_cgu() {
+        add_unused_functions(cx);
+    }
+
+    let function_coverage_map = match cx.coverage_context() {
+        Some(ctx) => ctx.take_function_coverage_map(),
+        None => return,
+    };
+
+    if function_coverage_map.is_empty() {
+        // This module has no functions with coverage instrumentation
+        return;
+    }
+
+    let function_coverage_entries = function_coverage_map
+        .into_iter()
+        .map(|(instance, function_coverage)| (instance, function_coverage.into_finished()))
+        .collect::<Vec<_>>();
+
+    let all_file_names =
+        function_coverage_entries.iter().flat_map(|(_, fn_cov)| fn_cov.all_file_names());
+    let global_file_table = GlobalFileTable::new(all_file_names);
+
+    // Encode all filenames referenced by coverage mappings in this CGU.
+    let filenames_buffer = global_file_table.make_filenames_buffer(tcx);
+
+    let filenames_size = filenames_buffer.len();
+    let filenames_val = cx.const_bytes(&filenames_buffer);
+    let filenames_ref = coverageinfo::hash_bytes(&filenames_buffer);
+
+    // Generate the coverage map header, which contains the filenames used by
+    // this CGU's coverage mappings, and store it in a well-known global.
+    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
+    coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
+
+    let mut unused_function_names = Vec::new();
+    let covfun_section_name = coverageinfo::covfun_section_name(cx);
+
+    // Encode coverage mappings and generate function records
+    for (instance, function_coverage) in function_coverage_entries {
+        debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+
+        let mangled_function_name = tcx.symbol_name(instance).name;
+        let source_hash = function_coverage.source_hash();
+        let is_used = function_coverage.is_used();
+
+        let coverage_mapping_buffer =
+            encode_mappings_for_function(&global_file_table, &function_coverage);
+
+        if coverage_mapping_buffer.is_empty() {
+            if function_coverage.is_used() {
+                bug!(
+                    "A used function should have had coverage mapping data but did not: {}",
+                    mangled_function_name
+                );
+            } else {
+                debug!("unused function had no coverage mapping data: {}", mangled_function_name);
+                continue;
+            }
+        }
+
+        if !is_used {
+            unused_function_names.push(mangled_function_name);
+        }
+
+        save_function_record(
+            cx,
+            &covfun_section_name,
+            mangled_function_name,
+            source_hash,
+            filenames_ref,
+            coverage_mapping_buffer,
+            is_used,
+        );
+    }
+
+    // For unused functions, we need to take their mangled names and store them
+    // in a specially-named global array. LLVM's `InstrProfiling` pass will
+    // detect this global and include those names in its `__llvm_prf_names`
+    // section. (See `llvm/lib/Transforms/Instrumentation/InstrProfiling.cpp`.)
+    if !unused_function_names.is_empty() {
+        assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
+
+        let name_globals = unused_function_names
+            .into_iter()
+            .map(|mangled_function_name| cx.const_str(mangled_function_name).0)
+            .collect::<Vec<_>>();
+        let initializer = cx.const_array(cx.type_ptr(), &name_globals);
+
+        let array = llvm::add_global(cx.llmod, cx.val_ty(initializer), "__llvm_coverage_names");
+        llvm::set_global_constant(array, true);
+        llvm::set_linkage(array, llvm::Linkage::InternalLinkage);
+        llvm::set_initializer(array, initializer);
+    }
+}
+
+/// Maps "global" (per-CGU) file ID numbers to their underlying filenames.
+struct GlobalFileTable {
+    /// This "raw" table doesn't include the working dir, so a filename's
+    /// global ID is its index in this set **plus one**.
+    raw_file_table: FxIndexSet<Symbol>,
+}
+
+impl GlobalFileTable {
+    fn new(all_file_names: impl IntoIterator<Item = Symbol>) -> Self {
+        // Collect all of the filenames into a set. Filenames usually come in
+        // contiguous runs, so we can dedup adjacent ones to save work.
+        let mut raw_file_table = all_file_names.into_iter().dedup().collect::<FxIndexSet<Symbol>>();
+
+        // Sort the file table by its actual string values, not the arbitrary
+        // ordering of its symbols.
+        raw_file_table.sort_unstable_by(|a, b| a.as_str().cmp(b.as_str()));
+
+        Self { raw_file_table }
+    }
+
+    fn global_file_id_for_file_name(&self, file_name: Symbol) -> u32 {
+        let raw_id = self.raw_file_table.get_index_of(&file_name).unwrap_or_else(|| {
+            bug!("file name not found in prepared global file table: {file_name}");
+        });
+        // The raw file table doesn't include an entry for the working dir
+        // (which has ID 0), so add 1 to get the correct ID.
+        (raw_id + 1) as u32
+    }
+
+    fn make_filenames_buffer(&self, tcx: TyCtxt<'_>) -> Vec<u8> {
+        // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
+        // requires setting the first filename to the compilation directory.
+        // Since rustc generates coverage maps with relative paths, the
+        // compilation directory can be combined with the relative paths
+        // to get absolute paths, if needed.
+        use rustc_session::RemapFileNameExt;
+        let working_dir: &str = &tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy();
+
+        llvm::build_byte_buffer(|buffer| {
+            coverageinfo::write_filenames_section_to_buffer(
+                // Insert the working dir at index 0, before the other filenames.
+                std::iter::once(working_dir).chain(self.raw_file_table.iter().map(Symbol::as_str)),
+                buffer,
+            );
+        })
+    }
+}
+
+rustc_index::newtype_index! {
+    struct LocalFileId {}
+}
+
+/// Holds a mapping from "local" (per-function) file IDs to "global" (per-CGU)
+/// file IDs.
+#[derive(Default)]
+struct VirtualFileMapping {
+    local_to_global: IndexVec<LocalFileId, u32>,
+    global_to_local: FxIndexMap<u32, LocalFileId>,
+}
+
+impl VirtualFileMapping {
+    fn local_id_for_global(&mut self, global_file_id: u32) -> LocalFileId {
+        *self
+            .global_to_local
+            .entry(global_file_id)
+            .or_insert_with(|| self.local_to_global.push(global_file_id))
+    }
+
+    fn into_vec(self) -> Vec<u32> {
+        self.local_to_global.raw
+    }
+}
+
+/// Using the expressions and counter regions collected for a single function,
+/// generate the variable-sized payload of its corresponding `__llvm_covfun`
+/// entry. The payload is returned as a vector of bytes.
+///
+/// Newly-encountered filenames will be added to the global file table.
+fn encode_mappings_for_function(
+    global_file_table: &GlobalFileTable,
+    function_coverage: &FunctionCoverage<'_>,
+) -> Vec<u8> {
+    let counter_regions = function_coverage.counter_regions();
+    if counter_regions.is_empty() {
+        return Vec::new();
+    }
+
+    let expressions = function_coverage.counter_expressions().collect::<Vec<_>>();
+
+    let mut virtual_file_mapping = VirtualFileMapping::default();
+    let mut mapping_regions = Vec::with_capacity(counter_regions.len());
+
+    // Group mappings into runs with the same filename, preserving the order
+    // yielded by `FunctionCoverage`.
+    // Prepare file IDs for each filename, and prepare the mapping data so that
+    // we can pass it through FFI to LLVM.
+    for (file_name, counter_regions_for_file) in
+        &counter_regions.group_by(|(_, region)| region.file_name)
+    {
+        // Look up the global file ID for this filename.
+        let global_file_id = global_file_table.global_file_id_for_file_name(file_name);
+
+        // Associate that global file ID with a local file ID for this function.
+        let local_file_id = virtual_file_mapping.local_id_for_global(global_file_id);
+        debug!("  file id: {local_file_id:?} => global {global_file_id} = '{file_name:?}'");
+
+        // For each counter/region pair in this function+file, convert it to a
+        // form suitable for FFI.
+        for (mapping_kind, region) in counter_regions_for_file {
+            debug!("Adding counter {mapping_kind:?} to map for {region:?}");
+            mapping_regions.push(CounterMappingRegion::from_mapping(
+                &mapping_kind,
+                local_file_id.as_u32(),
+                region,
+            ));
+        }
+    }
+
+    // Encode the function's coverage mappings into a buffer.
+    llvm::build_byte_buffer(|buffer| {
+        coverageinfo::write_mapping_to_buffer(
+            virtual_file_mapping.into_vec(),
+            expressions,
+            mapping_regions,
+            buffer,
+        );
+    })
+}
+
+/// Construct coverage map header and the array of function records, and combine them into the
+/// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn generate_coverage_map<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    version: u32,
+    filenames_size: usize,
+    filenames_val: &'ll llvm::Value,
+) -> &'ll llvm::Value {
+    debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+    // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+    // as of `llvm::coverage::CovMapVersion::Version4`.)
+    let zero_was_n_records_val = cx.const_u32(0);
+    let filenames_size_val = cx.const_u32(filenames_size as u32);
+    let zero_was_coverage_size_val = cx.const_u32(0);
+    let version_val = cx.const_u32(version);
+    let cov_data_header_val = cx.const_struct(
+        &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+        /*packed=*/ false,
+    );
+
+    // Create the complete LLVM coverage data value to add to the LLVM IR
+    cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
+}
+
+/// Construct a function record and combine it with the function's coverage mapping data.
+/// Save the function record into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn save_function_record(
+    cx: &CodegenCx<'_, '_>,
+    covfun_section_name: &str,
+    mangled_function_name: &str,
+    source_hash: u64,
+    filenames_ref: u64,
+    coverage_mapping_buffer: Vec<u8>,
+    is_used: bool,
+) {
+    // Concatenate the encoded coverage mappings
+    let coverage_mapping_size = coverage_mapping_buffer.len();
+    let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
+
+    let func_name_hash = coverageinfo::hash_bytes(mangled_function_name.as_bytes());
+    let func_name_hash_val = cx.const_u64(func_name_hash);
+    let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
+    let source_hash_val = cx.const_u64(source_hash);
+    let filenames_ref_val = cx.const_u64(filenames_ref);
+    let func_record_val = cx.const_struct(
+        &[
+            func_name_hash_val,
+            coverage_mapping_size_val,
+            source_hash_val,
+            filenames_ref_val,
+            coverage_mapping_val,
+        ],
+        /*packed=*/ true,
+    );
+
+    coverageinfo::save_func_record_to_mod(
+        cx,
+        covfun_section_name,
+        func_name_hash,
+        func_record_val,
+        is_used,
+    );
+}
+
+/// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
+/// the functions that went through codegen, such as public functions and "used" functions
+/// (functions referenced by other "used" or public items). Any other functions, considered unused
+/// or "unreachable", were still parsed and processed through the MIR stage, but were not
+/// codegenned. (Note that `-Clink-dead-code` can force some unused code to be codegenned, but
+/// that flag is known to cause other errors when combined with `-C instrument-coverage`; and
+/// `-Clink-dead-code` will not generate code for unused generic functions.)
+///
+/// We can find the unused functions (including generic functions) by the set difference of all MIR
+/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`codegenned_and_inlined_items`).
+///
+/// These unused functions don't need to be codegenned, but we do need to add them to the function
+/// coverage map (in a single designated CGU) so that we still emit coverage mappings for them.
+/// We also end up adding their symbol names to a special global array that LLVM will include in
+/// its embedded coverage data.
+fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
+    assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
+
+    let tcx = cx.tcx;
+
+    let eligible_def_ids = tcx.mir_keys(()).iter().filter_map(|local_def_id| {
+        let def_id = local_def_id.to_def_id();
+        let kind = tcx.def_kind(def_id);
+        // `mir_keys` will give us `DefId`s for all kinds of things, not
+        // just "functions", like consts, statics, etc. Filter those out.
+        if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) {
+            return None;
+        }
+
+        // FIXME(79651): Consider trying to filter out dummy instantiations of
+        // unused generic functions from library crates, because they can produce
+        // "unused instantiation" in coverage reports even when they are actually
+        // used by some downstream crate in the same binary.
+
+        Some(local_def_id.to_def_id())
+    });
+
+    let codegenned_def_ids = codegenned_and_inlined_items(tcx);
+
+    // For each `DefId` that should have coverage instrumentation but wasn't
+    // codegenned, add it to the function coverage map as an unused function.
+    for def_id in eligible_def_ids.filter(|id| !codegenned_def_ids.contains(id)) {
+        // Skip any function that didn't have coverage data added to it by the
+        // coverage instrumentor.
+        let body = tcx.instance_mir(ty::InstanceDef::Item(def_id));
+        let Some(function_coverage_info) = body.function_coverage_info.as_deref() else {
+            continue;
+        };
+
+        debug!("generating unused fn: {def_id:?}");
+        let instance = declare_unused_fn(tcx, def_id);
+        add_unused_function_coverage(cx, instance, function_coverage_info);
+    }
+}
+
+/// All items participating in code generation together with (instrumented)
+/// items inlined into them.
+fn codegenned_and_inlined_items(tcx: TyCtxt<'_>) -> DefIdSet {
+    let (items, cgus) = tcx.collect_and_partition_mono_items(());
+    let mut visited = DefIdSet::default();
+    let mut result = items.clone();
+
+    for cgu in cgus {
+        for item in cgu.items().keys() {
+            if let mir::mono::MonoItem::Fn(ref instance) = item {
+                let did = instance.def_id();
+                if !visited.insert(did) {
+                    continue;
+                }
+                let body = tcx.instance_mir(instance.def);
+                for block in body.basic_blocks.iter() {
+                    for statement in &block.statements {
+                        let mir::StatementKind::Coverage(_) = statement.kind else { continue };
+                        let scope = statement.source_info.scope;
+                        if let Some(inlined) = scope.inlined_instance(&body.source_scopes) {
+                            result.insert(inlined.def_id());
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    result
+}
+
+fn declare_unused_fn<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::Instance<'tcx> {
+    ty::Instance::new(
+        def_id,
+        ty::GenericArgs::for_item(tcx, def_id, |param, _| {
+            if let ty::GenericParamDefKind::Lifetime = param.kind {
+                tcx.lifetimes.re_erased.into()
+            } else {
+                tcx.mk_param_from_def(param)
+            }
+        }),
+    )
+}
+
+fn add_unused_function_coverage<'tcx>(
+    cx: &CodegenCx<'_, 'tcx>,
+    instance: ty::Instance<'tcx>,
+    function_coverage_info: &'tcx mir::coverage::FunctionCoverageInfo,
+) {
+    // An unused function's mappings will automatically be rewritten to map to
+    // zero, because none of its counters/expressions are marked as seen.
+    let function_coverage = FunctionCoverageCollector::unused(instance, function_coverage_info);
+
+    if let Some(coverage_context) = cx.coverage_context() {
+        coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
+    } else {
+        bug!("Could not get the `coverage_context`");
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
new file mode 100644
index 00000000000..133084b7c12
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -0,0 +1,289 @@
+use crate::llvm;
+
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion};
+use crate::coverageinfo::map_data::FunctionCoverageCollector;
+
+use libc::c_uint;
+use rustc_codegen_ssa::traits::{
+    BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods,
+    StaticMethods,
+};
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_llvm::RustString;
+use rustc_middle::bug;
+use rustc_middle::mir::coverage::CoverageKind;
+use rustc_middle::mir::Coverage;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::Instance;
+
+use std::cell::RefCell;
+
+pub(crate) mod ffi;
+pub(crate) mod map_data;
+pub mod mapgen;
+
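+/// Alignment (in bytes) applied to the coverage map and function record globals created in this module.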
+const VAR_ALIGN_BYTES: usize = 8;
+
+/// A context object for maintaining all state needed by the coverageinfo module.
+pub struct CrateCoverageContext<'ll, 'tcx> {
+    /// Coverage data for each instrumented function identified by DefId.
+    pub(crate) function_coverage_map:
+        RefCell<FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>,
+    pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
+}
+
+impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
+    pub fn new() -> Self {
+        Self {
+            function_coverage_map: Default::default(),
+            pgo_func_name_var_map: Default::default(),
+        }
+    }
+
+    pub fn take_function_coverage_map(
+        &self,
+    ) -> FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>> {
+        self.function_coverage_map.replace(FxIndexMap::default())
+    }
+}
+
+// These methods used to be part of trait `CoverageInfoMethods`, which no longer
+// exists after most coverage code was moved out of SSA.
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+    pub(crate) fn coverageinfo_finalize(&self) {
+        mapgen::finalize(self)
+    }
+
+    /// For LLVM codegen, returns a function-specific `Value` for a global
+    /// string, to hold the function name passed to LLVM intrinsic
+    /// `instrprof.increment()`. The `Value` is only created once per instance.
+    /// Multiple invocations with the same instance return the same `Value`.
+    fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
+        if let Some(coverage_context) = self.coverage_context() {
+            debug!("getting pgo_func_name_var for instance={:?}", instance);
+            let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
+            pgo_func_name_var_map
+                .entry(instance)
+                .or_insert_with(|| create_pgo_func_name_var(self, instance))
+        } else {
+            bug!("Could not get the `coverage_context`");
+        }
+    }
+}
+
+impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+    #[instrument(level = "debug", skip(self))]
+    fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) {
+        // Our caller should have already taken care of inlining subtleties,
+        // so we can assume that counter/expression IDs in this coverage
+        // statement are meaningful for the given instance.
+        //
+        // (Either the statement was not inlined and directly belongs to this
+        // instance, or it was inlined *from* this instance.)
+
+        let bx = self;
+
+        match coverage.kind {
+            // Marker statements have no effect during codegen,
+            // so return early and don't create `func_coverage`.
+            CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => return,
+            // Match exhaustively to ensure that newly-added kinds are classified correctly.
+            CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. } => {}
+        }
+
+        let Some(function_coverage_info) =
+            bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref()
+        else {
+            debug!("function has a coverage statement but no coverage info");
+            return;
+        };
+
+        let Some(coverage_context) = bx.coverage_context() else { return };
+        let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+        let func_coverage = coverage_map
+            .entry(instance)
+            .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
+
+        let Coverage { kind } = coverage;
+        match *kind {
+            CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => unreachable!(
+                "unexpected marker statement {kind:?} should have caused an early return"
+            ),
+            CoverageKind::CounterIncrement { id } => {
+                func_coverage.mark_counter_id_seen(id);
+                // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
+                // as that needs an exclusive borrow.
+                drop(coverage_map);
+
+                // The number of counters passed to `llvm.instrprof.increment` might
+                // be smaller than the number originally inserted by the instrumentor,
+                // if some high-numbered counters were removed by MIR optimizations.
+                // If so, LLVM's profiler runtime will use fewer physical counters.
+                let num_counters =
+                    bx.tcx().coverage_ids_info(instance.def).max_counter_id.as_u32() + 1;
+                assert!(
+                    num_counters as usize <= function_coverage_info.num_counters,
+                    "num_counters disagreement: query says {num_counters} but function info only has {}",
+                    function_coverage_info.num_counters
+                );
+
+                let fn_name = bx.get_pgo_func_name_var(instance);
+                let hash = bx.const_u64(function_coverage_info.function_source_hash);
+                let num_counters = bx.const_u32(num_counters);
+                let index = bx.const_u32(id.as_u32());
+                debug!(
+                    "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+                    fn_name, hash, num_counters, index,
+                );
+                bx.instrprof_increment(fn_name, hash, num_counters, index);
+            }
+            CoverageKind::ExpressionUsed { id } => {
+                func_coverage.mark_expression_id_seen(id);
+            }
+        }
+    }
+}
+
+/// Calls llvm::createPGOFuncNameVar() with the given function instance's
+/// mangled function name. The LLVM API returns an llvm::GlobalVariable
+/// containing the function name, with the specific variable name and linkage
+/// required by LLVM InstrProf source-based coverage instrumentation. Use
+/// `bx.get_pgo_func_name_var()` to ensure the variable is only created once per
+/// `Instance`.
+fn create_pgo_func_name_var<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+) -> &'ll llvm::Value {
+    let mangled_fn_name: &str = cx.tcx.symbol_name(instance).name;
+    let llfn = cx.get_fn(instance);
+    unsafe {
+        llvm::LLVMRustCoverageCreatePGOFuncNameVar(
+            llfn,
+            mangled_fn_name.as_ptr().cast(),
+            mangled_fn_name.len(),
+        )
+    }
+}
+
+pub(crate) fn write_filenames_section_to_buffer<'a>(
+    filenames: impl IntoIterator<Item = &'a str>,
+    buffer: &RustString,
+) {
+    let (pointers, lengths) = filenames
+        .into_iter()
+        .map(|s: &str| (s.as_ptr().cast(), s.len()))
+        .unzip::<_, _, Vec<_>, Vec<_>>();
+
+    unsafe {
+        llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
+            pointers.as_ptr(),
+            pointers.len(),
+            lengths.as_ptr(),
+            lengths.len(),
+            buffer,
+        );
+    }
+}
+
+pub(crate) fn write_mapping_to_buffer(
+    virtual_file_mapping: Vec<u32>,
+    expressions: Vec<CounterExpression>,
+    mapping_regions: Vec<CounterMappingRegion>,
+    buffer: &RustString,
+) {
+    unsafe {
+        llvm::LLVMRustCoverageWriteMappingToBuffer(
+            virtual_file_mapping.as_ptr(),
+            virtual_file_mapping.len() as c_uint,
+            expressions.as_ptr(),
+            expressions.len() as c_uint,
+            mapping_regions.as_ptr(),
+            mapping_regions.len() as c_uint,
+            buffer,
+        );
+    }
+}
+
+pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 {
+    unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
+}
+
+pub(crate) fn mapping_version() -> u32 {
+    unsafe { llvm::LLVMRustCoverageMappingVersion() }
+}
+
+pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    cov_data_val: &'ll llvm::Value,
+) {
+    let covmap_var_name = llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
+    })
+    .expect("Rust Coverage Mapping var name failed UTF-8 conversion");
+    debug!("covmap var name: {:?}", covmap_var_name);
+
+    let covmap_section_name = llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
+    })
+    .expect("Rust Coverage section name failed UTF-8 conversion");
+    debug!("covmap section name: {:?}", covmap_section_name);
+
+    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
+    llvm::set_initializer(llglobal, cov_data_val);
+    llvm::set_global_constant(llglobal, true);
+    llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
+    llvm::set_section(llglobal, &covmap_section_name);
+    llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
+    cx.add_used_global(llglobal);
+}
+
+pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    covfun_section_name: &str,
+    func_name_hash: u64,
+    func_record_val: &'ll llvm::Value,
+    is_used: bool,
+) {
+    // Assign a name to the function record. This is used to merge duplicates.
+    //
+    // In LLVM, a "translation unit" (effectively, a `Crate` in Rust) can describe functions that
+    // are included-but-not-used. If (or when) Rust generates functions that are
+    // included-but-not-used, note that a dummy description for a function included-but-not-used
+    // in a Crate can be replaced by a full description provided by a different Crate. The two kinds
+    // of descriptions play distinct roles in LLVM IR; therefore, assign them different names (by
+    // appending "u" to the end of the function record var name) to prevent `linkonce_odr` merging.
+    let func_record_var_name =
+        format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
+    debug!("function record var name: {:?}", func_record_var_name);
+    debug!("function record section name: {:?}", covfun_section_name);
+
+    let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
+    llvm::set_initializer(llglobal, func_record_val);
+    llvm::set_global_constant(llglobal, true);
+    llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
+    llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
+    llvm::set_section(llglobal, covfun_section_name);
+    llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
+    llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
+    cx.add_used_global(llglobal);
+}
+
+/// Returns the section name string to pass through to the linker when embedding
+/// per-function coverage information in the object file, according to the target
+/// platform's object file format.
+///
+/// LLVM's coverage tools read coverage mapping details from this section when
+/// producing coverage reports.
+///
+/// Typical values are:
+/// - `__llvm_covfun` on Linux
+/// - `__LLVM_COV,__llvm_covfun` on macOS (includes `__LLVM_COV,` segment prefix)
+/// - `.lcovfun$M` on Windows (includes `$M` sorting suffix)
+pub(crate) fn covfun_section_name(cx: &CodegenCx<'_, '_>) -> String {
+    llvm::build_string(|s| unsafe {
+        llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
+    })
+    .expect("Rust Coverage function record section name failed UTF-8 conversion")
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
new file mode 100644
index 00000000000..6a63eda4b99
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -0,0 +1,133 @@
+use super::metadata::file_metadata;
+use super::utils::DIB;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
+use rustc_codegen_ssa::traits::*;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DILocation, DIScope};
+use rustc_middle::mir::{Body, SourceScope};
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::{self, Instance};
+use rustc_session::config::DebugInfo;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::Idx;
+
+/// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
+// FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`.
+pub fn compute_mir_scopes<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
+) {
+    // Find all scopes with variables defined in them.
+    let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
+        let mut vars = BitSet::new_empty(mir.source_scopes.len());
+        // FIXME(eddyb) take into account that arguments always have debuginfo,
+        // irrespective of their name (assuming full debuginfo is enabled).
+        // NOTE(eddyb) actually, on second thought, those are always in the
+        // function scope, which always exists.
+        for var_debug_info in &mir.var_debug_info {
+            vars.insert(var_debug_info.source_info.scope);
+        }
+        Some(vars)
+    } else {
+        // Nothing to emit, of course.
+        None
+    };
+    let mut instantiated = BitSet::new_empty(mir.source_scopes.len());
+    // Instantiate all scopes.
+    for idx in 0..mir.source_scopes.len() {
+        let scope = SourceScope::new(idx);
+        make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
+    }
+    assert!(instantiated.count() == mir.source_scopes.len());
+}
+
+fn make_mir_scope<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    variables: &Option<BitSet<SourceScope>>,
+    debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>,
+    instantiated: &mut BitSet<SourceScope>,
+    scope: SourceScope,
+) {
+    if instantiated.contains(scope) {
+        return;
+    }
+
+    let scope_data = &mir.source_scopes[scope];
+    let parent_scope = if let Some(parent) = scope_data.parent_scope {
+        make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
+        debug_context.scopes[parent]
+    } else {
+        // The root is the function itself.
+        let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
+        debug_context.scopes[scope] = DebugScope {
+            file_start_pos: file.start_pos,
+            file_end_pos: file.end_position(),
+            ..debug_context.scopes[scope]
+        };
+        instantiated.insert(scope);
+        return;
+    };
+
+    if let Some(vars) = variables
+        && !vars.contains(scope)
+        && scope_data.inlined.is_none()
+    {
+        // Do not create a DIScope if there are no variables defined in this
+        // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
+        debug_context.scopes[scope] = parent_scope;
+        instantiated.insert(scope);
+        return;
+    }
+
+    let loc = cx.lookup_debug_loc(scope_data.span.lo());
+    let file_metadata = file_metadata(cx, &loc.file);
+
+    let parent_dbg_scope = match scope_data.inlined {
+        Some((callee, _)) => {
+            // FIXME(eddyb) this would be `self.monomorphize(&callee)`
+            // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
+            let callee = cx.tcx.instantiate_and_normalize_erasing_regions(
+                instance.args,
+                ty::ParamEnv::reveal_all(),
+                ty::EarlyBinder::bind(callee),
+            );
+            debug_context.inlined_function_scopes.entry(callee).or_insert_with(|| {
+                let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+                cx.dbg_scope_fn(callee, callee_fn_abi, None)
+            })
+        }
+        None => parent_scope.dbg_scope,
+    };
+
+    let dbg_scope = unsafe {
+        llvm::LLVMRustDIBuilderCreateLexicalBlock(
+            DIB(cx),
+            parent_dbg_scope,
+            file_metadata,
+            loc.line,
+            loc.col,
+        )
+    };
+
+    let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+        // FIXME(eddyb) this doesn't account for the macro-related
+        // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+        let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+        cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+    });
+
+    debug_context.scopes[scope] = DebugScope {
+        dbg_scope,
+        inlined_at: inlined_at.or(parent_scope.inlined_at),
+        file_start_pos: loc.file.start_pos,
+        file_end_pos: loc.file.end_position(),
+    };
+    instantiated.insert(scope);
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.md b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
new file mode 100644
index 00000000000..aaec4e68c17
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
@@ -0,0 +1,131 @@
+# Debug Info Module
+
+This module serves the purpose of generating debug symbols. We use LLVM's
+[source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
+features for generating the debug information. The general principle is
+this:
+
+Given the right metadata in the LLVM IR, the LLVM code generator is able to
+create DWARF debug symbols for the given code. The
+[metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
+much like DWARF *debugging information entries* (DIE), representing type
+information such as datatype layout, function signatures, block layout,
+variable location and scope information, etc. It is the purpose of this
+module to generate correct metadata and insert it into the LLVM IR.
+
+As the exact format of metadata trees may change between different LLVM
+versions, we now use LLVM
+[DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
+to create metadata where possible. This will hopefully ease the adaptation of
+this module to future LLVM versions.
+
+The public API of the module is a set of functions that will insert the
+correct metadata into the LLVM IR when called with the right parameters.
+The module is thus driven from an outside client with functions like
+`debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
+
+Internally the module will try to reuse already created metadata by
+utilizing a cache. The way to get a shared metadata node when needed is
+thus to just call the corresponding function in this module:
+```ignore (illustrative)
+let file_metadata = file_metadata(cx, file);
+```
+The function will take care of probing the cache for an existing node for
+that exact file path.
+
+All private state used by the module is stored within either the
+`CodegenUnitDebugContext` struct (owned by the `CodegenCx`) or the
+`FunctionDebugContext` (owned by the `FunctionCx`).
+
+This file consists of three conceptual sections:
+1. The public interface of the module
+2. Module-internal metadata creation functions
+3. Minor utility functions
+
+
+## Recursive Types
+
+Some kinds of types, such as structs and enums, can be recursive. That means
+that the type definition of some type X refers to some other type which in
+turn (transitively) refers to X. This introduces cycles into the type
+referral graph. A naive algorithm doing an on-demand, depth-first traversal
+of this graph when describing types, can get trapped in an endless loop
+when it reaches such a cycle.
+
+For example, the following simple type for a singly-linked list...
+
+```
+struct List {
+    value: i32,
+    tail: Option<Box<List>>,
+}
+```
+
+will generate the following callstack with a naive DFS algorithm:
+
+```ignore (illustrative)
+describe(t = List)
+  describe(t = i32)
+  describe(t = Option<Box<List>>)
+    describe(t = Box<List>)
+      describe(t = List) // at the beginning again...
+      ...
+```
+
+To break cycles like these, we use "stubs". That is, when
+the algorithm encounters a possibly recursive type (any struct or enum), it
+immediately creates a type description node and inserts it into the cache
+*before* describing the members of the type. This type description is just
+a stub (as type members are not described and added to it yet) but it
+allows the algorithm to already refer to the type. After the stub is
+inserted into the cache, the algorithm continues as before. If it now
+encounters a recursive reference, it will hit the cache and will not try to
+describe the type anew. This behavior is encapsulated in the
+`type_map::build_type_with_children()` function.
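+
+A rough sketch of this stub-then-fill pattern (the helper names below are
+purely illustrative and do not correspond to real functions in this module):
+
+```ignore (illustrative)
+// 1. Create a stub node and cache it *before* recursing into members.
+let stub = create_stub_node(cx, list_ty);
+type_cache.insert(list_ty, stub);
+
+// 2. Describing the members may recurse back into `list_ty`; the lookup
+//    now hits the cached stub instead of looping forever.
+let members = describe_members(cx, list_ty);
+
+// 3. Finally, attach the member descriptions to the stub.
+attach_members(stub, members);
+```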
+
+
+## Source Locations and Line Information
+
+In addition to data type descriptions, the debugging information must also
+make it possible to map machine code locations back to source code locations
+in order to be useful. This functionality is also handled in this module. The
+following functions control source mappings:
+
++ `set_source_location()`
++ `clear_source_location()`
++ `start_emitting_source_locations()`
+
+`set_source_location()` sets the current source location. All IR
+instructions created after a call to this function will be linked to the
+given source location, until another location is specified with
+`set_source_location()` or the source location is cleared with
+`clear_source_location()`. In the latter case, subsequent IR instructions
+will not be linked to any source location. As you can see, this is a
+stateful API (mimicking the one in LLVM), so be careful with source
+locations set by previous calls. It's probably best to not rely on any
+specific state being present at a given point in code.
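+
+As a purely illustrative sketch of the stateful behavior described above
+(argument names and signatures are hypothetical):
+
+```ignore (illustrative)
+set_source_location(debug_context, span_a);
+// ...IR emitted here is attributed to `span_a`...
+set_source_location(debug_context, span_b);
+// ...IR emitted here is attributed to `span_b`...
+clear_source_location(debug_context);
+// ...IR emitted here carries no source location...
+```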
+
+One topic that deserves some extra attention is *function prologues*. At
+the beginning of a function's machine code there are typically a few
+instructions for loading argument values into allocas and checking if
+there's enough stack space for the function to execute. This *prologue* is
+not visible in the source code and LLVM puts a special PROLOGUE END marker
+into the line table at the first non-prologue instruction of the function.
+In order to find out where the prologue ends, LLVM looks for the first
+instruction in the function body that is linked to a source location. So,
+when generating prologue instructions we have to make sure that we don't
+emit source location information until the 'real' function body begins. For
+this reason, source location emission is disabled by default for any new
+function being codegened and is only activated after a call to the third
+function from the list above, `start_emitting_source_locations()`. This
+function should be called right before regularly starting to codegen the
+top-level block of the given function.
+
+There is one exception to the above rule: `llvm.dbg.declare` instructions
+must be linked to the source location of the variable being declared. For
+function parameters these `llvm.dbg.declare` instructions typically occur
+in the middle of the prologue; however, they are ignored by LLVM's prologue
+detection. The `create_argument_metadata()` and related functions take care
+of linking the `llvm.dbg.declare` instructions to the correct source
+locations even while source location emission is still disabled, so there
+is no need to do anything special with source location handling here.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
new file mode 100644
index 00000000000..d82b1e1e721
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -0,0 +1,117 @@
+// .debug_gdb_scripts binary section.
+
+use crate::llvm;
+
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::value::Value;
+use rustc_ast::attr;
+use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::{bug, middle::debugger_visualizer::DebuggerVisualizerType};
+use rustc_session::config::{CrateType, DebugInfo};
+use rustc_span::symbol::sym;
+
+/// Inserts a side-effect free instruction sequence that makes sure that the
+/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
+    if needs_gdb_debug_scripts_section(bx) {
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
+        // Load just the first byte as that's all that's necessary to force
+        // LLVM to keep around the reference to the global.
+        let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
+        unsafe {
+            llvm::LLVMSetAlignment(volatile_load_instruction, 1);
+        }
+    }
+}
+
+/// Allocates the global variable responsible for the .debug_gdb_scripts binary
+/// section.
+pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Value {
+    let c_section_var_name = c"__rustc_debug_gdb_scripts_section__";
+    let section_var_name = c_section_var_name.to_str().unwrap();
+
+    let section_var =
+        unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) };
+
+    section_var.unwrap_or_else(|| {
+        let mut section_contents = Vec::new();
+
+        // Add the pretty printers for the standard library first.
+        section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0");
+
+        // Next, add the pretty printers that were specified via the `#[debugger_visualizer]` attribute.
+        let visualizers = collect_debugger_visualizers_transitive(
+            cx.tcx,
+            DebuggerVisualizerType::GdbPrettyPrinter,
+        );
+        let crate_name = cx.tcx.crate_name(LOCAL_CRATE);
+        for (index, visualizer) in visualizers.iter().enumerate() {
+            // The initial byte `4` instructs GDB that the following pretty printer
+            // is defined inline as opposed to in a standalone file.
+            section_contents.extend_from_slice(b"\x04");
+            let vis_name = format!("pretty-printer-{crate_name}-{index}\n");
+            section_contents.extend_from_slice(vis_name.as_bytes());
+            section_contents.extend_from_slice(&visualizer.src);
+
+            // The final byte `0` tells GDB that the pretty printer has been
+            // fully defined and that it can continue searching for additional
+            // pretty printers.
+            section_contents.extend_from_slice(b"\0");
+        }
+
+        unsafe {
+            let section_contents = section_contents.as_slice();
+            let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64);
+
+            let section_var = cx
+                .define_global(section_var_name, llvm_type)
+                .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
+            llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr().cast());
+            llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
+            llvm::LLVMSetGlobalConstant(section_var, llvm::True);
+            llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
+            llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
+            // This should make sure that the whole section is not larger than
+            // the string it contains. Otherwise we get a warning from GDB.
+            llvm::LLVMSetAlignment(section_var, 1);
+            section_var
+        }
+    })
+}
+
+pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
+    let omit_gdb_pretty_printer_section =
+        attr::contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
+
+    // To ensure the section `__rustc_debug_gdb_scripts_section__` will not create
+    // ODR violations at link time, this section will not be emitted for rlibs since
+    // each rlib could produce a different set of visualizers that would be embedded
+    // in the `.debug_gdb_scripts` section. For that reason, we make sure that the
+    // section is only emitted for leaf crates.
+    let embed_visualizers = cx.tcx.crate_types().iter().any(|&crate_type| match crate_type {
+        CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
+            // These are crate types for which we will embed pretty printers since they
+            // are treated as leaf crates.
+            true
+        }
+        CrateType::ProcMacro => {
+            // We could embed pretty printers for proc macro crates too but it does not
+            // seem like a good default, since this is a rare use case and we don't
+            // want to slow down the common case.
+            false
+        }
+        CrateType::Rlib => {
+            // As per the above description, embedding pretty printers for rlibs could
+            // lead to ODR violations so we skip this crate type as well.
+            false
+        }
+    });
+
+    !omit_gdb_pretty_printer_section
+        && cx.sess().opts.debuginfo != DebugInfo::None
+        && cx.sess().target.emit_debug_gdb_scripts
+        && embed_visualizers
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
new file mode 100644
index 00000000000..5782b156335
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -0,0 +1,1591 @@
+use self::type_map::DINodeCreationResult;
+use self::type_map::Stub;
+use self::type_map::UniqueTypeId;
+
+use super::namespace::mangled_name_of_instance;
+use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name};
+use super::utils::{
+    create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB,
+};
+use super::CodegenUnitDebugContext;
+
+use crate::abi;
+use crate::common::CodegenCx;
+use crate::debuginfo::metadata::type_map::build_type_with_children;
+use crate::debuginfo::utils::fat_pointer_kind;
+use crate::debuginfo::utils::FatPtrKind;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+    DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind,
+    DebugNameTableKind,
+};
+use crate::value::Value;
+
+use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo;
+use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_fs_util::path_to_c_string;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_middle::bug;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{
+    self, AdtKind, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt, Visibility,
+};
+use rustc_session::config::{self, DebugInfo, Lto};
+use rustc_span::symbol::Symbol;
+use rustc_span::FileName;
+use rustc_span::{FileNameDisplayPreference, SourceFile};
+use rustc_symbol_mangling::typeid_for_trait_ref;
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::DebuginfoKind;
+use smallvec::smallvec;
+
+use libc::{c_char, c_longlong, c_uint};
+use std::borrow::Cow;
+use std::fmt::{self, Write};
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::path::{Path, PathBuf};
+use std::ptr;
+
+impl PartialEq for llvm::Metadata {
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+impl Eq for llvm::Metadata {}
+
+impl Hash for llvm::Metadata {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        (self as *const Self).hash(hasher);
+    }
+}
+
+impl fmt::Debug for llvm::Metadata {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        (self as *const Self).fmt(f)
+    }
+}
+
+// From DWARF 5.
+// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1.
+const DW_LANG_RUST: c_uint = 0x1c;
+#[allow(non_upper_case_globals)]
+const DW_ATE_boolean: c_uint = 0x02;
+#[allow(non_upper_case_globals)]
+const DW_ATE_float: c_uint = 0x04;
+#[allow(non_upper_case_globals)]
+const DW_ATE_signed: c_uint = 0x05;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned: c_uint = 0x07;
+#[allow(non_upper_case_globals)]
+const DW_ATE_UTF: c_uint = 0x10;
+
+pub(super) const UNKNOWN_LINE_NUMBER: c_uint = 0;
+pub(super) const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+
+const NO_SCOPE_METADATA: Option<&DIScope> = None;
+/// A function that returns an empty list of generic parameter debuginfo nodes.
+const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<&'ll DIType> = |_| SmallVec::new();
+
+// SmallVec is used quite a bit in this module, so create a shorthand.
+// The actual number of elements is not so important.
+pub type SmallVec<T> = smallvec::SmallVec<[T; 16]>;
+
+mod enums;
+mod type_map;
+
+pub(crate) use type_map::TypeMap;
+
+/// Returns from the enclosing function if the type debuginfo node with the given
+/// unique ID can be found in the type map.
+macro_rules! return_if_di_node_created_in_meantime {
+    ($cx: expr, $unique_type_id: expr) => {
+        if let Some(di_node) = debug_context($cx).type_map.di_node_for_unique_id($unique_type_id) {
+            return DINodeCreationResult::new(di_node, true);
+        }
+    };
+}
+
+/// Extract size and alignment from a TyAndLayout.
+#[inline]
+fn size_and_align_of(ty_and_layout: TyAndLayout<'_>) -> (Size, Align) {
+    (ty_and_layout.size, ty_and_layout.align.abi)
+}
+
+/// Creates debuginfo for a fixed size array (e.g. `[u64; 123]`).
+/// For slices (that is, "arrays" of unknown size) use [build_slice_type_di_node].
+fn build_fixed_size_array_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+    array_type: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let ty::Array(element_type, len) = array_type.kind() else {
+        bug!("build_fixed_size_array_di_node() called with non-ty::Array type `{:?}`", array_type)
+    };
+
+    let element_type_di_node = type_di_node(cx, *element_type);
+
+    return_if_di_node_created_in_meantime!(cx, unique_type_id);
+
+    let (size, align) = cx.size_and_align_of(array_type);
+
+    let upper_bound = len.eval_target_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
+
+    let subrange =
+        unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
+
+    let subscripts = create_DIArray(DIB(cx), &[subrange]);
+    let di_node = unsafe {
+        llvm::LLVMRustDIBuilderCreateArrayType(
+            DIB(cx),
+            size.bits(),
+            align.bits() as u32,
+            element_type_di_node,
+            subscripts,
+        )
+    };
+
+    DINodeCreationResult::new(di_node, false)
+}
+
+/// Creates debuginfo for built-in pointer-like things:
+///
+///  - ty::Ref
+///  - ty::RawPtr
+///  - ty::Adt in the case it's Box
+///
+/// At some point we might want to remove the special handling of Box
+/// and treat it the same as other smart pointers (like Rc, Arc, ...).
+fn build_pointer_or_reference_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    ptr_type: Ty<'tcx>,
+    pointee_type: Ty<'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    // The debuginfo generated by this function is only valid if `ptr_type` is really just
+    // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
+    debug_assert_eq!(
+        cx.size_and_align_of(ptr_type),
+        cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
+    );
+
+    let pointee_type_di_node = type_di_node(cx, pointee_type);
+
+    return_if_di_node_created_in_meantime!(cx, unique_type_id);
+
+    let data_layout = &cx.tcx.data_layout;
+    let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
+
+    match fat_pointer_kind(cx, pointee_type) {
+        None => {
+            // This is a thin pointer. Create a regular pointer type and give it the correct name.
+            debug_assert_eq!(
+                (data_layout.pointer_size, data_layout.pointer_align.abi),
+                cx.size_and_align_of(ptr_type),
+                "ptr_type={ptr_type}, pointee_type={pointee_type}",
+            );
+
+            let di_node = unsafe {
+                llvm::LLVMRustDIBuilderCreatePointerType(
+                    DIB(cx),
+                    pointee_type_di_node,
+                    data_layout.pointer_size.bits(),
+                    data_layout.pointer_align.abi.bits() as u32,
+                    0, // Ignore DWARF address space.
+                    ptr_type_debuginfo_name.as_ptr().cast(),
+                    ptr_type_debuginfo_name.len(),
+                )
+            };
+
+            DINodeCreationResult { di_node, already_stored_in_typemap: false }
+        }
+        Some(fat_pointer_kind) => {
+            type_map::build_type_with_children(
+                cx,
+                type_map::stub(
+                    cx,
+                    Stub::Struct,
+                    unique_type_id,
+                    &ptr_type_debuginfo_name,
+                    cx.size_and_align_of(ptr_type),
+                    NO_SCOPE_METADATA,
+                    DIFlags::FlagZero,
+                ),
+                |cx, owner| {
+                    // FIXME: If this fat pointer is a `Box` then we don't want to use its
+                    //        type layout and instead use the layout of the raw pointer inside
+                    //        of it.
+                    //        The proper way to handle this is to not treat Box as a pointer
+                    //        at all and instead emit regular struct debuginfo for it. We just
+                    //        need to make sure that we don't break existing debuginfo consumers
+                    //        by doing that (at least not without a warning period).
+                    let layout_type = if ptr_type.is_box() {
+                        Ty::new_mut_ptr(cx.tcx, pointee_type)
+                    } else {
+                        ptr_type
+                    };
+
+                    let layout = cx.layout_of(layout_type);
+                    let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
+                    let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);
+
+                    let (addr_field_name, extra_field_name) = match fat_pointer_kind {
+                        FatPtrKind::Dyn => ("pointer", "vtable"),
+                        FatPtrKind::Slice => ("data_ptr", "length"),
+                    };
+
+                    debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
+                    debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
+
+                    // The data pointer type is a regular, thin pointer, regardless of whether this
+                    // is a slice or a trait object.
+                    let data_ptr_type_di_node = unsafe {
+                        llvm::LLVMRustDIBuilderCreatePointerType(
+                            DIB(cx),
+                            pointee_type_di_node,
+                            addr_field.size.bits(),
+                            addr_field.align.abi.bits() as u32,
+                            0, // Ignore DWARF address space.
+                            std::ptr::null(),
+                            0,
+                        )
+                    };
+
+                    smallvec![
+                        build_field_di_node(
+                            cx,
+                            owner,
+                            addr_field_name,
+                            (addr_field.size, addr_field.align.abi),
+                            layout.fields.offset(abi::FAT_PTR_ADDR),
+                            DIFlags::FlagZero,
+                            data_ptr_type_di_node,
+                        ),
+                        build_field_di_node(
+                            cx,
+                            owner,
+                            extra_field_name,
+                            (extra_field.size, extra_field.align.abi),
+                            layout.fields.offset(abi::FAT_PTR_EXTRA),
+                            DIFlags::FlagZero,
+                            type_di_node(cx, extra_field.ty),
+                        ),
+                    ]
+                },
+                NO_GENERICS,
+            )
+        }
+    }
+}
+
+fn build_subroutine_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    // It's possible to create a self-referential
+    // type in Rust by using 'impl trait':
+    //
+    // fn foo() -> impl Copy { foo }
+    //
+    // Unfortunately LLVM's API does not allow us to create recursive subroutine types.
+    // In order to work around that restriction we place a marker type in the type map,
+    // before creating the actual type. If the actual type is recursive, it will hit the
+    // marker type. So we end up with a type that looks like
+    //
+    // fn foo() -> <recursive_type>
+    //
+    // Once that is created, we replace the marker in the typemap with the actual type.
+    debug_context(cx)
+        .type_map
+        .unique_id_to_di_node
+        .borrow_mut()
+        .insert(unique_type_id, recursion_marker_type_di_node(cx));
+
+    let fn_ty = unique_type_id.expect_ty();
+    let signature = cx
+        .tcx
+        .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), fn_ty.fn_sig(cx.tcx));
+
+    let signature_di_nodes: SmallVec<_> = iter::once(
+        // return type
+        match signature.output().kind() {
+            ty::Tuple(tys) if tys.is_empty() => {
+                // this is a "void" function
+                None
+            }
+            _ => Some(type_di_node(cx, signature.output())),
+        },
+    )
+    .chain(
+        // regular arguments
+        signature.inputs().iter().map(|&argument_type| Some(type_di_node(cx, argument_type))),
+    )
+    .collect();
+
+    debug_context(cx).type_map.unique_id_to_di_node.borrow_mut().remove(&unique_type_id);
+
+    let fn_di_node = unsafe {
+        llvm::LLVMRustDIBuilderCreateSubroutineType(
+            DIB(cx),
+            create_DIArray(DIB(cx), &signature_di_nodes[..]),
+        )
+    };
+
+    // This is actually a function pointer, so wrap it in pointer DI.
+    let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
+    let (size, align) = match fn_ty.kind() {
+        ty::FnDef(..) => (0, 1),
+        ty::FnPtr(..) => (
+            cx.tcx.data_layout.pointer_size.bits(),
+            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+        ),
+        _ => unreachable!(),
+    };
+    let di_node = unsafe {
+        llvm::LLVMRustDIBuilderCreatePointerType(
+            DIB(cx),
+            fn_di_node,
+            size,
+            align,
+            0, // Ignore DWARF address space.
+            name.as_ptr().cast(),
+            name.len(),
+        )
+    };
+
+    DINodeCreationResult::new(di_node, false)
+}
+
+/// Create debuginfo for `dyn SomeTrait` types. Currently these are empty structs
+/// with the correct type name (e.g. "dyn SomeTrait<Foo, Item=u32> + Sync").
+fn build_dyn_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    dyn_type: Ty<'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    if let ty::Dynamic(..) = dyn_type.kind() {
+        let type_name = compute_debuginfo_type_name(cx.tcx, dyn_type, true);
+        type_map::build_type_with_children(
+            cx,
+            type_map::stub(
+                cx,
+                Stub::Struct,
+                unique_type_id,
+                &type_name,
+                cx.size_and_align_of(dyn_type),
+                NO_SCOPE_METADATA,
+                DIFlags::FlagZero,
+            ),
+            |_, _| smallvec![],
+            NO_GENERICS,
+        )
+    } else {
+        bug!(
+            "Only ty::Dynamic is valid for build_dyn_type_di_node(). Found {:?} instead.",
+            dyn_type
+        )
+    }
+}
+
+/// Create debuginfo for `[T]` and `str`. These are unsized.
+///
+/// NOTE: We currently just emit the debuginfo for the element type here
+/// (i.e. `T` for slices and `u8` for `str`), so that we end up with
+/// `*const T` for the `data_ptr` field of the corresponding fat-pointer
+/// debuginfo of `&[T]`.
+///
+/// It would be preferable and more accurate if we emitted a DIArray of T
+/// without an upper bound instead. That is, LLVM already supports emitting
+/// debuginfo of arrays of unknown size. But GDB currently seems to end up
+/// in an infinite loop when confronted with such a type.
+///
+/// As a side effect of the current encoding every instance of a type like
+/// `struct Foo { unsized_field: [u8] }` will look like
+/// `struct Foo { unsized_field: u8 }` in debuginfo. If the length of the
+/// slice is zero, then accessing `unsized_field` in the debugger would
+/// result in an out-of-bounds access.
+fn build_slice_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    slice_type: Ty<'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let element_type = match slice_type.kind() {
+        ty::Slice(element_type) => *element_type,
+        ty::Str => cx.tcx.types.u8,
+        _ => {
+            bug!(
+                "Only ty::Slice is valid for build_slice_type_di_node(). Found {:?} instead.",
+                slice_type
+            )
+        }
+    };
+
+    let element_type_di_node = type_di_node(cx, element_type);
+    return_if_di_node_created_in_meantime!(cx, unique_type_id);
+    DINodeCreationResult { di_node: element_type_di_node, already_stored_in_typemap: false }
+}
+
+/// Get the debuginfo node for the given type.
+///
+/// This function will look up the debuginfo node in the TypeMap. If it can't find it, it
+/// will create the node by dispatching to the corresponding `build_*_di_node()` function.
+pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+    let unique_type_id = UniqueTypeId::for_ty(cx.tcx, t);
+
+    if let Some(existing_di_node) = debug_context(cx).type_map.di_node_for_unique_id(unique_type_id)
+    {
+        return existing_di_node;
+    }
+
+    debug!("type_di_node: {:?} kind: {:?}", t, t.kind());
+
+    let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
+        ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
+            build_basic_type_di_node(cx, t)
+        }
+        ty::Tuple(elements) if elements.is_empty() => build_basic_type_di_node(cx, t),
+        ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t),
+        ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id),
+        ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id),
+        ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id),
+        ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
+            build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
+        }
+        // Some `Box`es are newtyped pointers; make debuginfo aware of that.
+        // Only works if the allocator argument is a 1-ZST and hence irrelevant for layout
+        // (or if there is no allocator argument).
+        ty::Adt(def, args)
+            if def.is_box()
+                && args.get(1).map_or(true, |arg| cx.layout_of(arg.expect_ty()).is_1zst()) =>
+        {
+            build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
+        }
+        ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
+        ty::Closure(..) => build_closure_env_di_node(cx, unique_type_id),
+        ty::CoroutineClosure(..) => build_closure_env_di_node(cx, unique_type_id),
+        ty::Coroutine(..) => enums::build_coroutine_di_node(cx, unique_type_id),
+        ty::Adt(def, ..) => match def.adt_kind() {
+            AdtKind::Struct => build_struct_type_di_node(cx, unique_type_id),
+            AdtKind::Union => build_union_type_di_node(cx, unique_type_id),
+            AdtKind::Enum => enums::build_enum_type_di_node(cx, unique_type_id),
+        },
+        ty::Tuple(_) => build_tuple_type_di_node(cx, unique_type_id),
+        // Type parameters from polymorphized functions.
+        ty::Param(_) => build_param_type_di_node(cx, t),
+        _ => bug!("debuginfo: unexpected type in type_di_node(): {:?}", t),
+    };
+
+    {
+        if already_stored_in_typemap {
+            // Make sure that we really do have a `TypeMap` entry for the unique type ID.
+            let di_node_for_uid =
+                match debug_context(cx).type_map.di_node_for_unique_id(unique_type_id) {
+                    Some(di_node) => di_node,
+                    None => {
+                        bug!(
+                            "expected type debuginfo node for unique \
+                               type ID '{:?}' to already be in \
+                               the `debuginfo::TypeMap` but it \
+                               was not.",
+                            unique_type_id,
+                        );
+                    }
+                };
+
+            debug_assert_eq!(di_node_for_uid as *const _, di_node as *const _);
+        } else {
+            debug_context(cx).type_map.insert(unique_type_id, di_node);
+        }
+    }
+
+    di_node
+}
+
+// FIXME(mw): Cache this via a regular UniqueTypeId instead of an extra field in the debug context.
+fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll DIType {
+    *debug_context(cx).recursion_marker_type.get_or_init(move || {
+        unsafe {
+            // The choice of type here is pretty arbitrary -
+            // anything reading the debuginfo for a recursive
+            // type is going to see *something* weird - the only
+            // question is what exactly it will see.
+            //
+            // FIXME: the name `<recur_type>` does not fit the naming scheme
+            //        of other types.
+            //
+            // FIXME: it might make sense to use an actual pointer type here
+            //        so that debuggers can show the address.
+            let name = "<recur_type>";
+            llvm::LLVMRustDIBuilderCreateBasicType(
+                DIB(cx),
+                name.as_ptr().cast(),
+                name.len(),
+                cx.tcx.data_layout.pointer_size.bits(),
+                DW_ATE_unsigned,
+            )
+        }
+    })
+}
+
+fn hex_encode(data: &[u8]) -> String {
+    let mut hex_string = String::with_capacity(data.len() * 2);
+    for byte in data.iter() {
+        write!(&mut hex_string, "{byte:02x}").unwrap();
+    }
+    hex_string
+}
+
+pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile {
+    let cache_key = Some((source_file.stable_id, source_file.src_hash));
+    return debug_context(cx)
+        .created_files
+        .borrow_mut()
+        .entry(cache_key)
+        .or_insert_with(|| alloc_new_file_metadata(cx, source_file));
+
+    #[instrument(skip(cx, source_file), level = "debug")]
+    fn alloc_new_file_metadata<'ll>(
+        cx: &CodegenCx<'ll, '_>,
+        source_file: &SourceFile,
+    ) -> &'ll DIFile {
+        debug!(?source_file.name);
+
+        use rustc_session::RemapFileNameExt;
+        let (directory, file_name) = match &source_file.name {
+            FileName::Real(filename) => {
+                let working_directory = &cx.sess().opts.working_dir;
+                debug!(?working_directory);
+
+                if cx.sess().should_prefer_remapped_for_codegen() {
+                    let filename = cx
+                        .sess()
+                        .source_map()
+                        .path_mapping()
+                        .to_embeddable_absolute_path(filename.clone(), working_directory);
+
+                    // Construct the absolute path of the file
+                    let abs_path = filename.remapped_path_if_available();
+                    debug!(?abs_path);
+
+                    if let Ok(rel_path) =
+                        abs_path.strip_prefix(working_directory.remapped_path_if_available())
+                    {
+                        // If the compiler's working directory (which also is the DW_AT_comp_dir of
+                        // the compilation unit) is a prefix of the path we are about to emit, then
+                        // only emit the part relative to the working directory.
+                        // Because of path remapping we sometimes see strange things here: `abs_path`
+                        // might actually look like a relative path
+                        // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
+                        // taking the working directory into account, downstream tooling will
+                        // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
+                        // which makes no sense. Usually in such cases the working directory will also
+                        // be remapped to `<crate-name-and-version>` or some other prefix of the path
+                        // we are remapping, so we end up with
+                        // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
+                        // By moving the working directory portion into the `directory` part of the
+                        // DIFile, we allow LLVM to emit just the relative path for DWARF, while
+                        // still emitting the correct absolute path for CodeView.
+                        (
+                            working_directory.to_string_lossy(FileNameDisplayPreference::Remapped),
+                            rel_path.to_string_lossy().into_owned(),
+                        )
+                    } else {
+                        ("".into(), abs_path.to_string_lossy().into_owned())
+                    }
+                } else {
+                    let working_directory = working_directory.local_path_if_available();
+                    let filename = filename.local_path_if_available();
+
+                    debug!(?working_directory, ?filename);
+
+                    let abs_path: Cow<'_, Path> = if filename.is_absolute() {
+                        filename.into()
+                    } else {
+                        let mut p = PathBuf::new();
+                        p.push(working_directory);
+                        p.push(filename);
+                        p.into()
+                    };
+
+                    if let Ok(rel_path) = abs_path.strip_prefix(working_directory) {
+                        (
+                            working_directory.to_string_lossy(),
+                            rel_path.to_string_lossy().into_owned(),
+                        )
+                    } else {
+                        ("".into(), abs_path.to_string_lossy().into_owned())
+                    }
+                }
+            }
+            other => {
+                debug!(?other);
+                ("".into(), other.for_codegen(cx.sess()).to_string_lossy().into_owned())
+            }
+        };
+
+        let hash_kind = match source_file.src_hash.kind {
+            rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
+            rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
+            rustc_span::SourceFileHashAlgorithm::Sha256 => llvm::ChecksumKind::SHA256,
+        };
+        let hash_value = hex_encode(source_file.src_hash.hash_bytes());
+
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateFile(
+                DIB(cx),
+                file_name.as_ptr().cast(),
+                file_name.len(),
+                directory.as_ptr().cast(),
+                directory.len(),
+                hash_kind,
+                hash_value.as_ptr().cast(),
+                hash_value.len(),
+            )
+        }
+    }
+}
+
+pub fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
+    debug_context(cx).created_files.borrow_mut().entry(None).or_insert_with(|| unsafe {
+        let file_name = "<unknown>";
+        let directory = "";
+        let hash_value = "";
+
+        llvm::LLVMRustDIBuilderCreateFile(
+            DIB(cx),
+            file_name.as_ptr().cast(),
+            file_name.len(),
+            directory.as_ptr().cast(),
+            directory.len(),
+            llvm::ChecksumKind::None,
+            hash_value.as_ptr().cast(),
+            hash_value.len(),
+        )
+    })
+}
+
+trait MsvcBasicName {
+    fn msvc_basic_name(self) -> &'static str;
+}
+
+impl MsvcBasicName for ty::IntTy {
+    fn msvc_basic_name(self) -> &'static str {
+        match self {
+            ty::IntTy::Isize => "ptrdiff_t",
+            ty::IntTy::I8 => "__int8",
+            ty::IntTy::I16 => "__int16",
+            ty::IntTy::I32 => "__int32",
+            ty::IntTy::I64 => "__int64",
+            ty::IntTy::I128 => "__int128",
+        }
+    }
+}
+
+impl MsvcBasicName for ty::UintTy {
+    fn msvc_basic_name(self) -> &'static str {
+        match self {
+            ty::UintTy::Usize => "size_t",
+            ty::UintTy::U8 => "unsigned __int8",
+            ty::UintTy::U16 => "unsigned __int16",
+            ty::UintTy::U32 => "unsigned __int32",
+            ty::UintTy::U64 => "unsigned __int64",
+            ty::UintTy::U128 => "unsigned __int128",
+        }
+    }
+}
+
+impl MsvcBasicName for ty::FloatTy {
+    fn msvc_basic_name(self) -> &'static str {
+        // FIXME: f16 and f128 have no MSVC representation. We could improve the debuginfo.
+        // See: <https://github.com/rust-lang/rust/pull/114607/files#r1454683264>
+        match self {
+            ty::FloatTy::F16 => "half",
+            ty::FloatTy::F32 => "float",
+            ty::FloatTy::F64 => "double",
+            ty::FloatTy::F128 => "fp128",
+        }
+    }
+}
+
+fn build_basic_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    debug!("build_basic_type_di_node: {:?}", t);
+
+    // When targeting MSVC, emit MSVC style type names for compatibility with
+    // .natvis visualizers (and perhaps other existing native debuggers?)
+    let cpp_like_debuginfo = cpp_like_debuginfo(cx.tcx);
+
+    let (name, encoding) = match t.kind() {
+        ty::Never => ("!", DW_ATE_unsigned),
+        ty::Tuple(elements) if elements.is_empty() => {
+            if cpp_like_debuginfo {
+                return build_tuple_type_di_node(cx, UniqueTypeId::for_ty(cx.tcx, t));
+            } else {
+                ("()", DW_ATE_unsigned)
+            }
+        }
+        ty::Bool => ("bool", DW_ATE_boolean),
+        ty::Char => ("char", DW_ATE_UTF),
+        ty::Int(int_ty) if cpp_like_debuginfo => (int_ty.msvc_basic_name(), DW_ATE_signed),
+        ty::Uint(uint_ty) if cpp_like_debuginfo => (uint_ty.msvc_basic_name(), DW_ATE_unsigned),
+        ty::Float(float_ty) if cpp_like_debuginfo => (float_ty.msvc_basic_name(), DW_ATE_float),
+        ty::Int(int_ty) => (int_ty.name_str(), DW_ATE_signed),
+        ty::Uint(uint_ty) => (uint_ty.name_str(), DW_ATE_unsigned),
+        ty::Float(float_ty) => (float_ty.name_str(), DW_ATE_float),
+        _ => bug!("debuginfo::build_basic_type_di_node - `t` is invalid type"),
+    };
+
+    let ty_di_node = unsafe {
+        llvm::LLVMRustDIBuilderCreateBasicType(
+            DIB(cx),
+            name.as_ptr().cast(),
+            name.len(),
+            cx.size_of(t).bits(),
+            encoding,
+        )
+    };
+
+    if !cpp_like_debuginfo {
+        return DINodeCreationResult::new(ty_di_node, false);
+    }
+
+    let typedef_name = match t.kind() {
+        ty::Int(int_ty) => int_ty.name_str(),
+        ty::Uint(uint_ty) => uint_ty.name_str(),
+        ty::Float(float_ty) => float_ty.name_str(),
+        _ => return DINodeCreationResult::new(ty_di_node, false),
+    };
+
+    let typedef_di_node = unsafe {
+        llvm::LLVMRustDIBuilderCreateTypedef(
+            DIB(cx),
+            ty_di_node,
+            typedef_name.as_ptr().cast(),
+            typedef_name.len(),
+            unknown_file_metadata(cx),
+            0,
+            None,
+        )
+    };
+
+    DINodeCreationResult::new(typedef_di_node, false)
+}
+
+fn build_foreign_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    t: Ty<'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    debug!("build_foreign_type_di_node: {:?}", t);
+
+    let &ty::Foreign(def_id) = unique_type_id.expect_ty().kind() else {
+        bug!(
+            "build_foreign_type_di_node() called with unexpected type: {:?}",
+            unique_type_id.expect_ty()
+        );
+    };
+
+    build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &compute_debuginfo_type_name(cx.tcx, t, false),
+            cx.size_and_align_of(t),
+            Some(get_namespace_for_item(cx, def_id)),
+            DIFlags::FlagZero,
+        ),
+        |_, _| smallvec![],
+        NO_GENERICS,
+    )
+}
+
+fn build_param_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    debug!("build_param_type_di_node: {:?}", t);
+    let name = format!("{t:?}");
+    DINodeCreationResult {
+        di_node: unsafe {
+            llvm::LLVMRustDIBuilderCreateBasicType(
+                DIB(cx),
+                name.as_ptr().cast(),
+                name.len(),
+                Size::ZERO.bits(),
+                DW_ATE_unsigned,
+            )
+        },
+        already_stored_in_typemap: false,
+    }
+}
+
+pub fn build_compile_unit_di_node<'ll, 'tcx>(
+    tcx: TyCtxt<'tcx>,
+    codegen_unit_name: &str,
+    debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
+) -> &'ll DIDescriptor {
+    let mut name_in_debuginfo = tcx
+        .sess
+        .local_crate_source_file()
+        .unwrap_or_else(|| PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()));
+
+    // To avoid breaking split DWARF, we need to ensure that each codegen unit
+    // has a unique `DW_AT_name`. This is because there's a remote chance that
+    // different codegen units for the same module will have entirely
+    // identical DWARF entries for the purpose of the DWO ID, which would
+    // violate Appendix F ("Split Dwarf Object Files") of the DWARF 5
+    // specification. LLVM uses the algorithm specified in section 7.32 "Type
+    // Signature Computation" to compute the DWO ID, which does not include
+    // any fields that would distinguish compilation units. So we must embed
+    // the codegen unit name into the `DW_AT_name`. (Issue #88521.)
+    //
+    // Additionally, the OSX linker has an idiosyncrasy where it will ignore
+    // some debuginfo if multiple object files with the same `DW_AT_name` are
+    // linked together.
+    //
+    // As a workaround for these two issues, we generate unique names for each
+    // object file. Those do not correspond to an actual source file but that
+    // is harmless.
+    name_in_debuginfo.push("@");
+    name_in_debuginfo.push(codegen_unit_name);
+
+    debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
+    let rustc_producer = format!("rustc version {}", tcx.sess.cfg_version);
+    // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
+    let producer = format!("clang LLVM ({rustc_producer})");
+
+    use rustc_session::RemapFileNameExt;
+    let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
+    let work_dir = tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy();
+    let output_filenames = tcx.output_filenames(());
+    let split_name = if tcx.sess.target_can_use_split_dwarf() {
+        output_filenames
+            .split_dwarf_path(
+                tcx.sess.split_debuginfo(),
+                tcx.sess.opts.unstable_opts.split_dwarf_kind,
+                Some(codegen_unit_name),
+            )
+            // We get a path relative to the working directory from split_dwarf_path
+            .map(|f| {
+                if tcx.sess.should_prefer_remapped_for_split_debuginfo_paths() {
+                    tcx.sess.source_map().path_mapping().map_prefix(f).0
+                } else {
+                    f.into()
+                }
+            })
+    } else {
+        None
+    }
+    .unwrap_or_default();
+    let split_name = split_name.to_str().unwrap();
+    let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo);
+
+    let dwarf_version =
+        tcx.sess.opts.unstable_opts.dwarf_version.unwrap_or(tcx.sess.target.default_dwarf_version);
+    let is_dwarf_kind =
+        matches!(tcx.sess.target.debuginfo_kind, DebuginfoKind::Dwarf | DebuginfoKind::DwarfDsym);
+    // Don't emit `.debug_pubnames` and `.debug_pubtypes` on DWARFv4 or lower.
+    let debug_name_table_kind = if is_dwarf_kind && dwarf_version <= 4 {
+        DebugNameTableKind::None
+    } else {
+        DebugNameTableKind::Default
+    };
+
+    unsafe {
+        let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile(
+            debug_context.builder,
+            name_in_debuginfo.as_ptr().cast(),
+            name_in_debuginfo.len(),
+            work_dir.as_ptr().cast(),
+            work_dir.len(),
+            llvm::ChecksumKind::None,
+            ptr::null(),
+            0,
+        );
+
+        let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit(
+            debug_context.builder,
+            DW_LANG_RUST,
+            compile_unit_file,
+            producer.as_ptr().cast(),
+            producer.len(),
+            tcx.sess.opts.optimize != config::OptLevel::No,
+            c"".as_ptr().cast(),
+            0,
+            // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
+            // put the path supplied to `MCSplitDwarfFile` into the debug info of the final
+            // output(s).
+            split_name.as_ptr().cast(),
+            split_name.len(),
+            kind,
+            0,
+            tcx.sess.opts.unstable_opts.split_dwarf_inlining,
+            debug_name_table_kind,
+        );
+
+        if tcx.sess.opts.unstable_opts.profile {
+            let default_gcda_path = &output_filenames.with_extension("gcda");
+            let gcda_path =
+                tcx.sess.opts.unstable_opts.profile_emit.as_ref().unwrap_or(default_gcda_path);
+
+            let gcov_cu_info = [
+                path_to_mdstring(debug_context.llcontext, &output_filenames.with_extension("gcno")),
+                path_to_mdstring(debug_context.llcontext, gcda_path),
+                unit_metadata,
+            ];
+            let gcov_metadata = llvm::LLVMMDNodeInContext2(
+                debug_context.llcontext,
+                gcov_cu_info.as_ptr(),
+                gcov_cu_info.len(),
+            );
+            let val = llvm::LLVMMetadataAsValue(debug_context.llcontext, gcov_metadata);
+
+            llvm::LLVMAddNamedMetadataOperand(debug_context.llmod, c"llvm.gcov".as_ptr(), val);
+        }
+
+        return unit_metadata;
+    };
+
+    fn path_to_mdstring<'ll>(llcx: &'ll llvm::Context, path: &Path) -> &'ll llvm::Metadata {
+        let path_str = path_to_c_string(path);
+        unsafe { llvm::LLVMMDStringInContext2(llcx, path_str.as_ptr(), path_str.as_bytes().len()) }
+    }
+}
+
+/// Creates a `DW_TAG_member` entry inside the DIE represented by the given `type_di_node`.
+fn build_field_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    owner: &'ll DIScope,
+    name: &str,
+    size_and_align: (Size, Align),
+    offset: Size,
+    flags: DIFlags,
+    type_di_node: &'ll DIType,
+) -> &'ll DIType {
+    unsafe {
+        llvm::LLVMRustDIBuilderCreateMemberType(
+            DIB(cx),
+            owner,
+            name.as_ptr().cast(),
+            name.len(),
+            unknown_file_metadata(cx),
+            UNKNOWN_LINE_NUMBER,
+            size_and_align.0.bits(),
+            size_and_align.1.bits() as u32,
+            offset.bits(),
+            flags,
+            type_di_node,
+        )
+    }
+}
+
+/// Returns the `DIFlags` corresponding to the visibility of the item identified by `did`.
+///
+/// `DIFlags::Flag{Public,Protected,Private}` correspond to `DW_AT_accessibility`
+/// (public/protected/private), which isn't exactly right for Rust, but neither is `DW_AT_visibility`
+/// (local/exported/qualified), and there's no way to set `DW_AT_visibility` in LLVM's API.
+fn visibility_di_flags<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    did: DefId,
+    type_did: DefId,
+) -> DIFlags {
+    let parent_did = cx.tcx.parent(type_did);
+    let visibility = cx.tcx.visibility(did);
+    match visibility {
+        Visibility::Public => DIFlags::FlagPublic,
+        // Private fields have a restricted visibility of the module containing the type.
+        Visibility::Restricted(did) if did == parent_did => DIFlags::FlagPrivate,
+        // `pub(crate)`/`pub(super)` visibilities are any other restricted visibility.
+        Visibility::Restricted(..) => DIFlags::FlagProtected,
+    }
+}
+
+/// Creates the debuginfo node for a Rust struct type. May be a regular struct or a tuple-struct.
+fn build_struct_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let struct_type = unique_type_id.expect_ty();
+    let ty::Adt(adt_def, _) = struct_type.kind() else {
+        bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type);
+    };
+    debug_assert!(adt_def.is_struct());
+    let containing_scope = get_namespace_for_item(cx, adt_def.did());
+    let struct_type_and_layout = cx.layout_of(struct_type);
+    let variant_def = adt_def.non_enum_variant();
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &compute_debuginfo_type_name(cx.tcx, struct_type, false),
+            size_and_align_of(struct_type_and_layout),
+            Some(containing_scope),
+            visibility_di_flags(cx, adt_def.did(), adt_def.did()),
+        ),
+        // Fields:
+        |cx, owner| {
+            variant_def
+                .fields
+                .iter()
+                .enumerate()
+                .map(|(i, f)| {
+                    let field_name = if variant_def.ctor_kind() == Some(CtorKind::Fn) {
+                        // This is a tuple struct
+                        tuple_field_name(i)
+                    } else {
+                        // This is a struct with named fields
+                        Cow::Borrowed(f.name.as_str())
+                    };
+                    let field_layout = struct_type_and_layout.field(cx, i);
+                    build_field_di_node(
+                        cx,
+                        owner,
+                        &field_name[..],
+                        (field_layout.size, field_layout.align.abi),
+                        struct_type_and_layout.fields.offset(i),
+                        visibility_di_flags(cx, f.did, adt_def.did()),
+                        type_di_node(cx, field_layout.ty),
+                    )
+                })
+                .collect()
+        },
+        |cx| build_generic_type_param_di_nodes(cx, struct_type),
+    )
+}
+
+//=-----------------------------------------------------------------------------
+// Tuples
+//=-----------------------------------------------------------------------------
+
+/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or coroutine.
+/// For a coroutine, this will handle upvars shared by all states.
+fn build_upvar_field_di_nodes<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    closure_or_coroutine_ty: Ty<'tcx>,
+    closure_or_coroutine_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+    let (&def_id, up_var_tys) = match closure_or_coroutine_ty.kind() {
+        ty::Coroutine(def_id, args) => (def_id, args.as_coroutine().prefix_tys()),
+        ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()),
+        ty::CoroutineClosure(def_id, args) => (def_id, args.as_coroutine_closure().upvar_tys()),
+        _ => {
+            bug!(
+                "build_upvar_field_di_nodes() called with non-closure-or-coroutine-type: {:?}",
+                closure_or_coroutine_ty
+            )
+        }
+    };
+
+    debug_assert!(
+        up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+    );
+
+    let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);
+    let layout = cx.layout_of(closure_or_coroutine_ty);
+
+    up_var_tys
+        .into_iter()
+        .zip(capture_names.iter())
+        .enumerate()
+        .map(|(index, (up_var_ty, capture_name))| {
+            build_field_di_node(
+                cx,
+                closure_or_coroutine_di_node,
+                capture_name.as_str(),
+                cx.size_and_align_of(up_var_ty),
+                layout.fields.offset(index),
+                DIFlags::FlagZero,
+                type_di_node(cx, up_var_ty),
+            )
+        })
+        .collect()
+}
+
+/// Builds the DW_TAG_structure_type debuginfo node for a Rust tuple type.
+fn build_tuple_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let tuple_type = unique_type_id.expect_ty();
+    let &ty::Tuple(component_types) = tuple_type.kind() else {
+        bug!("build_tuple_type_di_node() called with non-tuple-type: {:?}", tuple_type)
+    };
+
+    let tuple_type_and_layout = cx.layout_of(tuple_type);
+    let type_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &type_name,
+            size_and_align_of(tuple_type_and_layout),
+            NO_SCOPE_METADATA,
+            DIFlags::FlagZero,
+        ),
+        // Fields:
+        |cx, tuple_di_node| {
+            component_types
+                .into_iter()
+                .enumerate()
+                .map(|(index, component_type)| {
+                    build_field_di_node(
+                        cx,
+                        tuple_di_node,
+                        &tuple_field_name(index),
+                        cx.size_and_align_of(component_type),
+                        tuple_type_and_layout.fields.offset(index),
+                        DIFlags::FlagZero,
+                        type_di_node(cx, component_type),
+                    )
+                })
+                .collect()
+        },
+        NO_GENERICS,
+    )
+}
+
+/// Builds the debuginfo node for a closure environment.
+fn build_closure_env_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let closure_env_type = unique_type_id.expect_ty();
+    let &(ty::Closure(def_id, _) | ty::CoroutineClosure(def_id, _)) = closure_env_type.kind()
+    else {
+        bug!("build_closure_env_di_node() called with non-closure-type: {:?}", closure_env_type)
+    };
+    let containing_scope = get_namespace_for_item(cx, def_id);
+    let type_name = compute_debuginfo_type_name(cx.tcx, closure_env_type, false);
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &type_name,
+            cx.size_and_align_of(closure_env_type),
+            Some(containing_scope),
+            DIFlags::FlagZero,
+        ),
+        // Fields:
+        |cx, owner| build_upvar_field_di_nodes(cx, closure_env_type, owner),
+        NO_GENERICS,
+    )
+}
+
+/// Build the debuginfo node for a Rust `union` type.
+fn build_union_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let union_type = unique_type_id.expect_ty();
+    let (union_def_id, variant_def) = match union_type.kind() {
+        ty::Adt(def, _) => (def.did(), def.non_enum_variant()),
+        _ => bug!("build_union_type_di_node on a non-ADT"),
+    };
+    let containing_scope = get_namespace_for_item(cx, union_def_id);
+    let union_ty_and_layout = cx.layout_of(union_type);
+    let type_name = compute_debuginfo_type_name(cx.tcx, union_type, false);
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Union,
+            unique_type_id,
+            &type_name,
+            size_and_align_of(union_ty_and_layout),
+            Some(containing_scope),
+            DIFlags::FlagZero,
+        ),
+        // Fields:
+        |cx, owner| {
+            variant_def
+                .fields
+                .iter()
+                .enumerate()
+                .map(|(i, f)| {
+                    let field_layout = union_ty_and_layout.field(cx, i);
+                    build_field_di_node(
+                        cx,
+                        owner,
+                        f.name.as_str(),
+                        size_and_align_of(field_layout),
+                        Size::ZERO,
+                        DIFlags::FlagZero,
+                        type_di_node(cx, field_layout.ty),
+                    )
+                })
+                .collect()
+        },
+        // Generics:
+        |cx| build_generic_type_param_di_nodes(cx, union_type),
+    )
+}
+
+/// Computes the debuginfo nodes for the generic type parameters of the given type, if any.
+fn build_generic_type_param_di_nodes<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    ty: Ty<'tcx>,
+) -> SmallVec<&'ll DIType> {
+    if let ty::Adt(def, args) = *ty.kind() {
+        if args.types().next().is_some() {
+            let generics = cx.tcx.generics_of(def.did());
+            let names = get_parameter_names(cx, generics);
+            let template_params: SmallVec<_> = iter::zip(args, names)
+                .filter_map(|(kind, name)| {
+                    kind.as_type().map(|ty| {
+                        let actual_type =
+                            cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+                        let actual_type_di_node = type_di_node(cx, actual_type);
+                        let name = name.as_str();
+                        unsafe {
+                            llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+                                DIB(cx),
+                                None,
+                                name.as_ptr().cast(),
+                                name.len(),
+                                actual_type_di_node,
+                            )
+                        }
+                    })
+                })
+                .collect();
+
+            return template_params;
+        }
+    }
+
+    return smallvec![];
+
+    fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+        let mut names = generics
+            .parent
+            .map_or_else(Vec::new, |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+        names.extend(generics.params.iter().map(|param| param.name));
+        names
+    }
+}
+
+/// Creates debug information for the given global variable.
+///
+/// Adds the created debuginfo nodes directly to the crate's IR.
+pub fn build_global_var_di_node<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) {
+    if cx.dbg_cx.is_none() {
+        return;
+    }
+
+    // Only create type information if full debuginfo is enabled
+    if cx.sess().opts.debuginfo != DebugInfo::Full {
+        return;
+    }
+
+    let tcx = cx.tcx;
+
+    // We may want to remove the namespace scope if we're in an extern block (see
+    // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952).
+    let var_scope = get_namespace_for_item(cx, def_id);
+    let span = tcx.def_span(def_id);
+
+    let (file_metadata, line_number) = if !span.is_dummy() {
+        let loc = cx.lookup_debug_loc(span.lo());
+        (file_metadata(cx, &loc.file), loc.line)
+    } else {
+        (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)
+    };
+
+    let is_local_to_unit = is_node_local_to_unit(cx, def_id);
+
+    let DefKind::Static { nested, .. } = cx.tcx.def_kind(def_id) else { bug!() };
+    if nested {
+        return;
+    }
+    let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx, ty::ParamEnv::reveal_all());
+    let type_di_node = type_di_node(cx, variable_type);
+    let var_name = tcx.item_name(def_id);
+    let var_name = var_name.as_str();
+    let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id)).name;
+    // When empty, linkage_name field is omitted,
+    // which is what we want for no_mangle statics
+    let linkage_name = if var_name == linkage_name { "" } else { linkage_name };
+
+    let global_align = cx.align_of(variable_type);
+
+    unsafe {
+        llvm::LLVMRustDIBuilderCreateStaticVariable(
+            DIB(cx),
+            Some(var_scope),
+            var_name.as_ptr().cast(),
+            var_name.len(),
+            linkage_name.as_ptr().cast(),
+            linkage_name.len(),
+            file_metadata,
+            line_number,
+            type_di_node,
+            is_local_to_unit,
+            global,
+            None,
+            global_align.bits() as u32,
+        );
+    }
+}
+
+/// Generates LLVM debuginfo for a vtable.
+///
+/// The vtable type looks like a struct with a field for each function pointer and super-trait
+/// pointer it contains (plus the `size` and `align` fields).
+///
+/// Except for `size`, `align`, and `drop_in_place`, the field names don't try to mirror
+/// the names of the methods they point to. This can be implemented in the future once there
+/// is a proper disambiguation scheme for dealing with methods from different traits that have
+/// the same name.
+fn build_vtable_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    ty: Ty<'tcx>,
+    poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> &'ll DIType {
+    let tcx = cx.tcx;
+
+    let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
+        let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
+        let trait_ref = tcx.erase_regions(trait_ref);
+
+        tcx.vtable_entries(trait_ref)
+    } else {
+        TyCtxt::COMMON_VTABLE_ENTRIES
+    };
+
+    // All function pointers are described as opaque pointers. This could be improved in the future
+    // by describing them as actual function pointers.
+    let void_pointer_ty = Ty::new_imm_ptr(tcx, tcx.types.unit);
+    let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
+    let usize_di_node = type_di_node(cx, tcx.types.usize);
+    let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
+    // If `usize` is not pointer-sized and -aligned then the size and alignment computations
+    // for the vtable as a whole would be wrong. Let's make sure this holds even on weird
+    // platforms.
+    assert_eq!(cx.size_and_align_of(tcx.types.usize), (pointer_size, pointer_align));
+
+    let vtable_type_name =
+        compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::Type);
+    let unique_type_id = UniqueTypeId::for_vtable_ty(tcx, ty, poly_trait_ref);
+    let size = pointer_size * vtable_entries.len() as u64;
+
+    // This gets mapped to a DW_AT_containing_type attribute which allows GDB to correlate
+    // the vtable to the type it is for.
+    let vtable_holder = type_di_node(cx, ty);
+
+    build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::VTableTy { vtable_holder },
+            unique_type_id,
+            &vtable_type_name,
+            (size, pointer_align),
+            NO_SCOPE_METADATA,
+            DIFlags::FlagArtificial,
+        ),
+        |cx, vtable_type_di_node| {
+            vtable_entries
+                .iter()
+                .enumerate()
+                .filter_map(|(index, vtable_entry)| {
+                    let (field_name, field_type_di_node) = match vtable_entry {
+                        ty::VtblEntry::MetadataDropInPlace => {
+                            ("drop_in_place".to_string(), void_pointer_type_di_node)
+                        }
+                        ty::VtblEntry::Method(_) => {
+                            // Note: This code does not try to give a proper name to each method
+                            //       because there might be multiple methods with the same name
+                            //       (coming from different traits).
+                            (format!("__method{index}"), void_pointer_type_di_node)
+                        }
+                        ty::VtblEntry::TraitVPtr(_) => {
+                            (format!("__super_trait_ptr{index}"), void_pointer_type_di_node)
+                        }
+                        ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
+                        ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
+                        ty::VtblEntry::Vacant => return None,
+                    };
+
+                    let field_offset = pointer_size * index as u64;
+
+                    Some(build_field_di_node(
+                        cx,
+                        vtable_type_di_node,
+                        &field_name,
+                        (pointer_size, pointer_align),
+                        field_offset,
+                        DIFlags::FlagZero,
+                        field_type_di_node,
+                    ))
+                })
+                .collect()
+        },
+        NO_GENERICS,
+    )
+    .di_node
+}
+
+fn vcall_visibility_metadata<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    ty: Ty<'tcx>,
+    trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
+    vtable: &'ll Value,
+) {
+    enum VCallVisibility {
+        Public = 0,
+        LinkageUnit = 1,
+        TranslationUnit = 2,
+    }
+
+    let Some(trait_ref) = trait_ref else { return };
+
+    let trait_ref_self = trait_ref.with_self_ty(cx.tcx, ty);
+    let trait_ref_self = cx.tcx.erase_regions(trait_ref_self);
+    let trait_def_id = trait_ref_self.def_id();
+    let trait_vis = cx.tcx.visibility(trait_def_id);
+
+    let cgus = cx.sess().codegen_units().as_usize();
+    let single_cgu = cgus == 1;
+
+    let lto = cx.sess().lto();
+
+    // Since LLVM requires full LTO for the virtual function elimination optimization to apply,
+    // only the `Lto::Fat` cases are relevant currently.
+    let vcall_visibility = match (lto, trait_vis, single_cgu) {
+        // If there is no LTO and the visibility is public, we have to assume that the vtable can
+        // be seen from anywhere. With multiple CGUs, the vtable is quasi-public.
+        (Lto::No | Lto::ThinLocal, Visibility::Public, _)
+        | (Lto::No, Visibility::Restricted(_), false) => VCallVisibility::Public,
+        // With LTO and a quasi-public visibility, the usages of the functions of the vtable are
+        // all known by the `LinkageUnit`.
+        // FIXME: LLVM only supports this optimization for `Lto::Fat` currently. Once it also
+        // supports `Lto::Thin` the `VCallVisibility` may have to be adjusted for those.
+        (Lto::Fat | Lto::Thin, Visibility::Public, _)
+        | (Lto::ThinLocal | Lto::Thin | Lto::Fat, Visibility::Restricted(_), false) => {
+            VCallVisibility::LinkageUnit
+        }
+        // If there is only one CGU, private vtables can only be seen by that CGU/translation unit
+        // and therefore we know of all usages of functions in the vtable.
+        (_, Visibility::Restricted(_), true) => VCallVisibility::TranslationUnit,
+    };
+
+    let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref);
+
+    unsafe {
+        let typeid = llvm::LLVMMDStringInContext(
+            cx.llcx,
+            trait_ref_typeid.as_ptr() as *const c_char,
+            trait_ref_typeid.as_bytes().len() as c_uint,
+        );
+        let v = [cx.const_usize(0), typeid];
+        llvm::LLVMRustGlobalAddMetadata(
+            vtable,
+            llvm::MD_type as c_uint,
+            llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+                cx.llcx,
+                v.as_ptr(),
+                v.len() as c_uint,
+            )),
+        );
+        let vcall_visibility = llvm::LLVMValueAsMetadata(cx.const_u64(vcall_visibility as u64));
+        let vcall_visibility_metadata = llvm::LLVMMDNodeInContext2(cx.llcx, &vcall_visibility, 1);
+        llvm::LLVMGlobalSetMetadata(
+            vtable,
+            llvm::MetadataType::MD_vcall_visibility as c_uint,
+            vcall_visibility_metadata,
+        );
+    }
+}
+
+/// Creates debug information for the given vtable, which is for the
+/// given type.
+///
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_vtable_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    ty: Ty<'tcx>,
+    poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+    vtable: &'ll Value,
+) {
+    // FIXME(flip1995): The virtual function elimination optimization only works with full LTO in
+    // LLVM at the moment.
+    if cx.sess().opts.unstable_opts.virtual_function_elimination && cx.sess().lto() == Lto::Fat {
+        vcall_visibility_metadata(cx, ty, poly_trait_ref, vtable);
+    }
+
+    if cx.dbg_cx.is_none() {
+        return;
+    }
+
+    // Only create type information if full debuginfo is enabled
+    if cx.sess().opts.debuginfo != DebugInfo::Full {
+        return;
+    }
+
+    // When full debuginfo is enabled, we want to try to prevent vtables from being
+    // merged. Otherwise debuggers will have a hard time mapping from dyn pointer
+    // to concrete type.
+    llvm::SetUnnamedAddress(vtable, llvm::UnnamedAddr::No);
+
+    let vtable_name =
+        compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable);
+    let vtable_type_di_node = build_vtable_type_di_node(cx, ty, poly_trait_ref);
+    let linkage_name = "";
+
+    unsafe {
+        llvm::LLVMRustDIBuilderCreateStaticVariable(
+            DIB(cx),
+            NO_SCOPE_METADATA,
+            vtable_name.as_ptr().cast(),
+            vtable_name.len(),
+            linkage_name.as_ptr().cast(),
+            linkage_name.len(),
+            unknown_file_metadata(cx),
+            UNKNOWN_LINE_NUMBER,
+            vtable_type_di_node,
+            true,
+            vtable,
+            None,
+            0,
+        );
+    }
+}
+
+/// Creates an "extension" of an existing `DIScope` into another file.
+pub fn extend_scope_to_file<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    scope_metadata: &'ll DIScope,
+    file: &SourceFile,
+) -> &'ll DILexicalBlock {
+    let file_metadata = file_metadata(cx, file);
+    unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
+}
+
+pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
+    const TUPLE_FIELD_NAMES: [&'static str; 16] = [
+        "__0", "__1", "__2", "__3", "__4", "__5", "__6", "__7", "__8", "__9", "__10", "__11",
+        "__12", "__13", "__14", "__15",
+    ];
+    TUPLE_FIELD_NAMES
+        .get(field_index)
+        .map(|s| Cow::from(*s))
+        .unwrap_or_else(|| Cow::from(format!("__{field_index}")))
+}
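+
+// Illustrative note: indices below 16 reuse the pre-allocated `&'static str`s, so
+// `tuple_field_name(3)` yields `"__3"` without allocating, while `tuple_field_name(20)`
+// falls back to `format!` and yields `"__20"`.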
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
new file mode 100644
index 00000000000..4792b0798df
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -0,0 +1,931 @@
+use std::borrow::Cow;
+
+use libc::c_uint;
+use rustc_codegen_ssa::{
+    debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+    traits::ConstMethods,
+};
+
+use rustc_index::IndexVec;
+use rustc_middle::{
+    bug,
+    ty::{
+        self,
+        layout::{LayoutOf, TyAndLayout},
+        AdtDef, CoroutineArgs, Ty,
+    },
+};
+use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+use crate::{
+    common::CodegenCx,
+    debuginfo::{
+        metadata::{
+            build_field_di_node,
+            enums::{tag_base_type, DiscrResult},
+            file_metadata, size_and_align_of, type_di_node,
+            type_map::{self, Stub, UniqueTypeId},
+            unknown_file_metadata, visibility_di_flags, DINodeCreationResult, SmallVec,
+            NO_GENERICS, NO_SCOPE_METADATA, UNKNOWN_LINE_NUMBER,
+        },
+        utils::DIB,
+    },
+    llvm::{
+        self,
+        debuginfo::{DIFile, DIFlags, DIType},
+    },
+};
+
+// The names of the associated constants in each variant wrapper struct.
+// These have to match up with the names being used in `intrinsic.natvis`.
+const ASSOC_CONST_DISCR_NAME: &str = "NAME";
+const ASSOC_CONST_DISCR_EXACT: &str = "DISCR_EXACT";
+const ASSOC_CONST_DISCR_BEGIN: &str = "DISCR_BEGIN";
+const ASSOC_CONST_DISCR_END: &str = "DISCR_END";
+
+const ASSOC_CONST_DISCR128_EXACT_LO: &str = "DISCR128_EXACT_LO";
+const ASSOC_CONST_DISCR128_EXACT_HI: &str = "DISCR128_EXACT_HI";
+const ASSOC_CONST_DISCR128_BEGIN_LO: &str = "DISCR128_BEGIN_LO";
+const ASSOC_CONST_DISCR128_BEGIN_HI: &str = "DISCR128_BEGIN_HI";
+const ASSOC_CONST_DISCR128_END_LO: &str = "DISCR128_END_LO";
+const ASSOC_CONST_DISCR128_END_HI: &str = "DISCR128_END_HI";
+
+// The name of the tag field in the top-level union
+const TAG_FIELD_NAME: &str = "tag";
+const TAG_FIELD_NAME_128_LO: &str = "tag128_lo";
+const TAG_FIELD_NAME_128_HI: &str = "tag128_hi";
+
+// We assign a "virtual" discriminant value to the sole variant of
+// a single-variant enum.
+const SINGLE_VARIANT_VIRTUAL_DISR: u64 = 0;
+
+/// In CPP-like mode, we generate a union with a field for each variant and an
+/// explicit tag field. The field of each variant has a struct type
+/// that encodes the discriminant of the variant and its data layout.
+/// The union also has a nested enumeration type that is only used for encoding
+/// variant names in an efficient way. Its enumerator values do _not_ correspond
+/// to the enum's discriminant values.
+/// It's roughly equivalent to the following C/C++ code:
+///
+/// ```c
+/// union enum2$<{fully-qualified-name}> {
+///   struct Variant0 {
+///     struct {name-of-variant-0} {
+///        <variant 0 fields>
+///     } value;
+///
+///     static VariantNames NAME = {name-of-variant-0};
+///     static int_type DISCR_EXACT = {discriminant-of-variant-0};
+///   } variant0;
+///
+///   <other variant structs>
+///
+///   int_type tag;
+///
+///   enum VariantNames {
+///      <name-of-variant-0> = 0, // The numeric values are variant index,
+///      <name-of-variant-1> = 1, // not discriminant values.
+///      <name-of-variant-2> = 2,
+///      ...
+///   }
+/// }
+/// ```
+///
+/// As you can see, the type name is wrapped in `enum2$<_>`. This way we can
+/// have a single NatVis rule for handling all enums. The `2` in `enum2$<_>`
+/// is an encoding version tag, so that debuggers can decide to decode this
+/// differently than the previous `enum$<_>` encoding emitted by earlier
+/// compiler versions.
+///
+/// Niche-tag enums have one special variant, usually called the
+/// "untagged variant". This variant has a field that
+/// doubles as the tag of the enum. The variant is active when the value of
+/// that field is within a pre-defined range. Therefore the variant struct
+/// has a `DISCR_BEGIN` and `DISCR_END` field instead of `DISCR_EXACT` in
+/// that case. Both `DISCR_BEGIN` and `DISCR_END` are inclusive bounds.
+/// Note that these ranges can wrap around, so that `DISCR_END < DISCR_BEGIN`.
+///
+/// Single-variant enums don't actually have a tag field. In this case we
+/// emit a static tag field (that always has the value 0) so we can use the
+/// same representation (and NatVis).
+///
+/// For niche-layout enums it's possible to have a 128-bit tag. NatVis, VS, and
+/// WinDbg (the main targets for CPP-like debuginfo at the moment) don't support
+/// 128-bit integers, so all values involved get split into two 64-bit fields.
+/// Instead of the `tag` field, we generate two fields `tag128_lo` and `tag128_hi`;
+/// instead of `DISCR_EXACT`, we generate `DISCR128_EXACT_LO` and `DISCR128_EXACT_HI`;
+/// and so on.
+///
+///
+/// The following pseudocode shows how to decode an enum value in a debugger:
+///
+/// ```text
+///
+/// fn find_active_variant(enum_value) -> (VariantName, VariantValue) {
+///     let is_128_bit = enum_value.has_field("tag128_lo");
+///
+///     if !is_128_bit {
+///         // Note: `tag` can be a static field for enums with only one
+///         //       inhabited variant.
+///         let tag = enum_value.field("tag").value;
+///
+///         // For each variant, check if it is a match. Only one of them will match,
+///         // so if we find it we can return it immediately.
+///         for variant_field in enum_value.fields().filter(|f| f.name.starts_with("variant")) {
+///             if variant_field.has_field("DISCR_EXACT") {
+///                 // This variant corresponds to a single tag value
+///                 if variant_field.field("DISCR_EXACT").value == tag {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             } else {
+///                 // This is a range variant
+///                 let begin = variant_field.field("DISCR_BEGIN");
+///                 let end = variant_field.field("DISCR_END");
+///
+///                 if is_in_range(tag, begin, end) {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             }
+///         }
+///     } else {
+///         // Basically the same as with smaller tags, we just have to
+///         // stitch the values together.
+///         let tag: u128 = (enum_value.field("tag128_lo").value as u128) |
+///                         (enum_value.field("tag128_hi").value as u128 << 64);
+///
+///         for variant_field in enum_value.fields().filter(|f| f.name.starts_with("variant")) {
+///             if variant_field.has_field("DISCR128_EXACT_LO") {
+///                 let discr_exact = (variant_field.field("DISCR128_EXACT_LO").value as u128) |
+///                                   (variant_field.field("DISCR128_EXACT_HI").value as u128 << 64);
+///
+///                 // This variant corresponds to a single tag value
+///                 if discr_exact == tag {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             } else {
+///                 // This is a range variant
+///                 let begin = (variant_field.field("DISCR128_BEGIN_LO").value as u128) |
+///                             (variant_field.field("DISCR128_BEGIN_HI").value as u128 << 64);
+///                 let end = (variant_field.field("DISCR128_END_LO").value as u128) |
+///                           (variant_field.field("DISCR128_END_HI").value as u128 << 64);
+///
+///                 if is_in_range(tag, begin, end) {
+///                     return (variant_field.field("NAME"), variant_field.value);
+///                 }
+///             }
+///         }
+///     }
+///
+///     // We should have found an active variant at this point.
+///     unreachable!();
+/// }
+///
+/// // Check if a value is within the given range
+/// // (where the range might wrap around the value space)
+/// fn is_in_range(value, start, end) -> bool {
+///     if start < end {
+///         value >= start && value <= end
+///     } else {
+///         value >= start || value <= end
+///     }
+/// }
+///
+/// ```
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let enum_type = unique_type_id.expect_ty();
+    let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+        bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+    };
+
+    let enum_type_and_layout = cx.layout_of(enum_type);
+    let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+    debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            type_map::Stub::Union,
+            unique_type_id,
+            &enum_type_name,
+            cx.size_and_align_of(enum_type),
+            NO_SCOPE_METADATA,
+            visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did()),
+        ),
+        |cx, enum_type_di_node| {
+            match enum_type_and_layout.variants {
+                Variants::Single { index: variant_index } => {
+                    if enum_adt_def.variants().is_empty() {
+                        // Uninhabited enums have Variants::Single. We don't generate
+                        // any members for them.
+                        return smallvec![];
+                    }
+
+                    build_single_variant_union_fields(
+                        cx,
+                        enum_adt_def,
+                        enum_type_and_layout,
+                        enum_type_di_node,
+                        variant_index,
+                    )
+                }
+                Variants::Multiple {
+                    tag_encoding: TagEncoding::Direct,
+                    ref variants,
+                    tag_field,
+                    ..
+                } => build_union_fields_for_enum(
+                    cx,
+                    enum_adt_def,
+                    enum_type_and_layout,
+                    enum_type_di_node,
+                    variants.indices(),
+                    tag_field,
+                    None,
+                ),
+                Variants::Multiple {
+                    tag_encoding: TagEncoding::Niche { untagged_variant, .. },
+                    ref variants,
+                    tag_field,
+                    ..
+                } => build_union_fields_for_enum(
+                    cx,
+                    enum_adt_def,
+                    enum_type_and_layout,
+                    enum_type_di_node,
+                    variants.indices(),
+                    tag_field,
+                    Some(untagged_variant),
+                ),
+            }
+        },
+        NO_GENERICS,
+    )
+}
+
+/// A coroutine debuginfo node looks the same as that of an enum type.
+///
+/// See [build_enum_type_di_node] for more information.
+pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let coroutine_type = unique_type_id.expect_ty();
+    let coroutine_type_and_layout = cx.layout_of(coroutine_type);
+    let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);
+
+    debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            type_map::Stub::Union,
+            unique_type_id,
+            &coroutine_type_name,
+            size_and_align_of(coroutine_type_and_layout),
+            NO_SCOPE_METADATA,
+            DIFlags::FlagZero,
+        ),
+        |cx, coroutine_type_di_node| match coroutine_type_and_layout.variants {
+            Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => {
+                build_union_fields_for_direct_tag_coroutine(
+                    cx,
+                    coroutine_type_and_layout,
+                    coroutine_type_di_node,
+                )
+            }
+            Variants::Single { .. }
+            | Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => {
+                bug!(
+                    "Encountered coroutine with non-direct-tag layout: {:?}",
+                    coroutine_type_and_layout
+                )
+            }
+        },
+        NO_GENERICS,
+    )
+}
+
+fn build_single_variant_union_fields<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_adt_def: AdtDef<'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    enum_type_di_node: &'ll DIType,
+    variant_index: VariantIdx,
+) -> SmallVec<&'ll DIType> {
+    let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+    let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did());
+    let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+        cx,
+        enum_type_and_layout,
+        enum_type_di_node,
+        variant_index,
+        enum_adt_def.variant(variant_index),
+        variant_layout,
+        visibility_flags,
+    );
+
+    let tag_base_type = cx.tcx.types.u32;
+    let tag_base_type_di_node = type_di_node(cx, tag_base_type);
+    let tag_base_type_align = cx.align_of(tag_base_type);
+
+    let variant_names_type_di_node = build_variant_names_type_di_node(
+        cx,
+        enum_type_di_node,
+        std::iter::once((
+            variant_index,
+            Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
+        )),
+    );
+
+    let variant_struct_type_wrapper_di_node = build_variant_struct_wrapper_type_di_node(
+        cx,
+        enum_type_and_layout,
+        enum_type_di_node,
+        variant_index,
+        None,
+        variant_struct_type_di_node,
+        variant_names_type_di_node,
+        tag_base_type_di_node,
+        tag_base_type,
+        DiscrResult::NoDiscriminant,
+    );
+
+    smallvec![
+        build_field_di_node(
+            cx,
+            enum_type_di_node,
+            &variant_union_field_name(variant_index),
+            // NOTE: We use the size and align of the entire type, not from variant_layout
+            //       since the latter is sometimes smaller (if it has fewer fields).
+            size_and_align_of(enum_type_and_layout),
+            Size::ZERO,
+            visibility_flags,
+            variant_struct_type_wrapper_di_node,
+        ),
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateStaticMemberType(
+                DIB(cx),
+                enum_type_di_node,
+                TAG_FIELD_NAME.as_ptr().cast(),
+                TAG_FIELD_NAME.len(),
+                unknown_file_metadata(cx),
+                UNKNOWN_LINE_NUMBER,
+                variant_names_type_di_node,
+                visibility_flags,
+                Some(cx.const_u64(SINGLE_VARIANT_VIRTUAL_DISR)),
+                tag_base_type_align.bits() as u32,
+            )
+        }
+    ]
+}
+
+fn build_union_fields_for_enum<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_adt_def: AdtDef<'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    enum_type_di_node: &'ll DIType,
+    variant_indices: impl Iterator<Item = VariantIdx> + Clone,
+    tag_field: usize,
+    untagged_variant_index: Option<VariantIdx>,
+) -> SmallVec<&'ll DIType> {
+    let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+
+    let variant_names_type_di_node = build_variant_names_type_di_node(
+        cx,
+        enum_type_di_node,
+        variant_indices.clone().map(|variant_index| {
+            let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+            (variant_index, variant_name)
+        }),
+    );
+    let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did());
+
+    let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_indices
+        .map(|variant_index| {
+            let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+
+            let variant_def = enum_adt_def.variant(variant_index);
+
+            let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+                cx,
+                enum_type_and_layout,
+                enum_type_di_node,
+                variant_index,
+                variant_def,
+                variant_layout,
+                visibility_flags,
+            );
+
+            VariantFieldInfo {
+                variant_index,
+                variant_struct_type_di_node,
+                source_info: None,
+                discr: super::compute_discriminant_value(cx, enum_type_and_layout, variant_index),
+            }
+        })
+        .collect();
+
+    build_union_fields_for_direct_tag_enum_or_coroutine(
+        cx,
+        enum_type_and_layout,
+        enum_type_di_node,
+        &variant_field_infos,
+        variant_names_type_di_node,
+        tag_base_type,
+        tag_field,
+        untagged_variant_index,
+        visibility_flags,
+    )
+}
+
+// The base type of the VariantNames DW_AT_enumeration_type is always the same.
+// It has nothing to do with the tag of the enum and just has to be big enough
+// to hold an enumerator for each variant.
+fn variant_names_enum_base_type<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> Ty<'tcx> {
+    cx.tcx.types.u32
+}
+
+/// This function builds a DW_AT_enumeration_type that contains an entry for
+/// each variant. Note that this has nothing to do with the discriminant. The
+/// numeric value of each enumerator corresponds to the variant index. The
+/// type is only used for efficiently encoding the name of each variant in
+/// debuginfo.
+fn build_variant_names_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    containing_scope: &'ll DIType,
+    variants: impl Iterator<Item = (VariantIdx, Cow<'tcx, str>)>,
+) -> &'ll DIType {
+    // Create an enumerator for each variant.
+    super::build_enumeration_type_di_node(
+        cx,
+        "VariantNames",
+        variant_names_enum_base_type(cx),
+        variants.map(|(variant_index, variant_name)| (variant_name, variant_index.as_u32().into())),
+        containing_scope,
+    )
+}
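+
+// Illustrative sketch (using the standard library's `Option<T>` as an assumed example):
+// the `VariantNames` enumeration built for `Option<T>` would contain the enumerators
+// `None = 0` and `Some = 1`, i.e. the values are the variant indices and are unrelated
+// to the enum's actual discriminant values.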
+
+fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_or_coroutine_type_and_layout: TyAndLayout<'tcx>,
+    enum_or_coroutine_type_di_node: &'ll DIType,
+    variant_index: VariantIdx,
+    untagged_variant_index: Option<VariantIdx>,
+    variant_struct_type_di_node: &'ll DIType,
+    variant_names_type_di_node: &'ll DIType,
+    tag_base_type_di_node: &'ll DIType,
+    tag_base_type: Ty<'tcx>,
+    discr: DiscrResult,
+) -> &'ll DIType {
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            UniqueTypeId::for_enum_variant_struct_type_wrapper(
+                cx.tcx,
+                enum_or_coroutine_type_and_layout.ty,
+                variant_index,
+            ),
+            &variant_struct_wrapper_type_name(variant_index),
+            // NOTE: We use size and align of enum_type, not from variant_layout:
+            size_and_align_of(enum_or_coroutine_type_and_layout),
+            Some(enum_or_coroutine_type_di_node),
+            DIFlags::FlagZero,
+        ),
+        |cx, wrapper_struct_type_di_node| {
+            enum DiscrKind {
+                Exact(u64),
+                Exact128(u128),
+                Range(u64, u64),
+                Range128(u128, u128),
+            }
+
+            let (tag_base_type_size, tag_base_type_align) = cx.size_and_align_of(tag_base_type);
+            let is_128_bits = tag_base_type_size.bits() > 64;
+
+            let discr = match discr {
+                DiscrResult::NoDiscriminant => DiscrKind::Exact(SINGLE_VARIANT_VIRTUAL_DISR),
+                DiscrResult::Value(discr_val) => {
+                    if is_128_bits {
+                        DiscrKind::Exact128(discr_val)
+                    } else {
+                        debug_assert_eq!(discr_val, discr_val as u64 as u128);
+                        DiscrKind::Exact(discr_val as u64)
+                    }
+                }
+                DiscrResult::Range(min, max) => {
+                    assert_eq!(Some(variant_index), untagged_variant_index);
+                    if is_128_bits {
+                        DiscrKind::Range128(min, max)
+                    } else {
+                        debug_assert_eq!(min, min as u64 as u128);
+                        debug_assert_eq!(max, max as u64 as u128);
+                        DiscrKind::Range(min as u64, max as u64)
+                    }
+                }
+            };
+
+            let mut fields = SmallVec::new();
+
+            // We always have a field for the value
+            fields.push(build_field_di_node(
+                cx,
+                wrapper_struct_type_di_node,
+                "value",
+                size_and_align_of(enum_or_coroutine_type_and_layout),
+                Size::ZERO,
+                DIFlags::FlagZero,
+                variant_struct_type_di_node,
+            ));
+
+            let build_assoc_const =
+                |name: &str, type_di_node: &'ll DIType, value: u64, align: Align| unsafe {
+                    llvm::LLVMRustDIBuilderCreateStaticMemberType(
+                        DIB(cx),
+                        wrapper_struct_type_di_node,
+                        name.as_ptr().cast(),
+                        name.len(),
+                        unknown_file_metadata(cx),
+                        UNKNOWN_LINE_NUMBER,
+                        type_di_node,
+                        DIFlags::FlagZero,
+                        Some(cx.const_u64(value)),
+                        align.bits() as u32,
+                    )
+                };
+
+            // We also always have an associated constant for the discriminant value
+            // of the variant.
+            fields.push(build_assoc_const(
+                ASSOC_CONST_DISCR_NAME,
+                variant_names_type_di_node,
+                variant_index.as_u32() as u64,
+                cx.align_of(variant_names_enum_base_type(cx)),
+            ));
+
+            // Emit the discriminant value (or range) corresponding to the variant.
+            match discr {
+                DiscrKind::Exact(discr_val) => {
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR_EXACT,
+                        tag_base_type_di_node,
+                        discr_val,
+                        tag_base_type_align,
+                    ));
+                }
+                DiscrKind::Exact128(discr_val) => {
+                    let align = cx.align_of(cx.tcx.types.u64);
+                    let type_di_node = type_di_node(cx, cx.tcx.types.u64);
+                    let Split128 { hi, lo } = split_128(discr_val);
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR128_EXACT_LO,
+                        type_di_node,
+                        lo,
+                        align,
+                    ));
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR128_EXACT_HI,
+                        type_di_node,
+                        hi,
+                        align,
+                    ));
+                }
+                DiscrKind::Range(begin, end) => {
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR_BEGIN,
+                        tag_base_type_di_node,
+                        begin,
+                        tag_base_type_align,
+                    ));
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR_END,
+                        tag_base_type_di_node,
+                        end,
+                        tag_base_type_align,
+                    ));
+                }
+                DiscrKind::Range128(begin, end) => {
+                    let align = cx.align_of(cx.tcx.types.u64);
+                    let type_di_node = type_di_node(cx, cx.tcx.types.u64);
+                    let Split128 { hi: begin_hi, lo: begin_lo } = split_128(begin);
+                    let Split128 { hi: end_hi, lo: end_lo } = split_128(end);
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR128_BEGIN_HI,
+                        type_di_node,
+                        begin_hi,
+                        align,
+                    ));
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR128_BEGIN_LO,
+                        type_di_node,
+                        begin_lo,
+                        align,
+                    ));
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR128_END_HI,
+                        type_di_node,
+                        end_hi,
+                        align,
+                    ));
+
+                    fields.push(build_assoc_const(
+                        ASSOC_CONST_DISCR128_END_LO,
+                        type_di_node,
+                        end_lo,
+                        align,
+                    ));
+                }
+            }
+
+            fields
+        },
+        NO_GENERICS,
+    )
+    .di_node
+}
+
+struct Split128 {
+    hi: u64,
+    lo: u64,
+}
+
+fn split_128(value: u128) -> Split128 {
+    Split128 { hi: (value >> 64) as u64, lo: value as u64 }
+}
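+
+// Illustrative example: `split_128` just cuts a `u128` into its 64-bit halves, e.g.
+//     split_128(0x0000_0000_0000_0002_ffff_ffff_ffff_fffe)
+//         == Split128 { hi: 0x0000_0000_0000_0002, lo: 0xffff_ffff_ffff_fffe }
+// The debugger-side pseudocode shown above reassembles the value as `lo | (hi << 64)`.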
+
+fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    coroutine_type_and_layout: TyAndLayout<'tcx>,
+    coroutine_type_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+    let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } =
+        coroutine_type_and_layout.variants
+    else {
+        bug!("This function only supports layouts with directly encoded tags.")
+    };
+
+    let (coroutine_def_id, coroutine_args) = match coroutine_type_and_layout.ty.kind() {
+        &ty::Coroutine(def_id, args) => (def_id, args.as_coroutine()),
+        _ => unreachable!(),
+    };
+
+    let coroutine_layout = cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap();
+
+    let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id);
+    let variant_range = coroutine_args.variant_range(coroutine_def_id, cx.tcx);
+    let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len();
+
+    let tag_base_type = tag_base_type(cx, coroutine_type_and_layout);
+
+    let variant_names_type_di_node = build_variant_names_type_di_node(
+        cx,
+        coroutine_type_di_node,
+        variant_range
+            .clone()
+            .map(|variant_index| (variant_index, CoroutineArgs::variant_name(variant_index))),
+    );
+
+    let discriminants: IndexVec<VariantIdx, DiscrResult> = {
+        let discriminants_iter = coroutine_args.discriminants(coroutine_def_id, cx.tcx);
+        let mut discriminants: IndexVec<VariantIdx, DiscrResult> =
+            IndexVec::with_capacity(variant_count);
+        for (variant_index, discr) in discriminants_iter {
+            // Assert that the index in the IndexVec matches up with the given VariantIdx.
+            assert_eq!(variant_index, discriminants.next_index());
+            discriminants.push(DiscrResult::Value(discr.val));
+        }
+        discriminants
+    };
+
+    // Build the type node for each field.
+    let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
+        .map(|variant_index| {
+            let variant_struct_type_di_node = super::build_coroutine_variant_struct_type_di_node(
+                cx,
+                variant_index,
+                coroutine_type_and_layout,
+                coroutine_type_di_node,
+                coroutine_layout,
+                common_upvar_names,
+            );
+
+            let span = coroutine_layout.variant_source_info[variant_index].span;
+            let source_info = if !span.is_dummy() {
+                let loc = cx.lookup_debug_loc(span.lo());
+                Some((file_metadata(cx, &loc.file), loc.line as c_uint))
+            } else {
+                None
+            };
+
+            VariantFieldInfo {
+                variant_index,
+                variant_struct_type_di_node,
+                source_info,
+                discr: discriminants[variant_index],
+            }
+        })
+        .collect();
+
+    build_union_fields_for_direct_tag_enum_or_coroutine(
+        cx,
+        coroutine_type_and_layout,
+        coroutine_type_di_node,
+        &variant_field_infos[..],
+        variant_names_type_di_node,
+        tag_base_type,
+        tag_field,
+        None,
+        DIFlags::FlagZero,
+    )
+}
+
+/// This is a helper function shared between enums and coroutines that makes sure fields have the
+/// expected names.
+fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    enum_type_di_node: &'ll DIType,
+    variant_field_infos: &[VariantFieldInfo<'ll>],
+    discr_type_di_node: &'ll DIType,
+    tag_base_type: Ty<'tcx>,
+    tag_field: usize,
+    untagged_variant_index: Option<VariantIdx>,
+    di_flags: DIFlags,
+) -> SmallVec<&'ll DIType> {
+    let tag_base_type_di_node = type_di_node(cx, tag_base_type);
+    let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1);
+
+    // We create a field in the union for each variant ...
+    unions_fields.extend(variant_field_infos.into_iter().map(|variant_member_info| {
+        let (file_di_node, line_number) = variant_member_info
+            .source_info
+            .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+        let field_name = variant_union_field_name(variant_member_info.variant_index);
+        let (size, align) = size_and_align_of(enum_type_and_layout);
+
+        let variant_struct_type_wrapper = build_variant_struct_wrapper_type_di_node(
+            cx,
+            enum_type_and_layout,
+            enum_type_di_node,
+            variant_member_info.variant_index,
+            untagged_variant_index,
+            variant_member_info.variant_struct_type_di_node,
+            discr_type_di_node,
+            tag_base_type_di_node,
+            tag_base_type,
+            variant_member_info.discr,
+        );
+
+        // We use LLVMRustDIBuilderCreateMemberType() directly because
+        // the build_field_di_node() function does not support specifying a source location,
+        // which is something that we don't do anywhere else.
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateMemberType(
+                DIB(cx),
+                enum_type_di_node,
+                field_name.as_ptr().cast(),
+                field_name.len(),
+                file_di_node,
+                line_number,
+                // NOTE: We use the size and align of the entire type, not from variant_layout
+                //       since the latter is sometimes smaller (if it has fewer fields).
+                size.bits(),
+                align.bits() as u32,
+                // Union fields are always at offset zero
+                Size::ZERO.bits(),
+                di_flags,
+                variant_struct_type_wrapper,
+            )
+        }
+    }));
+
+    debug_assert_eq!(
+        cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+        cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
+    );
+
+    // ... and a field for the tag. If the tag is 128 bits wide, this will actually
+    // be two 64-bit fields.
+    let is_128_bits = cx.size_of(tag_base_type).bits() > 64;
+
+    if is_128_bits {
+        let type_di_node = type_di_node(cx, cx.tcx.types.u64);
+        let size_and_align = cx.size_and_align_of(cx.tcx.types.u64);
+
+        let (lo_offset, hi_offset) = match cx.tcx.data_layout.endian {
+            Endian::Little => (0, 8),
+            Endian::Big => (8, 0),
+        };
+
+        let tag_field_offset = enum_type_and_layout.fields.offset(tag_field).bytes();
+        let lo_offset = Size::from_bytes(tag_field_offset + lo_offset);
+        let hi_offset = Size::from_bytes(tag_field_offset + hi_offset);
+
+        unions_fields.push(build_field_di_node(
+            cx,
+            enum_type_di_node,
+            TAG_FIELD_NAME_128_LO,
+            size_and_align,
+            lo_offset,
+            di_flags,
+            type_di_node,
+        ));
+
+        unions_fields.push(build_field_di_node(
+            cx,
+            enum_type_di_node,
+            TAG_FIELD_NAME_128_HI,
+            size_and_align,
+            hi_offset,
+            DIFlags::FlagZero,
+            type_di_node,
+        ));
+    } else {
+        unions_fields.push(build_field_di_node(
+            cx,
+            enum_type_di_node,
+            TAG_FIELD_NAME,
+            cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+            enum_type_and_layout.fields.offset(tag_field),
+            di_flags,
+            tag_base_type_di_node,
+        ));
+    }
+
+    unions_fields
+}
+
+/// Information about a single field of the top-level DW_TAG_union_type.
+struct VariantFieldInfo<'ll> {
+    variant_index: VariantIdx,
+    variant_struct_type_di_node: &'ll DIType,
+    source_info: Option<(&'ll DIFile, c_uint)>,
+    discr: DiscrResult,
+}
+
+fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> {
+    const PRE_ALLOCATED: [&str; 16] = [
+        "variant0",
+        "variant1",
+        "variant2",
+        "variant3",
+        "variant4",
+        "variant5",
+        "variant6",
+        "variant7",
+        "variant8",
+        "variant9",
+        "variant10",
+        "variant11",
+        "variant12",
+        "variant13",
+        "variant14",
+        "variant15",
+    ];
+
+    PRE_ALLOCATED
+        .get(variant_index.as_usize())
+        .map(|&s| Cow::from(s))
+        .unwrap_or_else(|| format!("variant{}", variant_index.as_usize()).into())
+}
+
+fn variant_struct_wrapper_type_name(variant_index: VariantIdx) -> Cow<'static, str> {
+    const PRE_ALLOCATED: [&str; 16] = [
+        "Variant0",
+        "Variant1",
+        "Variant2",
+        "Variant3",
+        "Variant4",
+        "Variant5",
+        "Variant6",
+        "Variant7",
+        "Variant8",
+        "Variant9",
+        "Variant10",
+        "Variant11",
+        "Variant12",
+        "Variant13",
+        "Variant14",
+        "Variant15",
+    ];
+
+    PRE_ALLOCATED
+        .get(variant_index.as_usize())
+        .map(|&s| Cow::from(s))
+        .unwrap_or_else(|| format!("Variant{}", variant_index.as_usize()).into())
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
new file mode 100644
index 00000000000..657e9ce998f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -0,0 +1,465 @@
+use rustc_codegen_ssa::debuginfo::{
+    type_names::{compute_debuginfo_type_name, cpp_like_debuginfo},
+    wants_c_like_enum_debuginfo,
+};
+use rustc_hir::def::CtorKind;
+use rustc_index::IndexSlice;
+use rustc_middle::{
+    bug,
+    mir::CoroutineLayout,
+    ty::{
+        self,
+        layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
+        AdtDef, CoroutineArgs, Ty, VariantDef,
+    },
+};
+use rustc_span::Symbol;
+use rustc_target::abi::{
+    FieldIdx, HasDataLayout, Integer, Primitive, TagEncoding, VariantIdx, Variants,
+};
+use std::borrow::Cow;
+
+use crate::{
+    common::CodegenCx,
+    debuginfo::{
+        metadata::{
+            build_field_di_node, build_generic_type_param_di_nodes, type_di_node,
+            type_map::{self, Stub},
+            unknown_file_metadata, UNKNOWN_LINE_NUMBER,
+        },
+        utils::{create_DIArray, get_namespace_for_item, DIB},
+    },
+    llvm::{
+        self,
+        debuginfo::{DIFlags, DIType},
+    },
+};
+
+use super::{
+    size_and_align_of,
+    type_map::{DINodeCreationResult, UniqueTypeId},
+    SmallVec,
+};
+
+mod cpp_like;
+mod native;
+
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let enum_type = unique_type_id.expect_ty();
+    let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+        bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+    };
+
+    let enum_type_and_layout = cx.layout_of(enum_type);
+
+    if wants_c_like_enum_debuginfo(enum_type_and_layout) {
+        return build_c_style_enum_di_node(cx, enum_adt_def, enum_type_and_layout);
+    }
+
+    if cpp_like_debuginfo(cx.tcx) {
+        cpp_like::build_enum_type_di_node(cx, unique_type_id)
+    } else {
+        native::build_enum_type_di_node(cx, unique_type_id)
+    }
+}
+
+pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    if cpp_like_debuginfo(cx.tcx) {
+        cpp_like::build_coroutine_di_node(cx, unique_type_id)
+    } else {
+        native::build_coroutine_di_node(cx, unique_type_id)
+    }
+}
+
+/// Build the debuginfo node for a C-style enum, i.e. an enum whose variants have no fields.
+///
+/// The resulting debuginfo will be a DW_TAG_enumeration_type.
+fn build_c_style_enum_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_adt_def: AdtDef<'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+    DINodeCreationResult {
+        di_node: build_enumeration_type_di_node(
+            cx,
+            &compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false),
+            tag_base_type(cx, enum_type_and_layout),
+            enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+                let name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+                (name, discr.val)
+            }),
+            containing_scope,
+        ),
+        already_stored_in_typemap: false,
+    }
+}
+
+/// Extract the type with which we want to describe the tag of the given enum or coroutine.
+fn tag_base_type<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+) -> Ty<'tcx> {
+    debug_assert!(match enum_type_and_layout.ty.kind() {
+        ty::Coroutine(..) => true,
+        ty::Adt(adt_def, _) => adt_def.is_enum(),
+        _ => false,
+    });
+
+    match enum_type_and_layout.layout.variants() {
+        // A single-variant enum has no discriminant.
+        Variants::Single { .. } => {
+            bug!("tag_base_type() called for enum without tag: {:?}", enum_type_and_layout)
+        }
+
+        Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
+            // Niche tags are always normalized to unsigned integers of the correct size.
+            match tag.primitive() {
+                Primitive::Int(t, _) => t,
+                Primitive::F16 => Integer::I16,
+                Primitive::F32 => Integer::I32,
+                Primitive::F64 => Integer::I64,
+                Primitive::F128 => Integer::I128,
+                // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+                Primitive::Pointer(_) => {
+                    // If the niche is the NULL value of a reference, then `discr_enum_ty` will be
+                    // a RawPtr. CodeView doesn't know what to do with enums whose base type is a
+                    // pointer so we fix this up to just be `usize`.
+                    // DWARF might be able to deal with this but with an integer type we are on
+                    // the safe side there too.
+                    cx.data_layout().ptr_sized_integer()
+                }
+            }
+            .to_ty(cx.tcx, false)
+        }
+
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
+            // Direct tags preserve the sign.
+            tag.primitive().to_ty(cx.tcx)
+        }
+    }
+}
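+
+// Hedged example (not covering every layout): for `Option<core::num::NonZeroI32>` the
+// niche tag lives in the `i32` payload, so the tag ends up described with the unsigned
+// integer of the same size, i.e. `u32`; a direct-tag `#[repr(i8)]` enum keeps its
+// signed `i8` tag type, since direct tags preserve the sign.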
+
+/// Build a DW_TAG_enumeration_type debuginfo node, with the given base type and variants.
+/// This is a helper function and does not register anything in the type map by itself.
+///
+/// `enumerators` is an iterator of (variant-name, discriminant-value) pairs.
+fn build_enumeration_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    type_name: &str,
+    base_type: Ty<'tcx>,
+    enumerators: impl Iterator<Item = (Cow<'tcx, str>, u128)>,
+    containing_scope: &'ll DIType,
+) -> &'ll DIType {
+    let is_unsigned = match base_type.kind() {
+        ty::Int(_) => false,
+        ty::Uint(_) => true,
+        _ => bug!("build_enumeration_type_di_node() called with non-integer tag type."),
+    };
+    let (size, align) = cx.size_and_align_of(base_type);
+
+    let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = enumerators
+        .map(|(name, value)| unsafe {
+            let value = [value as u64, (value >> 64) as u64];
+            Some(llvm::LLVMRustDIBuilderCreateEnumerator(
+                DIB(cx),
+                name.as_ptr().cast(),
+                name.len(),
+                value.as_ptr(),
+                size.bits() as libc::c_uint,
+                is_unsigned,
+            ))
+        })
+        .collect();
+
+    unsafe {
+        llvm::LLVMRustDIBuilderCreateEnumerationType(
+            DIB(cx),
+            containing_scope,
+            type_name.as_ptr().cast(),
+            type_name.len(),
+            unknown_file_metadata(cx),
+            UNKNOWN_LINE_NUMBER,
+            size.bits(),
+            align.bits() as u32,
+            create_DIArray(DIB(cx), &enumerator_di_nodes[..]),
+            type_di_node(cx, base_type),
+            true,
+        )
+    }
+}
+
+/// Build the debuginfo node for the struct type describing a single variant of an enum.
+///
+/// ```txt
+///       DW_TAG_structure_type              (top-level type for enum)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///           DW_TAG_variant                 (variant 1)
+///           DW_TAG_variant                 (variant 2)
+///           DW_TAG_variant                 (variant 3)
+///  --->   DW_TAG_structure_type            (type of variant 1)
+///  --->   DW_TAG_structure_type            (type of variant 2)
+///  --->   DW_TAG_structure_type            (type of variant 3)
+/// ```
+///
+/// In CPP-like mode, we have the exact same descriptions for each variant too:
+///
+/// ```txt
+///       DW_TAG_union_type              (top-level type for enum)
+///         DW_TAG_member                    (member for variant 1)
+///         DW_TAG_member                    (member for variant 2)
+///         DW_TAG_member                    (member for variant 3)
+///  --->   DW_TAG_structure_type            (type of variant 1)
+///  --->   DW_TAG_structure_type            (type of variant 2)
+///  --->   DW_TAG_structure_type            (type of variant 3)
+///         DW_TAG_enumeration_type          (type of tag)
+/// ```
+///
+/// The node looks like:
+///
+/// ```txt
+/// DW_TAG_structure_type
+///   DW_AT_name                  <name-of-variant>
+///   DW_AT_byte_size             0x00000010
+///   DW_AT_alignment             0x00000008
+///   DW_TAG_member
+///     DW_AT_name                  <name-of-field-0>
+///     DW_AT_type                  <0x0000018e>
+///     DW_AT_alignment             0x00000004
+///     DW_AT_data_member_location  4
+///   DW_TAG_member
+///     DW_AT_name                  <name-of-field-1>
+///     DW_AT_type                  <0x00000195>
+///     DW_AT_alignment             0x00000008
+///     DW_AT_data_member_location  8
+///   ...
+/// ```
+///
+/// The type of a variant is always a struct type with the name of the variant
+/// and a DW_TAG_member for each field (but not the discriminant).
+fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    enum_type_di_node: &'ll DIType,
+    variant_index: VariantIdx,
+    variant_def: &VariantDef,
+    variant_layout: TyAndLayout<'tcx>,
+    di_flags: DIFlags,
+) -> &'ll DIType {
+    debug_assert_eq!(variant_layout.ty, enum_type_and_layout.ty);
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            UniqueTypeId::for_enum_variant_struct_type(
+                cx.tcx,
+                enum_type_and_layout.ty,
+                variant_index,
+            ),
+            variant_def.name.as_str(),
+            // NOTE: We use size and align of enum_type, not from variant_layout:
+            size_and_align_of(enum_type_and_layout),
+            Some(enum_type_di_node),
+            di_flags,
+        ),
+        |cx, struct_type_di_node| {
+            (0..variant_layout.fields.count())
+                .map(|field_index| {
+                    let field_name = if variant_def.ctor_kind() != Some(CtorKind::Fn) {
+                        // Fields have names
+                        let field = &variant_def.fields[FieldIdx::from_usize(field_index)];
+                        Cow::from(field.name.as_str())
+                    } else {
+                        // Tuple-like
+                        super::tuple_field_name(field_index)
+                    };
+
+                    let field_layout = variant_layout.field(cx, field_index);
+
+                    build_field_di_node(
+                        cx,
+                        struct_type_di_node,
+                        &field_name,
+                        (field_layout.size, field_layout.align.abi),
+                        variant_layout.fields.offset(field_index),
+                        di_flags,
+                        type_di_node(cx, field_layout.ty),
+                    )
+                })
+                .collect::<SmallVec<_>>()
+        },
+        |cx| build_generic_type_param_di_nodes(cx, enum_type_and_layout.ty),
+    )
+    .di_node
+}
+
+/// Build the struct type for describing a single coroutine state.
+/// See [build_coroutine_di_node].
+///
+/// ```txt
+///
+///       DW_TAG_structure_type              (top-level type for enum)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///           DW_TAG_variant                 (variant 1)
+///           DW_TAG_variant                 (variant 2)
+///           DW_TAG_variant                 (variant 3)
+///  --->   DW_TAG_structure_type            (type of variant 1)
+///  --->   DW_TAG_structure_type            (type of variant 2)
+///  --->   DW_TAG_structure_type            (type of variant 3)
+///
+/// ```
+pub fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    variant_index: VariantIdx,
+    coroutine_type_and_layout: TyAndLayout<'tcx>,
+    coroutine_type_di_node: &'ll DIType,
+    coroutine_layout: &CoroutineLayout<'tcx>,
+    common_upvar_names: &IndexSlice<FieldIdx, Symbol>,
+) -> &'ll DIType {
+    let variant_name = CoroutineArgs::variant_name(variant_index);
+    let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
+        cx.tcx,
+        coroutine_type_and_layout.ty,
+        variant_index,
+    );
+
+    let variant_layout = coroutine_type_and_layout.for_variant(cx, variant_index);
+
+    let coroutine_args = match coroutine_type_and_layout.ty.kind() {
+        ty::Coroutine(_, args) => args.as_coroutine(),
+        _ => unreachable!(),
+    };
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &variant_name,
+            size_and_align_of(coroutine_type_and_layout),
+            Some(coroutine_type_di_node),
+            DIFlags::FlagZero,
+        ),
+        |cx, variant_struct_type_di_node| {
+            // Fields that just belong to this variant/state
+            let state_specific_fields: SmallVec<_> = (0..variant_layout.fields.count())
+                .map(|field_index| {
+                    let coroutine_saved_local = coroutine_layout.variant_fields[variant_index]
+                        [FieldIdx::from_usize(field_index)];
+                    let field_name_maybe = coroutine_layout.field_names[coroutine_saved_local];
+                    let field_name = field_name_maybe
+                        .as_ref()
+                        .map(|s| Cow::from(s.as_str()))
+                        .unwrap_or_else(|| super::tuple_field_name(field_index));
+
+                    let field_type = variant_layout.field(cx, field_index).ty;
+
+                    build_field_di_node(
+                        cx,
+                        variant_struct_type_di_node,
+                        &field_name,
+                        cx.size_and_align_of(field_type),
+                        variant_layout.fields.offset(field_index),
+                        DIFlags::FlagZero,
+                        type_di_node(cx, field_type),
+                    )
+                })
+                .collect();
+
+            // Fields that are common to all states
+            let common_fields: SmallVec<_> = coroutine_args
+                .prefix_tys()
+                .iter()
+                .zip(common_upvar_names)
+                .enumerate()
+                .map(|(index, (upvar_ty, upvar_name))| {
+                    build_field_di_node(
+                        cx,
+                        variant_struct_type_di_node,
+                        upvar_name.as_str(),
+                        cx.size_and_align_of(upvar_ty),
+                        coroutine_type_and_layout.fields.offset(index),
+                        DIFlags::FlagZero,
+                        type_di_node(cx, upvar_ty),
+                    )
+                })
+                .collect();
+
+            state_specific_fields.into_iter().chain(common_fields).collect()
+        },
+        |cx| build_generic_type_param_di_nodes(cx, coroutine_type_and_layout.ty),
+    )
+    .di_node
+}
+
+#[derive(Copy, Clone)]
+enum DiscrResult {
+    NoDiscriminant,
+    Value(u128),
+    Range(u128, u128),
+}
+
+impl DiscrResult {
+    fn opt_single_val(&self) -> Option<u128> {
+        if let Self::Value(d) = *self { Some(d) } else { None }
+    }
+}
+
+/// Returns the discriminant value corresponding to the variant index.
+///
+/// Will return `DiscrResult::NoDiscriminant` if there are fewer than two variants (because
+/// then the enum won't have a tag), and `DiscrResult::Range` if this is the untagged variant
+/// of a niche-layout enum (because then there is no single discriminant value).
+fn compute_discriminant_value<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    variant_index: VariantIdx,
+) -> DiscrResult {
+    match enum_type_and_layout.layout.variants() {
+        &Variants::Single { .. } => DiscrResult::NoDiscriminant,
+        &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => DiscrResult::Value(
+            enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val,
+        ),
+        &Variants::Multiple {
+            tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, untagged_variant },
+            tag,
+            ..
+        } => {
+            if variant_index == untagged_variant {
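+                // The untagged variant is not identified by a single tag value. Instead,
+                // report the range of niche-field values that correspond to it, so a
+                // consumer can recognize the variant as "tag lies within this range".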
+                let valid_range = enum_type_and_layout
+                    .for_variant(cx, variant_index)
+                    .largest_niche
+                    .as_ref()
+                    .unwrap()
+                    .valid_range;
+
+                let min = valid_range.start.min(valid_range.end);
+                let min = tag.size(cx).truncate(min);
+
+                let max = valid_range.start.max(valid_range.end);
+                let max = tag.size(cx).truncate(max);
+
+                DiscrResult::Range(min, max)
+            } else {
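+                // Niche-encoded variants are identified by
+                // `(variant_index - niche_variants.start) + niche_start`, computed with
+                // wrapping arithmetic and truncated to the width of the tag.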
+                let value = (variant_index.as_u32() as u128)
+                    .wrapping_sub(niche_variants.start().as_u32() as u128)
+                    .wrapping_add(niche_start);
+                let value = tag.size(cx).truncate(value);
+                DiscrResult::Value(value)
+            }
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
new file mode 100644
index 00000000000..3dbe820b8ff
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -0,0 +1,445 @@
+use std::borrow::Cow;
+
+use crate::{
+    common::CodegenCx,
+    debuginfo::{
+        metadata::{
+            enums::tag_base_type,
+            file_metadata, size_and_align_of, type_di_node,
+            type_map::{self, Stub, StubInfo, UniqueTypeId},
+            unknown_file_metadata, visibility_di_flags, DINodeCreationResult, SmallVec,
+            NO_GENERICS, UNKNOWN_LINE_NUMBER,
+        },
+        utils::{create_DIArray, get_namespace_for_item, DIB},
+    },
+    llvm::{
+        self,
+        debuginfo::{DIFile, DIFlags, DIType},
+    },
+};
+use libc::c_uint;
+use rustc_codegen_ssa::{
+    debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+    traits::ConstMethods,
+};
+use rustc_middle::{
+    bug,
+    ty::{
+        self,
+        layout::{LayoutOf, TyAndLayout},
+    },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+/// Build the debuginfo node for an enum type. The listing below shows what such a
+/// type looks like at the LLVM IR/DWARF level. It is a `DW_TAG_structure_type`
+/// with a single `DW_TAG_variant_part` that in turn contains a `DW_TAG_variant`
+/// for each variant of the enum. The variant part also contains a single member
+/// describing the discriminant, and the top-level type additionally contains a
+/// nested struct type for each of the variants.
+///
+/// ```txt
+///  ---> DW_TAG_structure_type              (top-level type for enum)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///           DW_TAG_variant                 (variant 1)
+///           DW_TAG_variant                 (variant 2)
+///           DW_TAG_variant                 (variant 3)
+///         DW_TAG_structure_type            (type of variant 1)
+///         DW_TAG_structure_type            (type of variant 2)
+///         DW_TAG_structure_type            (type of variant 3)
+/// ```
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let enum_type = unique_type_id.expect_ty();
+    let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+        bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+    };
+
+    let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+    let enum_type_and_layout = cx.layout_of(enum_type);
+    let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+    let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did());
+
+    debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &enum_type_name,
+            size_and_align_of(enum_type_and_layout),
+            Some(containing_scope),
+            visibility_flags,
+        ),
+        |cx, enum_type_di_node| {
+            // Build the struct type for each variant. These will be referenced by the
+            // DW_TAG_variant DIEs inside of the DW_TAG_variant_part DIE.
+            // We also collect the names used for the corresponding DW_TAG_variant DIEs here.
+            let variant_member_infos: SmallVec<_> = enum_adt_def
+                .variant_range()
+                .map(|variant_index| VariantMemberInfo {
+                    variant_index,
+                    variant_name: Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
+                    variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+                        cx,
+                        enum_type_and_layout,
+                        enum_type_di_node,
+                        variant_index,
+                        enum_adt_def.variant(variant_index),
+                        enum_type_and_layout.for_variant(cx, variant_index),
+                        visibility_flags,
+                    ),
+                    source_info: None,
+                })
+                .collect();
+
+            smallvec![build_enum_variant_part_di_node(
+                cx,
+                enum_type_and_layout,
+                enum_type_di_node,
+                &variant_member_infos[..],
+            )]
+        },
+        // We don't seem to be emitting generic args on the enum type; rather,
+        // they get attached to the struct type of each variant.
+        NO_GENERICS,
+    )
+}
+
+/// Build the debuginfo node for a coroutine environment. It looks the same as the debuginfo for
+/// an enum. See [build_enum_type_di_node] for more information.
+///
+/// ```txt
+///
+///  ---> DW_TAG_structure_type              (top-level type for the coroutine)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///           DW_TAG_variant                 (variant 1)
+///           DW_TAG_variant                 (variant 2)
+///           DW_TAG_variant                 (variant 3)
+///         DW_TAG_structure_type            (type of variant 1)
+///         DW_TAG_structure_type            (type of variant 2)
+///         DW_TAG_structure_type            (type of variant 3)
+///
+/// ```
+pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+    let coroutine_type = unique_type_id.expect_ty();
+    let &ty::Coroutine(coroutine_def_id, _) = coroutine_type.kind() else {
+        bug!("build_coroutine_di_node() called with non-coroutine type: `{:?}`", coroutine_type)
+    };
+
+    let containing_scope = get_namespace_for_item(cx, coroutine_def_id);
+    let coroutine_type_and_layout = cx.layout_of(coroutine_type);
+
+    debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
+
+    let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);
+
+    type_map::build_type_with_children(
+        cx,
+        type_map::stub(
+            cx,
+            Stub::Struct,
+            unique_type_id,
+            &coroutine_type_name,
+            size_and_align_of(coroutine_type_and_layout),
+            Some(containing_scope),
+            DIFlags::FlagZero,
+        ),
+        |cx, coroutine_type_di_node| {
+            let coroutine_layout =
+                cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap();
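+            // The coroutine layout records, for each suspend-point variant, which saved
+            // locals it contains and (where available) their source-level names; these
+            // are used below to build the per-variant struct types.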
+
+            let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } =
+                coroutine_type_and_layout.variants
+            else {
+                bug!(
+                    "Encountered coroutine with non-direct-tag layout: {:?}",
+                    coroutine_type_and_layout
+                )
+            };
+
+            let common_upvar_names =
+                cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id);
+
+            // Build variant struct types
+            let variant_struct_type_di_nodes: SmallVec<_> = variants
+                .indices()
+                .map(|variant_index| {
+                    // FIXME: This is problematic because just a number is not a valid identifier.
+                    //        CoroutineArgs::variant_name(variant_index) would be consistent
+                    //        with enums?
+                    let variant_name = format!("{}", variant_index.as_usize()).into();
+
+                    let span = coroutine_layout.variant_source_info[variant_index].span;
+                    let source_info = if !span.is_dummy() {
+                        let loc = cx.lookup_debug_loc(span.lo());
+                        Some((file_metadata(cx, &loc.file), loc.line))
+                    } else {
+                        None
+                    };
+
+                    VariantMemberInfo {
+                        variant_index,
+                        variant_name,
+                        variant_struct_type_di_node:
+                            super::build_coroutine_variant_struct_type_di_node(
+                                cx,
+                                variant_index,
+                                coroutine_type_and_layout,
+                                coroutine_type_di_node,
+                                coroutine_layout,
+                                common_upvar_names,
+                            ),
+                        source_info,
+                    }
+                })
+                .collect();
+
+            smallvec![build_enum_variant_part_di_node(
+                cx,
+                coroutine_type_and_layout,
+                coroutine_type_di_node,
+                &variant_struct_type_di_nodes[..],
+            )]
+        },
+        // We don't seem to be emitting generic args on the coroutine type; rather,
+        // they get attached to the struct type of each variant.
+        NO_GENERICS,
+    )
+}
+
+/// Builds the DW_TAG_variant_part of an enum or coroutine debuginfo node:
+///
+/// ```txt
+///       DW_TAG_structure_type              (top-level type for enum)
+/// --->    DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///           DW_TAG_variant                 (variant 1)
+///           DW_TAG_variant                 (variant 2)
+///           DW_TAG_variant                 (variant 3)
+///         DW_TAG_structure_type            (type of variant 1)
+///         DW_TAG_structure_type            (type of variant 2)
+///         DW_TAG_structure_type            (type of variant 3)
+/// ```
+fn build_enum_variant_part_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    enum_type_di_node: &'ll DIType,
+    variant_member_infos: &[VariantMemberInfo<'_, 'll>],
+) -> &'ll DIType {
+    let tag_member_di_node =
+        build_discr_member_di_node(cx, enum_type_and_layout, enum_type_di_node);
+
+    let variant_part_unique_type_id =
+        UniqueTypeId::for_enum_variant_part(cx.tcx, enum_type_and_layout.ty);
+
+    let stub = StubInfo::new(
+        cx,
+        variant_part_unique_type_id,
+        |cx, variant_part_unique_type_id_str| unsafe {
+            let variant_part_name = "";
+            llvm::LLVMRustDIBuilderCreateVariantPart(
+                DIB(cx),
+                enum_type_di_node,
+                variant_part_name.as_ptr().cast(),
+                variant_part_name.len(),
+                unknown_file_metadata(cx),
+                UNKNOWN_LINE_NUMBER,
+                enum_type_and_layout.size.bits(),
+                enum_type_and_layout.align.abi.bits() as u32,
+                DIFlags::FlagZero,
+                tag_member_di_node,
+                create_DIArray(DIB(cx), &[]),
+                variant_part_unique_type_id_str.as_ptr().cast(),
+                variant_part_unique_type_id_str.len(),
+            )
+        },
+    );
+
+    type_map::build_type_with_children(
+        cx,
+        stub,
+        |cx, variant_part_di_node| {
+            variant_member_infos
+                .iter()
+                .map(|variant_member_info| {
+                    build_enum_variant_member_di_node(
+                        cx,
+                        enum_type_and_layout,
+                        variant_part_di_node,
+                        variant_member_info,
+                    )
+                })
+                .collect()
+        },
+        NO_GENERICS,
+    )
+    .di_node
+}
+
+/// Builds the DW_TAG_member describing where we can find the tag of an enum.
+/// Returns `None` if the enum does not have a tag.
+///
+/// ```txt
+///
+///       DW_TAG_structure_type              (top-level type for enum)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+/// --->      DW_TAG_member                  (discriminant member)
+///           DW_TAG_variant                 (variant 1)
+///           DW_TAG_variant                 (variant 2)
+///           DW_TAG_variant                 (variant 3)
+///         DW_TAG_structure_type            (type of variant 1)
+///         DW_TAG_structure_type            (type of variant 2)
+///         DW_TAG_structure_type            (type of variant 3)
+///
+/// ```
+fn build_discr_member_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_or_coroutine_type_and_layout: TyAndLayout<'tcx>,
+    enum_or_coroutine_type_di_node: &'ll DIType,
+) -> Option<&'ll DIType> {
+    let tag_name = match enum_or_coroutine_type_and_layout.ty.kind() {
+        ty::Coroutine(..) => "__state",
+        _ => "",
+    };
+
+    // NOTE: This is actually wrong. This will become a member of
+    //       the DW_TAG_variant_part. But, due to LLVM's API, the variant part
+    //       can only be constructed with this DW_TAG_member already created.
+    //       In LLVM IR the wrong scope will be listed, but when DWARF is
+    //       generated from it, the DW_TAG_member will be a child of the
+    //       DW_TAG_variant_part.
+    let containing_scope = enum_or_coroutine_type_di_node;
+
+    match enum_or_coroutine_type_and_layout.layout.variants() {
+        // A single-variant enum has no discriminant.
+        &Variants::Single { .. } => None,
+
+        &Variants::Multiple { tag_field, .. } => {
+            let tag_base_type = tag_base_type(cx, enum_or_coroutine_type_and_layout);
+            let (size, align) = cx.size_and_align_of(tag_base_type);
+
+            unsafe {
+                Some(llvm::LLVMRustDIBuilderCreateMemberType(
+                    DIB(cx),
+                    containing_scope,
+                    tag_name.as_ptr().cast(),
+                    tag_name.len(),
+                    unknown_file_metadata(cx),
+                    UNKNOWN_LINE_NUMBER,
+                    size.bits(),
+                    align.bits() as u32,
+                    enum_or_coroutine_type_and_layout.fields.offset(tag_field).bits(),
+                    DIFlags::FlagArtificial,
+                    type_di_node(cx, tag_base_type),
+                ))
+            }
+        }
+    }
+}
+
+/// Build the debuginfo node for `DW_TAG_variant`:
+///
+/// ```txt
+///       DW_TAG_structure_type              (top-level type for enum)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///  --->     DW_TAG_variant                 (variant 1)
+///  --->     DW_TAG_variant                 (variant 2)
+///  --->     DW_TAG_variant                 (variant 3)
+///         DW_TAG_structure_type            (type of variant 1)
+///         DW_TAG_structure_type            (type of variant 2)
+///         DW_TAG_structure_type            (type of variant 3)
+/// ```
+///
+/// This node looks like:
+///
+/// ```txt
+/// DW_TAG_variant
+///   DW_AT_discr_value           0
+///   DW_TAG_member
+///     DW_AT_name                  None
+///     DW_AT_type                  <0x000002a1>
+///     DW_AT_alignment             0x00000002
+///     DW_AT_data_member_location  0
+/// ```
+///
+/// The DW_AT_discr_value is optional, and is omitted if
+///   - This is the only variant of a univariant enum (i.e. there is no discriminant)
+///   - This is the "untagged" variant of a niche-layout enum
+///     (where only the other variants are identified by a single value)
+///
+/// There is only ever a single member, the type of which is a struct that describes the
+/// fields of the variant (excluding the discriminant). The name of the member is the name
+/// of the variant as given in the source code. The DW_AT_data_member_location is always
+/// zero.
+///
+/// Note that the LLVM DIBuilder API is a bit unintuitive here. The DW_TAG_variant subtree
+/// (including the DW_TAG_member) is built by a single call to
+/// `LLVMRustDIBuilderCreateVariantMemberType()`.
+fn build_enum_variant_member_di_node<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    enum_type_and_layout: TyAndLayout<'tcx>,
+    variant_part_di_node: &'ll DIType,
+    variant_member_info: &VariantMemberInfo<'_, 'll>,
+) -> &'ll DIType {
+    let variant_index = variant_member_info.variant_index;
+    let discr_value = super::compute_discriminant_value(cx, enum_type_and_layout, variant_index);
+
+    let (file_di_node, line_number) = variant_member_info
+        .source_info
+        .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+    unsafe {
+        llvm::LLVMRustDIBuilderCreateVariantMemberType(
+            DIB(cx),
+            variant_part_di_node,
+            variant_member_info.variant_name.as_ptr().cast(),
+            variant_member_info.variant_name.len(),
+            file_di_node,
+            line_number,
+            enum_type_and_layout.size.bits(),
+            enum_type_and_layout.align.abi.bits() as u32,
+            Size::ZERO.bits(),
+            discr_value.opt_single_val().map(|value| cx.const_u128(value)),
+            DIFlags::FlagZero,
+            variant_member_info.variant_struct_type_di_node,
+        )
+    }
+}
+
+/// Information needed for building a `DW_TAG_variant`:
+///
+/// ```txt
+///       DW_TAG_structure_type              (top-level type for enum)
+///         DW_TAG_variant_part              (variant part)
+///           DW_AT_discr                    (reference to discriminant DW_TAG_member)
+///           DW_TAG_member                  (discriminant member)
+///  --->     DW_TAG_variant                 (variant 1)
+///  --->     DW_TAG_variant                 (variant 2)
+///  --->     DW_TAG_variant                 (variant 3)
+///         DW_TAG_structure_type            (type of variant 1)
+///         DW_TAG_structure_type            (type of variant 2)
+///         DW_TAG_structure_type            (type of variant 3)
+/// ```
+struct VariantMemberInfo<'a, 'll> {
+    variant_index: VariantIdx,
+    variant_name: Cow<'a, str>,
+    variant_struct_type_di_node: &'ll DIType,
+    source_info: Option<(&'ll DIFile, c_uint)>,
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
new file mode 100644
index 00000000000..1aec65cf949
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -0,0 +1,278 @@
+use std::cell::RefCell;
+
+use rustc_data_structures::{
+    fingerprint::Fingerprint,
+    fx::FxHashMap,
+    stable_hasher::{HashStable, StableHasher},
+};
+use rustc_middle::{
+    bug,
+    ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt},
+};
+use rustc_target::abi::{Align, Size, VariantIdx};
+
+use crate::{
+    common::CodegenCx,
+    debuginfo::utils::{create_DIArray, debug_context, DIB},
+    llvm::{
+        self,
+        debuginfo::{DIFlags, DIScope, DIType},
+    },
+};
+
+use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER};
+
+mod private {
+    // This type cannot be constructed outside of this module because
+    // it has a private field. We make use of this in order to prevent
+    // `UniqueTypeId` from being constructed directly, without asserting
+    // the preconditions.
+    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+    pub struct HiddenZst;
+}
+
+/// A unique identifier for anything that we create a debuginfo node for.
+/// The types it contains are expected to already be normalized (which
+/// is debug_asserted in the constructors).
+///
+/// Note that there are some things that only show up in debuginfo, like
+/// the separate type descriptions for each enum variant. These get an ID
+/// too because they have their own debuginfo node in LLVM IR.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+pub(super) enum UniqueTypeId<'tcx> {
+    /// The ID of a regular type as it shows up at the language level.
+    Ty(Ty<'tcx>, private::HiddenZst),
+    /// The ID for the single DW_TAG_variant_part nested inside the top-level
+    /// DW_TAG_structure_type that describes enums and coroutines.
+    VariantPart(Ty<'tcx>, private::HiddenZst),
+    /// The ID for the artificial struct type describing a single enum variant.
+    VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
+    /// The ID for the additional wrapper struct type describing an enum variant in CPP-like mode.
+    VariantStructTypeCppLikeWrapper(Ty<'tcx>, VariantIdx, private::HiddenZst),
+    /// The ID of the artificial type we create for VTables.
+    VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, private::HiddenZst),
+}
+
+impl<'tcx> UniqueTypeId<'tcx> {
+    pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
+        debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
+        UniqueTypeId::Ty(t, private::HiddenZst)
+    }
+
+    pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
+        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        UniqueTypeId::VariantPart(enum_ty, private::HiddenZst)
+    }
+
+    pub fn for_enum_variant_struct_type(
+        tcx: TyCtxt<'tcx>,
+        enum_ty: Ty<'tcx>,
+        variant_idx: VariantIdx,
+    ) -> Self {
+        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
+    }
+
+    pub fn for_enum_variant_struct_type_wrapper(
+        tcx: TyCtxt<'tcx>,
+        enum_ty: Ty<'tcx>,
+        variant_idx: VariantIdx,
+    ) -> Self {
+        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        UniqueTypeId::VariantStructTypeCppLikeWrapper(enum_ty, variant_idx, private::HiddenZst)
+    }
+
+    pub fn for_vtable_ty(
+        tcx: TyCtxt<'tcx>,
+        self_type: Ty<'tcx>,
+        implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
+    ) -> Self {
+        debug_assert_eq!(
+            self_type,
+            tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
+        );
+        debug_assert_eq!(
+            implemented_trait,
+            tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
+        );
+        UniqueTypeId::VTableTy(self_type, implemented_trait, private::HiddenZst)
+    }
+
+    /// Generates a string version of this [UniqueTypeId], which can be used as the `UniqueId`
+    /// argument of the various `LLVMRustDIBuilderCreate*Type()` methods.
+    ///
+    /// Right now this takes the form of a hex-encoded opaque hash value.
+    pub fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String {
+        let mut hasher = StableHasher::new();
+        tcx.with_stable_hashing_context(|mut hcx| {
+            hcx.while_hashing_spans(false, |hcx| self.hash_stable(hcx, &mut hasher))
+        });
+        hasher.finish::<Fingerprint>().to_hex()
+    }
+
+    pub fn expect_ty(self) -> Ty<'tcx> {
+        match self {
+            UniqueTypeId::Ty(ty, _) => ty,
+            _ => bug!("Expected `UniqueTypeId::Ty` but found `{:?}`", self),
+        }
+    }
+}
+
+/// The `TypeMap` is where the debug context holds the type metadata nodes
+/// created so far. The debuginfo nodes are identified by `UniqueTypeId`.
+#[derive(Default)]
+pub(crate) struct TypeMap<'ll, 'tcx> {
+    pub(super) unique_id_to_di_node: RefCell<FxHashMap<UniqueTypeId<'tcx>, &'ll DIType>>,
+}
+
+impl<'ll, 'tcx> TypeMap<'ll, 'tcx> {
+    /// Adds a `UniqueTypeId`-to-metadata mapping to the `TypeMap`. The method will
+    /// fail if the mapping already exists.
+    pub(super) fn insert(&self, unique_type_id: UniqueTypeId<'tcx>, metadata: &'ll DIType) {
+        if self.unique_id_to_di_node.borrow_mut().insert(unique_type_id, metadata).is_some() {
+            bug!("type metadata for unique ID '{:?}' is already in the `TypeMap`!", unique_type_id);
+        }
+    }
+
+    pub(super) fn di_node_for_unique_id(
+        &self,
+        unique_type_id: UniqueTypeId<'tcx>,
+    ) -> Option<&'ll DIType> {
+        self.unique_id_to_di_node.borrow().get(&unique_type_id).cloned()
+    }
+}
+
+pub struct DINodeCreationResult<'ll> {
+    pub di_node: &'ll DIType,
+    pub already_stored_in_typemap: bool,
+}
+
+impl<'ll> DINodeCreationResult<'ll> {
+    pub fn new(di_node: &'ll DIType, already_stored_in_typemap: bool) -> Self {
+        DINodeCreationResult { di_node, already_stored_in_typemap }
+    }
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum Stub<'ll> {
+    Struct,
+    Union,
+    VTableTy { vtable_holder: &'ll DIType },
+}
+
+pub struct StubInfo<'ll, 'tcx> {
+    metadata: &'ll DIType,
+    unique_type_id: UniqueTypeId<'tcx>,
+}
+
+impl<'ll, 'tcx> StubInfo<'ll, 'tcx> {
+    pub(super) fn new(
+        cx: &CodegenCx<'ll, 'tcx>,
+        unique_type_id: UniqueTypeId<'tcx>,
+        build: impl FnOnce(&CodegenCx<'ll, 'tcx>, /* unique_type_id_str: */ &str) -> &'ll DIType,
+    ) -> StubInfo<'ll, 'tcx> {
+        let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+        let di_node = build(cx, &unique_type_id_str);
+        StubInfo { metadata: di_node, unique_type_id }
+    }
+}
+
+/// Create a stub debuginfo node onto which fields and nested types can be attached.
+pub(super) fn stub<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    kind: Stub<'ll>,
+    unique_type_id: UniqueTypeId<'tcx>,
+    name: &str,
+    (size, align): (Size, Align),
+    containing_scope: Option<&'ll DIScope>,
+    flags: DIFlags,
+) -> StubInfo<'ll, 'tcx> {
+    let empty_array = create_DIArray(DIB(cx), &[]);
+    let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
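+    // The unique id string becomes the `identifier:` field of the resulting
+    // composite type in LLVM IR.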
+
+    let metadata = match kind {
+        Stub::Struct | Stub::VTableTy { .. } => {
+            let vtable_holder = match kind {
+                Stub::VTableTy { vtable_holder } => Some(vtable_holder),
+                _ => None,
+            };
+            unsafe {
+                llvm::LLVMRustDIBuilderCreateStructType(
+                    DIB(cx),
+                    containing_scope,
+                    name.as_ptr().cast(),
+                    name.len(),
+                    unknown_file_metadata(cx),
+                    UNKNOWN_LINE_NUMBER,
+                    size.bits(),
+                    align.bits() as u32,
+                    flags,
+                    None,
+                    empty_array,
+                    0,
+                    vtable_holder,
+                    unique_type_id_str.as_ptr().cast(),
+                    unique_type_id_str.len(),
+                )
+            }
+        }
+        Stub::Union => unsafe {
+            llvm::LLVMRustDIBuilderCreateUnionType(
+                DIB(cx),
+                containing_scope,
+                name.as_ptr().cast(),
+                name.len(),
+                unknown_file_metadata(cx),
+                UNKNOWN_LINE_NUMBER,
+                size.bits(),
+                align.bits() as u32,
+                flags,
+                Some(empty_array),
+                0,
+                unique_type_id_str.as_ptr().cast(),
+                unique_type_id_str.len(),
+            )
+        },
+    };
+    StubInfo { metadata, unique_type_id }
+}
+
+/// This function enables creating debuginfo nodes that can recursively refer to themselves.
+/// It will first insert the given stub into the type map and only then execute the `members`
+/// and `generics` closures passed in. These closures have access to the stub so they can
+/// directly attach fields to it. If the type of a field transitively refers back
+/// to the type currently being built, the stub will already be found in the type map,
+/// which effectively breaks the recursion cycle.
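+///
+/// A typical call, mirroring the enum builders in this module, looks roughly like
+/// this (sketch, not exact signatures):
+///
+/// ```txt
+/// type_map::build_type_with_children(
+///     cx,
+///     type_map::stub(cx, Stub::Struct, unique_type_id, &name, (size, align), scope, flags),
+///     |cx, stub_di_node| { /* build the member di_nodes; may refer back to the stub */ },
+///     NO_GENERICS,
+/// )
+/// .di_node
+/// ```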
+pub(super) fn build_type_with_children<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    stub_info: StubInfo<'ll, 'tcx>,
+    members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>,
+    generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>,
+) -> DINodeCreationResult<'ll> {
+    debug_assert_eq!(
+        debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id),
+        None
+    );
+
+    debug_context(cx).type_map.insert(stub_info.unique_type_id, stub_info.metadata);
+
+    let members: SmallVec<_> =
+        members(cx, stub_info.metadata).into_iter().map(|node| Some(node)).collect();
+    let generics: SmallVec<Option<&'ll DIType>> =
+        generics(cx).into_iter().map(|node| Some(node)).collect();
+
+    if !(members.is_empty() && generics.is_empty()) {
+        unsafe {
+            let members_array = create_DIArray(DIB(cx), &members[..]);
+            let generics_array = create_DIArray(DIB(cx), &generics[..]);
+            llvm::LLVMRustDICompositeTypeReplaceArrays(
+                DIB(cx),
+                stub_info.metadata,
+                Some(members_array),
+                Some(generics_array),
+            );
+        }
+    }
+
+    DINodeCreationResult { di_node: stub_info.metadata, already_stored_in_typemap: true }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
new file mode 100644
index 00000000000..d3a851b40c0
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -0,0 +1,642 @@
+#![doc = include_str!("doc.md")]
+
+use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
+use rustc_data_structures::unord::UnordMap;
+
+use self::metadata::{file_metadata, type_di_node};
+use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use self::namespace::mangled_name_of_instance;
+use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
+
+use crate::abi::FnAbi;
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+    DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
+    DIVariable,
+};
+use crate::value::Value;
+
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::{DefId, DefIdMap};
+use rustc_index::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::GenericArgsRef;
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt};
+use rustc_session::config::{self, DebugInfo};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_span::{
+    BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span, StableSourceFileId,
+};
+use rustc_target::abi::Size;
+
+use libc::c_uint;
+use smallvec::SmallVec;
+use std::cell::OnceCell;
+use std::cell::RefCell;
+use std::iter;
+use std::ops::Range;
+
+mod create_scope_map;
+pub mod gdb;
+pub mod metadata;
+mod namespace;
+mod utils;
+
+pub use self::create_scope_map::compute_mir_scopes;
+pub use self::metadata::build_global_var_di_node;
+
+#[allow(non_upper_case_globals)]
+const DW_TAG_auto_variable: c_uint = 0x100;
+#[allow(non_upper_case_globals)]
+const DW_TAG_arg_variable: c_uint = 0x101;
+
+/// A context object for maintaining all state needed by the debuginfo module.
+pub struct CodegenUnitDebugContext<'ll, 'tcx> {
+    llcontext: &'ll llvm::Context,
+    llmod: &'ll llvm::Module,
+    builder: &'ll mut DIBuilder<'ll>,
+    created_files: RefCell<UnordMap<Option<(StableSourceFileId, SourceFileHash)>, &'ll DIFile>>,
+
+    type_map: metadata::TypeMap<'ll, 'tcx>,
+    namespace_map: RefCell<DefIdMap<&'ll DIScope>>,
+    recursion_marker_type: OnceCell<&'ll DIType>,
+}
+
+impl Drop for CodegenUnitDebugContext<'_, '_> {
+    fn drop(&mut self) {
+        unsafe {
+            llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _));
+        }
+    }
+}
+
+impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
+    pub fn new(llmod: &'ll llvm::Module) -> Self {
+        debug!("CodegenUnitDebugContext::new");
+        let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
+        // DIBuilder inherits context from the module, so we'd better use the same one
+        let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
+        CodegenUnitDebugContext {
+            llcontext,
+            llmod,
+            builder,
+            created_files: Default::default(),
+            type_map: Default::default(),
+            namespace_map: RefCell::new(Default::default()),
+            recursion_marker_type: OnceCell::new(),
+        }
+    }
+
+    pub fn finalize(&self, sess: &Session) {
+        unsafe {
+            llvm::LLVMRustDIBuilderFinalize(self.builder);
+
+            if !sess.target.is_like_msvc {
+                // Debuginfo generation in LLVM by default uses a higher
+                // version of dwarf than macOS currently understands. We can
+                // instruct LLVM to emit an older version of dwarf, however,
+                // for macOS to understand. For more info see #11352
+                // This can be overridden using --llvm-opts -dwarf-version,N.
+                // Android has the same issue (#22398)
+                let dwarf_version = sess
+                    .opts
+                    .unstable_opts
+                    .dwarf_version
+                    .unwrap_or(sess.target.default_dwarf_version);
+                llvm::LLVMRustAddModuleFlag(
+                    self.llmod,
+                    llvm::LLVMModFlagBehavior::Warning,
+                    c"Dwarf Version".as_ptr().cast(),
+                    dwarf_version,
+                );
+            } else {
+                // Indicate that we want CodeView debug information on MSVC
+                llvm::LLVMRustAddModuleFlag(
+                    self.llmod,
+                    llvm::LLVMModFlagBehavior::Warning,
+                    c"CodeView".as_ptr().cast(),
+                    1,
+                )
+            }
+
+            // Prevent bitcode readers from deleting the debug info.
+            llvm::LLVMRustAddModuleFlag(
+                self.llmod,
+                llvm::LLVMModFlagBehavior::Warning,
+                c"Debug Info Version".as_ptr().cast(),
+                llvm::LLVMRustDebugMetadataVersion(),
+            );
+        }
+    }
+}
+
+/// Creates any deferred debug metadata nodes
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+    if let Some(dbg_cx) = &cx.dbg_cx {
+        debug!("finalize");
+
+        if gdb::needs_gdb_debug_scripts_section(cx) {
+            // Add a .debug_gdb_scripts section to this compile-unit. This will
+            // cause GDB to try to load the gdb_load_rust_pretty_printers.py file,
+            // which activates the Rust pretty printers for the binary this section is
+            // contained in.
+            gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
+        }
+
+        dbg_cx.finalize(cx.sess());
+    }
+}
+
+impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn dbg_var_addr(
+        &mut self,
+        dbg_var: &'ll DIVariable,
+        dbg_loc: &'ll DILocation,
+        variable_alloca: Self::Value,
+        direct_offset: Size,
+        indirect_offsets: &[Size],
+        fragment: Option<Range<Size>>,
+    ) {
+        // Convert the direct and indirect offsets and fragment byte range to address ops.
+        // FIXME(eddyb) use `const`s instead of getting the values via FFI,
+        // the values should match the ones in the DWARF standard anyway.
+        let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
+        let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
+        let op_llvm_fragment = || unsafe { llvm::LLVMRustDIBuilderCreateOpLLVMFragment() };
+        let mut addr_ops = SmallVec::<[u64; 8]>::new();
+
+        if direct_offset.bytes() > 0 {
+            addr_ops.push(op_plus_uconst());
+            addr_ops.push(direct_offset.bytes() as u64);
+        }
+        for &offset in indirect_offsets {
+            addr_ops.push(op_deref());
+            if offset.bytes() > 0 {
+                addr_ops.push(op_plus_uconst());
+                addr_ops.push(offset.bytes() as u64);
+            }
+        }
+        if let Some(fragment) = fragment {
+            // `DW_OP_LLVM_fragment` takes as arguments the fragment's
+            // offset and size, both of them in bits.
+            addr_ops.push(op_llvm_fragment());
+            addr_ops.push(fragment.start.bits() as u64);
+            addr_ops.push((fragment.end - fragment.start).bits() as u64);
+        }
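+        // For example, `direct_offset = 8` bytes with `indirect_offsets = [4]` yields the
+        // expression `[DW_OP_plus_uconst, 8, DW_OP_deref, DW_OP_plus_uconst, 4]`.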
+
+        unsafe {
+            // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
+            llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
+                DIB(self.cx()),
+                variable_alloca,
+                dbg_var,
+                addr_ops.as_ptr(),
+                addr_ops.len() as c_uint,
+                dbg_loc,
+                self.llbb(),
+            );
+        }
+    }
+
+    fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
+        unsafe {
+            llvm::LLVMSetCurrentDebugLocation2(self.llbuilder, dbg_loc);
+        }
+    }
+
+    fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+        gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
+    }
+
+    fn set_var_name(&mut self, value: &'ll Value, name: &str) {
+        // Avoid wasting time if LLVM value names aren't even enabled.
+        if self.sess().fewer_names() {
+            return;
+        }
+
+        // Only function parameters and instructions are local to a function,
+        // don't change the name of anything else (e.g. globals).
+        let param_or_inst = unsafe {
+            llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
+        };
+        if !param_or_inst {
+            return;
+        }
+
+        // Avoid replacing the name if it already exists.
+        // While we could combine the names somehow, it'd
+        // get noisy quickly, and the usefulness is dubious.
+        if llvm::get_value_name(value).is_empty() {
+            llvm::set_value_name(value, name.as_bytes());
+        }
+    }
+}
+
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+    /// Information about the original source file.
+    pub file: Lrc<SourceFile>,
+    /// The (1-based) line number.
+    pub line: u32,
+    /// The (1-based) column number.
+    pub col: u32,
+}
+
+impl CodegenCx<'_, '_> {
+    /// Looks up debug source information about a `BytePos`.
+    // FIXME(eddyb) rename this to better indicate it's a duplicate of
+    // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+    // `lookup_char_pos` return the right information instead.
+    pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+        let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+            Ok(SourceFileAndLine { sf: file, line }) => {
+                let line_pos = file.lines()[line];
+
+                // Use 1-based indexing.
+                let line = (line + 1) as u32;
+                let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
+
+                (file, line, col)
+            }
+            Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
+        };
+
+        // For MSVC, omit the column number.
+        // Otherwise, emit it. This mimics clang behaviour.
+        // See discussion in https://github.com/rust-lang/rust/issues/42921
+        if self.sess().target.is_like_msvc {
+            DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
+        } else {
+            DebugLoc { file, line, col }
+        }
+    }
+}
+
+impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn create_function_debug_context(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        llfn: &'ll Value,
+        mir: &mir::Body<'tcx>,
+    ) -> Option<FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>> {
+        if self.sess().opts.debuginfo == DebugInfo::None {
+            return None;
+        }
+
+        // Initialize fn debug context (including scopes).
+        let empty_scope = DebugScope {
+            dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+            inlined_at: None,
+            file_start_pos: BytePos(0),
+            file_end_pos: BytePos(0),
+        };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes),
+            inlined_function_scopes: Default::default(),
+        };
+
+        // Fill in all the scopes, with the information from the MIR body.
+        compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
+
+        Some(fn_debug_context)
+    }
+
+    fn dbg_scope_fn(
+        &self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        maybe_definition_llfn: Option<&'ll Value>,
+    ) -> &'ll DIScope {
+        let tcx = self.tcx;
+
+        let def_id = instance.def_id();
+        let (containing_scope, is_method) = get_containing_scope(self, instance);
+        let span = tcx.def_span(def_id);
+        let loc = self.lookup_debug_loc(span.lo());
+        let file_metadata = file_metadata(self, &loc.file);
+
+        let function_type_metadata = unsafe {
+            let fn_signature = get_function_signature(self, fn_abi);
+            llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
+        };
+
+        let mut name = String::with_capacity(64);
+        type_names::push_item_name(tcx, def_id, false, &mut name);
+
+        // Find the enclosing function, in case this is a closure.
+        let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
+
+        // We look up the generics of the enclosing function and truncate the args
+        // to their length in order to cut off extra stuff that might be in there for
+        // closures or coroutines.
+        let generics = tcx.generics_of(enclosing_fn_def_id);
+        let args = instance.args.truncate_to(tcx, generics);
+
+        type_names::push_generic_params(
+            tcx,
+            tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args),
+            enclosing_fn_def_id,
+            &mut name,
+        );
+
+        let template_parameters = get_template_parameters(self, generics, args);
+
+        let linkage_name = &mangled_name_of_instance(self, instance).name;
+        // Omit the linkage_name if it is the same as subprogram name.
+        let linkage_name = if &name == linkage_name { "" } else { linkage_name };
+
+        // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
+        let scope_line = loc.line;
+
+        let mut flags = DIFlags::FlagPrototyped;
+
+        if fn_abi.ret.layout.abi.is_uninhabited() {
+            flags |= DIFlags::FlagNoReturn;
+        }
+
+        let mut spflags = DISPFlags::SPFlagDefinition;
+        if is_node_local_to_unit(self, def_id) {
+            spflags |= DISPFlags::SPFlagLocalToUnit;
+        }
+        if self.sess().opts.optimize != config::OptLevel::No {
+            spflags |= DISPFlags::SPFlagOptimized;
+        }
+        if let Some((id, _)) = tcx.entry_fn(()) {
+            if id == def_id {
+                spflags |= DISPFlags::SPFlagMainSubprogram;
+            }
+        }
+
+        // When we're adding a method to a type DIE, we only want a DW_AT_declaration there, because
+        // LLVM LTO can't unify type definitions when a child DIE is a full subprogram definition.
+        // When we use this `decl` below, the subprogram definition gets created at the CU level
+        // with a DW_AT_specification pointing back to the type's declaration.
+        let decl = is_method.then(|| unsafe {
+            llvm::LLVMRustDIBuilderCreateMethod(
+                DIB(self),
+                containing_scope,
+                name.as_ptr().cast(),
+                name.len(),
+                linkage_name.as_ptr().cast(),
+                linkage_name.len(),
+                file_metadata,
+                loc.line,
+                function_type_metadata,
+                flags,
+                spflags & !DISPFlags::SPFlagDefinition,
+                template_parameters,
+            )
+        });
+
+        return unsafe {
+            llvm::LLVMRustDIBuilderCreateFunction(
+                DIB(self),
+                containing_scope,
+                name.as_ptr().cast(),
+                name.len(),
+                linkage_name.as_ptr().cast(),
+                linkage_name.len(),
+                file_metadata,
+                loc.line,
+                function_type_metadata,
+                scope_line,
+                flags,
+                spflags,
+                maybe_definition_llfn,
+                template_parameters,
+                decl,
+            )
+        };
+
+        fn get_function_signature<'ll, 'tcx>(
+            cx: &CodegenCx<'ll, 'tcx>,
+            fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        ) -> &'ll DIArray {
+            if cx.sess().opts.debuginfo != DebugInfo::Full {
+                return create_DIArray(DIB(cx), &[]);
+            }
+
+            let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
+
+            // Return type -- llvm::DIBuilder wants this at index 0
+            signature.push(if fn_abi.ret.is_ignore() {
+                None
+            } else {
+                Some(type_di_node(cx, fn_abi.ret.layout.ty))
+            });
+
+            // Arguments types
+            if cx.sess().target.is_like_msvc {
+                // FIXME(#42800):
+                // There is a bug in MSDIA that leads to a crash when it encounters
+                // a fixed-size array of `u8` or something zero-sized in a
+                // function-type (see #40477).
+                // As a workaround, we replace those fixed-size arrays with a
+                // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
+                // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
+                // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
+                // This transformed type is wrong, but these function types are
+                // already inaccurate due to ABI adjustments (see #42800).
+                signature.extend(fn_abi.args.iter().map(|arg| {
+                    let t = arg.layout.ty;
+                    let t = match t.kind() {
+                        ty::Array(ct, _)
+                            if (*ct == cx.tcx.types.u8) || cx.layout_of(*ct).is_zst() =>
+                        {
+                            Ty::new_imm_ptr(cx.tcx, *ct)
+                        }
+                        _ => t,
+                    };
+                    Some(type_di_node(cx, t))
+                }));
+            } else {
+                signature
+                    .extend(fn_abi.args.iter().map(|arg| Some(type_di_node(cx, arg.layout.ty))));
+            }
+
+            create_DIArray(DIB(cx), &signature[..])
+        }
+
+        fn get_template_parameters<'ll, 'tcx>(
+            cx: &CodegenCx<'ll, 'tcx>,
+            generics: &ty::Generics,
+            args: GenericArgsRef<'tcx>,
+        ) -> &'ll DIArray {
+            if args.types().next().is_none() {
+                return create_DIArray(DIB(cx), &[]);
+            }
+
+            // Again, only create type information if full debuginfo is enabled
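+            // Note that only type parameters get a DW_TAG_template_type_parameter node;
+            // const and lifetime arguments are skipped by the `as_type()` filter below.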
+            let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
+                let names = get_parameter_names(cx, generics);
+                iter::zip(args, names)
+                    .filter_map(|(kind, name)| {
+                        kind.as_type().map(|ty| {
+                            let actual_type =
+                                cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+                            let actual_type_metadata = type_di_node(cx, actual_type);
+                            let name = name.as_str();
+                            unsafe {
+                                Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+                                    DIB(cx),
+                                    None,
+                                    name.as_ptr().cast(),
+                                    name.len(),
+                                    actual_type_metadata,
+                                ))
+                            }
+                        })
+                    })
+                    .collect()
+            } else {
+                vec![]
+            };
+
+            create_DIArray(DIB(cx), &template_params)
+        }
+
+        fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+            let mut names = generics.parent.map_or_else(Vec::new, |def_id| {
+                get_parameter_names(cx, cx.tcx.generics_of(def_id))
+            });
+            names.extend(generics.params.iter().map(|param| param.name));
+            names
+        }
+
+        /// Returns a scope, plus `true` if that's a type scope for "class" methods,
+        /// otherwise `false` for plain namespace scopes.
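+        ///
+        /// Illustrative example (hypothetical type name): with full debuginfo, a
+        /// method in an inherent `impl MyStruct { .. }` is scoped under
+        /// `MyStruct`'s type DI node (returning `true`); trait impl methods and
+        /// other items fall back to their enclosing namespace (returning `false`).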
+        fn get_containing_scope<'ll, 'tcx>(
+            cx: &CodegenCx<'ll, 'tcx>,
+            instance: Instance<'tcx>,
+        ) -> (&'ll DIScope, bool) {
+            // First, let's see if this is a method within an inherent impl,
+            // because if so, we want to make the resulting subroutine DIE a child
+            // of the subroutine's self-type.
+            if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) {
+                // If the method does *not* belong to a trait, proceed
+                if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
+                    let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions(
+                        instance.args,
+                        ty::ParamEnv::reveal_all(),
+                        cx.tcx.type_of(impl_def_id),
+                    );
+
+                    // Only "class" methods are generally understood by LLVM,
+                    // so avoid methods on other types (e.g., `<*mut T>::null`).
+                    if let ty::Adt(def, ..) = impl_self_ty.kind()
+                        && !def.is_box()
+                    {
+                        // Again, only create type information if full debuginfo is enabled
+                        if cx.sess().opts.debuginfo == DebugInfo::Full && !impl_self_ty.has_param()
+                        {
+                            return (type_di_node(cx, impl_self_ty), true);
+                        } else {
+                            return (namespace::item_namespace(cx, def.did()), false);
+                        }
+                    }
+                } else {
+                    // For trait method impls we still use the "parallel namespace"
+                    // strategy
+                }
+            }
+
+            let scope = namespace::item_namespace(
+                cx,
+                DefId {
+                    krate: instance.def_id().krate,
+                    index: cx
+                        .tcx
+                        .def_key(instance.def_id())
+                        .parent
+                        .expect("get_containing_scope: missing parent?"),
+                },
+            );
+            (scope, false)
+        }
+    }
+
+    fn dbg_loc(
+        &self,
+        scope: &'ll DIScope,
+        inlined_at: Option<&'ll DILocation>,
+        span: Span,
+    ) -> &'ll DILocation {
+        let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+        unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation(line, col, scope, inlined_at) }
+    }
+
+    fn create_vtable_debuginfo(
+        &self,
+        ty: Ty<'tcx>,
+        trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+        vtable: Self::Value,
+    ) {
+        metadata::create_vtable_di_node(self, ty, trait_ref, vtable)
+    }
+
+    fn extend_scope_to_file(
+        &self,
+        scope_metadata: &'ll DIScope,
+        file: &rustc_span::SourceFile,
+    ) -> &'ll DILexicalBlock {
+        metadata::extend_scope_to_file(self, scope_metadata, file)
+    }
+
+    fn debuginfo_finalize(&self) {
+        finalize(self)
+    }
+
+    // FIXME(eddyb) find a common convention for all of the debuginfo-related
+    // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+    fn create_dbg_var(
+        &self,
+        variable_name: Symbol,
+        variable_type: Ty<'tcx>,
+        scope_metadata: &'ll DIScope,
+        variable_kind: VariableKind,
+        span: Span,
+    ) -> &'ll DIVariable {
+        let loc = self.lookup_debug_loc(span.lo());
+        let file_metadata = file_metadata(self, &loc.file);
+
+        let type_metadata = type_di_node(self, variable_type);
+
+        let (argument_index, dwarf_tag) = match variable_kind {
+            ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
+            LocalVariable => (0, DW_TAG_auto_variable),
+        };
+        let align = self.align_of(variable_type);
+
+        let name = variable_name.as_str();
+        unsafe {
+            llvm::LLVMRustDIBuilderCreateVariable(
+                DIB(self),
+                dwarf_tag,
+                scope_metadata,
+                name.as_ptr().cast(),
+                name.len(),
+                file_metadata,
+                loc.line,
+                type_metadata,
+                true,
+                DIFlags::FlagZero,
+                argument_index,
+                align.bytes() as u32,
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
new file mode 100644
index 00000000000..fa61c7dde18
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -0,0 +1,48 @@
+// Namespace Handling.
+
+use super::utils::{debug_context, DIB};
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_middle::ty::{self, Instance};
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::DIScope;
+use rustc_hir::def_id::DefId;
+
+pub fn mangled_name_of_instance<'a, 'tcx>(
+    cx: &CodegenCx<'a, 'tcx>,
+    instance: Instance<'tcx>,
+) -> ty::SymbolName<'tcx> {
+    let tcx = cx.tcx;
+    tcx.symbol_name(instance)
+}
+
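+/// Illustrative sketch (hypothetical path): for an item at `my_crate::foo::Bar`,
+/// calling this on `foo`'s `DefId` yields a `DINamespace` named "foo" nested
+/// inside one named "my_crate", recursing through `def_key` parents and caching
+/// each result in `namespace_map`.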
+pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+    if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) {
+        return scope;
+    }
+
+    let def_key = cx.tcx.def_key(def_id);
+    let parent_scope = def_key
+        .parent
+        .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
+
+    let namespace_name_string = {
+        let mut output = String::with_capacity(64);
+        type_names::push_item_name(cx.tcx, def_id, false, &mut output);
+        output
+    };
+
+    let scope = unsafe {
+        llvm::LLVMRustDIBuilderCreateNameSpace(
+            DIB(cx),
+            parent_scope,
+            namespace_name_string.as_ptr().cast(),
+            namespace_name_string.len(),
+            false, // ExportSymbols (only relevant for C++ anonymous namespaces)
+        )
+    };
+
+    debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope);
+    scope
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
new file mode 100644
index 00000000000..c758010c581
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -0,0 +1,98 @@
+// Utility Functions.
+
+use super::namespace::item_namespace;
+use super::CodegenUnitDebugContext;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
+use rustc_middle::ty::{self, Ty};
+use tracing::trace;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope};
+
+pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool {
+    // The is_local_to_unit flag indicates whether a function is local to the
+    // current compilation unit (i.e., if it is *static* in the C-sense). The
+    // *reachable* set should provide a good approximation of this, as it
+    // contains everything that might leak out of the current crate (by being
+    // externally visible or by being inlined into something externally
+// visible). It might be better to use the `exported_items` set from
+// `driver::CrateAnalysis` in the future, but at the moment this set is not
+    // available in the codegen pass.
+    !cx.tcx.is_reachable_non_generic(def_id)
+}
+
+#[allow(non_snake_case)]
+pub fn create_DIArray<'ll>(
+    builder: &DIBuilder<'ll>,
+    arr: &[Option<&'ll DIDescriptor>],
+) -> &'ll DIArray {
+    unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
+}
+
+#[inline]
+pub fn debug_context<'a, 'll, 'tcx>(
+    cx: &'a CodegenCx<'ll, 'tcx>,
+) -> &'a CodegenUnitDebugContext<'ll, 'tcx> {
+    cx.dbg_cx.as_ref().unwrap()
+}
+
+#[inline]
+#[allow(non_snake_case)]
+pub fn DIB<'a, 'll>(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> {
+    cx.dbg_cx.as_ref().unwrap().builder
+}
+
+pub fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+    item_namespace(cx, cx.tcx.parent(def_id))
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum FatPtrKind {
+    Slice,
+    Dyn,
+}
+
+/// Determines if `pointee_ty` is slice-like or trait-object-like, i.e.
+/// if the second field of the fat pointer is a length or a vtable-pointer.
+/// If `pointee_ty` does not require a fat pointer (because it is Sized) then
+/// the function returns `None`.
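+///
+/// For example (illustrative): a pointee of `[u8]` or `str` yields
+/// `Some(FatPtrKind::Slice)`, `dyn Trait` yields `Some(FatPtrKind::Dyn)`, and a
+/// `Sized` pointee such as `u32` yields `None`.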
+pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    pointee_ty: Ty<'tcx>,
+) -> Option<FatPtrKind> {
+    let pointee_tail_ty = cx.tcx.struct_tail_erasing_lifetimes(pointee_ty, cx.param_env());
+    let layout = cx.layout_of(pointee_tail_ty);
+    trace!(
+        "fat_pointer_kind: {:?} has layout {:?} (is_unsized? {})",
+        pointee_tail_ty,
+        layout,
+        layout.is_unsized()
+    );
+
+    if layout.is_sized() {
+        return None;
+    }
+
+    match *pointee_tail_ty.kind() {
+        ty::Str | ty::Slice(_) => Some(FatPtrKind::Slice),
+        ty::Dynamic(..) => Some(FatPtrKind::Dyn),
+        ty::Foreign(_) => {
+            // Assert that pointers to foreign types really are thin:
+            debug_assert_eq!(
+                cx.size_of(Ty::new_imm_ptr(cx.tcx, pointee_tail_ty)),
+                cx.size_of(Ty::new_imm_ptr(cx.tcx, cx.tcx.types.u8))
+            );
+            None
+        }
+        _ => {
+            // For all other pointee types we should already have returned None
+            // at the beginning of the function.
+            panic!(
+                "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {pointee_tail_ty:?}"
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
new file mode 100644
index 00000000000..78c0725a637
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -0,0 +1,231 @@
+//! Declare various LLVM values.
+//!
+//! Prefer using functions and methods from this module rather than calling LLVM
+//! functions directly. These functions do some additional work to ensure we do
+//! the right thing given the preconceptions of codegen.
+//!
+//! Some useful guidelines:
+//!
+//! * Use declare_* family of methods if you are declaring, but are not
+//!   interested in defining the Value they return.
+//! * Use define_* family of methods when you might be defining the Value.
+//! * When in doubt, define.
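+//!
+//! A rough illustration (hypothetical names): `define_global("FOO", ty)` returns
+//! `None` if "FOO" already has a definition in this module, whereas
+//! `declare_global("FOO", ty)` simply returns the existing declaration.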
+
+use crate::abi::{FnAbi, FnAbiLlvmExt};
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::AttributePlace::Function;
+use crate::type_::Type;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::TypeMembershipMethods;
+use rustc_middle::ty::{Instance, Ty};
+use rustc_symbol_mangling::typeid::{
+    kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance,
+    TypeIdOptions,
+};
+use smallvec::SmallVec;
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing Value instead.
+fn declare_raw_fn<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    name: &str,
+    callconv: llvm::CallConv,
+    unnamed: llvm::UnnamedAddr,
+    visibility: llvm::Visibility,
+    ty: &'ll Type,
+) -> &'ll Value {
+    debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
+    let llfn = unsafe {
+        llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
+    };
+
+    llvm::SetFunctionCallConv(llfn, callconv);
+    llvm::SetUnnamedAddress(llfn, unnamed);
+    llvm::set_visibility(llfn, visibility);
+
+    let mut attrs = SmallVec::<[_; 4]>::new();
+
+    if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
+        attrs.push(llvm::AttributeKind::NoRedZone.create_attr(cx.llcx));
+    }
+
+    attrs.extend(attributes::non_lazy_bind_attr(cx));
+
+    attributes::apply_to_llfn(llfn, Function, &attrs);
+
+    llfn
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+    /// Declare a global value.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// return its Value instead.
+    pub fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
+        debug!("declare_global(name={:?})", name);
+        unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
+    }
+
+    /// Declare a C ABI function.
+    ///
+    /// Only use this for foreign function ABIs and glue. For Rust functions use
+    /// `declare_fn` instead.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing Value instead.
+    pub fn declare_cfn(
+        &self,
+        name: &str,
+        unnamed: llvm::UnnamedAddr,
+        fn_type: &'ll Type,
+    ) -> &'ll Value {
+        // Declare C ABI functions with the visibility used by C by default.
+        let visibility = if self.tcx.sess.default_hidden_visibility() {
+            llvm::Visibility::Hidden
+        } else {
+            llvm::Visibility::Default
+        };
+
+        declare_raw_fn(self, name, llvm::CCallConv, unnamed, visibility, fn_type)
+    }
+
+    /// Declare an entry function.
+    ///
+    /// The ABI of this function can change depending on the target (although for now it is the
+    /// same as `declare_cfn`).
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing Value instead.
+    pub fn declare_entry_fn(
+        &self,
+        name: &str,
+        callconv: llvm::CallConv,
+        unnamed: llvm::UnnamedAddr,
+        fn_type: &'ll Type,
+    ) -> &'ll Value {
+        let visibility = if self.tcx.sess.default_hidden_visibility() {
+            llvm::Visibility::Hidden
+        } else {
+            llvm::Visibility::Default
+        };
+        declare_raw_fn(self, name, callconv, unnamed, visibility, fn_type)
+    }
+
+    /// Declare a Rust function.
+    ///
+    /// If there’s a value with the same name already declared, the function will
+    /// update the declaration and return the existing Value instead.
+    pub fn declare_fn(
+        &self,
+        name: &str,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        instance: Option<Instance<'tcx>>,
+    ) -> &'ll Value {
+        debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
+
+        // Function addresses in Rust are never significant, allowing functions to
+        // be merged.
+        let llfn = declare_raw_fn(
+            self,
+            name,
+            fn_abi.llvm_cconv(),
+            llvm::UnnamedAddr::Global,
+            llvm::Visibility::Default,
+            fn_abi.llvm_type(self),
+        );
+        fn_abi.apply_attrs_llfn(self, llfn);
+
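+        // A summary of the calls below: with CFI enabled we attach the exact type
+        // id plus its generalized-pointer and normalized-integer variants, so that
+        // callers compiled with either of those options (or both) still match.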
+        if self.tcx.sess.is_sanitizer_cfi_enabled() {
+            if let Some(instance) = instance {
+                let typeid = typeid_for_instance(self.tcx, &instance, TypeIdOptions::empty());
+                self.set_type_metadata(llfn, typeid);
+                let typeid =
+                    typeid_for_instance(self.tcx, &instance, TypeIdOptions::GENERALIZE_POINTERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid =
+                    typeid_for_instance(self.tcx, &instance, TypeIdOptions::NORMALIZE_INTEGERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid = typeid_for_instance(
+                    self.tcx,
+                    &instance,
+                    TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS,
+                );
+                self.add_type_metadata(llfn, typeid);
+            } else {
+                let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::empty());
+                self.set_type_metadata(llfn, typeid);
+                let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::GENERALIZE_POINTERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::NORMALIZE_INTEGERS);
+                self.add_type_metadata(llfn, typeid);
+                let typeid = typeid_for_fnabi(
+                    self.tcx,
+                    fn_abi,
+                    TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS,
+                );
+                self.add_type_metadata(llfn, typeid);
+            }
+        }
+
+        if self.tcx.sess.is_sanitizer_kcfi_enabled() {
+            // LLVM KCFI does not support multiple !kcfi_type attachments
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            if let Some(instance) = instance {
+                let kcfi_typeid = kcfi_typeid_for_instance(self.tcx, &instance, options);
+                self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+            } else {
+                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+                self.set_kcfi_type_metadata(llfn, kcfi_typeid);
+            }
+        }
+
+        llfn
+    }
+
+    /// Declare a global with an intention to define it.
+    ///
+    /// Use this function when you intend to define a global. This function will
+    /// return `None` if the name already has a definition associated with it. In that
+    /// case an error should be reported to the user, because it usually happens due
+    /// to a user error (e.g., misuse of `#[no_mangle]` or `#[export_name]` attributes).
+    pub fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
+        if self.get_defined_value(name).is_some() {
+            None
+        } else {
+            Some(self.declare_global(name, ty))
+        }
+    }
+
+    /// Declare a private global.
+    ///
+    /// Use this function when you intend to define a global without a name.
+    pub fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
+        unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
+    }
+
+    /// Gets a declared value by name.
+    pub fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
+        debug!("get_declared_value(name={:?})", name);
+        unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
+    }
+
+    /// Gets a defined or externally defined (AvailableExternally linkage) value
+    /// by name.
+    pub fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
+        self.get_declared_value(name).and_then(|val| {
+            let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
+            if !declaration { Some(val) } else { None }
+        })
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs
new file mode 100644
index 00000000000..e15eda7c66c
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/errors.rs
@@ -0,0 +1,256 @@
+use std::borrow::Cow;
+use std::ffi::CString;
+use std::path::Path;
+
+use crate::fluent_generated as fluent;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level};
+use rustc_macros::{Diagnostic, Subdiagnostic};
+use rustc_span::Span;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_ctarget_feature_prefix)]
+#[note]
+pub(crate) struct UnknownCTargetFeaturePrefix<'a> {
+    pub feature: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_ctarget_feature)]
+#[note]
+pub(crate) struct UnknownCTargetFeature<'a> {
+    pub feature: &'a str,
+    #[subdiagnostic]
+    pub rust_feature: PossibleFeature<'a>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unstable_ctarget_feature)]
+#[note]
+pub(crate) struct UnstableCTargetFeature<'a> {
+    pub feature: &'a str,
+}
+
+#[derive(Subdiagnostic)]
+pub(crate) enum PossibleFeature<'a> {
+    #[help(codegen_llvm_possible_feature)]
+    Some { rust_feature: &'a str },
+    #[help(codegen_llvm_consider_filing_feature_request)]
+    None,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_error_creating_import_library)]
+pub(crate) struct ErrorCreatingImportLibrary<'a> {
+    pub lib_name: &'a str,
+    pub error: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_symbol_already_defined)]
+pub(crate) struct SymbolAlreadyDefined<'a> {
+    #[primary_span]
+    pub span: Span,
+    pub symbol_name: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_invalid_minimum_alignment_not_power_of_two)]
+pub(crate) struct InvalidMinimumAlignmentNotPowerOfTwo {
+    pub align: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_invalid_minimum_alignment_too_large)]
+pub(crate) struct InvalidMinimumAlignmentTooLarge {
+    pub align: u64,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_sanitizer_memtag_requires_mte)]
+pub(crate) struct SanitizerMemtagRequiresMte;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_error_writing_def_file)]
+pub(crate) struct ErrorWritingDEFFile {
+    pub error: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_error_calling_dlltool)]
+pub(crate) struct ErrorCallingDllTool<'a> {
+    pub dlltool_path: Cow<'a, str>,
+    pub error: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_dlltool_fail_import_library)]
+pub(crate) struct DlltoolFailImportLibrary<'a> {
+    pub dlltool_path: Cow<'a, str>,
+    pub dlltool_args: String,
+    pub stdout: Cow<'a, str>,
+    pub stderr: Cow<'a, str>,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_dynamic_linking_with_lto)]
+#[note]
+pub(crate) struct DynamicLinkingWithLTO;
+
+pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>);
+
+impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> {
+    fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
+        let diag: Diag<'_, G> = self.0.into_diag(dcx, level);
+        let (message, _) = diag.messages.first().expect("`LlvmError` with no message");
+        let message = dcx.eagerly_translate_to_string(message.clone(), diag.args.iter());
+        Diag::new(dcx, level, fluent::codegen_llvm_parse_target_machine_config)
+            .with_arg("error", message)
+    }
+}
+
+pub(crate) struct TargetFeatureDisableOrEnable<'a> {
+    pub features: &'a [&'a str],
+    pub span: Option<Span>,
+    pub missing_features: Option<MissingFeatures>,
+}
+
+#[derive(Subdiagnostic)]
+#[help(codegen_llvm_missing_features)]
+pub(crate) struct MissingFeatures;
+
+impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
+    fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
+        let mut diag = Diag::new(dcx, level, fluent::codegen_llvm_target_feature_disable_or_enable);
+        if let Some(span) = self.span {
+            diag.span(span);
+        };
+        if let Some(missing_features) = self.missing_features {
+            diag.subdiagnostic(dcx, missing_features);
+        }
+        diag.arg("features", self.features.join(", "));
+        diag
+    }
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_disallowed)]
+pub(crate) struct LtoDisallowed;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_dylib)]
+pub(crate) struct LtoDylib;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_proc_macro)]
+pub(crate) struct LtoProcMacro;
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_lto_bitcode_from_rlib)]
+pub(crate) struct LtoBitcodeFromRlib {
+    pub llvm_err: String,
+}
+
+#[derive(Diagnostic)]
+pub enum LlvmError<'a> {
+    #[diag(codegen_llvm_write_output)]
+    WriteOutput { path: &'a Path },
+    #[diag(codegen_llvm_target_machine)]
+    CreateTargetMachine { triple: SmallCStr },
+    #[diag(codegen_llvm_run_passes)]
+    RunLlvmPasses,
+    #[diag(codegen_llvm_serialize_module)]
+    SerializeModule { name: &'a str },
+    #[diag(codegen_llvm_write_ir)]
+    WriteIr { path: &'a Path },
+    #[diag(codegen_llvm_prepare_thin_lto_context)]
+    PrepareThinLtoContext,
+    #[diag(codegen_llvm_load_bitcode)]
+    LoadBitcode { name: CString },
+    #[diag(codegen_llvm_write_thinlto_key)]
+    WriteThinLtoKey { err: std::io::Error },
+    #[diag(codegen_llvm_multiple_source_dicompileunit)]
+    MultipleSourceDiCompileUnit,
+    #[diag(codegen_llvm_prepare_thin_lto_module)]
+    PrepareThinLtoModule,
+    #[diag(codegen_llvm_parse_bitcode)]
+    ParseBitcode,
+}
+
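+/// Pairs an `LlvmError` with the raw error string reported by LLVM. `into_diag`
+/// swaps in the corresponding `*_with_llvm_err` fluent message and attaches the
+/// string as the `llvm_err` argument.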
+pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String);
+
+impl<G: EmissionGuarantee> Diagnostic<'_, G> for WithLlvmError<'_> {
+    fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
+        use LlvmError::*;
+        let msg_with_llvm_err = match &self.0 {
+            WriteOutput { .. } => fluent::codegen_llvm_write_output_with_llvm_err,
+            CreateTargetMachine { .. } => fluent::codegen_llvm_target_machine_with_llvm_err,
+            RunLlvmPasses => fluent::codegen_llvm_run_passes_with_llvm_err,
+            SerializeModule { .. } => fluent::codegen_llvm_serialize_module_with_llvm_err,
+            WriteIr { .. } => fluent::codegen_llvm_write_ir_with_llvm_err,
+            PrepareThinLtoContext => fluent::codegen_llvm_prepare_thin_lto_context_with_llvm_err,
+            LoadBitcode { .. } => fluent::codegen_llvm_load_bitcode_with_llvm_err,
+            WriteThinLtoKey { .. } => fluent::codegen_llvm_write_thinlto_key_with_llvm_err,
+            MultipleSourceDiCompileUnit => {
+                fluent::codegen_llvm_multiple_source_dicompileunit_with_llvm_err
+            }
+            PrepareThinLtoModule => fluent::codegen_llvm_prepare_thin_lto_module_with_llvm_err,
+            ParseBitcode => fluent::codegen_llvm_parse_bitcode_with_llvm_err,
+        };
+        self.0
+            .into_diag(dcx, level)
+            .with_primary_message(msg_with_llvm_err)
+            .with_arg("llvm_err", self.1)
+    }
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_from_llvm_optimization_diag)]
+pub(crate) struct FromLlvmOptimizationDiag<'a> {
+    pub filename: &'a str,
+    pub line: std::ffi::c_uint,
+    pub column: std::ffi::c_uint,
+    pub pass_name: &'a str,
+    pub kind: &'a str,
+    pub message: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_from_llvm_diag)]
+pub(crate) struct FromLlvmDiag {
+    pub message: String,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_write_bytecode)]
+pub(crate) struct WriteBytecode<'a> {
+    pub path: &'a Path,
+    pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_copy_bitcode)]
+pub(crate) struct CopyBitcode {
+    pub err: std::io::Error,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_unknown_debuginfo_compression)]
+pub struct UnknownCompression {
+    pub algorithm: &'static str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_mismatch_data_layout)]
+pub struct MismatchedDataLayout<'a> {
+    pub rustc_target: &'a str,
+    pub rustc_layout: &'a str,
+    pub llvm_target: &'a str,
+    pub llvm_layout: &'a str,
+}
+
+#[derive(Diagnostic)]
+#[diag(codegen_llvm_invalid_target_feature_prefix)]
+pub(crate) struct InvalidTargetFeaturePrefix<'a> {
+    pub feature: &'a str,
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
new file mode 100644
index 00000000000..71b69a94e99
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -0,0 +1,2433 @@
+use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::va_arg::emit_va_arg;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh};
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir as hir;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
+use rustc_middle::ty::{self, GenericArgsRef, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_span::{sym, Span, Symbol};
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
+use rustc_target::spec::{HasTargetSpec, PanicStrategy};
+
+use std::cmp::Ordering;
+
+fn get_simple_intrinsic<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    name: Symbol,
+) -> Option<(&'ll Type, &'ll Value)> {
+    let llvm_name = match name {
+        sym::sqrtf16 => "llvm.sqrt.f16",
+        sym::sqrtf32 => "llvm.sqrt.f32",
+        sym::sqrtf64 => "llvm.sqrt.f64",
+        sym::sqrtf128 => "llvm.sqrt.f128",
+
+        sym::powif16 => "llvm.powi.f16",
+        sym::powif32 => "llvm.powi.f32",
+        sym::powif64 => "llvm.powi.f64",
+        sym::powif128 => "llvm.powi.f128",
+
+        sym::sinf16 => "llvm.sin.f16",
+        sym::sinf32 => "llvm.sin.f32",
+        sym::sinf64 => "llvm.sin.f64",
+        sym::sinf128 => "llvm.sin.f128",
+
+        sym::cosf16 => "llvm.cos.f16",
+        sym::cosf32 => "llvm.cos.f32",
+        sym::cosf64 => "llvm.cos.f64",
+        sym::cosf128 => "llvm.cos.f128",
+
+        sym::powf16 => "llvm.pow.f16",
+        sym::powf32 => "llvm.pow.f32",
+        sym::powf64 => "llvm.pow.f64",
+        sym::powf128 => "llvm.pow.f128",
+
+        sym::expf16 => "llvm.exp.f16",
+        sym::expf32 => "llvm.exp.f32",
+        sym::expf64 => "llvm.exp.f64",
+        sym::expf128 => "llvm.exp.f128",
+
+        sym::exp2f16 => "llvm.exp2.f16",
+        sym::exp2f32 => "llvm.exp2.f32",
+        sym::exp2f64 => "llvm.exp2.f64",
+        sym::exp2f128 => "llvm.exp2.f128",
+
+        sym::logf16 => "llvm.log.f16",
+        sym::logf32 => "llvm.log.f32",
+        sym::logf64 => "llvm.log.f64",
+        sym::logf128 => "llvm.log.f128",
+
+        sym::log10f16 => "llvm.log10.f16",
+        sym::log10f32 => "llvm.log10.f32",
+        sym::log10f64 => "llvm.log10.f64",
+        sym::log10f128 => "llvm.log10.f128",
+
+        sym::log2f16 => "llvm.log2.f16",
+        sym::log2f32 => "llvm.log2.f32",
+        sym::log2f64 => "llvm.log2.f64",
+        sym::log2f128 => "llvm.log2.f128",
+
+        sym::fmaf16 => "llvm.fma.f16",
+        sym::fmaf32 => "llvm.fma.f32",
+        sym::fmaf64 => "llvm.fma.f64",
+        sym::fmaf128 => "llvm.fma.f128",
+
+        sym::fabsf16 => "llvm.fabs.f16",
+        sym::fabsf32 => "llvm.fabs.f32",
+        sym::fabsf64 => "llvm.fabs.f64",
+        sym::fabsf128 => "llvm.fabs.f128",
+
+        sym::minnumf16 => "llvm.minnum.f16",
+        sym::minnumf32 => "llvm.minnum.f32",
+        sym::minnumf64 => "llvm.minnum.f64",
+        sym::minnumf128 => "llvm.minnum.f128",
+
+        sym::maxnumf16 => "llvm.maxnum.f16",
+        sym::maxnumf32 => "llvm.maxnum.f32",
+        sym::maxnumf64 => "llvm.maxnum.f64",
+        sym::maxnumf128 => "llvm.maxnum.f128",
+
+        sym::copysignf16 => "llvm.copysign.f16",
+        sym::copysignf32 => "llvm.copysign.f32",
+        sym::copysignf64 => "llvm.copysign.f64",
+        sym::copysignf128 => "llvm.copysign.f128",
+
+        sym::floorf16 => "llvm.floor.f16",
+        sym::floorf32 => "llvm.floor.f32",
+        sym::floorf64 => "llvm.floor.f64",
+        sym::floorf128 => "llvm.floor.f128",
+
+        sym::ceilf16 => "llvm.ceil.f16",
+        sym::ceilf32 => "llvm.ceil.f32",
+        sym::ceilf64 => "llvm.ceil.f64",
+        sym::ceilf128 => "llvm.ceil.f128",
+
+        sym::truncf16 => "llvm.trunc.f16",
+        sym::truncf32 => "llvm.trunc.f32",
+        sym::truncf64 => "llvm.trunc.f64",
+        sym::truncf128 => "llvm.trunc.f128",
+
+        sym::rintf16 => "llvm.rint.f16",
+        sym::rintf32 => "llvm.rint.f32",
+        sym::rintf64 => "llvm.rint.f64",
+        sym::rintf128 => "llvm.rint.f128",
+
+        sym::nearbyintf16 => "llvm.nearbyint.f16",
+        sym::nearbyintf32 => "llvm.nearbyint.f32",
+        sym::nearbyintf64 => "llvm.nearbyint.f64",
+        sym::nearbyintf128 => "llvm.nearbyint.f128",
+
+        sym::roundf16 => "llvm.round.f16",
+        sym::roundf32 => "llvm.round.f32",
+        sym::roundf64 => "llvm.round.f64",
+        sym::roundf128 => "llvm.round.f128",
+
+        sym::ptr_mask => "llvm.ptrmask",
+
+        sym::roundevenf16 => "llvm.roundeven.f16",
+        sym::roundevenf32 => "llvm.roundeven.f32",
+        sym::roundevenf64 => "llvm.roundeven.f64",
+        sym::roundevenf128 => "llvm.roundeven.f128",
+
+        _ => return None,
+    };
+    Some(cx.get_intrinsic(llvm_name))
+}
+
+impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+    fn codegen_intrinsic_call(
+        &mut self,
+        instance: ty::Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[OperandRef<'tcx, &'ll Value>],
+        llresult: &'ll Value,
+        span: Span,
+    ) -> Result<(), ty::Instance<'tcx>> {
+        let tcx = self.tcx;
+        let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
+            bug!("expected fn item type, found {}", callee_ty);
+        };
+
+        let sig = callee_ty.fn_sig(tcx);
+        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+        let arg_tys = sig.inputs();
+        let ret_ty = sig.output();
+        let name = tcx.item_name(def_id);
+
+        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+        let simple = get_simple_intrinsic(self, name);
+        let llval = match name {
+            _ if simple.is_some() => {
+                let (simple_ty, simple_fn) = simple.unwrap();
+                self.call(
+                    simple_ty,
+                    None,
+                    None,
+                    simple_fn,
+                    &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+                    None,
+                )
+            }
+            sym::likely => {
+                self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
+            }
+            sym::is_val_statically_known => {
+                let intrinsic_type = args[0].layout.immediate_llvm_type(self.cx);
+                match self.type_kind(intrinsic_type) {
+                    TypeKind::Pointer | TypeKind::Integer | TypeKind::Float | TypeKind::Double => {
+                        self.call_intrinsic(
+                            &format!("llvm.is.constant.{:?}", intrinsic_type),
+                            &[args[0].immediate()],
+                        )
+                    }
+                    _ => self.const_bool(false),
+                }
+            }
+            sym::unlikely => self
+                .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
+            sym::catch_unwind => {
+                catch_unwind_intrinsic(
+                    self,
+                    args[0].immediate(),
+                    args[1].immediate(),
+                    args[2].immediate(),
+                    llresult,
+                );
+                return Ok(());
+            }
+            sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
+            sym::va_copy => {
+                self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
+            }
+            sym::va_arg => {
+                match fn_abi.ret.layout.abi {
+                    abi::Abi::Scalar(scalar) => {
+                        match scalar.primitive() {
+                            Primitive::Int(..) => {
+                                if self.cx().size_of(ret_ty).bytes() < 4 {
+                                    // `va_arg` should not be called on an integer type
+                                    // less than 4 bytes in length. If it is, promote
+                                    // the integer to an `i32` and truncate the result
+                                    // back to the smaller type.
+                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
+                                    self.trunc(promoted_result, llret_ty)
+                                } else {
+                                    emit_va_arg(self, args[0], ret_ty)
+                                }
+                            }
+                            Primitive::F16 => bug!("the va_arg intrinsic does not work with `f16`"),
+                            Primitive::F64 | Primitive::Pointer(_) => {
+                                emit_va_arg(self, args[0], ret_ty)
+                            }
+                            // `va_arg` should never be used with the return type f32.
+                            Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
+                            Primitive::F128 => {
+                                bug!("the va_arg intrinsic does not work with `f128`")
+                            }
+                        }
+                    }
+                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
+                }
+            }
+
+            sym::volatile_load | sym::unaligned_volatile_load => {
+                let tp_ty = fn_args.type_at(0);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
+                    let llty = ty.llvm_type(self);
+                    self.volatile_load(llty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+                };
+                let align = if name == sym::unaligned_volatile_load {
+                    1
+                } else {
+                    self.align_of(tp_ty).bytes() as u32
+                };
+                unsafe {
+                    llvm::LLVMSetAlignment(load, align);
+                }
+                if !result.layout.is_zst() {
+                    self.store(load, result.llval, result.align);
+                }
+                return Ok(());
+            }
+            sym::volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::unaligned_volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.unaligned_volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::prefetch_read_data
+            | sym::prefetch_write_data
+            | sym::prefetch_read_instruction
+            | sym::prefetch_write_instruction => {
+                let (rw, cache_type) = match name {
+                    sym::prefetch_read_data => (0, 1),
+                    sym::prefetch_write_data => (1, 1),
+                    sym::prefetch_read_instruction => (0, 0),
+                    sym::prefetch_write_instruction => (1, 0),
+                    _ => bug!(),
+                };
+                self.call_intrinsic(
+                    "llvm.prefetch",
+                    &[
+                        args[0].immediate(),
+                        self.const_i32(rw),
+                        args[1].immediate(),
+                        self.const_i32(cache_type),
+                    ],
+                )
+            }
+            sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctpop
+            | sym::bswap
+            | sym::bitreverse
+            | sym::rotate_left
+            | sym::rotate_right
+            | sym::saturating_add
+            | sym::saturating_sub => {
+                let ty = arg_tys[0];
+                match int_type_width_signed(ty, self) {
+                    Some((width, signed)) => match name {
+                        sym::ctlz | sym::cttz => {
+                            let y = self.const_bool(false);
+                            self.call_intrinsic(
+                                &format!("llvm.{name}.i{width}"),
+                                &[args[0].immediate(), y],
+                            )
+                        }
+                        sym::ctlz_nonzero => {
+                            let y = self.const_bool(true);
+                            let llvm_name = &format!("llvm.ctlz.i{width}");
+                            self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+                        }
+                        sym::cttz_nonzero => {
+                            let y = self.const_bool(true);
+                            let llvm_name = &format!("llvm.cttz.i{width}");
+                            self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+                        }
+                        sym::ctpop => self.call_intrinsic(
+                            &format!("llvm.ctpop.i{width}"),
+                            &[args[0].immediate()],
+                        ),
+                        sym::bswap => {
+                            if width == 8 {
+                                args[0].immediate() // byte-swapping a u8/i8 is just a no-op
+                            } else {
+                                self.call_intrinsic(
+                                    &format!("llvm.bswap.i{width}"),
+                                    &[args[0].immediate()],
+                                )
+                            }
+                        }
+                        sym::bitreverse => self.call_intrinsic(
+                            &format!("llvm.bitreverse.i{width}"),
+                            &[args[0].immediate()],
+                        ),
+                        sym::rotate_left | sym::rotate_right => {
+                            let is_left = name == sym::rotate_left;
+                            let val = args[0].immediate();
+                            let raw_shift = args[1].immediate();
+                            // rotate = funnel shift with first two args the same
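+                            // e.g. (illustrative) a 32-bit rotate_left lowers to
+                            // `llvm.fshl.i32(val, val, raw_shift)`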
+                            let llvm_name =
+                                &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
+                            self.call_intrinsic(llvm_name, &[val, val, raw_shift])
+                        }
+                        sym::saturating_add | sym::saturating_sub => {
+                            let is_add = name == sym::saturating_add;
+                            let lhs = args[0].immediate();
+                            let rhs = args[1].immediate();
+                            let llvm_name = &format!(
+                                "llvm.{}{}.sat.i{}",
+                                if signed { 's' } else { 'u' },
+                                if is_add { "add" } else { "sub" },
+                                width
+                            );
+                            self.call_intrinsic(llvm_name, &[lhs, rhs])
+                        }
+                        _ => bug!(),
+                    },
+                    None => {
+                        tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                            span,
+                            name,
+                            ty,
+                        });
+                        return Ok(());
+                    }
+                }
+            }
+
+            sym::raw_eq => {
+                use abi::Abi::*;
+                let tp_ty = fn_args.type_at(0);
+                let layout = self.layout_of(tp_ty).layout;
+                let use_integer_compare = match layout.abi() {
+                    Scalar(_) | ScalarPair(_, _) => true,
+                    Uninhabited | Vector { .. } => false,
+                    Aggregate { .. } => {
+                        // For rusty ABIs, small aggregates are actually passed
+                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+                        // so we re-use that same threshold here.
+                        layout.size() <= self.data_layout().pointer_size * 2
+                    }
+                };
+
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                if layout.size().bytes() == 0 {
+                    self.const_bool(true)
+                } else if use_integer_compare {
+                    let integer_ty = self.type_ix(layout.size().bits());
+                    let a_val = self.load(integer_ty, a, layout.align().abi);
+                    let b_val = self.load(integer_ty, b, layout.align().abi);
+                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
+                } else {
+                    let n = self.const_usize(layout.size().bytes());
+                    let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
+                    match self.cx.sess().target.arch.as_ref() {
+                        "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
+                        _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
+                    }
+                }
+            }
+
+            sym::compare_bytes => {
+                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                let cmp = self.call_intrinsic(
+                    "memcmp",
+                    &[args[0].immediate(), args[1].immediate(), args[2].immediate()],
+                );
+                // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`.
+                self.sext(cmp, self.type_ix(32))
+            }
+
+            sym::black_box => {
+                args[0].val.store(self, result);
+                let result_val_span = [result.llval];
+                // We need to "use" the argument in some way LLVM can't introspect, and on
+                // targets that support it we can typically leverage inline assembly to do
+                // this. LLVM's interpretation of inline assembly is that it's, well, a black
+                // box. This isn't the greatest implementation since it probably deoptimizes
+                // more than we want, but it's so far good enough.
+                //
+                // For zero-sized types, the location pointed to by the result may be
+                // uninitialized. Do not "use" the result in this case; instead just clobber
+                // the memory.
+                let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() {
+                    ("~{memory}", &[])
+                } else {
+                    ("r,~{memory}", &result_val_span)
+                };
+                crate::asm::inline_asm_call(
+                    self,
+                    "",
+                    constraint,
+                    inputs,
+                    self.type_void(),
+                    &[],
+                    true,
+                    false,
+                    llvm::AsmDialect::Att,
+                    &[span],
+                    false,
+                    None,
+                    None,
+                )
+                .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));
+
+                // We have copied the value to `result` already.
+                return Ok(());
+            }
+
+            _ if name.as_str().starts_with("simd_") => {
+                match generic_simd_intrinsic(
+                    self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
+                ) {
+                    Ok(llval) => llval,
+                    Err(()) => return Ok(()),
+                }
+            }
+
+            _ => {
+                debug!("unknown intrinsic '{}' -- falling back to default body", name);
+                // Call the fallback body instead of generating the intrinsic code
+                return Err(ty::Instance::new(instance.def_id(), instance.args));
+            }
+        };
+
+        if !fn_abi.ret.is_ignore() {
+            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
+                self.store(llval, result.llval, result.align);
+            } else {
+                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+                    .val
+                    .store(self, result);
+            }
+        }
+        Ok(())
+    }
+
+    fn abort(&mut self) {
+        self.call_intrinsic("llvm.trap", &[]);
+    }
+
+    fn assume(&mut self, val: Self::Value) {
+        self.call_intrinsic("llvm.assume", &[val]);
+    }
+
+    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
+        self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
+    }
+
+    fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
+        // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
+        // optimization pass replaces calls to this intrinsic with code to test type membership.
+        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
+    }
+
+    fn type_checked_load(
+        &mut self,
+        llvtable: &'ll Value,
+        vtable_byte_offset: u64,
+        typeid: &'ll Value,
+    ) -> Self::Value {
+        let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
+        let type_checked_load =
+            self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]);
+        self.extract_value(type_checked_load, 0)
+    }
+
+    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
+        self.call_intrinsic("llvm.va_start", &[va_list])
+    }
+
+    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
+        self.call_intrinsic("llvm.va_end", &[va_list])
+    }
+}
+
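+// Dispatches the `catch_unwind` intrinsic to a target-specific lowering: with
+// `-C panic=abort`, `try_func` is simply called (nothing can unwind); MSVC
+// targets use SEH, wasm targets use the wasm exception-handling instructions,
+// Emscripten has its own scheme, and everything else takes the GNU/Itanium
+// landing-pad path.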
+fn catch_unwind_intrinsic<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    if bx.sess().panic_strategy() == PanicStrategy::Abort {
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.call(try_func_ty, None, None, try_func, &[data], None);
+        // Return 0 unconditionally from the intrinsic call;
+        // we can never unwind.
+        let ret_align = bx.tcx().data_layout.i32_align.abi;
+        bx.store(bx.const_i32(0), dest, ret_align);
+    } else if wants_msvc_seh(bx.sess()) {
+        codegen_msvc_try(bx, try_func, data, catch_func, dest);
+    } else if wants_wasm_eh(bx.sess()) {
+        codegen_wasm_try(bx, try_func, data, catch_func, dest);
+    } else if bx.sess().target.os == "emscripten" {
+        codegen_emcc_try(bx, try_func, data, catch_func, dest);
+    } else {
+        codegen_gnu_try(bx, try_func, data, catch_func, dest);
+    }
+}
+
+// MSVC's definition of the `rust_try` function.
+//
+// This implementation uses the newer exception handling instructions in LLVM,
+// which have support for SEH on MSVC targets. Although these instructions are
+// meant to work for all targets, as of this writing LLVM does not recommend
+// using them, as the old ones are still better optimized.
+fn codegen_msvc_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
+
+        let normal = bx.append_sibling_block("normal");
+        let catchswitch = bx.append_sibling_block("catchswitch");
+        let catchpad_rust = bx.append_sibling_block("catchpad_rust");
+        let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
+        let caught = bx.append_sibling_block("caught");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
+        //      %slot = alloca i8*
+        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
+        //
+        //   catchpad_rust:
+        //      %tok = catchpad within %cs [%type_descriptor, 8, %slot]
+        //      %ptr = load %slot
+        //      call %catch_func(%data, %ptr)
+        //      catchret from %tok to label %caught
+        //
+        //   catchpad_foreign:
+        //      %tok = catchpad within %cs [null, 64, null]
+        //      call %catch_func(%data, null)
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        // This structure follows the basic usage of throw/try/catch in LLVM.
+        // For example, compile this C++ snippet to see what LLVM generates:
+        //
+        //      struct rust_panic {
+        //          rust_panic(const rust_panic&);
+        //          ~rust_panic();
+        //
+        //          void* x[2];
+        //      };
+        //
+        //      int __rust_try(
+        //          void (*try_func)(void*),
+        //          void *data,
+        //          void (*catch_func)(void*, void*) noexcept
+        //      ) {
+        //          try {
+        //              try_func(data);
+        //              return 0;
+        //          } catch(rust_panic& a) {
+        //              catch_func(data, &a);
+        //              return 1;
+        //          } catch(...) {
+        //              catch_func(data, NULL);
+        //              return 1;
+        //          }
+        //      }
+        //
+        // More information can be found in libstd's seh.rs implementation.
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let slot = bx.alloca(bx.type_ptr(), ptr_align);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+
+        bx.switch_to_block(normal);
+        bx.ret(bx.const_i32(0));
+
+        bx.switch_to_block(catchswitch);
+        let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);
+
+        // We can't use the TypeDescriptor defined in libpanic_unwind because it
+        // might be in another DLL and the SEH encoding only supports specifying
+        // a TypeDescriptor from the current module.
+        //
+        // However this isn't an issue since the MSVC runtime uses string
+        // comparison on the type name to match TypeDescriptors rather than
+        // pointer equality.
+        //
+        // So instead we generate a new TypeDescriptor in each module that uses
+        // `try` and let the linker merge duplicate definitions in the same
+        // module.
+        //
+        // When modifying, make sure that the type_name string exactly matches
+        // the one used in library/panic_unwind/src/seh.rs.
+        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
+        let type_name = bx.const_bytes(b"rust_panic\0");
+        let type_info =
+            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
+        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
+        unsafe {
+            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
+            llvm::SetUniqueComdat(bx.llmod, tydesc);
+            llvm::LLVMSetInitializer(tydesc, type_info);
+        }
+
+        // The flag value of 8 indicates that we are catching the exception by
+        // reference instead of by value. We can't use catch by value because
+        // that requires copying the exception object, which we don't support
+        // since our exception object effectively contains a Box.
+        //
+        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
+        bx.switch_to_block(catchpad_rust);
+        let flags = bx.const_i32(8);
+        let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
+        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.catch_ret(&funclet, caught);
+
+        // The flag value of 64 indicates a "catch-all".
+        bx.switch_to_block(catchpad_foreign);
+        let flags = bx.const_i32(64);
+        let null = bx.const_null(bx.type_ptr());
+        let funclet = bx.catch_pad(cs, &[null, flags, null]);
+        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
+        bx.catch_ret(&funclet, caught);
+
+        bx.switch_to_block(caught);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// WASM's definition of the `rust_try` function.
+fn codegen_wasm_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        bx.set_personality_fn(bx.eh_personality());
+
+        let normal = bx.append_sibling_block("normal");
+        let catchswitch = bx.append_sibling_block("catchswitch");
+        let catchpad = bx.append_sibling_block("catchpad");
+        let caught = bx.append_sibling_block("caught");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+
+        // We're generating an IR snippet that looks like:
+        //
+        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
+        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
+        //
+        //   normal:
+        //      ret i32 0
+        //
+        //   catchswitch:
+        //      %cs = catchswitch within none [%catchpad] unwind to caller
+        //
+        //   catchpad:
+        //      %tok = catchpad within %cs [null]
+        //      %ptr = call @llvm.wasm.get.exception(token %tok)
+        //      %sel = call @llvm.wasm.get.ehselector(token %tok)
+        //      call %catch_func(%data, %ptr)
+        //      catchret from %tok to label %caught
+        //
+        //   caught:
+        //      ret i32 1
+        //   }
+        //
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+
+        bx.switch_to_block(normal);
+        bx.ret(bx.const_i32(0));
+
+        bx.switch_to_block(catchswitch);
+        let cs = bx.catch_switch(None, None, &[catchpad]);
+
+        bx.switch_to_block(catchpad);
+        let null = bx.const_null(bx.type_ptr());
+        let funclet = bx.catch_pad(cs, &[null]);
+
+        let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
+        let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
+
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.catch_ret(&funclet, caught);
+
+        bx.switch_to_block(caught);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Definition of the standard `try` function for Rust using the GNU-like model
+// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
+// instructions).
+//
+// This codegen is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+fn codegen_gnu_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        // Codegens the shims described above:
+        //
+        //   bx:
+        //      invoke %try_func(%data) normal %normal unwind %catch
+        //
+        //   normal:
+        //      ret 0
+        //
+        //   catch:
+        //      (%ptr, _) = landingpad
+        //      call %catch_func(%data, %ptr)
+        //      ret 1
+        let then = bx.append_sibling_block("then");
+        let catch = bx.append_sibling_block("catch");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
+
+        bx.switch_to_block(then);
+        bx.ret(bx.const_i32(0));
+
+        // Type indicator for the exception being thrown.
+        //
+        // The first value in this tuple is a pointer to the exception object
+        // being thrown. The second value is a "selector" indicating which of
+        // the landing pad clauses the exception's type had been matched to.
+        // rust_try ignores the selector.
+        bx.switch_to_block(catch);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
+        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
+        let tydesc = bx.const_null(bx.type_ptr());
+        bx.add_clause(vals, tydesc);
+        let ptr = bx.extract_value(vals, 0);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Variant of codegen_gnu_try used for emscripten where Rust panics are
+// implemented using C++ exceptions. Here we use exceptions of a specific type
+// (`struct rust_panic`) to represent Rust panics.
+fn codegen_emcc_try<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    try_func: &'ll Value,
+    data: &'ll Value,
+    catch_func: &'ll Value,
+    dest: &'ll Value,
+) {
+    let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+        // Codegens the shims described above:
+        //
+        //   bx:
+        //      invoke %try_func(%data) normal %normal unwind %catch
+        //
+        //   normal:
+        //      ret 0
+        //
+        //   catch:
+        //      (%ptr, %selector) = landingpad
+        //      %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
+        //      %is_rust_panic = %selector == %rust_typeid
+        //      %catch_data = alloca { i8*, i8 }
+        //      %catch_data[0] = %ptr
+        //      %catch_data[1] = %is_rust_panic
+        //      call %catch_func(%data, %catch_data)
+        //      ret 1
+        let then = bx.append_sibling_block("then");
+        let catch = bx.append_sibling_block("catch");
+
+        let try_func = llvm::get_param(bx.llfn(), 0);
+        let data = llvm::get_param(bx.llfn(), 1);
+        let catch_func = llvm::get_param(bx.llfn(), 2);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
+
+        bx.switch_to_block(then);
+        bx.ret(bx.const_i32(0));
+
+        // Type indicator for the exception being thrown.
+        //
+        // The first value in this tuple is a pointer to the exception object
+        // being thrown. The second value is a "selector" indicating which of
+        // the landing pad clauses the exception's type had been matched to.
+        bx.switch_to_block(catch);
+        let tydesc = bx.eh_catch_typeinfo();
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
+        let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
+        bx.add_clause(vals, tydesc);
+        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
+        let ptr = bx.extract_value(vals, 0);
+        let selector = bx.extract_value(vals, 1);
+
+        // Check if the typeid we got is the one for a Rust panic.
+        let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
+        let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
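+        // The comparison yields an `i1`; widen it to the `i8` used to
+        // represent `bool` so it can be stored into `catch_data` below.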
+        let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
+
+        // We need to pass two values to catch_func (ptr and is_rust_panic), so
+        // create an alloca and pass a pointer to that.
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+        let i8_align = bx.tcx().data_layout.i8_align.abi;
+        let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
+        let catch_data = bx.alloca(catch_data_type, ptr_align);
+        let catch_data_0 =
+            bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+        bx.store(ptr, catch_data_0, ptr_align);
+        let catch_data_1 =
+            bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        bx.store(is_rust_panic, catch_data_1, i8_align);
+
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
+        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
+        bx.ret(bx.const_i32(1));
+    });
+
+    // Note that no invoke is used here because by definition this function
+    // can't panic (that's what it's catching).
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
+    bx.store(ret, dest, i32_align);
+}
+
+// Helper function that declares a shim function, appends its entry block, and
+// hands a `Builder` for that block to the given closure to fill in the body.
+// This is currently primarily used for the `try` intrinsic functions above.
+fn gen_fn<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    name: &str,
+    rust_fn_sig: ty::PolyFnSig<'tcx>,
+    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> (&'ll Type, &'ll Value) {
+    let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
+    let llty = fn_abi.llvm_type(cx);
+    let llfn = cx.declare_fn(name, fn_abi, None);
+    cx.set_frame_pointer_type(llfn);
+    cx.apply_target_cpu_attr(llfn);
+    // FIXME(eddyb) find a nicer way to do this.
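+    // Internal linkage keeps this shim private to the current codegen unit, so
+    // each unit can emit its own copy without symbol clashes at link time.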
+    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
+    let llbb = Builder::append_block(cx, llfn, "entry-block");
+    let bx = Builder::build(cx, llbb);
+    codegen(bx);
+    (llty, llfn)
+}
+
+// Helper function used to get a handle to the `__rust_try` function used to
+// catch exceptions.
+//
+// This function is only generated once and is then cached.
+fn get_rust_try_fn<'ll, 'tcx>(
+    cx: &CodegenCx<'ll, 'tcx>,
+    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> (&'ll Type, &'ll Value) {
+    if let Some(llfn) = cx.rust_try_fn.get() {
+        return llfn;
+    }
+
+    // Define the type up front for the signature of the rust_try function.
+    let tcx = cx.tcx;
+    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
+    // `unsafe fn(*mut i8) -> ()`
+    let try_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p],
+            Ty::new_unit(tcx),
+            false,
+            hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
+    // `unsafe fn(*mut i8, *mut i8) -> ()`
+    let catch_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p, i8p],
+            Ty::new_unit(tcx),
+            false,
+            hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
+    // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
+    let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
+        [try_fn_ty, i8p, catch_fn_ty],
+        tcx.types.i32,
+        false,
+        hir::Unsafety::Unsafe,
+        Abi::Rust,
+    ));
+    let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
+    cx.rust_try_fn.set(Some(rust_try));
+    rust_try
+}
+
+fn generic_simd_intrinsic<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    name: Symbol,
+    callee_ty: Ty<'tcx>,
+    fn_args: GenericArgsRef<'tcx>,
+    args: &[OperandRef<'tcx, &'ll Value>],
+    ret_ty: Ty<'tcx>,
+    llret_ty: &'ll Type,
+    span: Span,
+) -> Result<&'ll Value, ()> {
+    macro_rules! return_error {
+        ($diag: expr) => {{
+            bx.sess().dcx().emit_err($diag);
+            return Err(());
+        }};
+    }
+
+    macro_rules! require {
+        ($cond: expr, $diag: expr) => {
+            if !$cond {
+                return_error!($diag);
+            }
+        };
+    }
+
+    macro_rules! require_simd {
+        ($ty: expr, $variant:ident) => {{
+            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
+            $ty.simd_size_and_type(bx.tcx())
+        }};
+    }
+
+    let tcx = bx.tcx();
+    let sig =
+        tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+    let arg_tys = sig.inputs();
+
+    // Vectors must be immediates (non-power-of-2 #[repr(packed)] are not)
+    for (ty, arg) in arg_tys.iter().zip(args) {
+        if ty.is_simd() && !matches!(arg.val, OperandValue::Immediate(_)) {
+            return_error!(InvalidMonomorphization::SimdArgument { span, name, ty: *ty });
+        }
+    }
+
+    if name == sym::simd_select_bitmask {
+        let (len, _) = require_simd!(arg_tys[1], SimdArgument);
+
+        let expected_int_bits = (len.max(8) - 1).next_power_of_two();
+        let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
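+        // The mask is accepted either as a signed/unsigned integer of exactly
+        // `expected_int_bits` bits or as a `[u8; expected_bytes]` array; any
+        // other type is rejected below.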
+
+        let mask_ty = arg_tys[0];
+        let mask = match mask_ty.kind() {
+            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+            ty::Array(elem, len)
+                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
+                        == Some(expected_bytes) =>
+            {
+                let place = PlaceRef::alloca(bx, args[0].layout);
+                args[0].val.store(bx, place);
+                let int_ty = bx.type_ix(expected_bytes * 8);
+                bx.load(int_ty, place.llval, Align::ONE)
+            }
+            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
+                span,
+                name,
+                mask_ty,
+                expected_int_bits,
+                expected_bytes
+            }),
+        };
+
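+        // Reinterpret the integer mask as a per-lane boolean vector: truncate
+        // it to exactly `len` bits, then bitcast that integer to `<len x i1>`
+        // so each bit drives one lane of the select below.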
+        let i1 = bx.type_i1();
+        let im = bx.type_ix(len);
+        let i1xn = bx.type_vector(i1, len);
+        let m_im = bx.trunc(mask, im);
+        let m_i1s = bx.bitcast(m_im, i1xn);
+        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+    }
+
+    // every intrinsic below takes a SIMD vector as its first argument
+    let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
+    let in_ty = arg_tys[0];
+
+    let comparison = match name {
+        sym::simd_eq => Some(hir::BinOpKind::Eq),
+        sym::simd_ne => Some(hir::BinOpKind::Ne),
+        sym::simd_lt => Some(hir::BinOpKind::Lt),
+        sym::simd_le => Some(hir::BinOpKind::Le),
+        sym::simd_gt => Some(hir::BinOpKind::Gt),
+        sym::simd_ge => Some(hir::BinOpKind::Ge),
+        _ => None,
+    };
+
+    if let Some(cmp_op) = comparison {
+        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
+
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+        require!(
+            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+            InvalidMonomorphization::ReturnIntegerType { span, name, ret_ty, out_ty }
+        );
+
+        return Ok(compare_simd_types(
+            bx,
+            args[0].immediate(),
+            args[1].immediate(),
+            in_elem,
+            llret_ty,
+            cmp_op,
+        ));
+    }
+
+    if name == sym::simd_shuffle_generic {
+        let idx = fn_args[2]
+            .expect_const()
+            .eval(tcx, ty::ParamEnv::reveal_all(), span)
+            .unwrap()
+            .unwrap_branch();
+        let n = idx.len() as u64;
+
+        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let total_len = in_len * 2;
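+        // A shuffle selects lanes from the concatenation of both input
+        // vectors, so valid indices range over `0..2 * in_len`.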
+
+        let indices: Option<Vec<_>> = idx
+            .iter()
+            .enumerate()
+            .map(|(arg_idx, val)| {
+                let idx = val.unwrap_leaf().try_to_i32().unwrap();
+                if idx >= i32::try_from(total_len).unwrap() {
+                    bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
+                        span,
+                        name,
+                        arg_idx: arg_idx as u64,
+                        total_len: total_len.into(),
+                    });
+                    None
+                } else {
+                    Some(bx.const_i32(idx))
+                }
+            })
+            .collect();
+        let Some(indices) = indices else {
+            return Ok(bx.const_null(llret_ty));
+        };
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            bx.const_vector(&indices),
+        ));
+    }
+
+    if name == sym::simd_shuffle {
+        // Make sure this is actually an array, since typeck only checks the length-suffixed
+        // version of this intrinsic.
+        let n: u64 = match args[2].layout.ty.kind() {
+            ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+                len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
+                    || span_bug!(span, "could not evaluate shuffle index array length"),
+                )
+            }
+            _ => return_error!(InvalidMonomorphization::SimdShuffle {
+                span,
+                name,
+                ty: args[2].layout.ty
+            }),
+        };
+
+        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
+        require!(
+            out_len == n,
+            InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
+        );
+        require!(
+            in_elem == out_ty,
+            InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty }
+        );
+
+        let total_len = u128::from(in_len) * 2;
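+        // As above, indices may refer to any lane of either input vector.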
+
+        let vector = args[2].immediate();
+
+        let indices: Option<Vec<_>> = (0..n)
+            .map(|i| {
+                let arg_idx = i;
+                let val = bx.const_get_elt(vector, i as u64);
+                match bx.const_to_opt_u128(val, true) {
+                    None => {
+                        bug!("typeck should have already ensured that these are const")
+                    }
+                    Some(idx) if idx >= total_len => {
+                        bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
+                            span,
+                            name,
+                            arg_idx,
+                            total_len,
+                        });
+                        None
+                    }
+                    Some(idx) => Some(bx.const_i32(idx as i32)),
+                }
+            })
+            .collect();
+        let Some(indices) = indices else {
+            return Ok(bx.const_null(llret_ty));
+        };
+
+        return Ok(bx.shuffle_vector(
+            args[0].immediate(),
+            args[1].immediate(),
+            bx.const_vector(&indices),
+        ));
+    }
+
+    if name == sym::simd_insert {
+        require!(
+            in_elem == arg_tys[2],
+            InvalidMonomorphization::InsertedType {
+                span,
+                name,
+                in_elem,
+                in_ty,
+                out_ty: arg_tys[2]
+            }
+        );
+        let idx = bx
+            .const_to_opt_u128(args[1].immediate(), false)
+            .expect("typeck should have ensure that this is a const");
+        if idx >= in_len.into() {
+            bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
+                span,
+                name,
+                arg_idx: 1,
+                total_len: in_len.into(),
+            });
+            return Ok(bx.const_null(llret_ty));
+        }
+        return Ok(bx.insert_element(
+            args[0].immediate(),
+            args[2].immediate(),
+            bx.const_i32(idx as i32),
+        ));
+    }
+    if name == sym::simd_extract {
+        require!(
+            ret_ty == in_elem,
+            InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+        );
+        let idx = bx
+            .const_to_opt_u128(args[1].immediate(), false)
+            .expect("typeck should have ensure that this is a const");
+        if idx >= in_len.into() {
+            bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds {
+                span,
+                name,
+                arg_idx: 1,
+                total_len: in_len.into(),
+            });
+            return Ok(bx.const_null(llret_ty));
+        }
+        return Ok(bx.extract_element(args[0].immediate(), bx.const_i32(idx as i32)));
+    }
+
+    if name == sym::simd_select {
+        let m_elem_ty = in_elem;
+        let m_len = in_len;
+        let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
+        require!(
+            m_len == v_len,
+            InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
+        );
+        match m_elem_ty.kind() {
+            ty::Int(_) => {}
+            _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
+        }
+        // truncate the mask to a vector of i1s
+        let i1 = bx.type_i1();
+        let i1xn = bx.type_vector(i1, m_len as u64);
+        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
+        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+    }
+
+    if name == sym::simd_bitmask {
+        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+        // vector mask and returns the most significant bit (MSB) of each lane in the form
+        // of either:
+        // * an unsigned integer
+        // * an array of `u8`
+        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
+        //
+        // The bit order of the result depends on the byte endianness, LSB-first for little
+        // endian and MSB-first for big endian.
+        let expected_int_bits = in_len.max(8);
+        let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+
+        // Integer vector <i{in_bitwidth} x in_len>:
+        let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
+            ty::Int(i) => (
+                args[0].immediate(),
+                i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+            ),
+            ty::Uint(i) => (
+                args[0].immediate(),
+                i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+            ),
+            _ => return_error!(InvalidMonomorphization::VectorArgument {
+                span,
+                name,
+                in_ty,
+                in_elem
+            }),
+        };
+
+        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
+        let shift_indices =
+            vec![
+                bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
+                in_len as _
+            ];
+        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
+        // Truncate vector to an <i1 x N>
+        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
+        // Bitcast <i1 x N> to iN:
+        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
+
+        match ret_ty.kind() {
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
+                // Zero-extend iN to the bitmask type:
+                return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
+            }
+            ty::Array(elem, len)
+                if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
+                        == Some(expected_bytes) =>
+            {
+                // Zero-extend iN to the array length:
+                let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
+
+                // Convert the integer to a byte array
+                let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+                bx.store(ze, ptr, Align::ONE);
+                let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
+                return Ok(bx.load(array_ty, ptr, Align::ONE));
+            }
+            _ => return_error!(InvalidMonomorphization::CannotReturn {
+                span,
+                name,
+                ret_ty,
+                expected_int_bits,
+                expected_bytes
+            }),
+        }
+    }
+
+    fn simd_simple_float_intrinsic<'ll, 'tcx>(
+        name: Symbol,
+        in_elem: Ty<'_>,
+        in_ty: Ty<'_>,
+        in_len: u64,
+        bx: &mut Builder<'_, 'll, 'tcx>,
+        span: Span,
+        args: &[OperandRef<'tcx, &'ll Value>],
+    ) -> Result<&'ll Value, ()> {
+        macro_rules! return_error {
+            ($diag: expr) => {{
+                bx.sess().dcx().emit_err($diag);
+                return Err(());
+            }};
+        }
+
+        let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
+            let elem_ty = bx.cx.type_float_from_ty(*f);
+            match f.bit_width() {
+                32 => ("f32", elem_ty),
+                64 => ("f64", elem_ty),
+                _ => return_error!(InvalidMonomorphization::FloatingPointVector {
+                    span,
+                    name,
+                    f_ty: *f,
+                    in_ty,
+                }),
+            }
+        } else {
+            return_error!(InvalidMonomorphization::FloatingPointType { span, name, in_ty });
+        };
+
+        let vec_ty = bx.type_vector(elem_ty, in_len);
+
+        let (intr_name, fn_ty) = match name {
+            sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+            sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
+            sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
+            sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
+            sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
+            _ => return_error!(InvalidMonomorphization::UnrecognizedIntrinsic { span, name }),
+        };
+        let llvm_name = &format!("llvm.{intr_name}.v{in_len}{elem_ty_str}");
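+        // e.g. `simd_fsqrt` on a vector of four `f32`s maps to `llvm.sqrt.v4f32`.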
+        let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
+        let c = bx.call(
+            fn_ty,
+            None,
+            None,
+            f,
+            &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+            None,
+        );
+        Ok(c)
+    }
+
+    if std::matches!(
+        name,
+        sym::simd_ceil
+            | sym::simd_fabs
+            | sym::simd_fcos
+            | sym::simd_fexp2
+            | sym::simd_fexp
+            | sym::simd_flog10
+            | sym::simd_flog2
+            | sym::simd_flog
+            | sym::simd_floor
+            | sym::simd_fma
+            | sym::simd_fpow
+            | sym::simd_fpowi
+            | sym::simd_fsin
+            | sym::simd_fsqrt
+            | sym::simd_round
+            | sym::simd_trunc
+    ) {
+        return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
+    }
+
+    // FIXME: use:
+    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
+    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
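+    // Builds the suffix LLVM uses to mangle overloaded intrinsic names for a
+    // vector type, e.g. `v4i32`, `v8f64`, or `v4p0` for a vector of pointers.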
+    fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
+        match *elem_ty.kind() {
+            ty::Int(v) => format!(
+                "v{}i{}",
+                vec_len,
+                // Normalize to prevent crash if v: IntTy::Isize
+                v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
+            ),
+            ty::Uint(v) => format!(
+                "v{}i{}",
+                vec_len,
+                // Normalize to prevent crash if v: UintTy::Usize
+                v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
+            ),
+            ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
+            ty::RawPtr(_) => format!("v{}p0", vec_len),
+            _ => unreachable!(),
+        }
+    }
+
+    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
+        let elem_ty = match *elem_ty.kind() {
+            ty::Int(v) => cx.type_int_from_ty(v),
+            ty::Uint(v) => cx.type_uint_from_ty(v),
+            ty::Float(v) => cx.type_float_from_ty(v),
+            ty::RawPtr(_) => cx.type_ptr(),
+            _ => unreachable!(),
+        };
+        cx.type_vector(elem_ty, vec_len)
+    }
+
+    if name == sym::simd_gather {
+        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
+        //             mask: <N x i{M}>) -> <N x T>
+        // * N: number of elements in the input vectors
+        // * T: type of the element to load
+        // * M: any integer width is supported, will be truncated to i1
+
+        // All types must be simd vector types
+
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
+        let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+        // The element type of the third argument must be a signed integer type of any width:
+        let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+        require_simd!(ret_ty, SimdReturn);
+
+        // Of the same length:
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::SecondArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[1],
+                out_len
+            }
+        );
+        require!(
+            in_len == out_len2,
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[2],
+                out_len: out_len2
+            }
+        );
+
+        // The return type must match the first argument type
+        require!(
+            ret_ty == in_ty,
+            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
+        );
+
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Not,
+            }
+        );
+
+        match element_ty2.kind() {
+            ty::Int(_) => (),
+            _ => {
+                return_error!(InvalidMonomorphization::ThirdArgElementType {
+                    span,
+                    name,
+                    expected_element: element_ty2,
+                    third_arg: arg_tys[2]
+                });
+            }
+        }
+
+        // Alignment of T, must be a constant integer value:
+        let alignment_ty = bx.type_i32();
+        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+        // Truncate the mask vector to a vector of i1s:
+        let (mask, mask_ty) = {
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, in_len);
+            (bx.trunc(args[2].immediate(), i1xn), i1xn)
+        };
+
+        // Type of the vector of pointers:
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
+
+        // Type of the vector of elements:
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
+
+        let llvm_intrinsic =
+            format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
+        let fn_ty = bx.type_func(
+            &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
+            llvm_elem_vec_ty,
+        );
+        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+        let v = bx.call(
+            fn_ty,
+            None,
+            None,
+            f,
+            &[args[1].immediate(), alignment, mask, args[0].immediate()],
+            None,
+        );
+        return Ok(v);
+    }
+
+    if name == sym::simd_masked_load {
+        // simd_masked_load(mask: <N x i{M}>, pointer: *_ T, values: <N x T>) -> <N x T>
+        // * N: number of elements in the input vectors
+        // * T: type of the element to load
+        // * M: any integer width is supported, will be truncated to i1
+        // Loads contiguous elements from memory behind `pointer`, but only for
+        // those lanes whose `mask` bit is enabled.
+        // The memory addresses corresponding to the “off” lanes are not accessed.
+
+        // The element type of the "mask" argument must be a signed integer type of any width
+        let mask_ty = in_ty;
+        let (mask_len, mask_elem) = (in_len, in_elem);
+
+        // The second argument must be a pointer matching the element type
+        let pointer_ty = arg_tys[1];
+
+        // The last argument is a passthrough vector providing values for disabled lanes
+        let values_ty = arg_tys[2];
+        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
+
+        require_simd!(ret_ty, SimdReturn);
+
+        // Of the same length:
+        require!(
+            values_len == mask_len,
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len: mask_len,
+                in_ty: mask_ty,
+                arg_ty: values_ty,
+                out_len: values_len
+            }
+        );
+
+        // The return type must match the last argument type
+        require!(
+            ret_ty == values_ty,
+            InvalidMonomorphization::ExpectedReturnType { span, name, in_ty: values_ty, ret_ty }
+        );
+
+        require!(
+            matches!(
+                pointer_ty.kind(),
+                ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: values_elem,
+                second_arg: pointer_ty,
+                in_elem: values_elem,
+                in_ty: values_ty,
+                mutability: ExpectedPointerMutability::Not,
+            }
+        );
+
+        require!(
+            matches!(mask_elem.kind(), ty::Int(_)),
+            InvalidMonomorphization::ThirdArgElementType {
+                span,
+                name,
+                expected_element: values_elem,
+                third_arg: mask_ty,
+            }
+        );
+
+        // Alignment of T, must be a constant integer value:
+        let alignment_ty = bx.type_i32();
+        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
+
+        // Truncate the mask vector to a vector of i1s:
+        let (mask, mask_ty) = {
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, mask_len);
+            (bx.trunc(args[0].immediate(), i1xn), i1xn)
+        };
+
+        let llvm_pointer = bx.type_ptr();
+
+        // Type of the vector of elements:
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
+
+        let llvm_intrinsic = format!("llvm.masked.load.{llvm_elem_vec_str}.p0");
+        let fn_ty = bx
+            .type_func(&[llvm_pointer, alignment_ty, mask_ty, llvm_elem_vec_ty], llvm_elem_vec_ty);
+        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+        let v = bx.call(
+            fn_ty,
+            None,
+            None,
+            f,
+            &[args[1].immediate(), alignment, mask, args[2].immediate()],
+            None,
+        );
+        return Ok(v);
+    }
+
+    if name == sym::simd_masked_store {
+        // simd_masked_store(mask: <N x i{M}>, pointer: *mut T, values: <N x T>) -> ()
+        // * N: number of elements in the input vectors
+        // * T: type of the element to store
+        // * M: any integer width is supported, will be truncated to i1
+        // Stores contiguous elements to memory behind `pointer`, but only for
+        // those lanes whose `mask` bit is enabled.
+        // The memory addresses corresponding to the “off” lanes are not accessed.
+
+        // The element type of the "mask" argument must be a signed integer type of any width
+        let mask_ty = in_ty;
+        let (mask_len, mask_elem) = (in_len, in_elem);
+
+        // The second argument must be a pointer matching the element type
+        let pointer_ty = arg_tys[1];
+
+        // The last argument specifies the values to store to memory
+        let values_ty = arg_tys[2];
+        let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
+
+        // Of the same length:
+        require!(
+            values_len == mask_len,
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len: mask_len,
+                in_ty: mask_ty,
+                arg_ty: values_ty,
+                out_len: values_len
+            }
+        );
+
+        // The second argument must be a mutable pointer type matching the element type
+        require!(
+            matches!(
+                pointer_ty.kind(),
+                ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() && p.mutbl.is_mut()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: values_elem,
+                second_arg: pointer_ty,
+                in_elem: values_elem,
+                in_ty: values_ty,
+                mutability: ExpectedPointerMutability::Mut,
+            }
+        );
+
+        require!(
+            matches!(mask_elem.kind(), ty::Int(_)),
+            InvalidMonomorphization::ThirdArgElementType {
+                span,
+                name,
+                expected_element: values_elem,
+                third_arg: mask_ty,
+            }
+        );
+
+        // Alignment of T, must be a constant integer value:
+        let alignment_ty = bx.type_i32();
+        let alignment = bx.const_i32(bx.align_of(values_elem).bytes() as i32);
+
+        // Truncate the mask vector to a vector of i1s:
+        let (mask, mask_ty) = {
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, in_len);
+            (bx.trunc(args[0].immediate(), i1xn), i1xn)
+        };
+
+        let ret_t = bx.type_void();
+
+        let llvm_pointer = bx.type_ptr();
+
+        // Type of the vector of elements:
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, values_elem, values_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, values_elem, values_len);
+
+        let llvm_intrinsic = format!("llvm.masked.store.{llvm_elem_vec_str}.p0");
+        let fn_ty = bx.type_func(&[llvm_elem_vec_ty, llvm_pointer, alignment_ty, mask_ty], ret_t);
+        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+        let v = bx.call(
+            fn_ty,
+            None,
+            None,
+            f,
+            &[args[2].immediate(), args[1].immediate(), alignment, mask],
+            None,
+        );
+        return Ok(v);
+    }
+
+    if name == sym::simd_scatter {
+        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
+        //             mask: <N x i{M}>) -> ()
+        // * N: number of elements in the input vectors
+        // * T: type of the element to store
+        // * M: any integer width is supported, will be truncated to i1
+
+        // All types must be simd vector types
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
+        let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+        let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+
+        // Of the same length:
+        require!(
+            in_len == element_len1,
+            InvalidMonomorphization::SecondArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[1],
+                out_len: element_len1
+            }
+        );
+        require!(
+            in_len == element_len2,
+            InvalidMonomorphization::ThirdArgumentLength {
+                span,
+                name,
+                in_len,
+                in_ty,
+                arg_ty: arg_tys[2],
+                out_len: element_len2
+            }
+        );
+
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p)
+                    if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Mut,
+            }
+        );
+
+        // The element type of the third argument must be a signed integer type of any width:
+        match element_ty2.kind() {
+            ty::Int(_) => (),
+            _ => {
+                return_error!(InvalidMonomorphization::ThirdArgElementType {
+                    span,
+                    name,
+                    expected_element: element_ty2,
+                    third_arg: arg_tys[2]
+                });
+            }
+        }
+
+        // Alignment of T, must be a constant integer value:
+        let alignment_ty = bx.type_i32();
+        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+        // Truncate the mask vector to a vector of i1s:
+        let (mask, mask_ty) = {
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, in_len);
+            (bx.trunc(args[2].immediate(), i1xn), i1xn)
+        };
+
+        let ret_t = bx.type_void();
+
+        // Type of the vector of pointers:
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);
+
+        // Type of the vector of elements:
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);
+
+        let llvm_intrinsic =
+            format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
+        let fn_ty =
+            bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
+        let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+        let v = bx.call(
+            fn_ty,
+            None,
+            None,
+            f,
+            &[args[0].immediate(), args[1].immediate(), alignment, mask],
+            None,
+        );
+        return Ok(v);
+    }
+
+    macro_rules! arith_red {
+        ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
+         $identity:expr) => {
+            if name == sym::$name {
+                require!(
+                    ret_ty == in_elem,
+                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+                );
+                return match in_elem.kind() {
+                    ty::Int(_) | ty::Uint(_) => {
+                        let r = bx.$integer_reduce(args[0].immediate());
+                        if $ordered {
+                            // if overflow occurs, the result is the
+                            // mathematical result modulo 2^n:
+                            Ok(bx.$op(args[1].immediate(), r))
+                        } else {
+                            Ok(bx.$integer_reduce(args[0].immediate()))
+                        }
+                    }
+                    ty::Float(f) => {
+                        let acc = if $ordered {
+                            // ordered arithmetic reductions take an accumulator
+                            args[1].immediate()
+                        } else {
+                            // unordered arithmetic reductions use the identity accumulator
+                            match f.bit_width() {
+                                32 => bx.const_real(bx.type_f32(), $identity),
+                                64 => bx.const_real(bx.type_f64(), $identity),
+                                v => return_error!(
+                                    InvalidMonomorphization::UnsupportedSymbolOfSize {
+                                        span,
+                                        name,
+                                        symbol: sym::$name,
+                                        in_ty,
+                                        in_elem,
+                                        size: v,
+                                        ret_ty
+                                    }
+                                ),
+                            }
+                        };
+                        Ok(bx.$float_reduce(acc, args[0].immediate()))
+                    }
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
+                        in_ty,
+                        in_elem,
+                        ret_ty
+                    }),
+                };
+            }
+        };
+    }
+
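+    // The `_ordered` reductions take an explicit accumulator as their second
+    // argument; the `_unordered` float reductions instead start from the
+    // identity element (0.0 for add, 1.0 for mul) and may reassociate.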
+    arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
+    arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
+    arith_red!(
+        simd_reduce_add_unordered: vector_reduce_add,
+        vector_reduce_fadd_reassoc,
+        false,
+        add,
+        0.0
+    );
+    arith_red!(
+        simd_reduce_mul_unordered: vector_reduce_mul,
+        vector_reduce_fmul_reassoc,
+        false,
+        mul,
+        1.0
+    );
+
+    macro_rules! minmax_red {
+        ($name:ident: $int_red:ident, $float_red:ident) => {
+            if name == sym::$name {
+                require!(
+                    ret_ty == in_elem,
+                    InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+                );
+                return match in_elem.kind() {
+                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
+                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
+                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
+                        in_ty,
+                        in_elem,
+                        ret_ty
+                    }),
+                };
+            }
+        };
+    }
+
+    minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
+    minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
+
+    macro_rules! bitwise_red {
+        ($name:ident : $red:ident, $boolean:expr) => {
+            if name == sym::$name {
+                let input = if !$boolean {
+                    require!(
+                        ret_ty == in_elem,
+                        InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
+                    );
+                    args[0].immediate()
+                } else {
+                    match in_elem.kind() {
+                        ty::Int(_) | ty::Uint(_) => {}
+                        _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                            span,
+                            name,
+                            symbol: sym::$name,
+                            in_ty,
+                            in_elem,
+                            ret_ty
+                        }),
+                    }
+
+                    // boolean reductions operate on vectors of i1s:
+                    let i1 = bx.type_i1();
+                    let i1xn = bx.type_vector(i1, in_len as u64);
+                    bx.trunc(args[0].immediate(), i1xn)
+                };
+                return match in_elem.kind() {
+                    ty::Int(_) | ty::Uint(_) => {
+                        let r = bx.$red(input);
+                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
+                    }
+                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
+                        span,
+                        name,
+                        symbol: sym::$name,
+                        in_ty,
+                        in_elem,
+                        ret_ty
+                    }),
+                };
+            }
+        };
+    }
+
+    bitwise_red!(simd_reduce_and: vector_reduce_and, false);
+    bitwise_red!(simd_reduce_or: vector_reduce_or, false);
+    bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
+    bitwise_red!(simd_reduce_all: vector_reduce_and, true);
+    bitwise_red!(simd_reduce_any: vector_reduce_or, true);
+
+    if name == sym::simd_cast_ptr {
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+
+        match in_elem.kind() {
+            ty::RawPtr(p) => {
+                let metadata = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+                    bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
+                });
+                require!(
+                    metadata.is_unit(),
+                    InvalidMonomorphization::CastFatPointer { span, name, ty: in_elem }
+                );
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
+            }
+        }
+        match out_elem.kind() {
+            ty::RawPtr(p) => {
+                let metadata = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+                    bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
+                });
+                require!(
+                    metadata.is_unit(),
+                    InvalidMonomorphization::CastFatPointer { span, name, ty: out_elem }
+                );
+            }
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
+            }
+        }
+
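+        // Thin pointers all share one LLVM representation (an opaque `ptr`),
+        // so the cast needs no instruction; return the operand unchanged.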
+        return Ok(args[0].immediate());
+    }
+
+    if name == sym::simd_expose_addr {
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+
+        match in_elem.kind() {
+            ty::RawPtr(_) => {}
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
+            }
+        }
+        match out_elem.kind() {
+            ty::Uint(ty::UintTy::Usize) => {}
+            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: out_elem }),
+        }
+
+        return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
+    }
+
+    if name == sym::simd_from_exposed_addr {
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+
+        match in_elem.kind() {
+            ty::Uint(ty::UintTy::Usize) => {}
+            _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
+        }
+        match out_elem.kind() {
+            ty::RawPtr(_) => {}
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
+            }
+        }
+
+        return Ok(bx.inttoptr(args[0].immediate(), llret_ty));
+    }
+
+    if name == sym::simd_cast || name == sym::simd_as {
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
+        require!(
+            in_len == out_len,
+            InvalidMonomorphization::ReturnLengthInputType {
+                span,
+                name,
+                in_len,
+                in_ty,
+                ret_ty,
+                out_len
+            }
+        );
+        // casting cares about nominal type, not just structural type
+        if in_elem == out_elem {
+            return Ok(args[0].immediate());
+        }
+
+        #[derive(Copy, Clone)]
+        enum Sign {
+            Unsigned,
+            Signed,
+        }
+        use Sign::*;
+
+        enum Style {
+            Float,
+            Int(Sign),
+            Unsupported,
+        }
+
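+        // Classify the input and output element types so the match below can pick
+        // the appropriate LLVM cast instruction (trunc/ext, int<->float, fptrunc/fpext).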
+        let (in_style, in_width) = match in_elem.kind() {
+            // vectors of pointer-sized integers should've been
+            // disallowed before here, so this unwrap is safe.
+            ty::Int(i) => (
+                Style::Int(Signed),
+                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+            ),
+            ty::Uint(u) => (
+                Style::Int(Unsigned),
+                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+            ),
+            ty::Float(f) => (Style::Float, f.bit_width()),
+            _ => (Style::Unsupported, 0),
+        };
+        let (out_style, out_width) = match out_elem.kind() {
+            ty::Int(i) => (
+                Style::Int(Signed),
+                i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+            ),
+            ty::Uint(u) => (
+                Style::Int(Unsigned),
+                u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+            ),
+            ty::Float(f) => (Style::Float, f.bit_width()),
+            _ => (Style::Unsupported, 0),
+        };
+
+        match (in_style, out_style) {
+            (Style::Int(sign), Style::Int(_)) => {
+                return Ok(match in_width.cmp(&out_width) {
+                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
+                    Ordering::Equal => args[0].immediate(),
+                    Ordering::Less => match sign {
+                        Sign::Signed => bx.sext(args[0].immediate(), llret_ty),
+                        Sign::Unsigned => bx.zext(args[0].immediate(), llret_ty),
+                    },
+                });
+            }
+            (Style::Int(Sign::Signed), Style::Float) => {
+                return Ok(bx.sitofp(args[0].immediate(), llret_ty));
+            }
+            (Style::Int(Sign::Unsigned), Style::Float) => {
+                return Ok(bx.uitofp(args[0].immediate(), llret_ty));
+            }
+            (Style::Float, Style::Int(sign)) => {
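+                // `simd_as` follows Rust `as` semantics (saturating float-to-int casts),
+                // while `simd_cast` lowers to plain `fptoui`/`fptosi`.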
+                return Ok(match (sign, name == sym::simd_as) {
+                    (Sign::Unsigned, false) => bx.fptoui(args[0].immediate(), llret_ty),
+                    (Sign::Signed, false) => bx.fptosi(args[0].immediate(), llret_ty),
+                    (_, true) => bx.cast_float_to_int(
+                        matches!(sign, Sign::Signed),
+                        args[0].immediate(),
+                        llret_ty,
+                    ),
+                });
+            }
+            (Style::Float, Style::Float) => {
+                return Ok(match in_width.cmp(&out_width) {
+                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
+                    Ordering::Equal => args[0].immediate(),
+                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
+                });
+            }
+            _ => { /* Unsupported. Fallthrough. */ }
+        }
+        return_error!(InvalidMonomorphization::UnsupportedCast {
+            span,
+            name,
+            in_ty,
+            in_elem,
+            ret_ty,
+            out_elem
+        });
+    }
+    macro_rules! arith_binary {
+        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+            $(if name == sym::$name {
+                match in_elem.kind() {
+                    $($(ty::$p(_))|* => {
+                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+                    })*
+                    _ => {},
+                }
+                return_error!(
+                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+                );
+            })*
+        }
+    }
+    arith_binary! {
+        simd_add: Uint, Int => add, Float => fadd;
+        simd_sub: Uint, Int => sub, Float => fsub;
+        simd_mul: Uint, Int => mul, Float => fmul;
+        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+        simd_rem: Uint => urem, Int => srem, Float => frem;
+        simd_shl: Uint, Int => shl;
+        simd_shr: Uint => lshr, Int => ashr;
+        simd_and: Uint, Int => and;
+        simd_or: Uint, Int => or;
+        simd_xor: Uint, Int => xor;
+        simd_fmax: Float => maxnum;
+        simd_fmin: Float => minnum;
+
+    }
+    macro_rules! arith_unary {
+        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+            $(if name == sym::$name {
+                match in_elem.kind() {
+                    $($(ty::$p(_))|* => {
+                        return Ok(bx.$call(args[0].immediate()))
+                    })*
+                    _ => {},
+                }
+                return_error!(
+                    InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+                );
+            })*
+        }
+    }
+    arith_unary! {
+        simd_neg: Int => neg, Float => fneg;
+    }
+
+    // Unary integer intrinsics
+    if matches!(name, sym::simd_bswap | sym::simd_bitreverse | sym::simd_ctlz | sym::simd_cttz) {
+        let vec_ty = bx.cx.type_vector(
+            match *in_elem.kind() {
+                ty::Int(i) => bx.cx.type_int_from_ty(i),
+                ty::Uint(i) => bx.cx.type_uint_from_ty(i),
+                _ => return_error!(InvalidMonomorphization::UnsupportedOperation {
+                    span,
+                    name,
+                    in_ty,
+                    in_elem
+                }),
+            },
+            in_len as u64,
+        );
+        let intrinsic_name = match name {
+            sym::simd_bswap => "bswap",
+            sym::simd_bitreverse => "bitreverse",
+            sym::simd_ctlz => "ctlz",
+            sym::simd_cttz => "cttz",
+            _ => unreachable!(),
+        };
+        let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits();
+        let llvm_intrinsic = &format!("llvm.{}.v{}i{}", intrinsic_name, in_len, int_size,);
+
+        return if name == sym::simd_bswap && int_size == 8 {
+            // byte swap is a no-op for i8/u8
+            Ok(args[0].immediate())
+        } else if matches!(name, sym::simd_ctlz | sym::simd_cttz) {
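+            // `llvm.ctlz`/`llvm.cttz` take a trailing `i1` flag that makes a zero input
+            // poison when set; pass `false` so zero inputs produce a defined result.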
+            let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty);
+            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+            Ok(bx.call(
+                fn_ty,
+                None,
+                None,
+                f,
+                &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)],
+                None,
+            ))
+        } else {
+            let fn_ty = bx.type_func(&[vec_ty], vec_ty);
+            let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None))
+        };
+    }
+
+    if name == sym::simd_arith_offset {
+        // This also checks that the first operand is a ptr type.
+        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
+            span_bug!(span, "must be called with a vector of pointer types as first argument")
+        });
+        let layout = bx.layout_of(pointee.ty);
+        let ptrs = args[0].immediate();
+        // The second argument must be a ptr-sized integer.
+        // (We don't care about the signedness, this is wrapping anyway.)
+        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
+            span_bug!(
+                span,
+                "must be called with a vector of pointer-sized integers as second argument"
+            );
+        }
+        let offsets = args[1].immediate();
+
+        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
+    }
+
+    if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+        let lhs = args[0].immediate();
+        let rhs = args[1].immediate();
+        let is_add = name == sym::simd_saturating_add;
+        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+        let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+            ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
+            ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
+            _ => {
+                return_error!(InvalidMonomorphization::ExpectedVectorElementType {
+                    span,
+                    name,
+                    expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
+                    vector_type: arg_tys[0]
+                });
+            }
+        };
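+        // e.g. `llvm.sadd.sat.v4i32` for a signed saturating add over a 4 x i32 vector.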
+        let llvm_intrinsic = &format!(
+            "llvm.{}{}.sat.v{}i{}",
+            if signed { 's' } else { 'u' },
+            if is_add { "add" } else { "sub" },
+            in_len,
+            elem_width
+        );
+        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
+
+        let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
+        let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None);
+        return Ok(v);
+    }
+
+    span_bug!(span, "unknown SIMD intrinsic");
+}
+
+// Returns the width of an integer `Ty`, and whether it is signed.
+// Returns `None` if the type is not an integer.
+// FIXME: there are multiple copies of this function; investigate reusing one of the
+// existing ones.
+fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
+    match ty.kind() {
+        ty::Int(t) => {
+            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
+        }
+        ty::Uint(t) => {
+            Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
+        }
+        _ => None,
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
new file mode 100644
index 00000000000..c84461e53eb
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -0,0 +1,473 @@
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![allow(internal_features)]
+#![feature(rustdoc_internals)]
+#![doc(rust_logo)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(exact_size_is_empty)]
+#![feature(extern_types)]
+#![feature(hash_raw_entry)]
+#![feature(iter_intersperse)]
+#![feature(let_chains)]
+#![feature(impl_trait_in_assoc_type)]
+
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
+
+use back::owned_target_machine::OwnedTargetMachine;
+use back::write::{create_informational_target_machine, create_target_machine};
+
+use errors::ParseTargetMachineConfig;
+pub use llvm_util::target_features;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::back::write::{
+    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::ModuleCodegen;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::{DiagCtxt, ErrorGuaranteed, FatalError};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
+use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+
+use std::any::Any;
+use std::ffi::CStr;
+use std::io::Write;
+use std::mem::ManuallyDrop;
+
+mod back {
+    pub mod archive;
+    pub mod lto;
+    pub mod owned_target_machine;
+    mod profiling;
+    pub mod write;
+}
+
+mod abi;
+mod allocator;
+mod asm;
+mod attributes;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod errors;
+mod intrinsic;
+
+// The following is a workaround that replaces `pub mod llvm;` and that fixes issue 53912.
+#[path = "llvm/mod.rs"]
+mod llvm_;
+pub mod llvm {
+    pub use super::llvm_::*;
+}
+
+mod llvm_util;
+mod mono_item;
+mod type_;
+mod type_of;
+mod va_arg;
+mod value;
+
+rustc_fluent_macro::fluent_messages! { "../messages.ftl" }
+
+#[derive(Clone)]
+pub struct LlvmCodegenBackend(());
+
+struct TimeTraceProfiler {
+    enabled: bool,
+}
+
+impl TimeTraceProfiler {
+    fn new(enabled: bool) -> Self {
+        if enabled {
+            unsafe { llvm::LLVMRustTimeTraceProfilerInitialize() }
+        }
+        TimeTraceProfiler { enabled }
+    }
+}
+
+impl Drop for TimeTraceProfiler {
+    fn drop(&mut self) {
+        if self.enabled {
+            unsafe { llvm::LLVMRustTimeTraceProfilerFinishThread() }
+        }
+    }
+}
+
+impl ExtraBackendMethods for LlvmCodegenBackend {
+    fn codegen_allocator<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        module_name: &str,
+        kind: AllocatorKind,
+        alloc_error_handler_kind: AllocatorKind,
+    ) -> ModuleLlvm {
+        let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
+        unsafe {
+            allocator::codegen(tcx, &mut module_llvm, module_name, kind, alloc_error_handler_kind);
+        }
+        module_llvm
+    }
+    fn compile_codegen_unit(
+        &self,
+        tcx: TyCtxt<'_>,
+        cgu_name: Symbol,
+    ) -> (ModuleCodegen<ModuleLlvm>, u64) {
+        base::compile_codegen_unit(tcx, cgu_name)
+    }
+    fn target_machine_factory(
+        &self,
+        sess: &Session,
+        optlvl: OptLevel,
+        target_features: &[String],
+    ) -> TargetMachineFactoryFn<Self> {
+        back::write::target_machine_factory(sess, optlvl, target_features)
+    }
+
+    fn spawn_named_thread<F, T>(
+        time_trace: bool,
+        name: String,
+        f: F,
+    ) -> std::io::Result<std::thread::JoinHandle<T>>
+    where
+        F: FnOnce() -> T,
+        F: Send + 'static,
+        T: Send + 'static,
+    {
+        std::thread::Builder::new().name(name).spawn(move || {
+            let _profiler = TimeTraceProfiler::new(time_trace);
+            f()
+        })
+    }
+}
+
+impl WriteBackendMethods for LlvmCodegenBackend {
+    type Module = ModuleLlvm;
+    type ModuleBuffer = back::lto::ModuleBuffer;
+    type TargetMachine = OwnedTargetMachine;
+    type TargetMachineError = crate::errors::LlvmError<'static>;
+    type ThinData = back::lto::ThinData;
+    type ThinBuffer = back::lto::ThinBuffer;
+    fn print_pass_timings(&self) {
+        unsafe {
+            let mut size = 0;
+            let cstr = llvm::LLVMRustPrintPassTimings(std::ptr::addr_of_mut!(size));
+            if cstr.is_null() {
+                println!("failed to get pass timings");
+            } else {
+                let timings = std::slice::from_raw_parts(cstr as *const u8, size);
+                std::io::stdout().write_all(timings).unwrap();
+                libc::free(cstr as *mut _);
+            }
+        }
+    }
+    fn print_statistics(&self) {
+        unsafe {
+            let mut size = 0;
+            let cstr = llvm::LLVMRustPrintStatistics(std::ptr::addr_of_mut!(size));
+            if cstr.is_null() {
+                println!("failed to get pass stats");
+            } else {
+                let stats = std::slice::from_raw_parts(cstr as *const u8, size);
+                std::io::stdout().write_all(stats).unwrap();
+                libc::free(cstr as *mut _);
+            }
+        }
+    }
+    fn run_link(
+        cgcx: &CodegenContext<Self>,
+        dcx: &DiagCtxt,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::write::link(cgcx, dcx, modules)
+    }
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<FatLtoInput<Self>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<LtoModuleCodegen<Self>, FatalError> {
+        back::lto::run_fat(cgcx, modules, cached_modules)
+    }
+    fn run_thin_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<(String, Self::ThinBuffer)>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+        back::lto::run_thin(cgcx, modules, cached_modules)
+    }
+    unsafe fn optimize(
+        cgcx: &CodegenContext<Self>,
+        dcx: &DiagCtxt,
+        module: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<(), FatalError> {
+        back::write::optimize(cgcx, dcx, module, config)
+    }
+    fn optimize_fat(
+        cgcx: &CodegenContext<Self>,
+        module: &mut ModuleCodegen<Self::Module>,
+    ) -> Result<(), FatalError> {
+        let dcx = cgcx.create_dcx();
+        back::lto::run_pass_manager(cgcx, &dcx, module, false)
+    }
+    unsafe fn optimize_thin(
+        cgcx: &CodegenContext<Self>,
+        thin: ThinModule<Self>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+        back::lto::optimize_thin_module(thin, cgcx)
+    }
+    unsafe fn codegen(
+        cgcx: &CodegenContext<Self>,
+        dcx: &DiagCtxt,
+        module: ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<CompiledModule, FatalError> {
+        back::write::codegen(cgcx, dcx, module, config)
+    }
+    fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+        back::lto::prepare_thin(module)
+    }
+    fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+        (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
+    }
+}
+
+unsafe impl Send for LlvmCodegenBackend {} // LLVM is used on a per-thread basis
+unsafe impl Sync for LlvmCodegenBackend {}
+
+impl LlvmCodegenBackend {
+    pub fn new() -> Box<dyn CodegenBackend> {
+        Box::new(LlvmCodegenBackend(()))
+    }
+}
+
+impl CodegenBackend for LlvmCodegenBackend {
+    fn locale_resource(&self) -> &'static str {
+        crate::DEFAULT_LOCALE_RESOURCE
+    }
+
+    fn init(&self, sess: &Session) {
+        llvm_util::init(sess); // Make sure llvm is inited
+    }
+
+    fn provide(&self, providers: &mut Providers) {
+        providers.global_backend_features =
+            |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
+    }
+
+    fn print(&self, req: &PrintRequest, out: &mut dyn PrintBackendInfo, sess: &Session) {
+        match req.kind {
+            PrintKind::RelocationModels => {
+                writeln!(out, "Available relocation models:");
+                for name in &[
+                    "static",
+                    "pic",
+                    "pie",
+                    "dynamic-no-pic",
+                    "ropi",
+                    "rwpi",
+                    "ropi-rwpi",
+                    "default",
+                ] {
+                    writeln!(out, "    {name}");
+                }
+                writeln!(out);
+            }
+            PrintKind::CodeModels => {
+                writeln!(out, "Available code models:");
+                for name in &["tiny", "small", "kernel", "medium", "large"] {
+                    writeln!(out, "    {name}");
+                }
+                writeln!(out);
+            }
+            PrintKind::TlsModels => {
+                writeln!(out, "Available TLS models:");
+                for name in
+                    &["global-dynamic", "local-dynamic", "initial-exec", "local-exec", "emulated"]
+                {
+                    writeln!(out, "    {name}");
+                }
+                writeln!(out);
+            }
+            PrintKind::StackProtectorStrategies => {
+                writeln!(
+                    out,
+                    r#"Available stack protector strategies:
+    all
+        Generate stack canaries in all functions.
+
+    strong
+        Generate stack canaries in a function if it either:
+        - has a local variable of `[T; N]` type, regardless of `T` and `N`
+        - takes the address of a local variable.
+
+          (Note that a local variable being borrowed is not equivalent to its
+          address being taken: e.g. some borrows may be removed by optimization,
+          while by-value argument passing may be implemented with reference to a
+          local stack variable in the ABI.)
+
+    basic
+        Generate stack canaries in functions with local variables of `[T; N]`
+        type, where `T` is byte-sized and `N` >= 8.
+
+    none
+        Do not generate stack canaries.
+"#
+                );
+            }
+            _other => llvm_util::print(req, out, sess),
+        }
+    }
+
+    fn print_passes(&self) {
+        llvm_util::print_passes();
+    }
+
+    fn print_version(&self) {
+        llvm_util::print_version();
+    }
+
+    fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+        target_features(sess, allow_unstable)
+    }
+
+    fn codegen_crate<'tcx>(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any> {
+        Box::new(rustc_codegen_ssa::base::codegen_crate(
+            LlvmCodegenBackend(()),
+            tcx,
+            crate::llvm_util::target_cpu(tcx.sess).to_string(),
+            metadata,
+            need_metadata_module,
+        ))
+    }
+
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        sess: &Session,
+        outputs: &OutputFilenames,
+    ) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
+        let (codegen_results, work_products) = ongoing_codegen
+            .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
+            .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
+            .join(sess);
+
+        if sess.opts.unstable_opts.llvm_time_trace {
+            sess.time("llvm_dump_timing_file", || {
+                let file_name = outputs.with_extension("llvm_timings.json");
+                llvm_util::time_trace_profiler_finish(&file_name);
+            });
+        }
+
+        (codegen_results, work_products)
+    }
+
+    fn link(
+        &self,
+        sess: &Session,
+        codegen_results: CodegenResults,
+        outputs: &OutputFilenames,
+    ) -> Result<(), ErrorGuaranteed> {
+        use crate::back::archive::LlvmArchiveBuilderBuilder;
+        use rustc_codegen_ssa::back::link::link_binary;
+
+        // Run the linker on any artifacts that resulted from the LLVM run.
+        // This should produce either a finished executable or library.
+        link_binary(sess, &LlvmArchiveBuilderBuilder, &codegen_results, outputs)
+    }
+}
+
+pub struct ModuleLlvm {
+    llcx: &'static mut llvm::Context,
+    llmod_raw: *const llvm::Module,
+
+    // This field is `ManuallyDrop` because it is important that the `TargetMachine`
+    // is disposed prior to the `Context` being disposed; otherwise use-after-frees can occur.
+    tm: ManuallyDrop<OwnedTargetMachine>,
+}
+
+unsafe impl Send for ModuleLlvm {}
+unsafe impl Sync for ModuleLlvm {}
+
+impl ModuleLlvm {
+    fn new(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+        unsafe {
+            let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+            let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+            ModuleLlvm {
+                llmod_raw,
+                llcx,
+                tm: ManuallyDrop::new(create_target_machine(tcx, mod_name)),
+            }
+        }
+    }
+
+    fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+        unsafe {
+            let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+            let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+            ModuleLlvm {
+                llmod_raw,
+                llcx,
+                tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess)),
+            }
+        }
+    }
+
+    fn parse(
+        cgcx: &CodegenContext<LlvmCodegenBackend>,
+        name: &CStr,
+        buffer: &[u8],
+        dcx: &DiagCtxt,
+    ) -> Result<Self, FatalError> {
+        unsafe {
+            let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+            let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?;
+            let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap());
+            let tm = match (cgcx.tm_factory)(tm_factory_config) {
+                Ok(m) => m,
+                Err(e) => {
+                    return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e)));
+                }
+            };
+
+            Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) })
+        }
+    }
+
+    fn llmod(&self) -> &llvm::Module {
+        unsafe { &*self.llmod_raw }
+    }
+}
+
+impl Drop for ModuleLlvm {
+    fn drop(&mut self) {
+        unsafe {
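+            // Dispose the target machine before the context; see the field comment above.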
+            ManuallyDrop::drop(&mut self.tm);
+            llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
new file mode 100644
index 00000000000..7d948970223
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
@@ -0,0 +1,94 @@
+//! A wrapper around LLVM's archive (.a) code
+
+use rustc_fs_util::path_to_c_string;
+use std::path::Path;
+use std::slice;
+use std::str;
+
+pub struct ArchiveRO {
+    pub raw: &'static mut super::Archive,
+}
+
+unsafe impl Send for ArchiveRO {}
+
+pub struct Iter<'a> {
+    raw: &'a mut super::ArchiveIterator<'a>,
+}
+
+pub struct Child<'a> {
+    pub raw: &'a mut super::ArchiveChild<'a>,
+}
+
+impl ArchiveRO {
+    /// Opens a static archive for read-only purposes. This uses LLVM's internal
+    /// `Archive` class rather than shelling out to `ar` for everything, which makes
+    /// it more efficient.
+    ///
+    /// If this archive is used with a mutable method, an error will be raised.
+    pub fn open(dst: &Path) -> Result<ArchiveRO, String> {
+        unsafe {
+            let s = path_to_c_string(dst);
+            let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| {
+                super::last_error().unwrap_or_else(|| "failed to open archive".to_owned())
+            })?;
+            Ok(ArchiveRO { raw: ar })
+        }
+    }
+
+    pub fn iter(&self) -> Iter<'_> {
+        unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw) } }
+    }
+}
+
+impl Drop for ArchiveRO {
+    fn drop(&mut self) {
+        unsafe {
+            super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _));
+        }
+    }
+}
+
+impl<'a> Iterator for Iter<'a> {
+    type Item = Result<Child<'a>, String>;
+
+    fn next(&mut self) -> Option<Result<Child<'a>, String>> {
+        unsafe {
+            match super::LLVMRustArchiveIteratorNext(self.raw) {
+                Some(raw) => Some(Ok(Child { raw })),
+                None => super::last_error().map(Err),
+            }
+        }
+    }
+}
+
+impl<'a> Drop for Iter<'a> {
+    fn drop(&mut self) {
+        unsafe {
+            super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _));
+        }
+    }
+}
+
+impl<'a> Child<'a> {
+    pub fn name(&self) -> Option<&'a str> {
+        unsafe {
+            let mut name_len = 0;
+            let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len);
+            if name_ptr.is_null() {
+                None
+            } else {
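+                // Decode the raw name bytes as UTF-8 and trim any surrounding
+                // whitespace padding.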
+                let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize);
+                str::from_utf8(name).ok().map(|s| s.trim())
+            }
+        }
+    }
+}
+
+impl<'a> Drop for Child<'a> {
+    fn drop(&mut self) {
+        unsafe {
+            super::LLVMRustArchiveChildFree(&mut *(self.raw as *mut _));
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
new file mode 100644
index 00000000000..f9b28178ddb
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -0,0 +1,213 @@
+//! LLVM diagnostic reports.
+
+pub use self::Diagnostic::*;
+pub use self::OptimizationDiagnosticKind::*;
+
+use crate::value::Value;
+use libc::c_uint;
+
+use super::{DiagnosticInfo, SMDiagnostic};
+use rustc_span::InnerSpan;
+
+#[derive(Copy, Clone, Debug)]
+pub enum OptimizationDiagnosticKind {
+    OptimizationRemark,
+    OptimizationMissed,
+    OptimizationAnalysis,
+    OptimizationAnalysisFPCommute,
+    OptimizationAnalysisAliasing,
+    OptimizationFailure,
+    OptimizationRemarkOther,
+}
+
+pub struct OptimizationDiagnostic<'ll> {
+    pub kind: OptimizationDiagnosticKind,
+    pub pass_name: String,
+    pub function: &'ll Value,
+    pub line: c_uint,
+    pub column: c_uint,
+    pub filename: String,
+    pub message: String,
+}
+
+impl<'ll> OptimizationDiagnostic<'ll> {
+    unsafe fn unpack(kind: OptimizationDiagnosticKind, di: &'ll DiagnosticInfo) -> Self {
+        let mut function = None;
+        let mut line = 0;
+        let mut column = 0;
+
+        let mut message = None;
+        let mut filename = None;
+        let pass_name = super::build_string(|pass_name| {
+            message = super::build_string(|message| {
+                filename = super::build_string(|filename| {
+                    super::LLVMRustUnpackOptimizationDiagnostic(
+                        di,
+                        pass_name,
+                        &mut function,
+                        &mut line,
+                        &mut column,
+                        filename,
+                        message,
+                    )
+                })
+                .ok()
+            })
+            .ok()
+        })
+        .ok();
+
+        let mut filename = filename.unwrap_or_default();
+        if filename.is_empty() {
+            filename.push_str("<unknown file>");
+        }
+
+        OptimizationDiagnostic {
+            kind,
+            pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"),
+            function: function.unwrap(),
+            line,
+            column,
+            filename,
+            message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM"),
+        }
+    }
+}
+
+pub struct SrcMgrDiagnostic {
+    pub level: super::DiagnosticLevel,
+    pub message: String,
+    pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl SrcMgrDiagnostic {
+    pub unsafe fn unpack(diag: &SMDiagnostic) -> SrcMgrDiagnostic {
+        // Recover the post-substitution assembly code from LLVM for better
+        // diagnostics.
+        let mut have_source = false;
+        let mut buffer = String::new();
+        let mut level = super::DiagnosticLevel::Error;
+        let mut loc = 0;
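+        // LLVM can report up to four source ranges, flattened here as (start, end) pairs.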
+        let mut ranges = [0; 8];
+        let mut num_ranges = ranges.len() / 2;
+        let message = super::build_string(|message| {
+            buffer = super::build_string(|buffer| {
+                have_source = super::LLVMRustUnpackSMDiagnostic(
+                    diag,
+                    message,
+                    buffer,
+                    &mut level,
+                    &mut loc,
+                    ranges.as_mut_ptr(),
+                    &mut num_ranges,
+                );
+            })
+            .expect("non-UTF8 inline asm");
+        })
+        .expect("non-UTF8 SMDiagnostic");
+
+        SrcMgrDiagnostic {
+            message,
+            level,
+            source: have_source.then(|| {
+                let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)];
+                for i in 0..num_ranges {
+                    spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize));
+                }
+                (buffer, spans)
+            }),
+        }
+    }
+}
+
+#[derive(Clone)]
+pub struct InlineAsmDiagnostic {
+    pub level: super::DiagnosticLevel,
+    pub cookie: u64,
+    pub message: String,
+    pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl InlineAsmDiagnostic {
+    unsafe fn unpackInlineAsm(di: &DiagnosticInfo) -> Self {
+        let mut cookie = 0;
+        let mut message = None;
+        let mut level = super::DiagnosticLevel::Error;
+
+        super::LLVMRustUnpackInlineAsmDiagnostic(di, &mut level, &mut cookie, &mut message);
+
+        InlineAsmDiagnostic {
+            level,
+            cookie,
+            message: super::twine_to_string(message.unwrap()),
+            source: None,
+        }
+    }
+
+    unsafe fn unpackSrcMgr(di: &DiagnosticInfo) -> Self {
+        let mut cookie = 0;
+        let smdiag = SrcMgrDiagnostic::unpack(super::LLVMRustGetSMDiagnostic(di, &mut cookie));
+        InlineAsmDiagnostic {
+            level: smdiag.level,
+            cookie: cookie.into(),
+            message: smdiag.message,
+            source: smdiag.source,
+        }
+    }
+}
+
+pub enum Diagnostic<'ll> {
+    Optimization(OptimizationDiagnostic<'ll>),
+    InlineAsm(InlineAsmDiagnostic),
+    PGO(&'ll DiagnosticInfo),
+    Linker(&'ll DiagnosticInfo),
+    Unsupported(&'ll DiagnosticInfo),
+
+    /// LLVM has other types that we do not wrap here.
+    UnknownDiagnostic(&'ll DiagnosticInfo),
+}
+
+impl<'ll> Diagnostic<'ll> {
+    pub unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
+        use super::DiagnosticKind as Dk;
+        let kind = super::LLVMRustGetDiagInfoKind(di);
+
+        match kind {
+            Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpackInlineAsm(di)),
+
+            Dk::OptimizationRemark => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di))
+            }
+            Dk::OptimizationRemarkOther => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di))
+            }
+            Dk::OptimizationRemarkMissed => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di))
+            }
+
+            Dk::OptimizationRemarkAnalysis => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di))
+            }
+
+            Dk::OptimizationRemarkAnalysisFPCommute => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di))
+            }
+
+            Dk::OptimizationRemarkAnalysisAliasing => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di))
+            }
+
+            Dk::OptimizationFailure => {
+                Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di))
+            }
+
+            Dk::PGOProfile => PGO(di),
+            Dk::Linker => Linker(di),
+            Dk::Unsupported => Unsupported(di),
+
+            Dk::SrcMgr => InlineAsm(InlineAsmDiagnostic::unpackSrcMgr(di)),
+
+            _ => UnknownDiagnostic(di),
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
new file mode 100644
index 00000000000..284bc74d5c4
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -0,0 +1,2421 @@
+#![allow(non_camel_case_types)]
+#![allow(non_upper_case_globals)]
+
+use super::debuginfo::{
+    DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
+    DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
+    DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable,
+    DebugEmissionKind, DebugNameTableKind,
+};
+
+use libc::{c_char, c_int, c_uint, size_t};
+use libc::{c_ulonglong, c_void};
+
+use std::marker::PhantomData;
+
+use super::RustString;
+
+pub type Bool = c_uint;
+
+pub const True: Bool = 1 as Bool;
+pub const False: Bool = 0 as Bool;
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum LLVMRustResult {
+    Success,
+    Failure,
+}
+
+// Rust version of the C struct with the same name in rustc_llvm/llvm-wrapper/RustWrapper.cpp.
+#[repr(C)]
+pub struct LLVMRustCOFFShortExport {
+    pub name: *const c_char,
+    pub ordinal_present: bool,
+    /// The value of `ordinal` is only meaningful when `ordinal_present` is true.
+    pub ordinal: u16,
+}
+
+impl LLVMRustCOFFShortExport {
+    pub fn new(name: *const c_char, ordinal: Option<u16>) -> LLVMRustCOFFShortExport {
+        LLVMRustCOFFShortExport {
+            name,
+            ordinal_present: ordinal.is_some(),
+            ordinal: ordinal.unwrap_or(0),
+        }
+    }
+}
+
+/// Translation of LLVM's MachineTypes enum, defined in llvm/include/llvm/BinaryFormat/COFF.h.
+///
+/// We include only architectures supported on Windows.
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum LLVMMachineType {
+    AMD64 = 0x8664,
+    I386 = 0x14c,
+    ARM64 = 0xaa64,
+    ARM64EC = 0xa641,
+    ARM = 0x01c0,
+}
+
+/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h.
+///
+/// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are
+/// resolved according to the merge behaviors specified here. Flags differing only in merge
+/// behavior are still considered to be in conflict.
+///
+/// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably,
+/// 'Error' and 'Warning' cannot be mixed for a given flag.
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum LLVMModFlagBehavior {
+    Error = 1,
+    Warning = 2,
+    Require = 3,
+    Override = 4,
+    Append = 5,
+    AppendUnique = 6,
+    Max = 7,
+    Min = 8,
+}
+
+// Calling-convention IDs for the LLVM CallConv type.
+
+/// LLVM CallingConv::ID. Should we wrap this?
+///
+/// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h>
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum CallConv {
+    CCallConv = 0,
+    FastCallConv = 8,
+    ColdCallConv = 9,
+    PreserveMost = 14,
+    PreserveAll = 15,
+    Tail = 18,
+    X86StdcallCallConv = 64,
+    X86FastcallCallConv = 65,
+    ArmAapcsCallConv = 67,
+    Msp430Intr = 69,
+    X86_ThisCall = 70,
+    PtxKernel = 71,
+    X86_64_SysV = 78,
+    X86_64_Win64 = 79,
+    X86_VectorCall = 80,
+    X86_Intr = 83,
+    AvrNonBlockingInterrupt = 84,
+    AvrInterrupt = 85,
+}
+
+/// LLVMRustLinkage
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum Linkage {
+    ExternalLinkage = 0,
+    AvailableExternallyLinkage = 1,
+    LinkOnceAnyLinkage = 2,
+    LinkOnceODRLinkage = 3,
+    WeakAnyLinkage = 4,
+    WeakODRLinkage = 5,
+    AppendingLinkage = 6,
+    InternalLinkage = 7,
+    PrivateLinkage = 8,
+    ExternalWeakLinkage = 9,
+    CommonLinkage = 10,
+}
+
+// LLVMRustVisibility
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq)]
+pub enum Visibility {
+    Default = 0,
+    Hidden = 1,
+    Protected = 2,
+}
+
+/// LLVMUnnamedAddr
+#[repr(C)]
+pub enum UnnamedAddr {
+    No,
+    Local,
+    Global,
+}
+
+/// LLVMDLLStorageClass
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum DLLStorageClass {
+    #[allow(dead_code)]
+    Default = 0,
+    DllImport = 1, // Function to be imported from DLL.
+    #[allow(dead_code)]
+    DllExport = 2, // Function to be accessible from DLL.
+}
+
+/// Matches LLVMRustAttribute in LLVMWrapper.h
+/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
+/// though it is not ABI compatible (since it's a C++ enum)
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub enum AttributeKind {
+    AlwaysInline = 0,
+    ByVal = 1,
+    Cold = 2,
+    InlineHint = 3,
+    MinSize = 4,
+    Naked = 5,
+    NoAlias = 6,
+    NoCapture = 7,
+    NoInline = 8,
+    NonNull = 9,
+    NoRedZone = 10,
+    NoReturn = 11,
+    NoUnwind = 12,
+    OptimizeForSize = 13,
+    ReadOnly = 14,
+    SExt = 15,
+    StructRet = 16,
+    UWTable = 17,
+    ZExt = 18,
+    InReg = 19,
+    SanitizeThread = 20,
+    SanitizeAddress = 21,
+    SanitizeMemory = 22,
+    NonLazyBind = 23,
+    OptimizeNone = 24,
+    ReadNone = 26,
+    SanitizeHWAddress = 28,
+    WillReturn = 29,
+    StackProtectReq = 30,
+    StackProtectStrong = 31,
+    StackProtect = 32,
+    NoUndef = 33,
+    SanitizeMemTag = 34,
+    NoCfCheck = 35,
+    ShadowCallStack = 36,
+    AllocSize = 37,
+    AllocatedPointer = 38,
+    AllocAlign = 39,
+    SanitizeSafeStack = 40,
+    FnRetThunkExtern = 41,
+}
+
+/// LLVMIntPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum IntPredicate {
+    IntEQ = 32,
+    IntNE = 33,
+    IntUGT = 34,
+    IntUGE = 35,
+    IntULT = 36,
+    IntULE = 37,
+    IntSGT = 38,
+    IntSGE = 39,
+    IntSLT = 40,
+    IntSLE = 41,
+}
+
+impl IntPredicate {
+    pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
+        match intpre {
+            rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ,
+            rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE,
+            rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT,
+            rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE,
+            rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT,
+            rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE,
+            rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT,
+            rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE,
+            rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT,
+            rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE,
+        }
+    }
+}
+
+/// LLVMRealPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum RealPredicate {
+    RealPredicateFalse = 0,
+    RealOEQ = 1,
+    RealOGT = 2,
+    RealOGE = 3,
+    RealOLT = 4,
+    RealOLE = 5,
+    RealONE = 6,
+    RealORD = 7,
+    RealUNO = 8,
+    RealUEQ = 9,
+    RealUGT = 10,
+    RealUGE = 11,
+    RealULT = 12,
+    RealULE = 13,
+    RealUNE = 14,
+    RealPredicateTrue = 15,
+}
+
+impl RealPredicate {
+    pub fn from_generic(realp: rustc_codegen_ssa::common::RealPredicate) -> Self {
+        match realp {
+            rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
+                RealPredicate::RealPredicateFalse
+            }
+            rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
+            rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
+            rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
+            rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
+            rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
+            rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
+            rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
+            rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
+            rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
+            rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
+            rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
+            rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
+            rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
+            rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
+            rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
+                RealPredicate::RealPredicateTrue
+            }
+        }
+    }
+}
+
+/// LLVMTypeKind
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum TypeKind {
+    Void = 0,
+    Half = 1,
+    Float = 2,
+    Double = 3,
+    X86_FP80 = 4,
+    FP128 = 5,
+    PPC_FP128 = 6,
+    Label = 7,
+    Integer = 8,
+    Function = 9,
+    Struct = 10,
+    Array = 11,
+    Pointer = 12,
+    Vector = 13,
+    Metadata = 14,
+    X86_MMX = 15,
+    Token = 16,
+    ScalableVector = 17,
+    BFloat = 18,
+    X86_AMX = 19,
+}
+
+impl TypeKind {
+    pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
+        match self {
+            TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
+            TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
+            TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
+            TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
+            TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
+            TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
+            TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
+            TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
+            TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
+            TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
+            TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
+            TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
+            TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
+            TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
+            TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
+            TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX,
+            TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
+            TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
+            TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
+            TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX,
+        }
+    }
+}
+
+/// LLVMAtomicRmwBinOp
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicRmwBinOp {
+    AtomicXchg = 0,
+    AtomicAdd = 1,
+    AtomicSub = 2,
+    AtomicAnd = 3,
+    AtomicNand = 4,
+    AtomicOr = 5,
+    AtomicXor = 6,
+    AtomicMax = 7,
+    AtomicMin = 8,
+    AtomicUMax = 9,
+    AtomicUMin = 10,
+}
+
+impl AtomicRmwBinOp {
+    pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
+        match op {
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
+            rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin,
+        }
+    }
+}
+
+/// LLVMAtomicOrdering
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicOrdering {
+    #[allow(dead_code)]
+    NotAtomic = 0,
+    Unordered = 1,
+    Monotonic = 2,
+    // Consume = 3,  // Not specified yet.
+    Acquire = 4,
+    Release = 5,
+    AcquireRelease = 6,
+    SequentiallyConsistent = 7,
+}
+
+impl AtomicOrdering {
+    pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
+        match ao {
+            rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+            rustc_codegen_ssa::common::AtomicOrdering::Relaxed => AtomicOrdering::Monotonic,
+            rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+            rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release,
+            rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => {
+                AtomicOrdering::AcquireRelease
+            }
+            rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => {
+                AtomicOrdering::SequentiallyConsistent
+            }
+        }
+    }
+}
+
+/// LLVMRustFileType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum FileType {
+    AssemblyFile,
+    ObjectFile,
+}
+
+/// LLVMMetadataType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum MetadataType {
+    MD_dbg = 0,
+    MD_tbaa = 1,
+    MD_prof = 2,
+    MD_fpmath = 3,
+    MD_range = 4,
+    MD_tbaa_struct = 5,
+    MD_invariant_load = 6,
+    MD_alias_scope = 7,
+    MD_noalias = 8,
+    MD_nontemporal = 9,
+    MD_mem_parallel_loop_access = 10,
+    MD_nonnull = 11,
+    MD_align = 17,
+    MD_type = 19,
+    MD_vcall_visibility = 28,
+    MD_noundef = 29,
+    MD_kcfi_type = 36,
+}
+
+/// LLVMRustAsmDialect
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum AsmDialect {
+    Att,
+    Intel,
+}
+
+/// LLVMRustCodeGenOptLevel
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptLevel {
+    None,
+    Less,
+    Default,
+    Aggressive,
+}
+
+/// LLVMRustPassBuilderOptLevel
+#[repr(C)]
+pub enum PassBuilderOptLevel {
+    O0,
+    O1,
+    O2,
+    O3,
+    Os,
+    Oz,
+}
+
+/// LLVMRustOptStage
+#[derive(PartialEq)]
+#[repr(C)]
+pub enum OptStage {
+    PreLinkNoLTO,
+    PreLinkThinLTO,
+    PreLinkFatLTO,
+    ThinLTO,
+    FatLTO,
+}
+
+/// LLVMRustSanitizerOptions
+#[repr(C)]
+pub struct SanitizerOptions {
+    pub sanitize_address: bool,
+    pub sanitize_address_recover: bool,
+    pub sanitize_cfi: bool,
+    pub sanitize_dataflow: bool,
+    pub sanitize_dataflow_abilist: *const *const c_char,
+    pub sanitize_dataflow_abilist_len: size_t,
+    pub sanitize_kcfi: bool,
+    pub sanitize_memory: bool,
+    pub sanitize_memory_recover: bool,
+    pub sanitize_memory_track_origins: c_int,
+    pub sanitize_thread: bool,
+    pub sanitize_hwaddress: bool,
+    pub sanitize_hwaddress_recover: bool,
+    pub sanitize_kernel_address: bool,
+    pub sanitize_kernel_address_recover: bool,
+}
+
+/// LLVMRelocMode
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum RelocModel {
+    Static,
+    PIC,
+    DynamicNoPic,
+    ROPI,
+    RWPI,
+    ROPI_RWPI,
+}
+
+/// LLVMRustCodeModel
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum CodeModel {
+    Tiny,
+    Small,
+    Kernel,
+    Medium,
+    Large,
+    None,
+}
+
+/// LLVMRustDiagnosticKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticKind {
+    Other,
+    InlineAsm,
+    StackSize,
+    DebugMetadataVersion,
+    SampleProfile,
+    OptimizationRemark,
+    OptimizationRemarkMissed,
+    OptimizationRemarkAnalysis,
+    OptimizationRemarkAnalysisFPCommute,
+    OptimizationRemarkAnalysisAliasing,
+    OptimizationRemarkOther,
+    OptimizationFailure,
+    PGOProfile,
+    Linker,
+    Unsupported,
+    SrcMgr,
+}
+
+/// LLVMRustDiagnosticLevel
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticLevel {
+    Error,
+    Warning,
+    Note,
+    Remark,
+}
+
+/// LLVMRustArchiveKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ArchiveKind {
+    K_GNU,
+    K_BSD,
+    K_DARWIN,
+    K_COFF,
+    K_AIXBIG,
+}
+
+// LLVMRustThinLTOData
+extern "C" {
+    pub type ThinLTOData;
+}
+
+// LLVMRustThinLTOBuffer
+extern "C" {
+    pub type ThinLTOBuffer;
+}
+
+/// LLVMRustThinLTOModule
+#[repr(C)]
+pub struct ThinLTOModule {
+    pub identifier: *const c_char,
+    pub data: *const u8,
+    pub len: usize,
+}
+
+/// LLVMThreadLocalMode
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ThreadLocalMode {
+    NotThreadLocal,
+    GeneralDynamic,
+    LocalDynamic,
+    InitialExec,
+    LocalExec,
+}
+
+/// LLVMRustTailCallKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum TailCallKind {
+    None,
+    Tail,
+    MustTail,
+    NoTail,
+}
+
+/// LLVMRustChecksumKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ChecksumKind {
+    None,
+    MD5,
+    SHA1,
+    SHA256,
+}
+
+/// LLVMRustMemoryEffects
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum MemoryEffects {
+    None,
+    ReadOnly,
+    InaccessibleMemOnly,
+}
+
+extern "C" {
+    type Opaque;
+}
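+/// An opaque handle to a C++ object with an invariant lifetime: the
+/// `&'a mut &'a ()` marker makes `'a` invariant, preventing the lifetime from
+/// being shortened or lengthened through variance.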
+#[repr(C)]
+struct InvariantOpaque<'a> {
+    _marker: PhantomData<&'a mut &'a ()>,
+    _opaque: Opaque,
+}
+
+// Opaque pointer types
+extern "C" {
+    pub type Module;
+}
+extern "C" {
+    pub type Context;
+}
+extern "C" {
+    pub type Type;
+}
+extern "C" {
+    pub type Value;
+}
+extern "C" {
+    pub type ConstantInt;
+}
+extern "C" {
+    pub type Attribute;
+}
+extern "C" {
+    pub type Metadata;
+}
+extern "C" {
+    pub type BasicBlock;
+}
+#[repr(C)]
+pub struct Builder<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct PassManager<'a>(InvariantOpaque<'a>);
+extern "C" {
+    pub type Pass;
+}
+extern "C" {
+    pub type TargetMachine;
+}
+extern "C" {
+    pub type Archive;
+}
+#[repr(C)]
+pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
+extern "C" {
+    pub type Twine;
+}
+extern "C" {
+    pub type DiagnosticInfo;
+}
+extern "C" {
+    pub type SMDiagnostic;
+}
+#[repr(C)]
+pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct Linker<'a>(InvariantOpaque<'a>);
+
+extern "C" {
+    pub type DiagnosticHandler;
+}
+
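+// Callback signatures registered with LLVM to receive diagnostics and inline-asm errors.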
+pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
+pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
+
+pub mod debuginfo {
+    use super::{InvariantOpaque, Metadata};
+    use bitflags::bitflags;
+
+    #[repr(C)]
+    pub struct DIBuilder<'a>(InvariantOpaque<'a>);
+
+    pub type DIDescriptor = Metadata;
+    pub type DILocation = Metadata;
+    pub type DIScope = DIDescriptor;
+    pub type DIFile = DIScope;
+    pub type DILexicalBlock = DIScope;
+    pub type DISubprogram = DIScope;
+    pub type DINameSpace = DIScope;
+    pub type DIType = DIDescriptor;
+    pub type DIBasicType = DIType;
+    pub type DIDerivedType = DIType;
+    pub type DICompositeType = DIDerivedType;
+    pub type DIVariable = DIDescriptor;
+    pub type DIGlobalVariableExpression = DIDescriptor;
+    pub type DIArray = DIDescriptor;
+    pub type DISubrange = DIDescriptor;
+    pub type DIEnumerator = DIDescriptor;
+    pub type DITemplateTypeParameter = DIDescriptor;
+
+    // These values **must** match with LLVMRustDIFlags!!
+    bitflags! {
+        #[repr(transparent)]
+        #[derive(Clone, Copy, Default)]
+        pub struct DIFlags: u32 {
+            const FlagZero                = 0;
+            const FlagPrivate             = 1;
+            const FlagProtected           = 2;
+            const FlagPublic              = 3;
+            const FlagFwdDecl             = (1 << 2);
+            const FlagAppleBlock          = (1 << 3);
+            const FlagBlockByrefStruct    = (1 << 4);
+            const FlagVirtual             = (1 << 5);
+            const FlagArtificial          = (1 << 6);
+            const FlagExplicit            = (1 << 7);
+            const FlagPrototyped          = (1 << 8);
+            const FlagObjcClassComplete   = (1 << 9);
+            const FlagObjectPointer       = (1 << 10);
+            const FlagVector              = (1 << 11);
+            const FlagStaticMember        = (1 << 12);
+            const FlagLValueReference     = (1 << 13);
+            const FlagRValueReference     = (1 << 14);
+            const FlagExternalTypeRef     = (1 << 15);
+            const FlagIntroducedVirtual   = (1 << 18);
+            const FlagBitField            = (1 << 19);
+            const FlagNoReturn            = (1 << 20);
+        }
+    }
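+    // Note that `FlagPrivate`, `FlagProtected` and `FlagPublic` are not independent
+    // bits: `FlagPublic` is numerically `FlagPrivate | FlagProtected`, so the three
+    // form a two-bit accessibility field. Pick at most one of them and OR it with
+    // the genuinely independent flags, e.g. (a minimal sketch):
+    //
+    //     let flags = DIFlags::FlagPrivate | DIFlags::FlagArtificial;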
+
+    // These values **must** match with LLVMRustDISPFlags!!
+    bitflags! {
+        #[repr(transparent)]
+        #[derive(Clone, Copy, Default)]
+        pub struct DISPFlags: u32 {
+            const SPFlagZero              = 0;
+            const SPFlagVirtual           = 1;
+            const SPFlagPureVirtual       = 2;
+            const SPFlagLocalToUnit       = (1 << 2);
+            const SPFlagDefinition        = (1 << 3);
+            const SPFlagOptimized         = (1 << 4);
+            const SPFlagMainSubprogram    = (1 << 5);
+        }
+    }
+
+    /// LLVMRustDebugEmissionKind
+    #[derive(Copy, Clone)]
+    #[repr(C)]
+    pub enum DebugEmissionKind {
+        NoDebug,
+        FullDebug,
+        LineTablesOnly,
+        DebugDirectivesOnly,
+    }
+
+    impl DebugEmissionKind {
+        pub fn from_generic(kind: rustc_session::config::DebugInfo) -> Self {
+            // We should be setting LLVM's emission kind to `LineTablesOnly` if
+            // we are compiling with "limited" debuginfo. However, some of the
+            // existing tools relied on slightly more debuginfo being generated than
+            // `LineTablesOnly` would provide, and we did not want to break those
+            // tools in a drive-by fix without a clear idea of, or plan for, what
+            // limited debuginfo should look like. So for now we instead add a new
+            // debuginfo option, "line-tables-only", so as not to break anything
+            // while still letting users keep 'limited' debug info.
+            //
+            // See https://github.com/rust-lang/rust/issues/60020 for details.
+            use rustc_session::config::DebugInfo;
+            match kind {
+                DebugInfo::None => DebugEmissionKind::NoDebug,
+                DebugInfo::LineDirectivesOnly => DebugEmissionKind::DebugDirectivesOnly,
+                DebugInfo::LineTablesOnly => DebugEmissionKind::LineTablesOnly,
+                DebugInfo::Limited | DebugInfo::Full => DebugEmissionKind::FullDebug,
+            }
+        }
+    }
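+
+    // A minimal sketch of the resulting mapping (it mirrors the `match` above; note
+    // in particular that "limited" debuginfo still maps to `FullDebug`):
+    //
+    //     use rustc_session::config::DebugInfo;
+    //     assert!(matches!(
+    //         DebugEmissionKind::from_generic(DebugInfo::Limited),
+    //         DebugEmissionKind::FullDebug
+    //     ));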
+
+    /// LLVMRustDebugNameTableKind
+    #[derive(Clone, Copy)]
+    #[repr(C)]
+    pub enum DebugNameTableKind {
+        Default,
+        Gnu,
+        None,
+    }
+}
+
+use bitflags::bitflags;
+// These values **must** match with LLVMRustAllocKindFlags
+bitflags! {
+    #[repr(transparent)]
+    #[derive(Default)]
+    pub struct AllocKindFlags : u64 {
+        const Unknown = 0;
+        const Alloc = 1;
+        const Realloc = 1 << 1;
+        const Free = 1 << 2;
+        const Uninitialized = 1 << 3;
+        const Zeroed = 1 << 4;
+        const Aligned = 1 << 5;
+    }
+}
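+// `AllocKindFlags` is not passed across the FFI boundary directly; callers combine
+// the flags and hand the raw bits to `LLVMRustCreateAllocKindAttr` below.
+// A minimal sketch (`llcx` is an illustrative context handle):
+//
+//     let kind = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+//     let attr = unsafe { LLVMRustCreateAllocKindAttr(llcx, kind.bits()) };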
+
+extern "C" {
+    pub type ModuleBuffer;
+}
+
+pub type SelfProfileBeforePassCallback =
+    unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);
+pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void);
+
+pub type GetSymbolsCallback = unsafe extern "C" fn(*mut c_void, *const c_char) -> *mut c_void;
+pub type GetSymbolsErrorCallback = unsafe extern "C" fn(*const c_char) -> *mut c_void;
+
+extern "C" {
+    // Create and destroy contexts.
+    pub fn LLVMContextDispose(C: &'static mut Context);
+    pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
+
+    // Create modules.
+    pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
+    pub fn LLVMGetModuleContext(M: &Module) -> &Context;
+    pub fn LLVMCloneModule(M: &Module) -> &Module;
+
+    /// Data layout. See Module::getDataLayout.
+    pub fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char;
+    pub fn LLVMSetDataLayout(M: &Module, Triple: *const c_char);
+
+    /// See Module::setModuleInlineAsm.
+    pub fn LLVMAppendModuleInlineAsm(M: &Module, Asm: *const c_char, Len: size_t);
+
+    // Operations on integer types
+    pub fn LLVMInt1TypeInContext(C: &Context) -> &Type;
+    pub fn LLVMInt8TypeInContext(C: &Context) -> &Type;
+    pub fn LLVMInt16TypeInContext(C: &Context) -> &Type;
+    pub fn LLVMInt32TypeInContext(C: &Context) -> &Type;
+    pub fn LLVMInt64TypeInContext(C: &Context) -> &Type;
+    pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type;
+
+    pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint;
+
+    // Operations on real types
+    pub fn LLVMHalfTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMFloatTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMFP128TypeInContext(C: &Context) -> &Type;
+
+    // Operations on function types
+    pub fn LLVMFunctionType<'a>(
+        ReturnType: &'a Type,
+        ParamTypes: *const &'a Type,
+        ParamCount: c_uint,
+        IsVarArg: Bool,
+    ) -> &'a Type;
+    pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint;
+    pub fn LLVMGetParamTypes<'a>(FunctionTy: &'a Type, Dest: *mut &'a Type);
+
+    // Operations on struct types
+    pub fn LLVMStructTypeInContext<'a>(
+        C: &'a Context,
+        ElementTypes: *const &'a Type,
+        ElementCount: c_uint,
+        Packed: Bool,
+    ) -> &'a Type;
+
+    // Operations on array, pointer, and vector types (sequence types)
+    pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type;
+    pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
+
+    pub fn LLVMGetElementType(Ty: &Type) -> &Type;
+    pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
+
+    // Operations on other types
+    pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMTokenTypeInContext(C: &Context) -> &Type;
+    pub fn LLVMMetadataTypeInContext(C: &Context) -> &Type;
+
+    // Operations on all values
+    pub fn LLVMTypeOf(Val: &Value) -> &Type;
+    pub fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char;
+    pub fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t);
+    pub fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value);
+    pub fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value);
+    pub fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+    pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
+
+    // Operations on constants of any type
+    pub fn LLVMConstNull(Ty: &Type) -> &Value;
+    pub fn LLVMGetUndef(Ty: &Type) -> &Value;
+    pub fn LLVMGetPoison(Ty: &Type) -> &Value;
+
+    // Operations on metadata
+    // FIXME: deprecated, replace with LLVMMDStringInContext2
+    pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
+
+    pub fn LLVMMDStringInContext2(C: &Context, Str: *const c_char, SLen: size_t) -> &Metadata;
+
+    // FIXME: deprecated, replace with LLVMMDNodeInContext2
+    pub fn LLVMMDNodeInContext<'a>(
+        C: &'a Context,
+        Vals: *const &'a Value,
+        Count: c_uint,
+    ) -> &'a Value;
+    pub fn LLVMMDNodeInContext2<'a>(
+        C: &'a Context,
+        Vals: *const &'a Metadata,
+        Count: size_t,
+    ) -> &'a Metadata;
+    pub fn LLVMAddNamedMetadataOperand<'a>(M: &'a Module, Name: *const c_char, Val: &'a Value);
+
+    // Operations on scalar constants
+    pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
+    pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
+    pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
+
+    // Operations on composite constants
+    pub fn LLVMConstArray2<'a>(
+        ElementTy: &'a Type,
+        ConstantVals: *const &'a Value,
+        Length: u64,
+    ) -> &'a Value;
+    pub fn LLVMArrayType2(ElementType: &Type, ElementCount: u64) -> &Type;
+    pub fn LLVMConstStringInContext2(
+        C: &Context,
+        Str: *const c_char,
+        Length: size_t,
+        DontNullTerminate: Bool,
+    ) -> &Value;
+    pub fn LLVMConstStructInContext<'a>(
+        C: &'a Context,
+        ConstantVals: *const &'a Value,
+        Count: c_uint,
+        Packed: Bool,
+    ) -> &'a Value;
+    pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
+
+    // Constant expressions
+    pub fn LLVMConstInBoundsGEP2<'a>(
+        ty: &'a Type,
+        ConstantVal: &'a Value,
+        ConstantIndices: *const &'a Value,
+        NumIndices: c_uint,
+    ) -> &'a Value;
+    pub fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+    pub fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+    pub fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+    pub fn LLVMGetAggregateElement(ConstantVal: &Value, Idx: c_uint) -> Option<&Value>;
+
+    // Operations on global variables, functions, and aliases (globals)
+    pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
+    pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
+    pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
+    pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
+    pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
+
+    // Operations on global variables
+    pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
+    pub fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+    pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
+    pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
+    pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
+    pub fn LLVMDeleteGlobal(GlobalVar: &Value);
+    pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>;
+    pub fn LLVMSetInitializer<'a>(GlobalVar: &'a Value, ConstantVal: &'a Value);
+    pub fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool;
+    pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode);
+    pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool;
+    pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool);
+    pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+
+    // Operations on attributes
+    pub fn LLVMCreateStringAttribute(
+        C: &Context,
+        Name: *const c_char,
+        NameLen: c_uint,
+        Value: *const c_char,
+        ValueLen: c_uint,
+    ) -> &Attribute;
+
+    // Operations on functions
+    pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
+
+    // Operations on parameters
+    pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
+    pub fn LLVMCountParams(Fn: &Value) -> c_uint;
+    pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value;
+
+    // Operations on basic blocks
+    pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value;
+    pub fn LLVMAppendBasicBlockInContext<'a>(
+        C: &'a Context,
+        Fn: &'a Value,
+        Name: *const c_char,
+    ) -> &'a BasicBlock;
+
+    // Operations on instructions
+    pub fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
+    pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
+
+    // Operations on call sites
+    pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
+
+    // Operations on load/store instructions (only)
+    pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
+
+    // Operations on phi nodes
+    pub fn LLVMAddIncoming<'a>(
+        PhiNode: &'a Value,
+        IncomingValues: *const &'a Value,
+        IncomingBlocks: *const &'a BasicBlock,
+        Count: c_uint,
+    );
+
+    // Instruction builders
+    pub fn LLVMCreateBuilderInContext(C: &Context) -> &mut Builder<'_>;
+    pub fn LLVMPositionBuilderAtEnd<'a>(Builder: &Builder<'a>, Block: &'a BasicBlock);
+    pub fn LLVMGetInsertBlock<'a>(Builder: &Builder<'a>) -> &'a BasicBlock;
+    pub fn LLVMDisposeBuilder<'a>(Builder: &'a mut Builder<'a>);
+
+    // Metadata
+    pub fn LLVMSetCurrentDebugLocation2<'a>(Builder: &Builder<'a>, Loc: &'a Metadata);
+
+    // Terminators
+    pub fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value;
+    pub fn LLVMBuildRet<'a>(B: &Builder<'a>, V: &'a Value) -> &'a Value;
+    pub fn LLVMBuildBr<'a>(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value;
+    pub fn LLVMBuildCondBr<'a>(
+        B: &Builder<'a>,
+        If: &'a Value,
+        Then: &'a BasicBlock,
+        Else: &'a BasicBlock,
+    ) -> &'a Value;
+    pub fn LLVMBuildSwitch<'a>(
+        B: &Builder<'a>,
+        V: &'a Value,
+        Else: &'a BasicBlock,
+        NumCases: c_uint,
+    ) -> &'a Value;
+    pub fn LLVMBuildLandingPad<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PersFn: Option<&'a Value>,
+        NumClauses: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildResume<'a>(B: &Builder<'a>, Exn: &'a Value) -> &'a Value;
+    pub fn LLVMBuildUnreachable<'a>(B: &Builder<'a>) -> &'a Value;
+
+    pub fn LLVMBuildCleanupPad<'a>(
+        B: &Builder<'a>,
+        ParentPad: Option<&'a Value>,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Name: *const c_char,
+    ) -> Option<&'a Value>;
+    pub fn LLVMBuildCleanupRet<'a>(
+        B: &Builder<'a>,
+        CleanupPad: &'a Value,
+        BB: Option<&'a BasicBlock>,
+    ) -> Option<&'a Value>;
+    pub fn LLVMBuildCatchPad<'a>(
+        B: &Builder<'a>,
+        ParentPad: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Name: *const c_char,
+    ) -> Option<&'a Value>;
+    pub fn LLVMBuildCatchRet<'a>(
+        B: &Builder<'a>,
+        CatchPad: &'a Value,
+        BB: &'a BasicBlock,
+    ) -> Option<&'a Value>;
+    pub fn LLVMBuildCatchSwitch<'a>(
+        Builder: &Builder<'a>,
+        ParentPad: Option<&'a Value>,
+        UnwindBB: Option<&'a BasicBlock>,
+        NumHandlers: c_uint,
+        Name: *const c_char,
+    ) -> Option<&'a Value>;
+    pub fn LLVMAddHandler<'a>(CatchSwitch: &'a Value, Dest: &'a BasicBlock);
+    pub fn LLVMSetPersonalityFn<'a>(Func: &'a Value, Pers: &'a Value);
+
+    // Add a case to the switch instruction
+    pub fn LLVMAddCase<'a>(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock);
+
+    // Add a clause to the landing pad instruction
+    pub fn LLVMAddClause<'a>(LandingPad: &'a Value, ClauseVal: &'a Value);
+
+    // Set the cleanup on a landing pad instruction
+    pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool);
+
+    // Arithmetic
+    pub fn LLVMBuildAdd<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFAdd<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildSub<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFSub<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildMul<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFMul<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildUDiv<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildExactUDiv<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildSDiv<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildExactSDiv<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFDiv<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildURem<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildSRem<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFRem<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildShl<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildLShr<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildAShr<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNSWAdd<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNUWAdd<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNSWSub<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNUWSub<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNSWMul<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNUWMul<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildAnd<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildOr<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildXor<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildFNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildNot<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+
+    // Memory
+    pub fn LLVMBuildAlloca<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildArrayAlloca<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Val: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildLoad2<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    pub fn LLVMBuildStore<'a>(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
+
+    pub fn LLVMBuildGEP2<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Pointer: &'a Value,
+        Indices: *const &'a Value,
+        NumIndices: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildInBoundsGEP2<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Pointer: &'a Value,
+        Indices: *const &'a Value,
+        NumIndices: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    // Casts
+    pub fn LLVMBuildTrunc<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildZExt<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildSExt<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFPToUI<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFPToSI<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildUIToFP<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildSIToFP<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFPTrunc<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFPExt<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildPtrToInt<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildIntToPtr<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildBitCast<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildPointerCast<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildIntCast2<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        DestTy: &'a Type,
+        IsSigned: Bool,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    // Comparisons
+    pub fn LLVMBuildICmp<'a>(
+        B: &Builder<'a>,
+        Op: c_uint,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildFCmp<'a>(
+        B: &Builder<'a>,
+        Op: c_uint,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    // Miscellaneous instructions
+    pub fn LLVMBuildPhi<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildSelect<'a>(
+        B: &Builder<'a>,
+        If: &'a Value,
+        Then: &'a Value,
+        Else: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildVAArg<'a>(
+        B: &Builder<'a>,
+        list: &'a Value,
+        Ty: &'a Type,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildExtractElement<'a>(
+        B: &Builder<'a>,
+        VecVal: &'a Value,
+        Index: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildInsertElement<'a>(
+        B: &Builder<'a>,
+        VecVal: &'a Value,
+        EltVal: &'a Value,
+        Index: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildShuffleVector<'a>(
+        B: &Builder<'a>,
+        V1: &'a Value,
+        V2: &'a Value,
+        Mask: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildExtractValue<'a>(
+        B: &Builder<'a>,
+        AggVal: &'a Value,
+        Index: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub fn LLVMBuildInsertValue<'a>(
+        B: &Builder<'a>,
+        AggVal: &'a Value,
+        EltVal: &'a Value,
+        Index: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    // Atomic Operations
+    pub fn LLVMBuildAtomicCmpXchg<'a>(
+        B: &Builder<'a>,
+        LHS: &'a Value,
+        CMP: &'a Value,
+        RHS: &'a Value,
+        Order: AtomicOrdering,
+        FailureOrder: AtomicOrdering,
+        SingleThreaded: Bool,
+    ) -> &'a Value;
+
+    pub fn LLVMSetWeak(CmpXchgInst: &Value, IsWeak: Bool);
+
+    pub fn LLVMBuildAtomicRMW<'a>(
+        B: &Builder<'a>,
+        Op: AtomicRmwBinOp,
+        LHS: &'a Value,
+        RHS: &'a Value,
+        Order: AtomicOrdering,
+        SingleThreaded: Bool,
+    ) -> &'a Value;
+
+    pub fn LLVMBuildFence<'a>(
+        B: &Builder<'a>,
+        Order: AtomicOrdering,
+        SingleThreaded: Bool,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    /// Writes a module to the specified path. Returns 0 on success.
+    pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
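+    // A minimal sketch of the 0-on-success convention (`llmod` is illustrative and
+    // `path` is an illustrative `CString` holding the output path):
+    //
+    //     if unsafe { LLVMWriteBitcodeToFile(llmod, path.as_ptr()) } != 0 {
+    //         // handle the failure
+    //     }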
+
+    /// Creates a legacy pass manager -- only used for final codegen.
+    pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
+
+    pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>);
+
+    pub fn LLVMGetHostCPUFeatures() -> *mut c_char;
+
+    pub fn LLVMDisposeMessage(message: *mut c_char);
+
+    pub fn LLVMIsMultithreaded() -> Bool;
+
+    pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
+
+    pub fn LLVMStructSetBody<'a>(
+        StructTy: &'a Type,
+        ElementTypes: *const &'a Type,
+        ElementCount: c_uint,
+        Packed: Bool,
+    );
+
+    pub fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value;
+
+    pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr);
+
+    pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
+}
+
+#[link(name = "llvm-wrapper", kind = "static")]
+extern "C" {
+    pub fn LLVMRustInstallErrorHandlers();
+    pub fn LLVMRustDisableSystemDialogsOnCrash();
+
+    // Create and destroy contexts.
+    pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
+
+    /// See llvm::LLVMTypeKind::getTypeID.
+    pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
+
+    // Operations on all values
+    pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+    pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
+
+    // Operations on scalar constants
+    pub fn LLVMRustConstIntGetZExtValue(ConstantVal: &ConstantInt, Value: &mut u64) -> bool;
+    pub fn LLVMRustConstInt128Get(
+        ConstantVal: &ConstantInt,
+        SExt: bool,
+        high: &mut u64,
+        low: &mut u64,
+    ) -> bool;
+
+    // Operations on global variables, functions, and aliases (globals)
+    pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
+    pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
+    pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
+    pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
+    pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool);
+
+    // Operations on global variables
+    pub fn LLVMRustGetOrInsertGlobal<'a>(
+        M: &'a Module,
+        Name: *const c_char,
+        NameLen: size_t,
+        T: &'a Type,
+    ) -> &'a Value;
+    pub fn LLVMRustInsertPrivateGlobal<'a>(M: &'a Module, T: &'a Type) -> &'a Value;
+    pub fn LLVMRustGetNamedValue(
+        M: &Module,
+        Name: *const c_char,
+        NameLen: size_t,
+    ) -> Option<&Value>;
+    pub fn LLVMRustSetTailCallKind(CallInst: &Value, TKC: TailCallKind);
+
+    // Operations on attributes
+    pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
+    pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute;
+    pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute;
+    pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute;
+    pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+    pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+    pub fn LLVMRustCreateElementTypeAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+    pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute;
+    pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute;
+    pub fn LLVMRustCreateAllocKindAttr(C: &Context, kind_arg: u64) -> &Attribute;
+    pub fn LLVMRustCreateMemoryEffectsAttr(C: &Context, effects: MemoryEffects) -> &Attribute;
+
+    // Operations on functions
+    pub fn LLVMRustGetOrInsertFunction<'a>(
+        M: &'a Module,
+        Name: *const c_char,
+        NameLen: size_t,
+        FunctionTy: &'a Type,
+    ) -> &'a Value;
+    pub fn LLVMRustAddFunctionAttributes<'a>(
+        Fn: &'a Value,
+        index: c_uint,
+        Attrs: *const &'a Attribute,
+        AttrsLen: size_t,
+    );
+
+    // Operations on call sites
+    pub fn LLVMRustAddCallSiteAttributes<'a>(
+        Instr: &'a Value,
+        index: c_uint,
+        Attrs: *const &'a Attribute,
+        AttrsLen: size_t,
+    );
+
+    pub fn LLVMRustBuildInvoke<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Then: &'a BasicBlock,
+        Catch: &'a BasicBlock,
+        OpBundles: *const &OperandBundleDef<'a>,
+        NumOpBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    pub fn LLVMRustBuildCallBr<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        DefaultDest: &'a BasicBlock,
+        IndirectDests: *const &'a BasicBlock,
+        NumIndirectDests: c_uint,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        OpBundles: *const &OperandBundleDef<'a>,
+        NumOpBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+
+    pub fn LLVMRustSetFastMath(Instr: &Value);
+    pub fn LLVMRustSetAlgebraicMath(Instr: &Value);
+    pub fn LLVMRustSetAllowReassoc(Instr: &Value);
+
+    // Miscellaneous instructions
+    pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value;
+    pub fn LLVMRustBuildCall<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        OpBundles: *const &OperandBundleDef<'a>,
+        NumOpBundles: c_uint,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildMemCpy<'a>(
+        B: &Builder<'a>,
+        Dst: &'a Value,
+        DstAlign: c_uint,
+        Src: &'a Value,
+        SrcAlign: c_uint,
+        Size: &'a Value,
+        IsVolatile: bool,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildMemMove<'a>(
+        B: &Builder<'a>,
+        Dst: &'a Value,
+        DstAlign: c_uint,
+        Src: &'a Value,
+        SrcAlign: c_uint,
+        Size: &'a Value,
+        IsVolatile: bool,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildMemSet<'a>(
+        B: &Builder<'a>,
+        Dst: &'a Value,
+        DstAlign: c_uint,
+        Val: &'a Value,
+        Size: &'a Value,
+        IsVolatile: bool,
+    ) -> &'a Value;
+
+    pub fn LLVMRustBuildVectorReduceFAdd<'a>(
+        B: &Builder<'a>,
+        Acc: &'a Value,
+        Src: &'a Value,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceFMul<'a>(
+        B: &Builder<'a>,
+        Acc: &'a Value,
+        Src: &'a Value,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceAdd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceMul<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceAnd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceOr<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceXor<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceMin<'a>(
+        B: &Builder<'a>,
+        Src: &'a Value,
+        IsSigned: bool,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceMax<'a>(
+        B: &Builder<'a>,
+        Src: &'a Value,
+        IsSigned: bool,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceFMin<'a>(
+        B: &Builder<'a>,
+        Src: &'a Value,
+        IsNaN: bool,
+    ) -> &'a Value;
+    pub fn LLVMRustBuildVectorReduceFMax<'a>(
+        B: &Builder<'a>,
+        Src: &'a Value,
+        IsNaN: bool,
+    ) -> &'a Value;
+
+    pub fn LLVMRustBuildMinNum<'a>(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildMaxNum<'a>(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+
+    // Atomic Operations
+    pub fn LLVMRustBuildAtomicLoad<'a>(
+        B: &Builder<'a>,
+        ElementType: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+        Order: AtomicOrdering,
+    ) -> &'a Value;
+
+    pub fn LLVMRustBuildAtomicStore<'a>(
+        B: &Builder<'a>,
+        Val: &'a Value,
+        Ptr: &'a Value,
+        Order: AtomicOrdering,
+    ) -> &'a Value;
+
+    pub fn LLVMRustTimeTraceProfilerInitialize();
+
+    pub fn LLVMRustTimeTraceProfilerFinishThread();
+
+    pub fn LLVMRustTimeTraceProfilerFinish(FileName: *const c_char);
+
+    /// Returns a string describing the last error caused by an LLVMRust* call.
+    pub fn LLVMRustGetLastError() -> *const c_char;
+
+    /// Print the pass timings since static dtors aren't picking them up.
+    pub fn LLVMRustPrintPassTimings(size: *const size_t) -> *const c_char;
+
+    /// Print the statistics since static dtors aren't picking them up.
+    pub fn LLVMRustPrintStatistics(size: *const size_t) -> *const c_char;
+
+    /// Prepares inline assembly.
+    pub fn LLVMRustInlineAsm(
+        Ty: &Type,
+        AsmString: *const c_char,
+        AsmStringLen: size_t,
+        Constraints: *const c_char,
+        ConstraintsLen: size_t,
+        SideEffects: Bool,
+        AlignStack: Bool,
+        Dialect: AsmDialect,
+        CanThrow: Bool,
+    ) -> &Value;
+    pub fn LLVMRustInlineAsmVerify(
+        Ty: &Type,
+        Constraints: *const c_char,
+        ConstraintsLen: size_t,
+    ) -> bool;
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
+        Filenames: *const *const c_char,
+        FilenamesLen: size_t,
+        Lengths: *const size_t,
+        LengthsLen: size_t,
+        BufferOut: &RustString,
+    );
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustCoverageWriteMappingToBuffer(
+        VirtualFileMappingIDs: *const c_uint,
+        NumVirtualFileMappingIDs: c_uint,
+        Expressions: *const crate::coverageinfo::ffi::CounterExpression,
+        NumExpressions: c_uint,
+        MappingRegions: *const crate::coverageinfo::ffi::CounterMappingRegion,
+        NumMappingRegions: c_uint,
+        BufferOut: &RustString,
+    );
+
+    pub fn LLVMRustCoverageCreatePGOFuncNameVar(
+        F: &Value,
+        FuncName: *const c_char,
+        FuncNameLen: size_t,
+    ) -> &Value;
+    pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString);
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString);
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
+
+    pub fn LLVMRustCoverageMappingVersion() -> u32;
+    pub fn LLVMRustDebugMetadataVersion() -> u32;
+    pub fn LLVMRustVersionMajor() -> u32;
+    pub fn LLVMRustVersionMinor() -> u32;
+    pub fn LLVMRustVersionPatch() -> u32;
+
+    /// Add LLVM module flags.
+    ///
+    /// In order for Rust-C LTO to work, module flags must be compatible with Clang. What
+    /// "compatible" means depends on the merge behaviors involved.
+    pub fn LLVMRustAddModuleFlag(
+        M: &Module,
+        merge_behavior: LLVMModFlagBehavior,
+        name: *const c_char,
+        value: u32,
+    );
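+    // A typical call shape (a minimal sketch; `llmod`, the flag name, the value and
+    // the merge behavior are all illustrative, and the name must be NUL-terminated):
+    //
+    //     unsafe {
+    //         LLVMRustAddModuleFlag(
+    //             llmod,
+    //             LLVMModFlagBehavior::Warning,
+    //             "Dwarf Version\0".as_ptr().cast(),
+    //             4,
+    //         );
+    //     }
+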
+    pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
+
+    pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
+
+    pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>);
+
+    pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder<'_>);
+
+    pub fn LLVMRustDIBuilderCreateCompileUnit<'a>(
+        Builder: &DIBuilder<'a>,
+        Lang: c_uint,
+        File: &'a DIFile,
+        Producer: *const c_char,
+        ProducerLen: size_t,
+        isOptimized: bool,
+        Flags: *const c_char,
+        RuntimeVer: c_uint,
+        SplitName: *const c_char,
+        SplitNameLen: size_t,
+        kind: DebugEmissionKind,
+        DWOId: u64,
+        SplitDebugInlining: bool,
+        DebugNameTableKind: DebugNameTableKind,
+    ) -> &'a DIDescriptor;
+
+    pub fn LLVMRustDIBuilderCreateFile<'a>(
+        Builder: &DIBuilder<'a>,
+        Filename: *const c_char,
+        FilenameLen: size_t,
+        Directory: *const c_char,
+        DirectoryLen: size_t,
+        CSKind: ChecksumKind,
+        Checksum: *const c_char,
+        ChecksumLen: size_t,
+    ) -> &'a DIFile;
+
+    pub fn LLVMRustDIBuilderCreateSubroutineType<'a>(
+        Builder: &DIBuilder<'a>,
+        ParameterTypes: &'a DIArray,
+    ) -> &'a DICompositeType;
+
+    pub fn LLVMRustDIBuilderCreateFunction<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        LinkageName: *const c_char,
+        LinkageNameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        ScopeLine: c_uint,
+        Flags: DIFlags,
+        SPFlags: DISPFlags,
+        MaybeFn: Option<&'a Value>,
+        TParam: &'a DIArray,
+        Decl: Option<&'a DIDescriptor>,
+    ) -> &'a DISubprogram;
+
+    pub fn LLVMRustDIBuilderCreateMethod<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        LinkageName: *const c_char,
+        LinkageNameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        Flags: DIFlags,
+        SPFlags: DISPFlags,
+        TParam: &'a DIArray,
+    ) -> &'a DISubprogram;
+
+    pub fn LLVMRustDIBuilderCreateBasicType<'a>(
+        Builder: &DIBuilder<'a>,
+        Name: *const c_char,
+        NameLen: size_t,
+        SizeInBits: u64,
+        Encoding: c_uint,
+    ) -> &'a DIBasicType;
+
+    pub fn LLVMRustDIBuilderCreateTypedef<'a>(
+        Builder: &DIBuilder<'a>,
+        Type: &'a DIBasicType,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Scope: Option<&'a DIScope>,
+    ) -> &'a DIDerivedType;
+
+    pub fn LLVMRustDIBuilderCreatePointerType<'a>(
+        Builder: &DIBuilder<'a>,
+        PointeeTy: &'a DIType,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        AddressSpace: c_uint,
+        Name: *const c_char,
+        NameLen: size_t,
+    ) -> &'a DIDerivedType;
+
+    pub fn LLVMRustDIBuilderCreateStructType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: Option<&'a DIDescriptor>,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNumber: c_uint,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        Flags: DIFlags,
+        DerivedFrom: Option<&'a DIType>,
+        Elements: &'a DIArray,
+        RunTimeLang: c_uint,
+        VTableHolder: Option<&'a DIType>,
+        UniqueId: *const c_char,
+        UniqueIdLen: size_t,
+    ) -> &'a DICompositeType;
+
+    pub fn LLVMRustDIBuilderCreateMemberType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        OffsetInBits: u64,
+        Flags: DIFlags,
+        Ty: &'a DIType,
+    ) -> &'a DIDerivedType;
+
+    pub fn LLVMRustDIBuilderCreateVariantMemberType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIScope,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNumber: c_uint,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        OffsetInBits: u64,
+        Discriminant: Option<&'a Value>,
+        Flags: DIFlags,
+        Ty: &'a DIType,
+    ) -> &'a DIType;
+
+    pub fn LLVMRustDIBuilderCreateStaticMemberType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        Flags: DIFlags,
+        val: Option<&'a Value>,
+        AlignInBits: u32,
+    ) -> &'a DIDerivedType;
+
+    pub fn LLVMRustDIBuilderCreateLexicalBlock<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIScope,
+        File: &'a DIFile,
+        Line: c_uint,
+        Col: c_uint,
+    ) -> &'a DILexicalBlock;
+
+    pub fn LLVMRustDIBuilderCreateLexicalBlockFile<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIScope,
+        File: &'a DIFile,
+    ) -> &'a DILexicalBlock;
+
+    pub fn LLVMRustDIBuilderCreateStaticVariable<'a>(
+        Builder: &DIBuilder<'a>,
+        Context: Option<&'a DIScope>,
+        Name: *const c_char,
+        NameLen: size_t,
+        LinkageName: *const c_char,
+        LinkageNameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        isLocalToUnit: bool,
+        Val: &'a Value,
+        Decl: Option<&'a DIDescriptor>,
+        AlignInBits: u32,
+    ) -> &'a DIGlobalVariableExpression;
+
+    pub fn LLVMRustDIBuilderCreateVariable<'a>(
+        Builder: &DIBuilder<'a>,
+        Tag: c_uint,
+        Scope: &'a DIDescriptor,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        Ty: &'a DIType,
+        AlwaysPreserve: bool,
+        Flags: DIFlags,
+        ArgNo: c_uint,
+        AlignInBits: u32,
+    ) -> &'a DIVariable;
+
+    pub fn LLVMRustDIBuilderCreateArrayType<'a>(
+        Builder: &DIBuilder<'a>,
+        Size: u64,
+        AlignInBits: u32,
+        Ty: &'a DIType,
+        Subscripts: &'a DIArray,
+    ) -> &'a DIType;
+
+    pub fn LLVMRustDIBuilderGetOrCreateSubrange<'a>(
+        Builder: &DIBuilder<'a>,
+        Lo: i64,
+        Count: i64,
+    ) -> &'a DISubrange;
+
+    pub fn LLVMRustDIBuilderGetOrCreateArray<'a>(
+        Builder: &DIBuilder<'a>,
+        Ptr: *const Option<&'a DIDescriptor>,
+        Count: c_uint,
+    ) -> &'a DIArray;
+
+    pub fn LLVMRustDIBuilderInsertDeclareAtEnd<'a>(
+        Builder: &DIBuilder<'a>,
+        Val: &'a Value,
+        VarInfo: &'a DIVariable,
+        AddrOps: *const u64,
+        AddrOpsCount: c_uint,
+        DL: &'a DILocation,
+        InsertAtEnd: &'a BasicBlock,
+    ) -> &'a Value;
+
+    pub fn LLVMRustDIBuilderCreateEnumerator<'a>(
+        Builder: &DIBuilder<'a>,
+        Name: *const c_char,
+        NameLen: size_t,
+        Value: *const u64,
+        SizeInBits: c_uint,
+        IsUnsigned: bool,
+    ) -> &'a DIEnumerator;
+
+    pub fn LLVMRustDIBuilderCreateEnumerationType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIScope,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNumber: c_uint,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        Elements: &'a DIArray,
+        ClassType: &'a DIType,
+        IsScoped: bool,
+    ) -> &'a DIType;
+
+    pub fn LLVMRustDIBuilderCreateUnionType<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: Option<&'a DIScope>,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNumber: c_uint,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        Flags: DIFlags,
+        Elements: Option<&'a DIArray>,
+        RunTimeLang: c_uint,
+        UniqueId: *const c_char,
+        UniqueIdLen: size_t,
+    ) -> &'a DIType;
+
+    pub fn LLVMRustDIBuilderCreateVariantPart<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: &'a DIScope,
+        Name: *const c_char,
+        NameLen: size_t,
+        File: &'a DIFile,
+        LineNo: c_uint,
+        SizeInBits: u64,
+        AlignInBits: u32,
+        Flags: DIFlags,
+        Discriminator: Option<&'a DIDerivedType>,
+        Elements: &'a DIArray,
+        UniqueId: *const c_char,
+        UniqueIdLen: size_t,
+    ) -> &'a DIDerivedType;
+
+    pub fn LLVMRustDIBuilderCreateTemplateTypeParameter<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: Option<&'a DIScope>,
+        Name: *const c_char,
+        NameLen: size_t,
+        Ty: &'a DIType,
+    ) -> &'a DITemplateTypeParameter;
+
+    pub fn LLVMRustDIBuilderCreateNameSpace<'a>(
+        Builder: &DIBuilder<'a>,
+        Scope: Option<&'a DIScope>,
+        Name: *const c_char,
+        NameLen: size_t,
+        ExportSymbols: bool,
+    ) -> &'a DINameSpace;
+
+    pub fn LLVMRustDICompositeTypeReplaceArrays<'a>(
+        Builder: &DIBuilder<'a>,
+        CompositeType: &'a DIType,
+        Elements: Option<&'a DIArray>,
+        Params: Option<&'a DIArray>,
+    );
+
+    pub fn LLVMRustDIBuilderCreateDebugLocation<'a>(
+        Line: c_uint,
+        Column: c_uint,
+        Scope: &'a DIScope,
+        InlinedAt: Option<&'a DILocation>,
+    ) -> &'a DILocation;
+    pub fn LLVMRustDIBuilderCreateOpDeref() -> u64;
+    pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> u64;
+    pub fn LLVMRustDIBuilderCreateOpLLVMFragment() -> u64;
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString);
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString);
+
+    pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
+
+    pub fn LLVMRustPrintTargetCPUs(
+        T: &TargetMachine,
+        cpu: *const c_char,
+        print: unsafe extern "C" fn(out: *mut c_void, string: *const c_char, len: usize),
+        out: *mut c_void,
+    );
+    pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
+    pub fn LLVMRustGetTargetFeature(
+        T: &TargetMachine,
+        Index: size_t,
+        Feature: &mut *const c_char,
+        Desc: &mut *const c_char,
+    );
+
+    pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+
+    // This function makes copies of the pointed-to data, so the data's lifetime may end after this function returns.
+    pub fn LLVMRustCreateTargetMachine(
+        Triple: *const c_char,
+        CPU: *const c_char,
+        Features: *const c_char,
+        Abi: *const c_char,
+        Model: CodeModel,
+        Reloc: RelocModel,
+        Level: CodeGenOptLevel,
+        UseSoftFP: bool,
+        FunctionSections: bool,
+        DataSections: bool,
+        UniqueSectionNames: bool,
+        TrapUnreachable: bool,
+        Singlethread: bool,
+        AsmComments: bool,
+        EmitStackSizeSection: bool,
+        RelaxELFRelocations: bool,
+        UseInitArray: bool,
+        SplitDwarfFile: *const c_char,
+        OutputObjFile: *const c_char,
+        DebugInfoCompression: *const c_char,
+        UseEmulatedTls: bool,
+        ArgsCstrBuff: *const c_char,
+        ArgsCstrBuffLen: usize,
+    ) -> *mut TargetMachine;
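+    // Because this function copies everything it is pointed at, the argument
+    // buffers only need to live for the duration of the call. A minimal sketch
+    // (most arguments elided; the locals are illustrative `CString`s):
+    //
+    //     let triple = CString::new(target_triple).unwrap();
+    //     let cpu = CString::new(target_cpu).unwrap();
+    //     let tm = unsafe {
+    //         LLVMRustCreateTargetMachine(triple.as_ptr(), cpu.as_ptr(), /* ... */)
+    //     };
+    //     // `triple` and `cpu` can be dropped here; check `tm` for null before use.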
+
+    pub fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine);
+    pub fn LLVMRustAddLibraryInfo<'a>(
+        PM: &PassManager<'a>,
+        M: &'a Module,
+        DisableSimplifyLibCalls: bool,
+    );
+    pub fn LLVMRustWriteOutputFile<'a>(
+        T: &'a TargetMachine,
+        PM: &PassManager<'a>,
+        M: &'a Module,
+        Output: *const c_char,
+        DwoOutput: *const c_char,
+        FileType: FileType,
+    ) -> LLVMRustResult;
+    pub fn LLVMRustOptimize<'a>(
+        M: &'a Module,
+        TM: &'a TargetMachine,
+        OptLevel: PassBuilderOptLevel,
+        OptStage: OptStage,
+        IsLinkerPluginLTO: bool,
+        NoPrepopulatePasses: bool,
+        VerifyIR: bool,
+        UseThinLTOBuffers: bool,
+        MergeFunctions: bool,
+        UnrollLoops: bool,
+        SLPVectorize: bool,
+        LoopVectorize: bool,
+        DisableSimplifyLibCalls: bool,
+        EmitLifetimeMarkers: bool,
+        SanitizerOptions: Option<&SanitizerOptions>,
+        PGOGenPath: *const c_char,
+        PGOUsePath: *const c_char,
+        InstrumentCoverage: bool,
+        InstrProfileOutput: *const c_char,
+        InstrumentGCOV: bool,
+        PGOSampleUsePath: *const c_char,
+        DebugInfoForProfiling: bool,
+        llvm_selfprofiler: *mut c_void,
+        begin_callback: SelfProfileBeforePassCallback,
+        end_callback: SelfProfileAfterPassCallback,
+        ExtraPasses: *const c_char,
+        ExtraPassesLen: size_t,
+        LLVMPlugins: *const c_char,
+        LLVMPluginsLen: size_t,
+    ) -> LLVMRustResult;
+    pub fn LLVMRustPrintModule(
+        M: &Module,
+        Output: *const c_char,
+        Demangle: extern "C" fn(*const c_char, size_t, *mut c_char, size_t) -> size_t,
+    ) -> LLVMRustResult;
+    pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
+    pub fn LLVMRustPrintPasses();
+    pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
+    pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
+
+    pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
+    pub fn LLVMRustArchiveIteratorNew(AR: &Archive) -> &mut ArchiveIterator<'_>;
+    pub fn LLVMRustArchiveIteratorNext<'a>(
+        AIR: &ArchiveIterator<'a>,
+    ) -> Option<&'a mut ArchiveChild<'a>>;
+    pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+    pub fn LLVMRustArchiveChildFree<'a>(ACR: &'a mut ArchiveChild<'a>);
+    pub fn LLVMRustArchiveIteratorFree<'a>(AIR: &'a mut ArchiveIterator<'a>);
+    pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString);
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustUnpackOptimizationDiagnostic<'a>(
+        DI: &'a DiagnosticInfo,
+        pass_name_out: &RustString,
+        function_out: &mut Option<&'a Value>,
+        loc_line_out: &mut c_uint,
+        loc_column_out: &mut c_uint,
+        loc_filename_out: &RustString,
+        message_out: &RustString,
+    );
+
+    pub fn LLVMRustUnpackInlineAsmDiagnostic<'a>(
+        DI: &'a DiagnosticInfo,
+        level_out: &mut DiagnosticLevel,
+        cookie_out: &mut u64,
+        message_out: &mut Option<&'a Twine>,
+    );
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString);
+    pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind;
+
+    pub fn LLVMRustGetSMDiagnostic<'a>(
+        DI: &'a DiagnosticInfo,
+        cookie_out: &mut c_uint,
+    ) -> &'a SMDiagnostic;
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustUnpackSMDiagnostic(
+        d: &SMDiagnostic,
+        message_out: &RustString,
+        buffer_out: &RustString,
+        level_out: &mut DiagnosticLevel,
+        loc_out: &mut c_uint,
+        ranges_out: *mut c_uint,
+        num_ranges: &mut usize,
+    ) -> bool;
+
+    pub fn LLVMRustWriteArchive(
+        Dst: *const c_char,
+        NumMembers: size_t,
+        Members: *const &RustArchiveMember<'_>,
+        WriteSymbtab: bool,
+        Kind: ArchiveKind,
+    ) -> LLVMRustResult;
+    pub fn LLVMRustArchiveMemberNew<'a>(
+        Filename: *const c_char,
+        Name: *const c_char,
+        Child: Option<&ArchiveChild<'a>>,
+    ) -> &'a mut RustArchiveMember<'a>;
+    pub fn LLVMRustArchiveMemberFree<'a>(Member: &'a mut RustArchiveMember<'a>);
+
+    pub fn LLVMRustWriteImportLibrary(
+        ImportName: *const c_char,
+        Path: *const c_char,
+        Exports: *const LLVMRustCOFFShortExport,
+        NumExports: usize,
+        Machine: u16,
+        MinGW: bool,
+    ) -> LLVMRustResult;
+
+    pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
+
+    pub fn LLVMRustBuildOperandBundleDef(
+        Name: *const c_char,
+        Inputs: *const &'_ Value,
+        NumInputs: c_uint,
+    ) -> &mut OperandBundleDef<'_>;
+    pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
+
+    pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
+
+    pub fn LLVMRustSetComdat<'a>(M: &'a Module, V: &'a Value, Name: *const c_char, NameLen: size_t);
+    pub fn LLVMRustSetModulePICLevel(M: &Module);
+    pub fn LLVMRustSetModulePIELevel(M: &Module);
+    pub fn LLVMRustSetModuleCodeModel(M: &Module, Model: CodeModel);
+    pub fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer;
+    pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8;
+    pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
+    pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
+    pub fn LLVMRustModuleCost(M: &Module) -> u64;
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustModuleInstructionStats(M: &Module, Str: &RustString);
+
+    pub fn LLVMRustThinLTOBufferCreate(M: &Module, is_thin: bool) -> &'static mut ThinLTOBuffer;
+    pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
+    pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char;
+    pub fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t;
+    pub fn LLVMRustCreateThinLTOData(
+        Modules: *const ThinLTOModule,
+        NumModules: c_uint,
+        PreservedSymbols: *const *const c_char,
+        PreservedSymbolsLen: c_uint,
+    ) -> Option<&'static mut ThinLTOData>;
+    pub fn LLVMRustPrepareThinLTORename(
+        Data: &ThinLTOData,
+        Module: &Module,
+        Target: &TargetMachine,
+    ) -> bool;
+    pub fn LLVMRustPrepareThinLTOResolveWeak(Data: &ThinLTOData, Module: &Module) -> bool;
+    pub fn LLVMRustPrepareThinLTOInternalize(Data: &ThinLTOData, Module: &Module) -> bool;
+    pub fn LLVMRustPrepareThinLTOImport(
+        Data: &ThinLTOData,
+        Module: &Module,
+        Target: &TargetMachine,
+    ) -> bool;
+    pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData);
+    pub fn LLVMRustParseBitcodeForLTO(
+        Context: &Context,
+        Data: *const u8,
+        len: usize,
+        Identifier: *const c_char,
+    ) -> Option<&Module>;
+    pub fn LLVMRustGetSliceFromObjectDataByName(
+        data: *const u8,
+        len: usize,
+        name: *const u8,
+        out_len: &mut usize,
+    ) -> *const u8;
+
+    pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
+    pub fn LLVMRustLinkerAdd(
+        linker: &Linker<'_>,
+        bytecode: *const c_char,
+        bytecode_len: usize,
+    ) -> bool;
+    pub fn LLVMRustLinkerFree<'a>(linker: &'a mut Linker<'a>);
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustComputeLTOCacheKey(
+        key_out: &RustString,
+        mod_id: *const c_char,
+        data: &ThinLTOData,
+    );
+
+    pub fn LLVMRustContextGetDiagnosticHandler(Context: &Context) -> Option<&DiagnosticHandler>;
+    pub fn LLVMRustContextSetDiagnosticHandler(
+        context: &Context,
+        diagnostic_handler: Option<&DiagnosticHandler>,
+    );
+    pub fn LLVMRustContextConfigureDiagnosticHandler(
+        context: &Context,
+        diagnostic_handler_callback: DiagnosticHandlerTy,
+        diagnostic_handler_context: *mut c_void,
+        remark_all_passes: bool,
+        remark_passes: *const *const c_char,
+        remark_passes_len: usize,
+        remark_file: *const c_char,
+        pgo_available: bool,
+    );
+
+    #[allow(improper_ctypes)]
+    pub fn LLVMRustGetMangledName(V: &Value, out: &RustString);
+
+    pub fn LLVMRustGetElementTypeArgIndex(CallSite: &Value) -> i32;
+
+    pub fn LLVMRustIsBitcode(ptr: *const u8, len: usize) -> bool;
+
+    pub fn LLVMRustLLVMHasZlibCompressionForDebugSymbols() -> bool;
+
+    pub fn LLVMRustLLVMHasZstdCompressionForDebugSymbols() -> bool;
+
+    pub fn LLVMRustGetSymbols(
+        buf_ptr: *const u8,
+        buf_len: usize,
+        state: *mut c_void,
+        callback: GetSymbolsCallback,
+        error_callback: GetSymbolsErrorCallback,
+    ) -> *mut c_void;
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
new file mode 100644
index 00000000000..4f5cc575da6
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -0,0 +1,326 @@
+#![allow(non_snake_case)]
+
+pub use self::AtomicRmwBinOp::*;
+pub use self::CallConv::*;
+pub use self::CodeGenOptSize::*;
+pub use self::IntPredicate::*;
+pub use self::Linkage::*;
+pub use self::MetadataType::*;
+pub use self::RealPredicate::*;
+
+use libc::c_uint;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_llvm::RustString;
+use std::cell::RefCell;
+use std::ffi::{CStr, CString};
+use std::str::FromStr;
+use std::string::FromUtf8Error;
+
+pub mod archive_ro;
+pub mod diagnostic;
+mod ffi;
+
+pub use self::ffi::*;
+
+impl LLVMRustResult {
+    pub fn into_result(self) -> Result<(), ()> {
+        match self {
+            LLVMRustResult::Success => Ok(()),
+            LLVMRustResult::Failure => Err(()),
+        }
+    }
+}
+
+pub fn AddFunctionAttributes<'ll>(llfn: &'ll Value, idx: AttributePlace, attrs: &[&'ll Attribute]) {
+    unsafe {
+        LLVMRustAddFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
+    }
+}
+
+pub fn AddCallSiteAttributes<'ll>(
+    callsite: &'ll Value,
+    idx: AttributePlace,
+    attrs: &[&'ll Attribute],
+) {
+    unsafe {
+        LLVMRustAddCallSiteAttributes(callsite, idx.as_uint(), attrs.as_ptr(), attrs.len());
+    }
+}
+
+pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &str, value: &str) -> &'ll Attribute {
+    unsafe {
+        LLVMCreateStringAttribute(
+            llcx,
+            attr.as_ptr().cast(),
+            attr.len().try_into().unwrap(),
+            value.as_ptr().cast(),
+            value.len().try_into().unwrap(),
+        )
+    }
+}
+
+pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute {
+    unsafe {
+        LLVMCreateStringAttribute(
+            llcx,
+            attr.as_ptr().cast(),
+            attr.len().try_into().unwrap(),
+            std::ptr::null(),
+            0,
+        )
+    }
+}
+
+pub fn CreateAlignmentAttr(llcx: &Context, bytes: u64) -> &Attribute {
+    unsafe { LLVMRustCreateAlignmentAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableAttr(llcx: &Context, bytes: u64) -> &Attribute {
+    unsafe { LLVMRustCreateDereferenceableAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableOrNullAttr(llcx: &Context, bytes: u64) -> &Attribute {
+    unsafe { LLVMRustCreateDereferenceableOrNullAttr(llcx, bytes) }
+}
+
+pub fn CreateByValAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+    unsafe { LLVMRustCreateByValAttr(llcx, ty) }
+}
+
+pub fn CreateStructRetAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+    unsafe { LLVMRustCreateStructRetAttr(llcx, ty) }
+}
+
+pub fn CreateUWTableAttr(llcx: &Context, async_: bool) -> &Attribute {
+    unsafe { LLVMRustCreateUWTableAttr(llcx, async_) }
+}
+
+pub fn CreateAllocSizeAttr(llcx: &Context, size_arg: u32) -> &Attribute {
+    unsafe { LLVMRustCreateAllocSizeAttr(llcx, size_arg) }
+}
+
+pub fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> &Attribute {
+    unsafe { LLVMRustCreateAllocKindAttr(llcx, kind_arg.bits()) }
+}
+
+#[derive(Copy, Clone)]
+pub enum AttributePlace {
+    ReturnValue,
+    Argument(u32),
+    Function,
+}
+
+impl AttributePlace {
+    pub fn as_uint(self) -> c_uint {
+        match self {
+            AttributePlace::ReturnValue => 0,
+            AttributePlace::Argument(i) => 1 + i,
+            AttributePlace::Function => !0,
+        }
+    }
+}
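+// These values follow LLVM's attribute-index convention: 0 addresses the return
+// value, argument `i` is index `i + 1`, and `!0` (LLVMAttributeFunctionIndex)
+// addresses the function itself.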
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptSize {
+    CodeGenOptSizeNone = 0,
+    CodeGenOptSizeDefault = 1,
+    CodeGenOptSizeAggressive = 2,
+}
+
+impl FromStr for ArchiveKind {
+    type Err = ();
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "gnu" => Ok(ArchiveKind::K_GNU),
+            "bsd" => Ok(ArchiveKind::K_BSD),
+            "darwin" => Ok(ArchiveKind::K_DARWIN),
+            "coff" => Ok(ArchiveKind::K_COFF),
+            "aix_big" => Ok(ArchiveKind::K_AIXBIG),
+            _ => Err(()),
+        }
+    }
+}
+
+pub fn SetInstructionCallConv(instr: &Value, cc: CallConv) {
+    unsafe {
+        LLVMSetInstructionCallConv(instr, cc as c_uint);
+    }
+}
+pub fn SetFunctionCallConv(fn_: &Value, cc: CallConv) {
+    unsafe {
+        LLVMSetFunctionCallConv(fn_, cc as c_uint);
+    }
+}
+
+// Externally visible symbols that might appear in multiple codegen units need to appear in
+// their own comdat section so that the duplicates can be discarded at link time. This can for
+// example happen for generics when using multiple codegen units. This function simply uses the
+// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
+// function.
+// For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52
+pub fn SetUniqueComdat(llmod: &Module, val: &Value) {
+    unsafe {
+        let name = get_value_name(val);
+        LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
+    }
+}
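+// Usage sketch: `predefine_fn` in `mono_item.rs` does
+//     llvm::SetUniqueComdat(self.llmod, lldecl);
+// for `LinkOnceODR` / `WeakODR` items so the linker can deduplicate the copies.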
+
+pub fn SetUnnamedAddress(global: &Value, unnamed: UnnamedAddr) {
+    unsafe {
+        LLVMSetUnnamedAddress(global, unnamed);
+    }
+}
+
+pub fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) {
+    unsafe {
+        LLVMSetThreadLocalMode(global, mode);
+    }
+}
+
+impl AttributeKind {
+    /// Create an LLVM Attribute with no associated value.
+    pub fn create_attr(self, llcx: &Context) -> &Attribute {
+        unsafe { LLVMRustCreateAttrNoValue(llcx, self) }
+    }
+}
+
+impl MemoryEffects {
+    /// Create an LLVM Attribute with these memory effects.
+    pub fn create_attr(self, llcx: &Context) -> &Attribute {
+        unsafe { LLVMRustCreateMemoryEffectsAttr(llcx, self) }
+    }
+}
+
+pub fn set_section(llglobal: &Value, section_name: &str) {
+    let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
+    unsafe {
+        LLVMSetSection(llglobal, section_name_cstr.as_ptr());
+    }
+}
+
+pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
+    let name_cstr = CString::new(name).expect("unexpected CString error");
+    unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
+}
+
+pub fn set_initializer(llglobal: &Value, constant_val: &Value) {
+    unsafe {
+        LLVMSetInitializer(llglobal, constant_val);
+    }
+}
+
+pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
+    unsafe {
+        LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
+    }
+}
+
+pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
+    unsafe {
+        LLVMRustSetLinkage(llglobal, linkage);
+    }
+}
+
+pub fn set_visibility(llglobal: &Value, visibility: Visibility) {
+    unsafe {
+        LLVMRustSetVisibility(llglobal, visibility);
+    }
+}
+
+pub fn set_alignment(llglobal: &Value, bytes: usize) {
+    unsafe {
+        ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
+    }
+}
+
+pub fn set_comdat(llmod: &Module, llglobal: &Value, name: &str) {
+    unsafe {
+        LLVMRustSetComdat(llmod, llglobal, name.as_ptr().cast(), name.len());
+    }
+}
+
+/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
+pub fn get_param(llfn: &Value, index: c_uint) -> &Value {
+    unsafe {
+        assert!(
+            index < LLVMCountParams(llfn),
+            "out of bounds argument access: {} out of {} arguments",
+            index,
+            LLVMCountParams(llfn)
+        );
+        LLVMGetParam(llfn, index)
+    }
+}
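+// Example (sketch; `llfn` stands for a declared function value):
+//     let arg0 = llvm::get_param(llfn, 0);
+// An out-of-range index trips the assertion above instead of segfaulting.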
+
+/// Safe wrapper for `LLVMGetValueName2` into a byte slice
+pub fn get_value_name(value: &Value) -> &[u8] {
+    unsafe {
+        let mut len = 0;
+        let data = LLVMGetValueName2(value, &mut len);
+        std::slice::from_raw_parts(data.cast(), len)
+    }
+}
+
+/// Safe wrapper for `LLVMSetValueName2` from a byte slice
+pub fn set_value_name(value: &Value, name: &[u8]) {
+    unsafe {
+        let data = name.as_ptr().cast();
+        LLVMSetValueName2(value, data, name.len());
+    }
+}
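+// Example (sketch; `src` and `dst` stand for two existing values):
+//     let name = llvm::get_value_name(src);
+//     llvm::set_value_name(dst, name);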
+
+pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
+    let sr = RustString { bytes: RefCell::new(Vec::new()) };
+    f(&sr);
+    String::from_utf8(sr.bytes.into_inner())
+}
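+// Example (sketch): capturing LLVM's textual output through the `RustString`
+// callback, e.g. with the mangled-name helper declared in `ffi.rs`:
+//     let name = llvm::build_string(|s| unsafe { llvm::LLVMRustGetMangledName(llfn, s) })
+//         .expect("non-UTF8 mangled name from LLVM");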
+
+pub fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
+    let sr = RustString { bytes: RefCell::new(Vec::new()) };
+    f(&sr);
+    sr.bytes.into_inner()
+}
+
+pub fn twine_to_string(tr: &Twine) -> String {
+    unsafe {
+        build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
+    }
+}
+
+pub fn last_error() -> Option<String> {
+    unsafe {
+        let cstr = LLVMRustGetLastError();
+        if cstr.is_null() {
+            None
+        } else {
+            let err = CStr::from_ptr(cstr).to_bytes();
+            let err = String::from_utf8_lossy(err).to_string();
+            libc::free(cstr as *mut _);
+            Some(err)
+        }
+    }
+}
+
+pub struct OperandBundleDef<'a> {
+    pub raw: &'a mut ffi::OperandBundleDef<'a>,
+}
+
+impl<'a> OperandBundleDef<'a> {
+    pub fn new(name: &str, vals: &[&'a Value]) -> Self {
+        let name = SmallCStr::new(name);
+        let def = unsafe {
+            LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
+        };
+        OperandBundleDef { raw: def }
+    }
+}
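+// Example (sketch; `cp` stands for a cleanup-pad token value): builders create
+// funclet bundles as
+//     let bundle = OperandBundleDef::new("funclet", &[cp]);
+// and the raw LLVM-side object is released by the `Drop` impl below.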
+
+impl Drop for OperandBundleDef<'_> {
+    fn drop(&mut self) {
+        unsafe {
+            LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
new file mode 100644
index 00000000000..c9e62e504ae
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -0,0 +1,647 @@
+use crate::back::write::create_informational_target_machine;
+use crate::errors::{
+    InvalidTargetFeaturePrefix, PossibleFeature, TargetFeatureDisableOrEnable,
+    UnknownCTargetFeature, UnknownCTargetFeaturePrefix, UnstableCTargetFeature,
+};
+use crate::llvm;
+use libc::c_int;
+use rustc_codegen_ssa::base::wants_wasm_eh;
+use rustc_codegen_ssa::traits::PrintBackendInfo;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_fs_util::path_to_c_string;
+use rustc_middle::bug;
+use rustc_session::config::{PrintKind, PrintRequest};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::{MergeFunctions, PanicStrategy};
+use rustc_target::target_features::RUSTC_SPECIFIC_FEATURES;
+
+use std::ffi::{c_char, c_void, CStr, CString};
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::str;
+use std::sync::Once;
+
+static INIT: Once = Once::new();
+
+pub(crate) fn init(sess: &Session) {
+    unsafe {
+        // Before we touch LLVM, make sure that multithreading is enabled.
+        if llvm::LLVMIsMultithreaded() != 1 {
+            bug!("LLVM compiled without support for threads");
+        }
+        INIT.call_once(|| {
+            configure_llvm(sess);
+        });
+    }
+}
+
+fn require_inited() {
+    if !INIT.is_completed() {
+        bug!("LLVM is not initialized");
+    }
+}
+
+unsafe fn configure_llvm(sess: &Session) {
+    let n_args = sess.opts.cg.llvm_args.len() + sess.target.llvm_args.len();
+    let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
+    let mut llvm_args = Vec::with_capacity(n_args + 1);
+
+    llvm::LLVMRustInstallErrorHandlers();
+    // On Windows, an LLVM assertion will open an Abort/Retry/Ignore dialog
+    // box for the purpose of launching a debugger. However, on CI this will
+    // cause it to hang until it times out, which can take several hours.
+    if std::env::var_os("CI").is_some() {
+        llvm::LLVMRustDisableSystemDialogsOnCrash();
+    }
+
+    fn llvm_arg_to_arg_name(full_arg: &str) -> &str {
+        full_arg.trim().split(|c: char| c == '=' || c.is_whitespace()).next().unwrap_or("")
+    }
+
+    let cg_opts = sess.opts.cg.llvm_args.iter().map(AsRef::as_ref);
+    let tg_opts = sess.target.llvm_args.iter().map(AsRef::as_ref);
+    let sess_args = cg_opts.chain(tg_opts);
+
+    let user_specified_args: FxHashSet<_> =
+        sess_args.clone().map(|s| llvm_arg_to_arg_name(s)).filter(|s| !s.is_empty()).collect();
+
+    {
+        // This adds the given argument to LLVM. Unless `force` is true
+        // user specified arguments are *not* overridden.
+        let mut add = |arg: &str, force: bool| {
+            if force || !user_specified_args.contains(llvm_arg_to_arg_name(arg)) {
+                let s = CString::new(arg).unwrap();
+                llvm_args.push(s.as_ptr());
+                llvm_c_strs.push(s);
+            }
+        };
+        // Set the llvm "program name" to make usage and invalid argument messages more clear.
+        add("rustc -Cllvm-args=\"...\" with", true);
+        if sess.opts.unstable_opts.time_llvm_passes {
+            add("-time-passes", false);
+        }
+        if sess.opts.unstable_opts.print_llvm_passes {
+            add("-debug-pass=Structure", false);
+        }
+        if sess.target.generate_arange_section
+            && !sess.opts.unstable_opts.no_generate_arange_section
+        {
+            add("-generate-arange-section", false);
+        }
+
+        match sess.opts.unstable_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
+            MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
+            MergeFunctions::Aliases => {
+                add("-mergefunc-use-aliases", false);
+            }
+        }
+
+        if wants_wasm_eh(sess) {
+            add("-wasm-enable-eh", false);
+        }
+
+        if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
+            add("-enable-emscripten-cxx-exceptions", false);
+        }
+
+        // HACK(eddyb) LLVM inserts `llvm.assume` calls to preserve align attributes
+        // during inlining. Unfortunately these may block other optimizations.
+        add("-preserve-alignment-assumptions-during-inlining=false", false);
+
+        // Use non-zero `import-instr-limit` multiplier for cold callsites.
+        add("-import-cold-multiplier=0.1", false);
+
+        if sess.print_llvm_stats() {
+            add("-stats", false);
+        }
+
+        for arg in sess_args {
+            add(&(*arg), true);
+        }
+    }
+
+    if sess.opts.unstable_opts.llvm_time_trace {
+        llvm::LLVMRustTimeTraceProfilerInitialize();
+    }
+
+    rustc_llvm::initialize_available_targets();
+
+    llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
+}
+
+pub fn time_trace_profiler_finish(file_name: &Path) {
+    unsafe {
+        let file_name = path_to_c_string(file_name);
+        llvm::LLVMRustTimeTraceProfilerFinish(file_name.as_ptr());
+    }
+}
+
+pub enum TargetFeatureFoldStrength<'a> {
+    // The feature is only tied when enabling the feature; disabling
+    // this feature shouldn't disable the tied feature.
+    EnableOnly(&'a str),
+    // The feature is tied for both enabling and disabling this feature.
+    Both(&'a str),
+}
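+// For instance (a sketch of how the variants above are consumed further down):
+// x86's "sse4.2" is `EnableOnly("crc32")`, so `+sse4.2` also passes `+crc32` to
+// LLVM while `-sse4.2` leaves crc32 untouched; a `Both` dependency (e.g. aarch64's
+// "neon" tied to "fp-armv8") is folded for both `+` and `-`.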
+
+impl<'a> TargetFeatureFoldStrength<'a> {
+    fn as_str(&self) -> &'a str {
+        match self {
+            TargetFeatureFoldStrength::EnableOnly(feat) => feat,
+            TargetFeatureFoldStrength::Both(feat) => feat,
+        }
+    }
+}
+
+pub struct LLVMFeature<'a> {
+    pub llvm_feature_name: &'a str,
+    pub dependency: Option<TargetFeatureFoldStrength<'a>>,
+}
+
+impl<'a> LLVMFeature<'a> {
+    pub fn new(llvm_feature_name: &'a str) -> Self {
+        Self { llvm_feature_name, dependency: None }
+    }
+
+    pub fn with_dependency(
+        llvm_feature_name: &'a str,
+        dependency: TargetFeatureFoldStrength<'a>,
+    ) -> Self {
+        Self { llvm_feature_name, dependency: Some(dependency) }
+    }
+
+    pub fn contains(&self, feat: &str) -> bool {
+        self.iter().any(|dep| dep == feat)
+    }
+
+    pub fn iter(&'a self) -> impl Iterator<Item = &'a str> {
+        let dependencies = self.dependency.iter().map(|feat| feat.as_str());
+        std::iter::once(self.llvm_feature_name).chain(dependencies)
+    }
+}
+
+impl<'a> IntoIterator for LLVMFeature<'a> {
+    type Item = &'a str;
+    type IntoIter = impl Iterator<Item = &'a str>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        let dependencies = self.dependency.into_iter().map(|feat| feat.as_str());
+        std::iter::once(self.llvm_feature_name).chain(dependencies)
+    }
+}
+
+// WARNING: the features after applying `to_llvm_features` must be known
+// to LLVM or the feature detection code will walk past the end of the feature
+// array, leading to crashes.
+//
+// To find a list of LLVM's names, see llvm-project/llvm/lib/Target/{ARCH}/*.td
+// where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`.
+//
+// Check the current rustc fork of LLVM in the repo at https://github.com/rust-lang/llvm-project/.
+// The commit in use can be found via the `llvm-project` submodule in https://github.com/rust-lang/rust/tree/master/src
+// Note, though, that Rust can also be built with an external precompiled version of LLVM,
+// which might lead to failures if the oldest tested / supported LLVM version
+// doesn't yet support the relevant intrinsics.
+pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> {
+    let arch = if sess.target.arch == "x86_64" {
+        "x86"
+    } else if sess.target.arch == "arm64ec" {
+        "aarch64"
+    } else {
+        &*sess.target.arch
+    };
+    match (arch, s) {
+        ("x86", "sse4.2") => {
+            LLVMFeature::with_dependency("sse4.2", TargetFeatureFoldStrength::EnableOnly("crc32"))
+        }
+        ("x86", "pclmulqdq") => LLVMFeature::new("pclmul"),
+        ("x86", "rdrand") => LLVMFeature::new("rdrnd"),
+        ("x86", "bmi1") => LLVMFeature::new("bmi"),
+        ("x86", "cmpxchg16b") => LLVMFeature::new("cx16"),
+        ("x86", "lahfsahf") => LLVMFeature::new("sahf"),
+        ("aarch64", "rcpc2") => LLVMFeature::new("rcpc-immo"),
+        ("aarch64", "dpb") => LLVMFeature::new("ccpp"),
+        ("aarch64", "dpb2") => LLVMFeature::new("ccdp"),
+        ("aarch64", "frintts") => LLVMFeature::new("fptoint"),
+        ("aarch64", "fcma") => LLVMFeature::new("complxnum"),
+        ("aarch64", "pmuv3") => LLVMFeature::new("perfmon"),
+        ("aarch64", "paca") => LLVMFeature::new("pauth"),
+        ("aarch64", "pacg") => LLVMFeature::new("pauth"),
+        // Rust ties fp and neon together.
+        ("aarch64", "neon") => {
+            LLVMFeature::with_dependency("neon", TargetFeatureFoldStrength::Both("fp-armv8"))
+        }
+        // In LLVM, neon implicitly enables fp, but we manually enable
+        // neon when a feature only implicitly enables fp.
+        ("aarch64", "f32mm") => {
+            LLVMFeature::with_dependency("f32mm", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "f64mm") => {
+            LLVMFeature::with_dependency("f64mm", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "fhm") => {
+            LLVMFeature::with_dependency("fp16fml", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "fp16") => {
+            LLVMFeature::with_dependency("fullfp16", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "jsconv") => {
+            LLVMFeature::with_dependency("jsconv", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve") => {
+            LLVMFeature::with_dependency("sve", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2") => {
+            LLVMFeature::with_dependency("sve2", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-aes") => {
+            LLVMFeature::with_dependency("sve2-aes", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-sm4") => {
+            LLVMFeature::with_dependency("sve2-sm4", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-sha3") => {
+            LLVMFeature::with_dependency("sve2-sha3", TargetFeatureFoldStrength::EnableOnly("neon"))
+        }
+        ("aarch64", "sve2-bitperm") => LLVMFeature::with_dependency(
+            "sve2-bitperm",
+            TargetFeatureFoldStrength::EnableOnly("neon"),
+        ),
+        // The unaligned-scalar-mem feature was renamed to fast-unaligned-access.
+        ("riscv32" | "riscv64", "fast-unaligned-access") if get_version().0 <= 17 => {
+            LLVMFeature::new("unaligned-scalar-mem")
+        }
+        // For LLVM 18, enable the evex512 target feature if an avx512 target feature is enabled.
+        ("x86", s) if get_version().0 >= 18 && s.starts_with("avx512") => {
+            LLVMFeature::with_dependency(s, TargetFeatureFoldStrength::EnableOnly("evex512"))
+        }
+        (_, s) => LLVMFeature::new(s),
+    }
+}
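+// Example (sketch): on an x86-64 target, `to_llvm_features(sess, "pclmulqdq")`
+// yields the LLVM name "pclmul" with no dependency, while on aarch64
+// `to_llvm_features(sess, "neon")` yields "neon" tied (`Both`) to "fp-armv8".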
+
+/// Given a map from target_features to whether they are enabled or disabled,
+/// ensure only valid combinations are allowed.
+pub fn check_tied_features(
+    sess: &Session,
+    features: &FxHashMap<&str, bool>,
+) -> Option<&'static [&'static str]> {
+    if !features.is_empty() {
+        for tied in sess.target.tied_target_features() {
+            // Tied features must be set to the same value, or not set at all
+            let mut tied_iter = tied.iter();
+            let enabled = features.get(tied_iter.next().unwrap());
+            if tied_iter.any(|f| enabled != features.get(f)) {
+                return Some(tied);
+            }
+        }
+    }
+    return None;
+}
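+// Example (sketch): on aarch64, where the target spec ties `paca` and `pacg`,
+// a features map containing `paca: true` but leaving `pacg` unset makes this
+// return `Some(&["paca", "pacg"])`; setting both (or neither) returns `None`.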
+
+/// Used to generate cfg variables and apply features.
+/// Must express features in the way Rust understands them.
+pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+    let target_machine = create_informational_target_machine(sess);
+    sess.target
+        .supported_target_features()
+        .iter()
+        .filter_map(|&(feature, gate)| {
+            if sess.is_nightly_build() || allow_unstable || gate.is_stable() {
+                Some(feature)
+            } else {
+                None
+            }
+        })
+        .filter(|feature| {
+            // Check that every LLVM feature this Rust feature maps to (via `to_llvm_features`) is enabled.
+            for llvm_feature in to_llvm_features(sess, feature) {
+                let cstr = SmallCStr::new(llvm_feature);
+                if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } {
+                    return false;
+                }
+            }
+            true
+        })
+        .map(|feature| Symbol::intern(feature))
+        .collect()
+}
+
+pub fn print_version() {
+    let (major, minor, patch) = get_version();
+    println!("LLVM version: {major}.{minor}.{patch}");
+}
+
+pub fn get_version() -> (u32, u32, u32) {
+    // Can be called without initializing LLVM
+    unsafe {
+        (llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor(), llvm::LLVMRustVersionPatch())
+    }
+}
+
+pub fn print_passes() {
+    // Can be called without initializing LLVM
+    unsafe {
+        llvm::LLVMRustPrintPasses();
+    }
+}
+
+fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
+    let len = unsafe { llvm::LLVMRustGetTargetFeaturesCount(tm) };
+    let mut ret = Vec::with_capacity(len);
+    for i in 0..len {
+        unsafe {
+            let mut feature = ptr::null();
+            let mut desc = ptr::null();
+            llvm::LLVMRustGetTargetFeature(tm, i, &mut feature, &mut desc);
+            if feature.is_null() || desc.is_null() {
+                bug!("LLVM returned a `null` target feature string");
+            }
+            let feature = CStr::from_ptr(feature).to_str().unwrap_or_else(|e| {
+                bug!("LLVM returned a non-utf8 feature string: {}", e);
+            });
+            let desc = CStr::from_ptr(desc).to_str().unwrap_or_else(|e| {
+                bug!("LLVM returned a non-utf8 feature string: {}", e);
+            });
+            ret.push((feature, desc));
+        }
+    }
+    ret
+}
+
+fn print_target_features(out: &mut dyn PrintBackendInfo, sess: &Session, tm: &llvm::TargetMachine) {
+    let mut llvm_target_features = llvm_target_features(tm);
+    let mut known_llvm_target_features = FxHashSet::<&'static str>::default();
+    let mut rustc_target_features = sess
+        .target
+        .supported_target_features()
+        .iter()
+        .map(|(feature, _gate)| {
+            // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+            let llvm_feature = to_llvm_features(sess, *feature).llvm_feature_name;
+            let desc =
+                match llvm_target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok() {
+                    Some(index) => {
+                        known_llvm_target_features.insert(llvm_feature);
+                        llvm_target_features[index].1
+                    }
+                    None => "",
+                };
+
+            (*feature, desc)
+        })
+        .collect::<Vec<_>>();
+    rustc_target_features.extend_from_slice(&[(
+        "crt-static",
+        "Enables C Run-time Libraries to be statically linked",
+    )]);
+    llvm_target_features.retain(|(f, _d)| !known_llvm_target_features.contains(f));
+
+    let max_feature_len = llvm_target_features
+        .iter()
+        .chain(rustc_target_features.iter())
+        .map(|(feature, _desc)| feature.len())
+        .max()
+        .unwrap_or(0);
+
+    writeln!(out, "Features supported by rustc for this target:");
+    for (feature, desc) in &rustc_target_features {
+        writeln!(out, "    {feature:max_feature_len$} - {desc}.");
+    }
+    writeln!(out, "\nCode-generation features supported by LLVM for this target:");
+    for (feature, desc) in &llvm_target_features {
+        writeln!(out, "    {feature:max_feature_len$} - {desc}.");
+    }
+    if llvm_target_features.is_empty() {
+        writeln!(out, "    Target features listing is not supported by this LLVM version.");
+    }
+    writeln!(out, "\nUse +feature to enable a feature, or -feature to disable it.");
+    writeln!(out, "For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
+    writeln!(out, "Code-generation features cannot be used in cfg or #[target_feature],");
+    writeln!(out, "and may be renamed or removed in a future version of LLVM or rustc.\n");
+}
+
+pub(crate) fn print(req: &PrintRequest, mut out: &mut dyn PrintBackendInfo, sess: &Session) {
+    require_inited();
+    let tm = create_informational_target_machine(sess);
+    match req.kind {
+        PrintKind::TargetCPUs => {
+            // SAFETY: generate a C-compatible string from a byte slice to pass
+            // the target CPU name into LLVM; the lifetime of the reference is
+            // at least as long as the C function call.
+            let cpu_cstring = CString::new(handle_native(sess.target.cpu.as_ref()))
+                .unwrap_or_else(|e| bug!("failed to convert to cstring: {}", e));
+            unsafe extern "C" fn callback(out: *mut c_void, string: *const c_char, len: usize) {
+                let out = &mut *(out as *mut &mut dyn PrintBackendInfo);
+                let bytes = slice::from_raw_parts(string as *const u8, len);
+                write!(out, "{}", String::from_utf8_lossy(bytes));
+            }
+            unsafe {
+                llvm::LLVMRustPrintTargetCPUs(
+                    &tm,
+                    cpu_cstring.as_ptr(),
+                    callback,
+                    std::ptr::addr_of_mut!(out) as *mut c_void,
+                );
+            }
+        }
+        PrintKind::TargetFeatures => print_target_features(out, sess, &tm),
+        _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
+    }
+}
+
+fn handle_native(name: &str) -> &str {
+    if name != "native" {
+        return name;
+    }
+
+    unsafe {
+        let mut len = 0;
+        let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
+        str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
+    }
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+    match sess.opts.cg.target_cpu {
+        Some(ref name) => handle_native(name),
+        None => handle_native(sess.target.cpu.as_ref()),
+    }
+}
+
+/// The list of LLVM features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+/// `--target` and similar).
+pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<String> {
+    // Features that come earlier are overridden by conflicting features later in the string.
+    // Typically we'll want more explicit settings to override the implicit ones, so:
+    //
+    // * Features from -Ctarget-cpu=*; are overridden by [^1]
+    // * Features implied by --target; are overridden by
+    // * Features from -Ctarget-feature; are overridden by
+    // * function specific features.
+    //
+    // [^1]: target-cpu=native is handled here, other target-cpu values are handled implicitly
+    // through LLVM TargetMachine implementation.
+    //
+    // FIXME(nagisa): it isn't clear what the best interaction between features implied by
+    // `-Ctarget-cpu` and `--target` is. On one hand, you'd expect CLI arguments to always
+    // override anything that's implicit, so e.g. when there's no `--target` flag, features implied
+    // by the host target are overridden by `-Ctarget-cpu=*`. On the other hand, what about when both
+    // `--target` and `-Ctarget-cpu=*` are specified? Both then imply some target features and both
+    // flags are specified by the user on the CLI. It isn't as clear-cut which order of precedence
+    // should be taken in cases like these.
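+    //
+    // Illustrative ordering (a sketch): with `-Ctarget-cpu=native` and
+    // `-Ctarget-feature=+avx2` on the CLI, the vector built below looks roughly
+    // like `[<host CPU features>..., <target spec features>..., "+avx2"]`, so the
+    // explicit `+avx2` takes precedence over anything implied earlier.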
+    let mut features = vec![];
+
+    // -Ctarget-cpu=native
+    match sess.opts.cg.target_cpu {
+        Some(ref s) if s == "native" => {
+            let features_string = unsafe {
+                let ptr = llvm::LLVMGetHostCPUFeatures();
+                let features_string = if !ptr.is_null() {
+                    CStr::from_ptr(ptr)
+                        .to_str()
+                        .unwrap_or_else(|e| {
+                            bug!("LLVM returned a non-utf8 features string: {}", e);
+                        })
+                        .to_owned()
+                } else {
+                    bug!("could not allocate host CPU features, LLVM returned a `null` string");
+                };
+
+                llvm::LLVMDisposeMessage(ptr);
+
+                features_string
+            };
+            features.extend(features_string.split(',').map(String::from));
+        }
+        Some(_) | None => {}
+    };
+
+    // Features implied by an implicit or explicit `--target`.
+    features.extend(
+        sess.target
+            .features
+            .split(',')
+            .filter(|v| !v.is_empty() && backend_feature_name(sess, v).is_some())
+            .map(String::from),
+    );
+
+    if wants_wasm_eh(sess) && sess.panic_strategy() == PanicStrategy::Unwind {
+        features.push("+exception-handling".into());
+    }
+
+    // -Ctarget-features
+    let supported_features = sess.target.supported_target_features();
+    let mut featsmap = FxHashMap::default();
+    let feats = sess
+        .opts
+        .cg
+        .target_feature
+        .split(',')
+        .filter_map(|s| {
+            let enable_disable = match s.chars().next() {
+                None => return None,
+                Some(c @ ('+' | '-')) => c,
+                Some(_) => {
+                    if diagnostics {
+                        sess.dcx().emit_warn(UnknownCTargetFeaturePrefix { feature: s });
+                    }
+                    return None;
+                }
+            };
+
+            let feature = backend_feature_name(sess, s)?;
+            // Warn against use of LLVM specific feature names and unstable features on the CLI.
+            if diagnostics {
+                let feature_state = supported_features.iter().find(|&&(v, _)| v == feature);
+                if feature_state.is_none() {
+                    let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| {
+                        let llvm_features = to_llvm_features(sess, rust_feature);
+                        if llvm_features.contains(feature) && !llvm_features.contains(rust_feature)
+                        {
+                            Some(rust_feature)
+                        } else {
+                            None
+                        }
+                    });
+                    let unknown_feature = if let Some(rust_feature) = rust_feature {
+                        UnknownCTargetFeature {
+                            feature,
+                            rust_feature: PossibleFeature::Some { rust_feature },
+                        }
+                    } else {
+                        UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
+                    };
+                    sess.dcx().emit_warn(unknown_feature);
+                } else if feature_state
+                    .is_some_and(|(_name, feature_gate)| !feature_gate.is_stable())
+                {
+                    // An unstable feature. Warn about using it.
+                    sess.dcx().emit_warn(UnstableCTargetFeature { feature });
+                }
+            }
+
+            if diagnostics {
+                // FIXME(nagisa): figure out how to not allocate a full hashset here.
+                featsmap.insert(feature, enable_disable == '+');
+            }
+
+            // rustc-specific features do not get passed down to LLVM…
+            if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+                return None;
+            }
+            // ... otherwise though we run through `to_llvm_features` when
+            // passing requests down to LLVM. This means that all in-language
+            // features also work on the command line instead of having two
+            // different names when the LLVM name and the Rust name differ.
+            let llvm_feature = to_llvm_features(sess, feature);
+
+            Some(
+                std::iter::once(format!("{}{}", enable_disable, llvm_feature.llvm_feature_name))
+                    .chain(llvm_feature.dependency.into_iter().filter_map(move |feat| {
+                        match (enable_disable, feat) {
+                            ('-' | '+', TargetFeatureFoldStrength::Both(f))
+                            | ('+', TargetFeatureFoldStrength::EnableOnly(f)) => {
+                                Some(format!("{enable_disable}{f}"))
+                            }
+                            _ => None,
+                        }
+                    })),
+            )
+        })
+        .flatten();
+    features.extend(feats);
+
+    if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) {
+        sess.dcx().emit_err(TargetFeatureDisableOrEnable {
+            features: f,
+            span: None,
+            missing_features: None,
+        });
+    }
+
+    features
+}
+
+/// Returns a feature name for the given `+feature` or `-feature` string.
+///
+/// Only allows features that are backend specific (i.e. not [`RUSTC_SPECIFIC_FEATURES`]).
+fn backend_feature_name<'a>(sess: &Session, s: &'a str) -> Option<&'a str> {
+    // features must start with a `+` or `-`.
+    let feature = s
+        .strip_prefix(&['+', '-'][..])
+        .unwrap_or_else(|| sess.dcx().emit_fatal(InvalidTargetFeaturePrefix { feature: s }));
+    // Rustc-specific feature requests like `+crt-static` or `-crt-static`
+    // are not passed down to LLVM.
+    if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+        return None;
+    }
+    Some(feature)
+}
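+// Example (sketch): `backend_feature_name(sess, "+avx2")` yields `Some("avx2")`,
+// while `backend_feature_name(sess, "+crt-static")` yields `None`, since
+// `crt-static` is rustc-specific and never forwarded to LLVM.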
+
+pub fn tune_cpu(sess: &Session) -> Option<&str> {
+    let name = sess.opts.unstable_opts.tune_cpu.as_ref()?;
+    Some(handle_native(name))
+}
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
new file mode 100644
index 00000000000..29100a64171
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -0,0 +1,161 @@
+use crate::attributes;
+use crate::base;
+use crate::context::CodegenCx;
+use crate::errors::SymbolAlreadyDefined;
+use crate::llvm;
+use crate::type_of::LayoutLlvmExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_middle::bug;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
+use rustc_session::config::CrateType;
+use rustc_target::spec::RelocModel;
+
+impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
+    fn predefine_static(
+        &self,
+        def_id: DefId,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
+        let instance = Instance::mono(self.tcx, def_id);
+        let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
+        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
+        // the llvm type from the actual evaluated initializer.
+        let ty = if nested {
+            self.tcx.types.unit
+        } else {
+            instance.ty(self.tcx, ty::ParamEnv::reveal_all())
+        };
+        let llty = self.layout_of(ty).llvm_type(self);
+
+        let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
+            self.sess()
+                .dcx()
+                .emit_fatal(SymbolAlreadyDefined { span: self.tcx.def_span(def_id), symbol_name })
+        });
+
+        unsafe {
+            llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
+            llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
+            if self.should_assume_dso_local(g, false) {
+                llvm::LLVMRustSetDSOLocal(g, true);
+            }
+        }
+
+        self.instances.borrow_mut().insert(instance, g);
+    }
+
+    fn predefine_fn(
+        &self,
+        instance: Instance<'tcx>,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
+        assert!(!instance.args.has_infer());
+
+        let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+        let lldecl = self.declare_fn(symbol_name, fn_abi, Some(instance));
+        unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
+        let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+        base::set_link_section(lldecl, attrs);
+        if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
+            llvm::SetUniqueComdat(self.llmod, lldecl);
+        }
+
+        // If we're compiling the compiler-builtins crate, e.g., the equivalent of
+        // compiler-rt, then we want to implicitly compile everything with hidden
+        // visibility as we're going to link this object all over the place but
+        // don't want the symbols to get exported.
+        if linkage != Linkage::Internal
+            && linkage != Linkage::Private
+            && self.tcx.is_compiler_builtins(LOCAL_CRATE)
+        {
+            unsafe {
+                llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
+            }
+        } else {
+            unsafe {
+                llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility));
+            }
+        }
+
+        debug!("predefine_fn: instance = {:?}", instance);
+
+        attributes::from_fn_attrs(self, lldecl, instance);
+
+        unsafe {
+            if self.should_assume_dso_local(lldecl, false) {
+                llvm::LLVMRustSetDSOLocal(lldecl, true);
+            }
+        }
+
+        self.instances.borrow_mut().insert(instance, lldecl);
+    }
+}
+
+impl CodegenCx<'_, '_> {
+    /// Whether a definition or declaration can be assumed to be local to a group of
+    /// libraries that form a single DSO or executable.
+    pub(crate) unsafe fn should_assume_dso_local(
+        &self,
+        llval: &llvm::Value,
+        is_declaration: bool,
+    ) -> bool {
+        let linkage = llvm::LLVMRustGetLinkage(llval);
+        let visibility = llvm::LLVMRustGetVisibility(llval);
+
+        if matches!(linkage, llvm::Linkage::InternalLinkage | llvm::Linkage::PrivateLinkage) {
+            return true;
+        }
+
+        if visibility != llvm::Visibility::Default && linkage != llvm::Linkage::ExternalWeakLinkage
+        {
+            return true;
+        }
+
+        // Symbols from executables can't really be imported any further.
+        let all_exe = self.tcx.crate_types().iter().all(|ty| *ty == CrateType::Executable);
+        let is_declaration_for_linker =
+            is_declaration || linkage == llvm::Linkage::AvailableExternallyLinkage;
+        if all_exe && !is_declaration_for_linker {
+            return true;
+        }
+
+        // PowerPC64 prefers TOC indirection to avoid copy relocations.
+        if matches!(&*self.tcx.sess.target.arch, "powerpc64" | "powerpc64le") {
+            return false;
+        }
+
+        // Match clang by only supporting COFF and ELF for now.
+        if self.tcx.sess.target.is_like_osx {
+            return false;
+        }
+
+        // With the PIE relocation model, calls to functions defined in the
+        // translation unit can use copy relocations.
+        if self.tcx.sess.relocation_model() == RelocModel::Pie && !is_declaration {
+            return true;
+        }
+
+        // Thread-local variables generally don't support copy relocations.
+        let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval)
+            .is_some_and(|v| llvm::LLVMIsThreadLocal(v) == llvm::True);
+        if is_thread_local_var {
+            return false;
+        }
+
+        // Respect the direct-access-external-data option, if present, to override the default behavior.
+        if let Some(direct) = self.tcx.sess.direct_access_external_data() {
+            return direct;
+        }
+
+        // Static relocation model should force copy relocations everywhere.
+        self.tcx.sess.relocation_model() == RelocModel::Static
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
new file mode 100644
index 00000000000..af1bbda4d08
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -0,0 +1,361 @@
+pub use crate::llvm::Type;
+
+use crate::abi::{FnAbiLlvmExt, LlvmType};
+use crate::common;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::{Bool, False, True};
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use std::fmt;
+use std::ptr;
+
+use libc::{c_char, c_uint};
+
+impl PartialEq for Type {
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+impl fmt::Debug for Type {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(
+            &llvm::build_string(|s| unsafe {
+                llvm::LLVMRustWriteTypeToString(self, s);
+            })
+            .expect("non-UTF8 type description from LLVM"),
+        )
+    }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+    pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type {
+        let name = SmallCStr::new(name);
+        unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) }
+    }
+
+    pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
+        unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) }
+    }
+
+    pub(crate) fn type_void(&self) -> &'ll Type {
+        unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
+    }
+
+    pub(crate) fn type_token(&self) -> &'ll Type {
+        unsafe { llvm::LLVMTokenTypeInContext(self.llcx) }
+    }
+
+    pub(crate) fn type_metadata(&self) -> &'ll Type {
+        unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) }
+    }
+
+    /// Creates an integer type with the given number of bits, e.g., i24
+    pub(crate) fn type_ix(&self, num_bits: u64) -> &'ll Type {
+        unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) }
+    }
+
+    pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+        unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
+    }
+
+    pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+        unsafe {
+            let n_args = llvm::LLVMCountParamTypes(ty) as usize;
+            let mut args = Vec::with_capacity(n_args);
+            llvm::LLVMGetParamTypes(ty, args.as_mut_ptr());
+            args.set_len(n_args);
+            args
+        }
+    }
+
+    pub(crate) fn type_bool(&self) -> &'ll Type {
+        self.type_i8()
+    }
+
+    pub(crate) fn type_int_from_ty(&self, t: ty::IntTy) -> &'ll Type {
+        match t {
+            ty::IntTy::Isize => self.type_isize(),
+            ty::IntTy::I8 => self.type_i8(),
+            ty::IntTy::I16 => self.type_i16(),
+            ty::IntTy::I32 => self.type_i32(),
+            ty::IntTy::I64 => self.type_i64(),
+            ty::IntTy::I128 => self.type_i128(),
+        }
+    }
+
+    pub(crate) fn type_uint_from_ty(&self, t: ty::UintTy) -> &'ll Type {
+        match t {
+            ty::UintTy::Usize => self.type_isize(),
+            ty::UintTy::U8 => self.type_i8(),
+            ty::UintTy::U16 => self.type_i16(),
+            ty::UintTy::U32 => self.type_i32(),
+            ty::UintTy::U64 => self.type_i64(),
+            ty::UintTy::U128 => self.type_i128(),
+        }
+    }
+
+    pub(crate) fn type_float_from_ty(&self, t: ty::FloatTy) -> &'ll Type {
+        match t {
+            ty::FloatTy::F16 => self.type_f16(),
+            ty::FloatTy::F32 => self.type_f32(),
+            ty::FloatTy::F64 => self.type_f64(),
+            ty::FloatTy::F128 => self.type_f128(),
+        }
+    }
+
+    /// Return an LLVM type that has at most the required alignment,
+    /// and exactly the required size, as a best-effort padding array.
+    pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
+        let unit = Integer::approximate_align(self, align);
+        let size = size.bytes();
+        let unit_size = unit.size().bytes();
+        assert_eq!(size % unit_size, 0);
+        self.type_array(self.type_from_integer(unit), size / unit_size)
+    }
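+    // For example (a sketch): a 12-byte filler at 4-byte alignment becomes `[3 x i32]`,
+    // since `Integer::approximate_align` picks an i32 unit and 12 / 4 == 3.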
+
+    pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+        unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) }
+    }
+}
+
+impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn type_i1(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt1TypeInContext(self.llcx) }
+    }
+
+    fn type_i8(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt8TypeInContext(self.llcx) }
+    }
+
+    fn type_i16(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt16TypeInContext(self.llcx) }
+    }
+
+    fn type_i32(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt32TypeInContext(self.llcx) }
+    }
+
+    fn type_i64(&self) -> &'ll Type {
+        unsafe { llvm::LLVMInt64TypeInContext(self.llcx) }
+    }
+
+    fn type_i128(&self) -> &'ll Type {
+        unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) }
+    }
+
+    fn type_isize(&self) -> &'ll Type {
+        self.isize_ty
+    }
+
+    fn type_f16(&self) -> &'ll Type {
+        unsafe { llvm::LLVMHalfTypeInContext(self.llcx) }
+    }
+
+    fn type_f32(&self) -> &'ll Type {
+        unsafe { llvm::LLVMFloatTypeInContext(self.llcx) }
+    }
+
+    fn type_f64(&self) -> &'ll Type {
+        unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) }
+    }
+
+    fn type_f128(&self) -> &'ll Type {
+        unsafe { llvm::LLVMFP128TypeInContext(self.llcx) }
+    }
+
+    fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+        unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) }
+    }
+
+    fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type {
+        unsafe {
+            llvm::LLVMStructTypeInContext(
+                self.llcx,
+                els.as_ptr(),
+                els.len() as c_uint,
+                packed as Bool,
+            )
+        }
+    }
+
+    fn type_kind(&self, ty: &'ll Type) -> TypeKind {
+        unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
+    }
+
+    fn type_ptr(&self) -> &'ll Type {
+        self.type_ptr_ext(AddressSpace::DATA)
+    }
+
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
+        unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
+    }
+
+    fn element_type(&self, ty: &'ll Type) -> &'ll Type {
+        match self.type_kind(ty) {
+            TypeKind::Array | TypeKind::Vector => unsafe { llvm::LLVMGetElementType(ty) },
+            TypeKind::Pointer => bug!("element_type is not supported for opaque pointers"),
+            other => bug!("element_type called on unsupported type {other:?}"),
+        }
+    }
+
+    fn vector_length(&self, ty: &'ll Type) -> usize {
+        unsafe { llvm::LLVMGetVectorSize(ty) as usize }
+    }
+
+    fn float_width(&self, ty: &'ll Type) -> usize {
+        match self.type_kind(ty) {
+            TypeKind::Half => 16,
+            TypeKind::Float => 32,
+            TypeKind::Double => 64,
+            TypeKind::X86_FP80 => 80,
+            TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
+            other => bug!("llvm_float_width called on a non-float type {other:?}"),
+        }
+    }
+
+    fn int_width(&self, ty: &'ll Type) -> u64 {
+        unsafe { llvm::LLVMGetIntTypeWidth(ty) as u64 }
+    }
+
+    fn val_ty(&self, v: &'ll Value) -> &'ll Type {
+        common::val_ty(v)
+    }
+
+    fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+        unsafe { llvm::LLVMArrayType2(ty, len) }
+    }
+}
+
+impl Type {
+    /// Creates an integer type with the given number of bits, e.g., i24
+    pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type {
+        unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
+    }
+
+    pub fn ptr_llcx(llcx: &llvm::Context) -> &Type {
+        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
+    }
+}
+
+impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+        layout.llvm_type(self)
+    }
+    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+        layout.immediate_llvm_type(self)
+    }
+    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_llvm_immediate()
+    }
+    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+        layout.is_llvm_scalar_pair()
+    }
+    fn scalar_pair_element_backend_type(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> &'ll Type {
+        layout.scalar_pair_element_llvm_type(self, index, immediate)
+    }
+    fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
+        ty.llvm_type(self)
+    }
+    fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+        fn_abi.llvm_type(self)
+    }
+    fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+        fn_abi.ptr_to_llvm_type(self)
+    }
+    fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
+        ty.llvm_type(self)
+    }
+    fn scalar_copy_backend_type(&self, layout: TyAndLayout<'tcx>) -> Option<Self::Type> {
+        layout.scalar_copy_llvm_type(self)
+    }
+}
+
+impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+    fn add_type_metadata(&self, function: &'ll Value, typeid: String) {
+        let typeid_metadata = self.typeid_metadata(typeid).unwrap();
+        let v = [self.const_usize(0), typeid_metadata];
+        unsafe {
+            llvm::LLVMRustGlobalAddMetadata(
+                function,
+                llvm::MD_type as c_uint,
+                llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+                    self.llcx,
+                    v.as_ptr(),
+                    v.len() as c_uint,
+                )),
+            )
+        }
+    }
+
+    fn set_type_metadata(&self, function: &'ll Value, typeid: String) {
+        let typeid_metadata = self.typeid_metadata(typeid).unwrap();
+        let v = [self.const_usize(0), typeid_metadata];
+        unsafe {
+            llvm::LLVMGlobalSetMetadata(
+                function,
+                llvm::MD_type as c_uint,
+                llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+                    self.llcx,
+                    v.as_ptr(),
+                    v.len() as c_uint,
+                )),
+            )
+        }
+    }
+
+    fn typeid_metadata(&self, typeid: String) -> Option<&'ll Value> {
+        Some(unsafe {
+            llvm::LLVMMDStringInContext(
+                self.llcx,
+                typeid.as_ptr() as *const c_char,
+                typeid.len() as c_uint,
+            )
+        })
+    }
+
+    fn add_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) {
+        let kcfi_type_metadata = self.const_u32(kcfi_typeid);
+        unsafe {
+            llvm::LLVMRustGlobalAddMetadata(
+                function,
+                llvm::MD_kcfi_type as c_uint,
+                llvm::LLVMMDNodeInContext2(
+                    self.llcx,
+                    &llvm::LLVMValueAsMetadata(kcfi_type_metadata),
+                    1,
+                ),
+            )
+        }
+    }
+
+    fn set_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) {
+        let kcfi_type_metadata = self.const_u32(kcfi_typeid);
+        unsafe {
+            llvm::LLVMGlobalSetMetadata(
+                function,
+                llvm::MD_kcfi_type as c_uint,
+                llvm::LLVMMDNodeInContext2(
+                    self.llcx,
+                    &llvm::LLVMValueAsMetadata(kcfi_type_metadata),
+                    1,
+                ),
+            )
+        }
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
new file mode 100644
index 00000000000..d10a083765b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -0,0 +1,351 @@
+use crate::common::*;
+use crate::type_::Type;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt};
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::{Abi, Align, FieldsShape};
+use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64};
+use rustc_target::abi::{Scalar, Size, Variants};
+
+use std::fmt::Write;
+
+fn uncached_llvm_type<'a, 'tcx>(
+    cx: &CodegenCx<'a, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
+) -> &'a Type {
+    match layout.abi {
+        Abi::Scalar(_) => bug!("handled elsewhere"),
+        Abi::Vector { element, count } => {
+            let element = layout.scalar_llvm_type_at(cx, element);
+            return cx.type_vector(element, count);
+        }
+        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+    }
+
+    let name = match layout.ty.kind() {
+        // FIXME(eddyb) producing readable type names for trait objects can result
+        // in problematically distinct types due to HRTB and subtyping (see #47638).
+        // ty::Dynamic(..) |
+        ty::Adt(..) | ty::Closure(..) | ty::CoroutineClosure(..) | ty::Foreign(..) | ty::Coroutine(..) | ty::Str
+            // For performance reasons we use names only when emitting LLVM IR.
+            if !cx.sess().fewer_names() =>
+        {
+            let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
+            if let (&ty::Adt(def, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                if def.is_enum() && !def.variants().is_empty() {
+                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
+                }
+            }
+            if let (&ty::Coroutine(_, _), &Variants::Single { index }) =
+                (layout.ty.kind(), &layout.variants)
+            {
+                write!(&mut name, "::{}", ty::CoroutineArgs::variant_name(index)).unwrap();
+            }
+            Some(name)
+        }
+        _ => None,
+    };
+
+    match layout.fields {
+        FieldsShape::Primitive | FieldsShape::Union(_) => {
+            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+            let packed = false;
+            match name {
+                None => cx.type_struct(&[fill], packed),
+                Some(ref name) => {
+                    let llty = cx.type_named_struct(name);
+                    cx.set_struct_body(llty, &[fill], packed);
+                    llty
+                }
+            }
+        }
+        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
+        FieldsShape::Arbitrary { .. } => match name {
+            None => {
+                let (llfields, packed) = struct_llfields(cx, layout);
+                cx.type_struct(&llfields, packed)
+            }
+            Some(ref name) => {
+                let llty = cx.type_named_struct(name);
+                *defer = Some((llty, layout));
+                llty
+            }
+        },
+    }
+}
+
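+// For illustration: given `#[repr(C)] struct Foo { a: u8, b: u32 }`, field `a`
+// ends at offset 1 while `b` must start at offset 4, so the loop below emits a
+// 3-byte padding filler between the two LLVM fields and leaves the struct
+// non-packed.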
+fn struct_llfields<'a, 'tcx>(
+    cx: &CodegenCx<'a, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+) -> (Vec<&'a Type>, bool) {
+    debug!("struct_llfields: {:#?}", layout);
+    let field_count = layout.fields.count();
+
+    let mut packed = false;
+    let mut offset = Size::ZERO;
+    let mut prev_effective_align = layout.align.abi;
+    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+    for i in layout.fields.index_by_increasing_offset() {
+        let target_offset = layout.fields.offset(i as usize);
+        let field = layout.field(cx, i);
+        let effective_field_align =
+            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+        packed |= effective_field_align < field.align.abi;
+
+        debug!(
+            "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
+                effective_field_align: {}",
+            i,
+            field,
+            offset,
+            target_offset,
+            effective_field_align.bytes()
+        );
+        assert!(target_offset >= offset);
+        let padding = target_offset - offset;
+        if padding != Size::ZERO {
+            let padding_align = prev_effective_align.min(effective_field_align);
+            assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+            result.push(cx.type_padding_filler(padding, padding_align));
+            debug!("    padding before: {:?}", padding);
+        }
+        result.push(field.llvm_type(cx));
+        offset = target_offset + field.size;
+        prev_effective_align = effective_field_align;
+    }
+    if layout.is_sized() && field_count > 0 {
+        if offset > layout.size {
+            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+        }
+        let padding = layout.size - offset;
+        if padding != Size::ZERO {
+            let padding_align = prev_effective_align;
+            assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+            debug!(
+                "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
+                padding, offset, layout.size
+            );
+            result.push(cx.type_padding_filler(padding, padding_align));
+        }
+    } else {
+        debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
+    }
+    (result, packed)
+}
+
+impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
+    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
+        self.layout_of(ty).align.abi
+    }
+
+    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
+        self.layout_of(ty).size
+    }
+
+    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
+        let layout = self.layout_of(ty);
+        (layout.size, layout.align.abi)
+    }
+}
+
+pub trait LayoutLlvmExt<'tcx> {
+    fn is_llvm_immediate(&self) -> bool;
+    fn is_llvm_scalar_pair(&self) -> bool;
+    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
+    fn scalar_pair_element_llvm_type<'a>(
+        &self,
+        cx: &CodegenCx<'a, 'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> &'a Type;
+    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
+}
+
+impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
+    fn is_llvm_immediate(&self) -> bool {
+        match self.abi {
+            Abi::Scalar(_) | Abi::Vector { .. } => true,
+            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        }
+    }
+
+    fn is_llvm_scalar_pair(&self) -> bool {
+        match self.abi {
+            Abi::ScalarPair(..) => true,
+            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        }
+    }
+
+    /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+    /// The pointee type of the pointer in `PlaceRef` is always this type.
+    /// For sized types, it is also the right LLVM type for an `alloca`
+    /// containing a value of that type, and most immediates (except `bool`).
+    /// Unsized types, however, are represented by a "minimal unit", e.g.
+    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+    /// If the type is an unsized struct, the regular layout is generated,
+    /// with the inner-most trailing unsized field using the "minimal unit"
+    /// of that field's type - this is useful for taking the address of
+    /// that field and ensuring the struct has the right alignment.
+    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
+        if let Abi::Scalar(scalar) = self.abi {
+            // Use a different cache for scalars because pointers to DSTs
+            // can be either fat or thin (data pointers of fat pointers).
+            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
+                return llty;
+            }
+            let llty = self.scalar_llvm_type_at(cx, scalar);
+            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
+            return llty;
+        }
+
+        // Check the cache.
+        let variant_index = match self.variants {
+            Variants::Single { index } => Some(index),
+            _ => None,
+        };
+        if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
+            return llty;
+        }
+
+        debug!("llvm_type({:#?})", self);
+
+        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+        // Make sure lifetimes are erased, to avoid generating distinct LLVM
+        // types for Rust types that only differ in the choice of lifetimes.
+        let normal_ty = cx.tcx.erase_regions(self.ty);
+
+        let mut defer = None;
+        let llty = if self.ty != normal_ty {
+            let mut layout = cx.layout_of(normal_ty);
+            if let Some(v) = variant_index {
+                layout = layout.for_variant(cx, v);
+            }
+            layout.llvm_type(cx)
+        } else {
+            uncached_llvm_type(cx, *self, &mut defer)
+        };
+        debug!("--> mapped {:#?} to llty={:?}", self, llty);
+
+        cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty);
+
+        if let Some((llty, layout)) = defer {
+            let (llfields, packed) = struct_llfields(cx, layout);
+            cx.set_struct_body(llty, &llfields, packed);
+        }
+        llty
+    }
+
+    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+        match self.abi {
+            Abi::Scalar(scalar) => {
+                if scalar.is_bool() {
+                    return cx.type_i1();
+                }
+            }
+            Abi::ScalarPair(..) => {
+                // An immediate pair always contains just the two elements, without any padding
+                // filler, as it should never be stored to memory.
+                return cx.type_struct(
+                    &[
+                        self.scalar_pair_element_llvm_type(cx, 0, true),
+                        self.scalar_pair_element_llvm_type(cx, 1, true),
+                    ],
+                    false,
+                );
+            }
+            _ => {}
+        };
+        self.llvm_type(cx)
+    }
+
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
+        match scalar.primitive() {
+            Int(i, _) => cx.type_from_integer(i),
+            F16 => cx.type_f16(),
+            F32 => cx.type_f32(),
+            F64 => cx.type_f64(),
+            F128 => cx.type_f128(),
+            Pointer(address_space) => cx.type_ptr_ext(address_space),
+        }
+    }
+
+    fn scalar_pair_element_llvm_type<'a>(
+        &self,
+        cx: &CodegenCx<'a, 'tcx>,
+        index: usize,
+        immediate: bool,
+    ) -> &'a Type {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
+        let Abi::ScalarPair(a, b) = self.abi else {
+            bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_llvm_type` would when
+        // dealing with an immediate pair. This means that `(bool, bool)` is
+        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+        // immediate, just like `bool` is typically `i8` in memory and only `i1`
+        // when immediate. We need to load/store `bool` as `i8` to avoid
+        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+        if immediate && scalar.is_bool() {
+            return cx.type_i1();
+        }
+
+        self.scalar_llvm_type_at(cx, scalar)
+    }
+
+    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
+        debug_assert!(self.is_sized());
+
+        // FIXME: this is a fairly arbitrary choice, but 128 bits on WASM
+        // (matching the 128-bit SIMD types proposal) and 256 bits on x64
+        // (like AVX2 registers) seem at least like a tolerable starting point.
+        let threshold = cx.data_layout().pointer_size * 4;
+        if self.layout.size() > threshold {
+            return None;
+        }
+
+        // Vectors, even for non-power-of-two sizes, have the same layout as
+        // arrays but don't count as aggregate types.
+        // While LLVM theoretically supports non-power-of-two sizes, and they
+        // often work fine, sometimes x86-isel deals with them horribly
+        // (see #115212) so for now only use power-of-two ones.
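+        // For example, `[u32; 4]` (16 bytes, power-of-two length) is copied as
+        // `<4 x i32>`, while `[u32; 3]` fails the power-of-two check and falls
+        // through to `None` below.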
+        if let FieldsShape::Array { count, .. } = self.layout.fields()
+            && count.is_power_of_two()
+            && let element = self.field(cx, 0)
+            && element.ty.is_integral()
+        {
+            // `cx.type_ix(bits)` is tempting here, but while that works great
+            // for things that *stay* as memory-to-memory copies, it also ends
+            // up suppressing vectorization as it introduces shifts when it
+            // extracts all the individual values.
+
+            let ety = element.llvm_type(cx);
+            if *count == 1 {
+                // Emitting `<1 x T>` would be silly; just use the scalar.
+                return Some(ety);
+            } else {
+                return Some(cx.type_vector(ety, *count));
+            }
+        }
+
+        // FIXME: The above only handled integer arrays; surely more things
+        // would also be possible. Be careful about provenance, though!
+        None
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
new file mode 100644
index 00000000000..220bb77d3fd
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -0,0 +1,311 @@
+use crate::builder::Builder;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::{
+    common::IntPredicate,
+    traits::{BaseTypeMethods, BuilderMethods, ConstMethods},
+};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Align, Endian, HasDataLayout, Size};
+
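+// Rounds `addr` up to a multiple of `align` via integer arithmetic; e.g. with
+// `addr = 0x1005` and `align = 8`, `(0x1005 + 7) & -8` yields `0x1008`.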
+fn round_pointer_up_to_alignment<'ll>(
+    bx: &mut Builder<'_, 'll, '_>,
+    addr: &'ll Value,
+    align: Align,
+    ptr_ty: &'ll Type,
+) -> &'ll Value {
+    let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
+    ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
+    ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
+    bx.inttoptr(ptr_as_int, ptr_ty)
+}
+
+fn emit_direct_ptr_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    size: Size,
+    align: Align,
+    slot_size: Align,
+    allow_higher_align: bool,
+) -> (&'ll Value, Align) {
+    let va_list_ty = bx.type_ptr();
+    let va_list_addr = list.immediate();
+
+    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+    let (addr, addr_align) = if allow_higher_align && align > slot_size {
+        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
+    } else {
+        (ptr, slot_size)
+    };
+
+    let aligned_size = size.align_to(slot_size).bytes() as i32;
+    let full_direct_size = bx.cx().const_i32(aligned_size);
+    let next = bx.inbounds_ptradd(addr, full_direct_size);
+    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
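+    // On big-endian targets a value smaller than the slot is stored in the
+    // high-addressed bytes of the slot, so step the pointer past the padding.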
+    if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
+        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
+        let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
+        (adjusted, addr_align)
+    } else {
+        (addr, addr_align)
+    }
+}
+
+fn emit_ptr_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+    indirect: bool,
+    slot_size: Align,
+    allow_higher_align: bool,
+) -> &'ll Value {
+    let layout = bx.cx.layout_of(target_ty);
+    let (llty, size, align) = if indirect {
+        (
+            bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
+            bx.cx.data_layout().pointer_size,
+            bx.cx.data_layout().pointer_align,
+        )
+    } else {
+        (layout.llvm_type(bx.cx), layout.size, layout.align)
+    };
+    let (addr, addr_align) =
+        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
+    if indirect {
+        let tmp_ret = bx.load(llty, addr, addr_align);
+        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
+    } else {
+        bx.load(llty, addr, addr_align)
+    }
+}
+
+fn emit_aapcs_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
+    // Implementation of the AAPCS64 calling convention for va_args; see
+    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    //
+    // typedef struct  va_list {
+    //     void * stack; // next stack param
+    //     void * gr_top; // end of GP arg reg save area
+    //     void * vr_top; // end of FP/SIMD arg reg save area
+    //     int gr_offs; // offset from gr_top to next GP register arg
+    //     int vr_offs; // offset from vr_top to next FP/SIMD register arg
+    // } va_list;
+    let va_list_addr = list.immediate();
+
+    // There is no padding between fields since `void*` is size=8 align=8, `int` is size=4 align=4.
+    // See https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    // Table 1, Byte size and byte alignment of fundamental data types
+    // Table 3, Mapping of C & C++ built-in data types
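+    // With those sizes the fields sit at byte offsets 0 (stack), 8 (gr_top),
+    // 16 (vr_top), 24 (gr_offs) and 28 (vr_offs), which is what the pointer
+    // arithmetic below computes.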
+    let ptr_offset = 8;
+    let i32_offset = 4;
+    let gr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(ptr_offset));
+    let vr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * ptr_offset));
+    let gr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset));
+    let vr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset + i32_offset));
+
+    let layout = bx.cx.layout_of(target_ty);
+
+    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
+    let in_reg = bx.append_sibling_block("va_arg.in_reg");
+    let on_stack = bx.append_sibling_block("va_arg.on_stack");
+    let end = bx.append_sibling_block("va_arg.end");
+    let zero = bx.const_i32(0);
+    let offset_align = Align::from_bytes(4).unwrap();
+
+    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
+    let (reg_off, reg_top, slot_size) = if gr_type {
+        let nreg = (layout.size.bytes() + 7) / 8;
+        (gr_offs, gr_top, nreg * 8)
+    } else {
+        let nreg = (layout.size.bytes() + 15) / 16;
+        (vr_offs, vr_top, nreg * 16)
+    };
+
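+    // In the AAPCS64 va_list, gr_offs/vr_offs are negative while unread slots
+    // remain in the corresponding register save area and reach zero or above
+    // once that area is exhausted.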
+    // If the offset is >= 0 then the value will be on the stack.
+    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
+    let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
+    bx.cond_br(use_stack, on_stack, maybe_reg);
+
+    // The value at this point might be in a register, but there is a chance that
+    // it could be on the stack, so we have to update the offset and then check
+    // the offset again.
+
+    bx.switch_to_block(maybe_reg);
+    if gr_type && layout.align.abi.bytes() > 8 {
+        reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
+        reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
+    }
+    let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));
+
+    bx.store(new_reg_off_v, reg_off, offset_align);
+
+    // Check to see if we have overflowed the registers as a result of this.
+    // If we have, then we need to use the stack for this value.
+    let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
+    bx.cond_br(use_stack, on_stack, in_reg);
+
+    bx.switch_to_block(in_reg);
+    let top_type = bx.type_ptr();
+    let top = bx.load(top_type, reg_top, dl.pointer_align.abi);
+
+    // reg_value = *(@top + reg_off_v);
+    let mut reg_addr = bx.ptradd(top, reg_off_v);
+    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
+        // On big-endian systems the value is right-aligned in its slot.
+        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
+        reg_addr = bx.ptradd(reg_addr, offset);
+    }
+    let reg_type = layout.llvm_type(bx);
+    let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
+    bx.br(end);
+
+    // On Stack block
+    bx.switch_to_block(on_stack);
+    let stack_value =
+        emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
+    bx.br(end);
+
+    bx.switch_to_block(end);
+    let val =
+        bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack]);
+
+    val
+}
+
+fn emit_s390x_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
+    // Implementation of the s390x ELF ABI calling convention for va_args; see
+    // https://github.com/IBM/s390x-abi (chapter 1.2.4)
+    //
+    // typedef struct __va_list_tag {
+    //     long __gpr;
+    //     long __fpr;
+    //     void *__overflow_arg_area;
+    //     void *__reg_save_area;
+    // } va_list[1];
+    let va_list_addr = list.immediate();
+
+    // There is no padding between fields since `long` and `void*` both have size=8 align=8.
+    // https://github.com/IBM/s390x-abi (Table 1.1.: Scalar types)
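+    // The corresponding byte offsets are therefore 0 (__gpr), 8 (__fpr),
+    // 16 (__overflow_arg_area) and 24 (__reg_save_area).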
+    let i64_offset = 8;
+    let ptr_offset = 8;
+    let gpr = va_list_addr;
+    let fpr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(i64_offset));
+    let overflow_arg_area = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset));
+    let reg_save_area =
+        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset + ptr_offset));
+
+    let layout = bx.cx.layout_of(target_ty);
+
+    let in_reg = bx.append_sibling_block("va_arg.in_reg");
+    let in_mem = bx.append_sibling_block("va_arg.in_mem");
+    let end = bx.append_sibling_block("va_arg.end");
+
+    // FIXME: vector ABI not yet supported.
+    let target_ty_size = bx.cx.size_of(target_ty).bytes();
+    let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
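+    // s390x is big-endian, so a value narrower than its 8-byte slot is
+    // right-aligned within it; the leading `padding` bytes computed here are
+    // skipped via `reg_padding` and `arg_off` below.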
+    let unpadded_size = if indirect { 8 } else { target_ty_size };
+    let padded_size = 8;
+    let padding = padded_size - unpadded_size;
+
+    let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
+    let (max_regs, reg_count, reg_save_index, reg_padding) =
+        if gpr_type { (5, gpr, 2, padding) } else { (4, fpr, 16, 0) };
+
+    // Check whether the value was passed in a register or in memory.
+    let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
+    let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
+    bx.cond_br(use_regs, in_reg, in_mem);
+
+    // Emit code to load the value if it was passed in a register.
+    bx.switch_to_block(in_reg);
+
+    // Work out the address of the value in the register save area.
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi);
+    let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
+    let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
+    let reg_addr = bx.ptradd(reg_ptr_v, reg_off);
+
+    // Update the register count.
+    let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
+    bx.store(new_reg_count_v, reg_count, Align::from_bytes(8).unwrap());
+    bx.br(end);
+
+    // Emit code to load the value if it was passed in memory.
+    bx.switch_to_block(in_mem);
+
+    // Work out the address of the value in the argument overflow area.
+    let arg_ptr_v =
+        bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi);
+    let arg_off = bx.const_u64(padding);
+    let mem_addr = bx.ptradd(arg_ptr_v, arg_off);
+
+    // Update the argument overflow area pointer.
+    let arg_size = bx.cx().const_u64(padded_size);
+    let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
+    bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi);
+    bx.br(end);
+
+    // Return the appropriate result.
+    bx.switch_to_block(end);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_type = layout.llvm_type(bx);
+    let val_addr =
+        if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr };
+    bx.load(val_type, val_addr, layout.align.abi)
+}
+
+pub(super) fn emit_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    addr: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    // Determine the va_arg implementation to use. The LLVM va_arg instruction
+    // is lacking in some instances, so we should only use it as a fallback.
+    let target = &bx.cx.tcx.sess.target;
+    let arch = &bx.cx.tcx.sess.target.arch;
+    match &**arch {
+        // Windows x86
+        "x86" if target.is_like_windows => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
+        }
+        // Generic x86
+        "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
+        // Windows AArch64
+        "aarch64" | "arm64ec" if target.is_like_windows => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
+        }
+        // macOS / iOS AArch64
+        "aarch64" if target.is_like_osx => {
+            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
+        }
+        "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
+        "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
+        // Windows x86_64
+        "x86_64" if target.is_like_windows => {
+            let target_ty_size = bx.cx.size_of(target_ty).bytes();
+            let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+            emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
+        }
+        // For all other architecture/OS combinations fall back to using
+        // the LLVM va_arg instruction.
+        // https://llvm.org/docs/LangRef.html#va-arg-instruction
+        _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/value.rs b/compiler/rustc_codegen_llvm/src/value.rs
new file mode 100644
index 00000000000..1338a229566
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/value.rs
@@ -0,0 +1,32 @@
+pub use crate::llvm::Value;
+
+use crate::llvm;
+
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ptr;
+
+impl PartialEq for Value {
+    fn eq(&self, other: &Self) -> bool {
+        ptr::eq(self, other)
+    }
+}
+
+impl Eq for Value {}
+
+impl Hash for Value {
+    fn hash<H: Hasher>(&self, hasher: &mut H) {
+        (self as *const Self).hash(hasher);
+    }
+}
+
+impl fmt::Debug for Value {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(
+            &llvm::build_string(|s| unsafe {
+                llvm::LLVMRustWriteValueToString(self, s);
+            })
+            .expect("non-UTF8 value description from LLVM"),
+        )
+    }
+}