Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs                          |  15
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs                 |   1
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs                      | 140
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs                       |  49
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs                       |  25
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs                      |   5
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs             | 158
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs             |  92
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs  |   3
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs                    |  51
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs                          |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs                     |   7
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs                    |   7
13 files changed, 497 insertions, 58 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index c0b43b77897..123aef0f310 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -2,6 +2,7 @@ use crate::attributes;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
 use crate::llvm::{self, Attribute, AttributePlace};
+use crate::llvm_util;
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
@@ -227,7 +228,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 //   when passed by value, making it larger.
                 let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
                 // Allocate some scratch space...
-                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+                let llscratch = bx.alloca(scratch_size, scratch_align);
                 bx.lifetime_start(llscratch, scratch_size);
                 // ...store the value...
                 bx.store(val, llscratch, scratch_align);
@@ -425,6 +426,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()),
                 );
                 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
+                if cx.sess().opts.optimize != config::OptLevel::No
+                    && llvm_util::get_version() >= (18, 0, 0)
+                {
+                    attributes::apply_to_llfn(
+                        llfn,
+                        llvm::AttributePlace::Argument(i),
+                        &[
+                            llvm::AttributeKind::Writable.create_attr(cx.llcx),
+                            llvm::AttributeKind::DeadOnUnwind.create_attr(cx.llcx),
+                        ],
+                    );
+                }
             }
             PassMode::Cast { cast, pad_i32: _ } => {
                 cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
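
For context on the attributes added above: `writable` and `dead_on_unwind` describe the hidden `sret` return slot, telling LLVM that the callee may freely overwrite it and that its contents do not matter if the call unwinds, which lets the optimizer elide return-value copies. A minimal standalone sketch of the kind of function whose return value travels through such an `sret` pointer on common 64-bit targets (ordinary Rust with made-up names, not compiler code):

    #[derive(Clone, Copy)]
    pub struct Big {
        pub words: [u64; 16], // 128 bytes: far too large to come back in registers
    }

    #[inline(never)]
    pub fn make_big(seed: u64) -> Big {
        // On common 64-bit ABIs this value is written through a hidden `sret`
        // out-pointer supplied by the caller rather than returned by value.
        Big { words: [seed; 16] }
    }

    fn main() {
        let b = make_big(7);
        assert_eq!(b.words[15], 7);
    }
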
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 0619000364b..d4a3e39cef7 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -415,6 +415,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
                 members.as_ptr() as *const &_,
                 true,
                 kind,
+                self.sess.target.arch == "arm64ec",
             );
             let ret = if r.into_result().is_err() {
                 let err = llvm::LLVMRustGetLastError();
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 06d9be1869c..1a1b4ae3831 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -468,9 +468,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         val
     }
 
-    fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+    fn alloca(&mut self, size: Size, align: Align) -> &'ll Value {
         let mut bx = Builder::with_cx(self.cx);
         bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+        let ty = self.cx().type_array(self.cx().type_i8(), size.bytes());
         unsafe {
             let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
@@ -478,10 +479,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
+    fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
             let alloca =
-                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
+                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
@@ -1149,12 +1150,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         order: rustc_codegen_ssa::common::AtomicOrdering,
     ) -> &'ll Value {
         // The only RMW operation that LLVM supports directly on pointers is exchange (`xchg`).
-        if self.val_ty(src) == self.type_ptr()
-            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
-        {
+        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
+        if requires_cast_to_int {
             src = self.ptrtoint(src, self.type_isize());
         }
-        unsafe {
+        let mut res = unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
                 AtomicRmwBinOp::from_generic(op),
@@ -1163,7 +1164,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 AtomicOrdering::from_generic(order),
                 llvm::False, // SingleThreaded
             )
+        };
+        if requires_cast_to_int {
+            res = self.inttoptr(res, self.type_ptr());
         }
+        res
     }
 
     fn atomic_fence(
@@ -1698,4 +1703,125 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         };
         kcfi_bundle
     }
+
+    /// Emits a call to `llvm.instrprof.mcdc.parameters`.
+    ///
+    /// This doesn't produce any code directly, but is used as input by
+    /// the LLVM pass that handles coverage instrumentation.
+    ///
+    /// (See clang's [`CodeGenPGO::emitMCDCParameters`] for comparison.)
+    ///
+    /// [`CodeGenPGO::emitMCDCParameters`]:
+    ///     https://github.com/rust-lang/llvm-project/blob/5399a24/clang/lib/CodeGen/CodeGenPGO.cpp#L1124
+    pub(crate) fn mcdc_parameters(
+        &mut self,
+        fn_name: &'ll Value,
+        hash: &'ll Value,
+        bitmap_bytes: &'ll Value,
+    ) {
+        debug!("mcdc_parameters() with args ({:?}, {:?}, {:?})", fn_name, hash, bitmap_bytes);
+
+        assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later");
+
+        let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCParametersIntrinsic(self.cx().llmod) };
+        let llty = self.cx.type_func(
+            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32()],
+            self.cx.type_void(),
+        );
+        let args = &[fn_name, hash, bitmap_bytes];
+        let args = self.check_call("call", llty, llfn, args);
+
+        unsafe {
+            let _ = llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llty,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                [].as_ptr(),
+                0 as c_uint,
+            );
+        }
+    }
+
+    pub(crate) fn mcdc_tvbitmap_update(
+        &mut self,
+        fn_name: &'ll Value,
+        hash: &'ll Value,
+        bitmap_bytes: &'ll Value,
+        bitmap_index: &'ll Value,
+        mcdc_temp: &'ll Value,
+    ) {
+        debug!(
+            "mcdc_tvbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})",
+            fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp
+        );
+        assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later");
+
+        let llfn =
+            unsafe { llvm::LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(self.cx().llmod) };
+        let llty = self.cx.type_func(
+            &[
+                self.cx.type_ptr(),
+                self.cx.type_i64(),
+                self.cx.type_i32(),
+                self.cx.type_i32(),
+                self.cx.type_ptr(),
+            ],
+            self.cx.type_void(),
+        );
+        let args = &[fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp];
+        let args = self.check_call("call", llty, llfn, args);
+        unsafe {
+            let _ = llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llty,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                [].as_ptr(),
+                0 as c_uint,
+            );
+        }
+        self.store(self.const_i32(0), mcdc_temp, self.tcx.data_layout.i32_align.abi);
+    }
+
+    pub(crate) fn mcdc_condbitmap_update(
+        &mut self,
+        fn_name: &'ll Value,
+        hash: &'ll Value,
+        cond_loc: &'ll Value,
+        mcdc_temp: &'ll Value,
+        bool_value: &'ll Value,
+    ) {
+        debug!(
+            "mcdc_condbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})",
+            fn_name, hash, cond_loc, mcdc_temp, bool_value
+        );
+        assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later");
+        let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(self.cx().llmod) };
+        let llty = self.cx.type_func(
+            &[
+                self.cx.type_ptr(),
+                self.cx.type_i64(),
+                self.cx.type_i32(),
+                self.cx.type_ptr(),
+                self.cx.type_i1(),
+            ],
+            self.cx.type_void(),
+        );
+        let args = &[fn_name, hash, cond_loc, mcdc_temp, bool_value];
+        self.check_call("call", llty, llfn, args);
+        unsafe {
+            let _ = llvm::LLVMRustBuildCall(
+                self.llbuilder,
+                llty,
+                llfn,
+                args.as_ptr() as *const &llvm::Value,
+                args.len() as c_uint,
+                [].as_ptr(),
+                0 as c_uint,
+            );
+        }
+    }
 }
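
For context on the builder changes above: `alloca` now takes a byte `Size` rather than an LLVM type and always materialises an `[N x i8]` stack slot, so callers describe fixed stack allocations purely by size and alignment, while `byte_array_alloca` becomes `dynamic_alloca` for runtime-sized slots. A rough standalone sketch of that shape, using toy stand-ins rather than the real `rustc_target` types or the LLVM builder:

    #[derive(Clone, Copy)]
    struct Size { bytes: u64 }

    #[derive(Clone, Copy)]
    struct Align { bytes: u64 }

    // Old shape (by LLVM type):  fn alloca(ty: &Type, align: Align) -> &Value
    // New shape (by byte size):  fn alloca(size: Size, align: Align) -> &Value
    // The builder turns the size into an `[size x i8]` array internally, so every
    // fixed stack slot is an untyped byte blob with an explicit alignment.
    fn describe_alloca(size: Size, align: Align) -> String {
        format!("alloca [{} x i8], align {}", size.bytes, align.bytes)
    }

    fn main() {
        let slot = describe_alloca(Size { bytes: 4 }, Align { bytes: 4 });
        assert_eq!(slot, "alloca [4 x i8], align 4");
    }
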
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 568fcc3f3cf..ec33ce6292a 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -255,21 +255,38 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 let (prov, offset) = ptr.into_parts();
                 let (base_addr, base_addr_space) = match self.tcx.global_alloc(prov.alloc_id()) {
                     GlobalAlloc::Memory(alloc) => {
-                        let init = const_alloc_to_llvm(self, alloc);
-                        let alloc = alloc.inner();
-                        let value = match alloc.mutability {
-                            Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
-                            _ => self.static_addr_of(init, alloc.align, None),
-                        };
-                        if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() {
-                            let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
-                                let mut hasher = StableHasher::new();
-                                alloc.hash_stable(&mut hcx, &mut hasher);
-                                hasher.finish::<Hash128>()
-                            });
-                            llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes());
+                        // For ZSTs directly codegen an aligned pointer.
+                        // This avoids generating a zero-sized constant value and actually needing a
+                        // real address at runtime.
+                        if alloc.inner().len() == 0 {
+                            assert_eq!(offset.bytes(), 0);
+                            let llval = self.const_usize(alloc.inner().align.bytes());
+                            return if matches!(layout.primitive(), Pointer(_)) {
+                                unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+                            } else {
+                                self.const_bitcast(llval, llty)
+                            };
+                        } else {
+                            let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
+                            let alloc = alloc.inner();
+                            let value = match alloc.mutability {
+                                Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                                _ => self.static_addr_of(init, alloc.align, None),
+                            };
+                            if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty()
+                            {
+                                let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+                                    let mut hasher = StableHasher::new();
+                                    alloc.hash_stable(&mut hcx, &mut hasher);
+                                    hasher.finish::<Hash128>()
+                                });
+                                llvm::set_value_name(
+                                    value,
+                                    format!("alloc_{hash:032x}").as_bytes(),
+                                );
+                            }
+                            (value, AddressSpace::DATA)
                         }
-                        (value, AddressSpace::DATA)
                     }
                     GlobalAlloc::Function(fn_instance) => (
                         self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
@@ -280,7 +297,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                             .tcx
                             .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
                             .unwrap_memory();
-                        let init = const_alloc_to_llvm(self, alloc);
+                        let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
                         let value = self.static_addr_of(init, alloc.inner().align, None);
                         (value, AddressSpace::DATA)
                     }
@@ -308,7 +325,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
-        const_alloc_to_llvm(self, alloc)
+        const_alloc_to_llvm(self, alloc, /*static*/ false)
     }
 
     fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
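
For context on the `GlobalAlloc::Memory` change above: a reference to a zero-sized constant needs no backing allocation at all; it only has to be non-null and suitably aligned, so the codegen can use the alignment itself as the address instead of emitting an empty LLVM global. A runnable illustration of that invariant in ordinary Rust (not compiler code):

    fn main() {
        // A `&()` only promises to be non-null and aligned; it does not have to
        // point at real storage, which is why no zero-sized LLVM allocation is needed.
        let unit_ref: &() = &();
        let addr = unit_ref as *const () as usize;
        assert!(addr != 0);
        assert_eq!(addr % std::mem::align_of::<()>(), 0);

        // The same holds for ZSTs with a larger alignment requirement.
        #[repr(align(8))]
        struct Zst8;
        let r: &Zst8 = &Zst8;
        assert_eq!(std::mem::size_of::<Zst8>(), 0);
        assert_eq!(r as *const Zst8 as usize % 8, 0);
    }
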
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 4afa230e598..9e85c2d88f9 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -26,8 +26,22 @@ use rustc_target::abi::{
 };
 use std::ops::Range;
 
-pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
+pub fn const_alloc_to_llvm<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    alloc: ConstAllocation<'_>,
+    is_static: bool,
+) -> &'ll Value {
     let alloc = alloc.inner();
+    // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or
+    // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be
+    // producing empty LLVM allocations as they're just adding noise to binaries and forcing less
+    // optimal codegen.
+    //
+    // Statics have a guaranteed meaningful address so it's less clear that we want to do
+    // something like this; it's also harder.
+    if !is_static {
+        assert!(alloc.len() != 0);
+    }
     let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;
@@ -120,7 +134,7 @@ fn codegen_static_initializer<'ll, 'tcx>(
     def_id: DefId,
 ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
     let alloc = cx.tcx.eval_static_initializer(def_id)?;
-    Ok((const_alloc_to_llvm(cx, alloc), alloc))
+    Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc))
 }
 
 fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
@@ -246,7 +260,8 @@ impl<'ll> CodegenCx<'ll, '_> {
 
     #[instrument(level = "debug", skip(self, llty))]
     pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value {
-        if let Some(&g) = self.instances.borrow().get(&Instance::mono(self.tcx, def_id)) {
+        let instance = Instance::mono(self.tcx, def_id);
+        if let Some(&g) = self.instances.borrow().get(&instance) {
             trace!("used cached value");
             return g;
         }
@@ -259,7 +274,7 @@ impl<'ll> CodegenCx<'ll, '_> {
                  statics defined in the same CGU, but did not for `{def_id:?}`"
         );
 
-        let sym = self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name;
+        let sym = self.tcx.symbol_name(instance).name;
         let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
 
         debug!(?sym, ?fn_attrs);
@@ -349,7 +364,7 @@ impl<'ll> CodegenCx<'ll, '_> {
             }
         }
 
-        self.instances.borrow_mut().insert(Instance::mono(self.tcx, def_id), g);
+        self.instances.borrow_mut().insert(instance, g);
         g
     }
 
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index d32baa6dc02..653abb4e41b 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -11,7 +11,8 @@ use crate::value::Value;
 use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh};
 use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_codegen_ssa::traits::*;
-use rustc_data_structures::base_n;
+use rustc_data_structures::base_n::ToBaseN;
+use rustc_data_structures::base_n::ALPHANUMERIC_ONLY;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
@@ -1015,7 +1016,7 @@ impl CodegenCx<'_, '_> {
         let mut name = String::with_capacity(prefix.len() + 6);
         name.push_str(prefix);
         name.push('.');
-        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+        name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY));
         name
     }
 }
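
For context on the symbol-name helper changed above: the generated name still ends in a base-62, alphanumeric-only rendering of the counter, only produced via the `ToBaseN` trait instead of `base_n::push_str`. A self-contained sketch of that style of encoding (a toy encoder of my own; the digit ordering here is an assumption, not the `rustc_data_structures` implementation):

    fn to_base62(mut n: u64) -> String {
        const DIGITS: &[u8] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
        let mut out = Vec::new();
        loop {
            out.push(DIGITS[(n % 62) as usize]);
            n /= 62;
            if n == 0 {
                break;
            }
        }
        out.reverse();
        String::from_utf8(out).unwrap()
    }

    fn main() {
        // e.g. an anonymous local symbol suffix of the kind the changed helper builds.
        let mut name = String::from("anon.");
        name.push_str(&to_base62(12345));
        assert_eq!(name, "anon.3d7");
    }
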
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 2af28146a51..12a846a49ec 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -1,4 +1,6 @@
-use rustc_middle::mir::coverage::{CodeRegion, CounterId, CovTerm, ExpressionId, MappingKind};
+use rustc_middle::mir::coverage::{
+    CodeRegion, ConditionInfo, CounterId, CovTerm, DecisionInfo, ExpressionId, MappingKind,
+};
 
 /// Must match the layout of `LLVMRustCounterKind`.
 #[derive(Copy, Clone, Debug)]
@@ -99,6 +101,86 @@ pub enum RegionKind {
     /// associated with two counters, each representing the number of times the
     /// expression evaluates to true or false.
     BranchRegion = 4,
+
+    /// A DecisionRegion represents a top-level boolean expression and is
+    /// associated with a variable length bitmap index and condition number.
+    MCDCDecisionRegion = 5,
+
+    /// A Branch Region can be extended to include IDs to facilitate MC/DC.
+    MCDCBranchRegion = 6,
+}
+
+pub mod mcdc {
+    use rustc_middle::mir::coverage::{ConditionInfo, DecisionInfo};
+
+    /// Must match the layout of `LLVMRustMCDCDecisionParameters`.
+    #[repr(C)]
+    #[derive(Clone, Copy, Debug, Default)]
+    pub struct DecisionParameters {
+        bitmap_idx: u32,
+        conditions_num: u16,
+    }
+
+    // `ConditionId` in LLVM is `unsigned int` in LLVM 18 but `int16_t` in LLVM 19
+    // (https://github.com/llvm/llvm-project/pull/81257).
+    type LLVMConditionId = i16;
+
+    /// Must match the layout of `LLVMRustMCDCBranchParameters`.
+    #[repr(C)]
+    #[derive(Clone, Copy, Debug, Default)]
+    pub struct BranchParameters {
+        condition_id: LLVMConditionId,
+        condition_ids: [LLVMConditionId; 2],
+    }
+
+    #[repr(C)]
+    #[derive(Clone, Copy, Debug)]
+    pub enum ParameterTag {
+        None = 0,
+        Decision = 1,
+        Branch = 2,
+    }
+    /// Must match the layout of `LLVMRustMCDCParameters`.
+    #[repr(C)]
+    #[derive(Clone, Copy, Debug)]
+    pub struct Parameters {
+        tag: ParameterTag,
+        decision_params: DecisionParameters,
+        branch_params: BranchParameters,
+    }
+
+    impl Parameters {
+        pub fn none() -> Self {
+            Self {
+                tag: ParameterTag::None,
+                decision_params: Default::default(),
+                branch_params: Default::default(),
+            }
+        }
+        pub fn decision(decision_params: DecisionParameters) -> Self {
+            Self { tag: ParameterTag::Decision, decision_params, branch_params: Default::default() }
+        }
+        pub fn branch(branch_params: BranchParameters) -> Self {
+            Self { tag: ParameterTag::Branch, decision_params: Default::default(), branch_params }
+        }
+    }
+
+    impl From<ConditionInfo> for BranchParameters {
+        fn from(value: ConditionInfo) -> Self {
+            Self {
+                condition_id: value.condition_id.as_u32() as LLVMConditionId,
+                condition_ids: [
+                    value.false_next_id.as_u32() as LLVMConditionId,
+                    value.true_next_id.as_u32() as LLVMConditionId,
+                ],
+            }
+        }
+    }
+
+    impl From<DecisionInfo> for DecisionParameters {
+        fn from(value: DecisionInfo) -> Self {
+            Self { bitmap_idx: value.bitmap_idx, conditions_num: value.conditions_num }
+        }
+    }
 }
 
 /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the
@@ -122,6 +204,7 @@ pub struct CounterMappingRegion {
     /// for the false branch of the region.
     false_counter: Counter,
 
+    mcdc_params: mcdc::Parameters,
     /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
     /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
     /// that, in turn, are used to look up the filename for this region.
@@ -173,6 +256,26 @@ impl CounterMappingRegion {
                 end_line,
                 end_col,
             ),
+            MappingKind::MCDCBranch { true_term, false_term, mcdc_params } => {
+                Self::mcdc_branch_region(
+                    Counter::from_term(true_term),
+                    Counter::from_term(false_term),
+                    mcdc_params,
+                    local_file_id,
+                    start_line,
+                    start_col,
+                    end_line,
+                    end_col,
+                )
+            }
+            MappingKind::MCDCDecision(decision_info) => Self::decision_region(
+                decision_info,
+                local_file_id,
+                start_line,
+                start_col,
+                end_line,
+                end_col,
+            ),
         }
     }
 
@@ -187,6 +290,7 @@ impl CounterMappingRegion {
         Self {
             counter,
             false_counter: Counter::ZERO,
+            mcdc_params: mcdc::Parameters::none(),
             file_id,
             expanded_file_id: 0,
             start_line,
@@ -209,6 +313,7 @@ impl CounterMappingRegion {
         Self {
             counter,
             false_counter,
+            mcdc_params: mcdc::Parameters::none(),
             file_id,
             expanded_file_id: 0,
             start_line,
@@ -219,6 +324,54 @@ impl CounterMappingRegion {
         }
     }
 
+    pub(crate) fn mcdc_branch_region(
+        counter: Counter,
+        false_counter: Counter,
+        condition_info: ConditionInfo,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        Self {
+            counter,
+            false_counter,
+            mcdc_params: mcdc::Parameters::branch(condition_info.into()),
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::MCDCBranchRegion,
+        }
+    }
+
+    pub(crate) fn decision_region(
+        decision_info: DecisionInfo,
+        file_id: u32,
+        start_line: u32,
+        start_col: u32,
+        end_line: u32,
+        end_col: u32,
+    ) -> Self {
+        let mcdc_params = mcdc::Parameters::decision(decision_info.into());
+
+        Self {
+            counter: Counter::ZERO,
+            false_counter: Counter::ZERO,
+            mcdc_params,
+            file_id,
+            expanded_file_id: 0,
+            start_line,
+            start_col,
+            end_line,
+            end_col,
+            kind: RegionKind::MCDCDecisionRegion,
+        }
+    }
+
     // This function might be used in the future; the LLVM API is still evolving, as is coverage
     // support.
     #[allow(dead_code)]
@@ -233,6 +386,7 @@ impl CounterMappingRegion {
         Self {
             counter: Counter::ZERO,
             false_counter: Counter::ZERO,
+            mcdc_params: mcdc::Parameters::none(),
             file_id,
             expanded_file_id,
             start_line,
@@ -256,6 +410,7 @@ impl CounterMappingRegion {
         Self {
             counter: Counter::ZERO,
             false_counter: Counter::ZERO,
+            mcdc_params: mcdc::Parameters::none(),
             file_id,
             expanded_file_id: 0,
             start_line,
@@ -280,6 +435,7 @@ impl CounterMappingRegion {
         Self {
             counter,
             false_counter: Counter::ZERO,
+            mcdc_params: mcdc::Parameters::none(),
             file_id,
             expanded_file_id: 0,
             start_line,
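
For context on the `mcdc` module added above: `Parameters` mirrors a C struct consumed by the LLVM-side coverage writer, so instead of a Rust enum with data it carries an explicit tag plus both payloads, and the reader consults the tag to decide which payload is meaningful. A standalone sketch of that FFI-friendly shape (field names follow the diff, but this is a toy mirror, not the compiler's code):

    #[repr(C)]
    #[derive(Clone, Copy, Debug, Default)]
    struct DecisionParameters { bitmap_idx: u32, conditions_num: u16 }

    #[repr(C)]
    #[derive(Clone, Copy, Debug, Default)]
    struct BranchParameters { condition_id: i16, condition_ids: [i16; 2] }

    #[repr(C)]
    #[derive(Clone, Copy, Debug)]
    enum ParameterTag { None = 0, Decision = 1, Branch = 2 }

    #[repr(C)]
    #[derive(Clone, Copy, Debug)]
    struct Parameters {
        tag: ParameterTag,
        decision_params: DecisionParameters,
        branch_params: BranchParameters,
    }

    fn main() {
        let p = Parameters {
            tag: ParameterTag::Branch,
            decision_params: DecisionParameters::default(),
            branch_params: BranchParameters { condition_id: 0, condition_ids: [2, 3] },
        };
        match p.tag {
            ParameterTag::None => println!("no MC/DC parameters"),
            ParameterTag::Decision => println!("decision: {:?}", p.decision_params),
            ParameterTag::Branch => println!("branch: {:?}", p.branch_params),
        }
    }
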
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 140566e8da9..c51a7744a30 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -16,7 +16,7 @@ use rustc_middle::bug;
 use rustc_middle::mir::coverage::CoverageKind;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::Instance;
-use rustc_target::abi::Align;
+use rustc_target::abi::{Align, Size};
 
 use std::cell::RefCell;
 
@@ -30,6 +30,7 @@ pub struct CrateCoverageContext<'ll, 'tcx> {
     pub(crate) function_coverage_map:
         RefCell<FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>,
     pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
+    pub(crate) mcdc_condition_bitmap_map: RefCell<FxHashMap<Instance<'tcx>, Vec<&'ll llvm::Value>>>,
 }
 
 impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
@@ -37,6 +38,7 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
         Self {
             function_coverage_map: Default::default(),
             pgo_func_name_var_map: Default::default(),
+            mcdc_condition_bitmap_map: Default::default(),
         }
     }
 
@@ -45,6 +47,23 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
     ) -> FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>> {
         self.function_coverage_map.replace(FxIndexMap::default())
     }
+
+    /// LLVM uses a temporary value to record the evaluated MC/DC test vector of each
+    /// decision, called the condition bitmap.
+    /// In order to handle nested decisions, several condition bitmaps can be
+    /// allocated for a function body.
+    /// These values are named `mcdc.addr.{i}` and are 32-bit integers.
+    /// They respectively hold the condition bitmaps for decisions with a depth of `i`.
+    fn try_get_mcdc_condition_bitmap(
+        &self,
+        instance: &Instance<'tcx>,
+        decision_depth: u16,
+    ) -> Option<&'ll llvm::Value> {
+        self.mcdc_condition_bitmap_map
+            .borrow()
+            .get(instance)
+            .and_then(|bitmap_map| bitmap_map.get(decision_depth as usize))
+            .copied() // Dereference Option<&&Value> to Option<&Value>
+    }
 }
 
 // These methods used to be part of trait `CoverageInfoMethods`, which no longer
@@ -72,6 +91,42 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 }
 
 impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+    fn init_coverage(&mut self, instance: Instance<'tcx>) {
+        let Some(function_coverage_info) =
+            self.tcx.instance_mir(instance.def).function_coverage_info.as_deref()
+        else {
+            return;
+        };
+
+        // If there are no MC/DC bitmaps to set up, return immediately.
+        if function_coverage_info.mcdc_bitmap_bytes == 0 {
+            return;
+        }
+
+        let fn_name = self.get_pgo_func_name_var(instance);
+        let hash = self.const_u64(function_coverage_info.function_source_hash);
+        let bitmap_bytes = self.const_u32(function_coverage_info.mcdc_bitmap_bytes);
+        self.mcdc_parameters(fn_name, hash, bitmap_bytes);
+
+        // Create pointers named `mcdc.addr.{i}` to stack-allocated condition bitmaps.
+        let mut cond_bitmaps = vec![];
+        for i in 0..function_coverage_info.mcdc_num_condition_bitmaps {
+            // MC/DC intrinsics will perform loads/stores that use the ABI default
+            // alignment for i32, so our variable declaration should match.
+            let align = self.tcx.data_layout.i32_align.abi;
+            let cond_bitmap = self.alloca(Size::from_bytes(4), align);
+            llvm::set_value_name(cond_bitmap, format!("mcdc.addr.{i}").as_bytes());
+            self.store(self.const_i32(0), cond_bitmap, align);
+            cond_bitmaps.push(cond_bitmap);
+        }
+
+        self.coverage_context()
+            .expect("always present when coverage is enabled")
+            .mcdc_condition_bitmap_map
+            .borrow_mut()
+            .insert(instance, cond_bitmaps);
+    }
+
     #[instrument(level = "debug", skip(self))]
     fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) {
         // Our caller should have already taken care of inlining subtleties,
@@ -131,6 +186,41 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             CoverageKind::ExpressionUsed { id } => {
                 func_coverage.mark_expression_id_seen(id);
             }
+            CoverageKind::CondBitmapUpdate { id, value, decision_depth } => {
+                drop(coverage_map);
+                assert_ne!(
+                    id.as_u32(),
+                    0,
+                    "ConditionId of evaluated conditions should never be zero"
+                );
+                let cond_bitmap = coverage_context
+                    .try_get_mcdc_condition_bitmap(&instance, decision_depth)
+                    .expect("mcdc cond bitmap should have been allocated for updating");
+                let cond_loc = bx.const_i32(id.as_u32() as i32 - 1);
+                let bool_value = bx.const_bool(value);
+                let fn_name = bx.get_pgo_func_name_var(instance);
+                let hash = bx.const_u64(function_coverage_info.function_source_hash);
+                bx.mcdc_condbitmap_update(fn_name, hash, cond_loc, cond_bitmap, bool_value);
+            }
+            CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth } => {
+                drop(coverage_map);
+                let cond_bitmap = coverage_context
+                                    .try_get_mcdc_condition_bitmap(&instance, decision_depth)
+                                    .expect("mcdc cond bitmap should have been allocated for merging into the global bitmap");
+                let bitmap_bytes = bx.tcx().coverage_ids_info(instance.def).mcdc_bitmap_bytes;
+                assert!(bitmap_idx < bitmap_bytes, "bitmap index of the decision out of range");
+                assert!(
+                    bitmap_bytes <= function_coverage_info.mcdc_bitmap_bytes,
+                    "bitmap length disagreement: query says {bitmap_bytes} but function info only has {}",
+                    function_coverage_info.mcdc_bitmap_bytes
+                );
+
+                let fn_name = bx.get_pgo_func_name_var(instance);
+                let hash = bx.const_u64(function_coverage_info.function_source_hash);
+                let bitmap_bytes = bx.const_u32(bitmap_bytes);
+                let bitmap_index = bx.const_u32(bitmap_idx);
+                bx.mcdc_tvbitmap_update(fn_name, hash, bitmap_bytes, bitmap_index, cond_bitmap);
+            }
         }
     }
 }
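
For context on the coverage changes above: each nested decision gets its own `mcdc.addr.{i}` slot, every evaluated condition folds its outcome into that slot, and when the decision completes the accumulated value picks a bit in the function's global test-vector bitmap before the slot is reset to zero. The following is a hand-rolled simulation of that flow in plain Rust; it approximates what the `mcdc_condbitmap_update` and `mcdc_tvbitmap_update` calls achieve, not how the LLVM intrinsics are actually lowered:

    fn main() {
        // One temporary per decision depth, analogous to `mcdc.addr.{i}`.
        let mut cond_bitmap: u32 = 0;
        // Function-wide test-vector bitmap, analogous to the buffer whose size is
        // reported as `mcdc_bitmap_bytes`.
        let mut tv_bitmap = vec![0u8; 2];

        // Evaluate something like `a && (b || c)` with a = true, b = false, c = true,
        // recording each condition's outcome at its condition index.
        for (condition_index, value) in [(0u32, true), (1, false), (2, true)] {
            if value {
                cond_bitmap |= 1 << condition_index;
            }
        }

        // Decision finished: mark the exercised test vector in the global bitmap,
        // then reset the temporary (mirroring the store of zero after the update).
        let test_vector = cond_bitmap as usize;
        tv_bitmap[test_vector / 8] |= 1 << (test_vector % 8);
        cond_bitmap = 0;

        assert_eq!(tv_bitmap[0], 1 << 5); // test vector 0b101 was observed
        assert_eq!(cond_bitmap, 0);
    }
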
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
index 1aec65cf949..e521d5e259c 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -5,6 +5,7 @@ use rustc_data_structures::{
     fx::FxHashMap,
     stable_hasher::{HashStable, StableHasher},
 };
+use rustc_macros::HashStable;
 use rustc_middle::{
     bug,
     ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt},
@@ -23,6 +24,8 @@ use crate::{
 use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER};
 
 mod private {
+    use rustc_macros::HashStable;
+
     // This type cannot be constructed outside of this module because
     // it has a private field. We make use of this in order to prevent
     // `UniqueTypeId` from being constructed directly, without asserting
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 2bed7c1bd1c..56550dbfa4b 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -18,7 +18,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, GenericArgsRef, Ty};
 use rustc_middle::{bug, span_bug};
 use rustc_span::{sym, Span, Symbol};
-use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size};
 use rustc_target::spec::{HasTargetSpec, PanicStrategy};
 
 use std::cmp::Ordering;
@@ -315,25 +315,32 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     Some((width, signed)) => match name {
                         sym::ctlz | sym::cttz => {
                             let y = self.const_bool(false);
-                            self.call_intrinsic(
+                            let ret = self.call_intrinsic(
                                 &format!("llvm.{name}.i{width}"),
                                 &[args[0].immediate(), y],
-                            )
+                            );
+
+                            self.intcast(ret, llret_ty, false)
                         }
                         sym::ctlz_nonzero => {
                             let y = self.const_bool(true);
                             let llvm_name = &format!("llvm.ctlz.i{width}");
-                            self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+                            let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
+                            self.intcast(ret, llret_ty, false)
                         }
                         sym::cttz_nonzero => {
                             let y = self.const_bool(true);
                             let llvm_name = &format!("llvm.cttz.i{width}");
-                            self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+                            let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
+                            self.intcast(ret, llret_ty, false)
+                        }
+                        sym::ctpop => {
+                            let ret = self.call_intrinsic(
+                                &format!("llvm.ctpop.i{width}"),
+                                &[args[0].immediate()],
+                            );
+                            self.intcast(ret, llret_ty, false)
                         }
-                        sym::ctpop => self.call_intrinsic(
-                            &format!("llvm.ctpop.i{width}"),
-                            &[args[0].immediate()],
-                        ),
                         sym::bswap => {
                             if width == 8 {
                                 args[0].immediate() // byte swap a u8/i8 is just a no-op
@@ -355,6 +362,10 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             // rotate = funnel shift with first two args the same
                             let llvm_name =
                                 &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
+
+                            // LLVM expects the shift amount to have the same type as the
+                            // values, but Rust always uses `u32`.
+                            let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);
+
                             self.call_intrinsic(llvm_name, &[val, val, raw_shift])
                         }
                         sym::saturating_add | sym::saturating_sub => {
@@ -638,8 +649,9 @@ fn codegen_msvc_try<'ll>(
         //      }
         //
         // More information can be found in libstd's seh.rs implementation.
+        let ptr_size = bx.tcx().data_layout.pointer_size;
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let slot = bx.alloca(bx.type_ptr(), ptr_align);
+        let slot = bx.alloca(ptr_size, ptr_align);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
 
@@ -909,15 +921,14 @@ fn codegen_emcc_try<'ll>(
 
         // We need to pass two values to catch_func (ptr and is_rust_panic), so
         // create an alloca and pass a pointer to that.
+        let ptr_size = bx.tcx().data_layout.pointer_size;
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
-        let catch_data = bx.alloca(catch_data_type, ptr_align);
-        let catch_data_0 =
-            bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
-        bx.store(ptr, catch_data_0, ptr_align);
-        let catch_data_1 =
-            bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        // Required in order for there to be no padding between the fields.
+        assert!(i8_align <= ptr_align);
+        let catch_data = bx.alloca(2 * ptr_size, ptr_align);
+        bx.store(ptr, catch_data, ptr_align);
+        let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes()));
         bx.store(is_rust_panic, catch_data_1, i8_align);
 
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
@@ -973,7 +984,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
         tcx,
         ty::Binder::dummy(tcx.mk_fn_sig(
             [i8p],
-            Ty::new_unit(tcx),
+            tcx.types.unit,
             false,
             hir::Unsafety::Unsafe,
             Abi::Rust,
@@ -984,7 +995,7 @@ fn get_rust_try_fn<'ll, 'tcx>(
         tcx,
         ty::Binder::dummy(tcx.mk_fn_sig(
             [i8p, i8p],
-            Ty::new_unit(tcx),
+            tcx.types.unit,
             false,
             hir::Unsafety::Unsafe,
             Abi::Rust,
@@ -1363,7 +1374,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
 
                 // Convert the integer to a byte array
-                let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+                let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE);
                 bx.store(ze, ptr, Align::ONE);
                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
                 return Ok(bx.load(array_ty, ptr, Align::ONE));
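
For context on the count-intrinsic changes above: `llvm.ctlz.iN`, `llvm.cttz.iN`, and `llvm.ctpop.iN` return an `iN` matching the operand's width, while the Rust-side intrinsics now return `u32`, hence the added `intcast` of each result to `llret_ty`; the rotate case is the mirror image, where Rust supplies a `u32` shift amount that must be cast to the operand's width for `llvm.fshl`/`llvm.fshr`. A runnable reminder of that surface contract in ordinary Rust (not compiler code):

    fn main() {
        let x: u16 = 0b0000_0010_0000_0000;

        // All three results are u32 regardless of the operand width; in codegen
        // the iN value LLVM returns has to be widened (or narrowed) to match.
        let lz: u32 = x.leading_zeros();
        let tz: u32 = x.trailing_zeros();
        let ones: u32 = x.count_ones();
        assert_eq!((lz, tz, ones), (6, 9, 1));

        // The rotate amount is likewise always u32 on the Rust side, while LLVM's
        // funnel-shift intrinsic wants it in the operand's own width.
        assert_eq!(x.rotate_left(1), 0b0000_0100_0000_0000);
    }
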
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index c84461e53eb..1cecf682e5d 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -16,8 +16,6 @@
 #![feature(impl_trait_in_assoc_type)]
 
 #[macro_use]
-extern crate rustc_macros;
-#[macro_use]
 extern crate tracing;
 
 use back::owned_target_machine::OwnedTargetMachine;
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 5509baaa3e9..7a34e21628d 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -200,6 +200,8 @@ pub enum AttributeKind {
     AllocAlign = 39,
     SanitizeSafeStack = 40,
     FnRetThunkExtern = 41,
+    Writable = 42,
+    DeadOnUnwind = 43,
 }
 
 /// LLVMIntPredicate
@@ -1631,6 +1633,10 @@ extern "C" {
 
     // Miscellaneous instructions
     pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value;
+    pub fn LLVMRustGetInstrProfMCDCParametersIntrinsic(M: &Module) -> &Value;
+    pub fn LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(M: &Module) -> &Value;
+    pub fn LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(M: &Module) -> &Value;
+
     pub fn LLVMRustBuildCall<'a>(
         B: &Builder<'a>,
         Ty: &'a Type,
@@ -2303,6 +2309,7 @@ extern "C" {
         Members: *const &RustArchiveMember<'_>,
         WriteSymbtab: bool,
         Kind: ArchiveKind,
+        isEC: bool,
     ) -> LLVMRustResult;
     pub fn LLVMRustArchiveMemberNew<'a>(
         Filename: *const c_char,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index c9e62e504ae..5552b381025 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -270,9 +270,10 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> {
             "sve2-bitperm",
             TargetFeatureFoldStrength::EnableOnly("neon"),
         ),
-        // The unaligned-scalar-mem feature was renamed to fast-unaligned-access.
-        ("riscv32" | "riscv64", "fast-unaligned-access") if get_version().0 <= 17 => {
-            LLVMFeature::new("unaligned-scalar-mem")
+        // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single feature called
+        // `fast-unaligned-access`. In LLVM 19, it was split back out.
+        ("riscv32" | "riscv64", "unaligned-scalar-mem") if get_version().0 == 18 => {
+            LLVMFeature::new("fast-unaligned-access")
         }
         // For LLVM 18, enable the evex512 target feature if a avx512 target feature is enabled.
         ("x86", s) if get_version().0 >= 18 && s.starts_with("avx512") => {