Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/abi.rs              |  15
 compiler/rustc_codegen_llvm/src/asm.rs              |   2
 compiler/rustc_codegen_llvm/src/back/archive.rs     |   1
 compiler/rustc_codegen_llvm/src/builder.rs          | 166
 compiler/rustc_codegen_llvm/src/common.rs           |  49
 compiler/rustc_codegen_llvm/src/consts.rs           |  25
 compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs | 158
 compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs |  70
 compiler/rustc_codegen_llvm/src/intrinsic.rs        |  55
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs         |   5
 compiler/rustc_codegen_llvm/src/llvm_util.rs        |   7
11 files changed, 481 insertions, 72 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 94d67859f2f..123aef0f310 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -8,7 +8,7 @@ use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; -use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::MemFlags; use rustc_middle::bug; @@ -208,7 +208,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // Sized indirect arguments PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => { let align = attrs.pointee_align.unwrap_or(self.layout.align.abi); - OperandValue::Ref(val, None, align).store(bx, dst); + OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst); } // Unsized indirect qrguments PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { @@ -228,13 +228,13 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // when passed by value, making it larger. let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes()); // Allocate some scratch space... - let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); + let llscratch = bx.alloca(scratch_size, scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...store the value... bx.store(val, llscratch, scratch_align); // ... and then memcpy it to the intended destination. bx.memcpy( - dst.llval, + dst.val.llval, self.layout.align.abi, llscratch, scratch_align, @@ -266,7 +266,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { OperandValue::Pair(next(), next()).store(bx, dst); } PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { - OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); + let place_val = PlaceValue { + llval: next(), + llextra: Some(next()), + align: self.layout.align.abi, + }; + OperandValue::Ref(place_val).store(bx, dst); } PassMode::Direct(_) | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 500904ce188..e09869cf425 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -220,7 +220,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { constraints.append(&mut clobbers); if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) { match asm_arch { - InlineAsmArch::AArch64 | InlineAsmArch::Arm => { + InlineAsmArch::AArch64 | InlineAsmArch::Arm64EC | InlineAsmArch::Arm => { constraints.push("~{cc}".to_string()); } InlineAsmArch::X86 | InlineAsmArch::X86_64 => { diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index 0619000364b..d4a3e39cef7 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -415,6 +415,7 @@ impl<'a> LlvmArchiveBuilder<'a> { members.as_ptr() as *const &_, true, kind, + self.sess.target.arch == "arm64ec", ); let ret = if r.into_result().is_err() { let err = llvm::LLVMRustGetLastError(); diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index b7235972204..f7546039540 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -17,7 +17,7 @@ use 
rustc_data_structures::small_c_str::SmallCStr; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ - FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout, }; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; use rustc_sanitizers::{cfi, kcfi}; @@ -468,9 +468,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { val } - fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { + fn alloca(&mut self, size: Size, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); + let ty = self.cx().type_array(self.cx().type_i8(), size.bytes()); unsafe { let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); @@ -478,10 +479,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value { unsafe { let alloca = - llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED); + llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } @@ -535,7 +536,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { panic!("unsized locals must not be `extern` types"); } } - assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { return OperandRef::zero_sized(place.layout); @@ -579,13 +580,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - let val = if let Some(llextra) = place.llextra { - OperandValue::Ref(place.llval, Some(llextra), place.align) + let val = if let Some(_) = place.val.llextra { + // FIXME: Merge with the `else` below? 
+ OperandValue::Ref(place.val) } else if place.layout.is_llvm_immediate() { let mut const_llval = None; let llty = place.layout.llvm_type(self); unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) { if llvm::LLVMIsGlobalConstant(global) == llvm::True { if let Some(init) = llvm::LLVMGetInitializer(global) { if self.val_ty(init) == llty { @@ -596,7 +598,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } let llval = const_llval.unwrap_or_else(|| { - let load = self.load(llty, place.llval, place.align); + let load = self.load(llty, place.val.llval, place.val.align); if let abi::Abi::Scalar(scalar) = place.layout.abi { scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO); } @@ -608,9 +610,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let mut load = |i, scalar: abi::Scalar, layout, align, offset| { let llptr = if i == 0 { - place.llval + place.val.llval } else { - self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes())) + self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes())) }; let llty = place.layout.scalar_pair_element_llvm_type(self, i, false); let load = self.load(llty, llptr, align); @@ -619,11 +621,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }; OperandValue::Pair( - load(0, a, place.layout, place.align, Size::ZERO), - load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset), + load(0, a, place.layout, place.val.align, Size::ZERO), + load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset), ) } else { - OperandValue::Ref(place.llval, None, place.align) + OperandValue::Ref(place.val) }; OperandRef { val, layout: place.layout } @@ -1148,12 +1150,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { // The only RMW operation that LLVM supports on pointers is compare-exchange. 
- if self.val_ty(src) == self.type_ptr() - && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg - { + let requires_cast_to_int = self.val_ty(src) == self.type_ptr() + && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg; + if requires_cast_to_int { src = self.ptrtoint(src, self.type_isize()); } - unsafe { + let mut res = unsafe { llvm::LLVMBuildAtomicRMW( self.llbuilder, AtomicRmwBinOp::from_generic(op), @@ -1162,7 +1164,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { AtomicOrdering::from_generic(order), llvm::False, // SingleThreaded ) + }; + if requires_cast_to_int { + res = self.inttoptr(res, self.type_ptr()); } + res } fn atomic_fence( @@ -1697,4 +1703,128 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { }; kcfi_bundle } + + pub(crate) fn mcdc_parameters( + &mut self, + fn_name: &'ll Value, + hash: &'ll Value, + bitmap_bytes: &'ll Value, + ) -> &'ll Value { + debug!("mcdc_parameters() with args ({:?}, {:?}, {:?})", fn_name, hash, bitmap_bytes); + + assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later"); + + let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCParametersIntrinsic(self.cx().llmod) }; + let llty = self.cx.type_func( + &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32()], + self.cx.type_void(), + ); + let args = &[fn_name, hash, bitmap_bytes]; + let args = self.check_call("call", llty, llfn, args); + + unsafe { + let _ = llvm::LLVMRustBuildCall( + self.llbuilder, + llty, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + [].as_ptr(), + 0 as c_uint, + ); + // Create condition bitmap named `mcdc.addr`. + let mut bx = Builder::with_cx(self.cx); + bx.position_at_start(llvm::LLVMGetFirstBasicBlock(self.llfn())); + let cond_bitmap = { + let alloca = + llvm::LLVMBuildAlloca(bx.llbuilder, bx.cx.type_i32(), c"mcdc.addr".as_ptr()); + llvm::LLVMSetAlignment(alloca, 4); + alloca + }; + bx.store(self.const_i32(0), cond_bitmap, self.tcx().data_layout.i32_align.abi); + cond_bitmap + } + } + + pub(crate) fn mcdc_tvbitmap_update( + &mut self, + fn_name: &'ll Value, + hash: &'ll Value, + bitmap_bytes: &'ll Value, + bitmap_index: &'ll Value, + mcdc_temp: &'ll Value, + ) { + debug!( + "mcdc_tvbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})", + fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp + ); + assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later"); + + let llfn = + unsafe { llvm::LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(self.cx().llmod) }; + let llty = self.cx.type_func( + &[ + self.cx.type_ptr(), + self.cx.type_i64(), + self.cx.type_i32(), + self.cx.type_i32(), + self.cx.type_ptr(), + ], + self.cx.type_void(), + ); + let args = &[fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp]; + let args = self.check_call("call", llty, llfn, args); + unsafe { + let _ = llvm::LLVMRustBuildCall( + self.llbuilder, + llty, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + [].as_ptr(), + 0 as c_uint, + ); + } + let i32_align = self.tcx().data_layout.i32_align.abi; + self.store(self.const_i32(0), mcdc_temp, i32_align); + } + + pub(crate) fn mcdc_condbitmap_update( + &mut self, + fn_name: &'ll Value, + hash: &'ll Value, + cond_loc: &'ll Value, + mcdc_temp: &'ll Value, + bool_value: &'ll Value, + ) { + debug!( + "mcdc_condbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})", + fn_name, hash, cond_loc, mcdc_temp, bool_value + ); + assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require 
LLVM 18 or later"); + let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(self.cx().llmod) }; + let llty = self.cx.type_func( + &[ + self.cx.type_ptr(), + self.cx.type_i64(), + self.cx.type_i32(), + self.cx.type_ptr(), + self.cx.type_i1(), + ], + self.cx.type_void(), + ); + let args = &[fn_name, hash, cond_loc, mcdc_temp, bool_value]; + self.check_call("call", llty, llfn, args); + unsafe { + let _ = llvm::LLVMRustBuildCall( + self.llbuilder, + llty, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + [].as_ptr(), + 0 as c_uint, + ); + } + } } diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 568fcc3f3cf..ec33ce6292a 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -255,21 +255,38 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let (prov, offset) = ptr.into_parts(); let (base_addr, base_addr_space) = match self.tcx.global_alloc(prov.alloc_id()) { GlobalAlloc::Memory(alloc) => { - let init = const_alloc_to_llvm(self, alloc); - let alloc = alloc.inner(); - let value = match alloc.mutability { - Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), - _ => self.static_addr_of(init, alloc.align, None), - }; - if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() { - let hash = self.tcx.with_stable_hashing_context(|mut hcx| { - let mut hasher = StableHasher::new(); - alloc.hash_stable(&mut hcx, &mut hasher); - hasher.finish::<Hash128>() - }); - llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes()); + // For ZSTs directly codegen an aligned pointer. + // This avoids generating a zero-sized constant value and actually needing a + // real address at runtime. 
+ if alloc.inner().len() == 0 { + assert_eq!(offset.bytes(), 0); + let llval = self.const_usize(alloc.inner().align.bytes()); + return if matches!(layout.primitive(), Pointer(_)) { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + } else { + self.const_bitcast(llval, llty) + }; + } else { + let init = const_alloc_to_llvm(self, alloc, /*static*/ false); + let alloc = alloc.inner(); + let value = match alloc.mutability { + Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), + _ => self.static_addr_of(init, alloc.align, None), + }; + if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() + { + let hash = self.tcx.with_stable_hashing_context(|mut hcx| { + let mut hasher = StableHasher::new(); + alloc.hash_stable(&mut hcx, &mut hasher); + hasher.finish::<Hash128>() + }); + llvm::set_value_name( + value, + format!("alloc_{hash:032x}").as_bytes(), + ); + } + (value, AddressSpace::DATA) } - (value, AddressSpace::DATA) } GlobalAlloc::Function(fn_instance) => ( self.get_fn_addr(fn_instance.polymorphize(self.tcx)), @@ -280,7 +297,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { .tcx .global_alloc(self.tcx.vtable_allocation((ty, trait_ref))) .unwrap_memory(); - let init = const_alloc_to_llvm(self, alloc); + let init = const_alloc_to_llvm(self, alloc, /*static*/ false); let value = self.static_addr_of(init, alloc.inner().align, None); (value, AddressSpace::DATA) } @@ -308,7 +325,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value { - const_alloc_to_llvm(self, alloc) + const_alloc_to_llvm(self, alloc, /*static*/ false) } fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 4afa230e598..9e85c2d88f9 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -26,8 +26,22 @@ use rustc_target::abi::{ }; use std::ops::Range; -pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value { +pub fn const_alloc_to_llvm<'ll>( + cx: &CodegenCx<'ll, '_>, + alloc: ConstAllocation<'_>, + is_static: bool, +) -> &'ll Value { let alloc = alloc.inner(); + // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or + // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be + // producing empty LLVM allocations as they're just adding noise to binaries and forcing less + // optimal codegen. + // + // Statics have a guaranteed meaningful address so it's less clear that we want to do + // something like this; it's also harder. 
+ if !is_static { + assert!(alloc.len() != 0); + } let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); let dl = cx.data_layout(); let pointer_size = dl.pointer_size.bytes() as usize; @@ -120,7 +134,7 @@ fn codegen_static_initializer<'ll, 'tcx>( def_id: DefId, ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> { let alloc = cx.tcx.eval_static_initializer(def_id)?; - Ok((const_alloc_to_llvm(cx, alloc), alloc)) + Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc)) } fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) { @@ -246,7 +260,8 @@ impl<'ll> CodegenCx<'ll, '_> { #[instrument(level = "debug", skip(self, llty))] pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value { - if let Some(&g) = self.instances.borrow().get(&Instance::mono(self.tcx, def_id)) { + let instance = Instance::mono(self.tcx, def_id); + if let Some(&g) = self.instances.borrow().get(&instance) { trace!("used cached value"); return g; } @@ -259,7 +274,7 @@ impl<'ll> CodegenCx<'ll, '_> { statics defined in the same CGU, but did not for `{def_id:?}`" ); - let sym = self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name; + let sym = self.tcx.symbol_name(instance).name; let fn_attrs = self.tcx.codegen_fn_attrs(def_id); debug!(?sym, ?fn_attrs); @@ -349,7 +364,7 @@ impl<'ll> CodegenCx<'ll, '_> { } } - self.instances.borrow_mut().insert(Instance::mono(self.tcx, def_id), g); + self.instances.borrow_mut().insert(instance, g); g } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs index 2af28146a51..12a846a49ec 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs @@ -1,4 +1,6 @@ -use rustc_middle::mir::coverage::{CodeRegion, CounterId, CovTerm, ExpressionId, MappingKind}; +use rustc_middle::mir::coverage::{ + CodeRegion, ConditionInfo, CounterId, CovTerm, DecisionInfo, ExpressionId, MappingKind, +}; /// Must match the layout of `LLVMRustCounterKind`. #[derive(Copy, Clone, Debug)] @@ -99,6 +101,86 @@ pub enum RegionKind { /// associated with two counters, each representing the number of times the /// expression evaluates to true or false. BranchRegion = 4, + + /// A DecisionRegion represents a top-level boolean expression and is + /// associated with a variable length bitmap index and condition number. + MCDCDecisionRegion = 5, + + /// A Branch Region can be extended to include IDs to facilitate MC/DC. + MCDCBranchRegion = 6, +} + +pub mod mcdc { + use rustc_middle::mir::coverage::{ConditionInfo, DecisionInfo}; + + /// Must match the layout of `LLVMRustMCDCDecisionParameters`. + #[repr(C)] + #[derive(Clone, Copy, Debug, Default)] + pub struct DecisionParameters { + bitmap_idx: u32, + conditions_num: u16, + } + + // ConditionId in llvm is `unsigned int` at 18 while `int16_t` at [19](https://github.com/llvm/llvm-project/pull/81257) + type LLVMConditionId = i16; + + /// Must match the layout of `LLVMRustMCDCBranchParameters`. 
+ #[repr(C)] + #[derive(Clone, Copy, Debug, Default)] + pub struct BranchParameters { + condition_id: LLVMConditionId, + condition_ids: [LLVMConditionId; 2], + } + + #[repr(C)] + #[derive(Clone, Copy, Debug)] + pub enum ParameterTag { + None = 0, + Decision = 1, + Branch = 2, + } + /// Same layout with `LLVMRustMCDCParameters` + #[repr(C)] + #[derive(Clone, Copy, Debug)] + pub struct Parameters { + tag: ParameterTag, + decision_params: DecisionParameters, + branch_params: BranchParameters, + } + + impl Parameters { + pub fn none() -> Self { + Self { + tag: ParameterTag::None, + decision_params: Default::default(), + branch_params: Default::default(), + } + } + pub fn decision(decision_params: DecisionParameters) -> Self { + Self { tag: ParameterTag::Decision, decision_params, branch_params: Default::default() } + } + pub fn branch(branch_params: BranchParameters) -> Self { + Self { tag: ParameterTag::Branch, decision_params: Default::default(), branch_params } + } + } + + impl From<ConditionInfo> for BranchParameters { + fn from(value: ConditionInfo) -> Self { + Self { + condition_id: value.condition_id.as_u32() as LLVMConditionId, + condition_ids: [ + value.false_next_id.as_u32() as LLVMConditionId, + value.true_next_id.as_u32() as LLVMConditionId, + ], + } + } + } + + impl From<DecisionInfo> for DecisionParameters { + fn from(value: DecisionInfo) -> Self { + Self { bitmap_idx: value.bitmap_idx, conditions_num: value.conditions_num } + } + } } /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the @@ -122,6 +204,7 @@ pub struct CounterMappingRegion { /// for the false branch of the region. false_counter: Counter, + mcdc_params: mcdc::Parameters, /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes /// that, in turn, are used to look up the filename for this region. 
@@ -173,6 +256,26 @@ impl CounterMappingRegion { end_line, end_col, ), + MappingKind::MCDCBranch { true_term, false_term, mcdc_params } => { + Self::mcdc_branch_region( + Counter::from_term(true_term), + Counter::from_term(false_term), + mcdc_params, + local_file_id, + start_line, + start_col, + end_line, + end_col, + ) + } + MappingKind::MCDCDecision(decision_info) => Self::decision_region( + decision_info, + local_file_id, + start_line, + start_col, + end_line, + end_col, + ), } } @@ -187,6 +290,7 @@ impl CounterMappingRegion { Self { counter, false_counter: Counter::ZERO, + mcdc_params: mcdc::Parameters::none(), file_id, expanded_file_id: 0, start_line, @@ -209,6 +313,7 @@ impl CounterMappingRegion { Self { counter, false_counter, + mcdc_params: mcdc::Parameters::none(), file_id, expanded_file_id: 0, start_line, @@ -219,6 +324,54 @@ impl CounterMappingRegion { } } + pub(crate) fn mcdc_branch_region( + counter: Counter, + false_counter: Counter, + condition_info: ConditionInfo, + file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + Self { + counter, + false_counter, + mcdc_params: mcdc::Parameters::branch(condition_info.into()), + file_id, + expanded_file_id: 0, + start_line, + start_col, + end_line, + end_col, + kind: RegionKind::MCDCBranchRegion, + } + } + + pub(crate) fn decision_region( + decision_info: DecisionInfo, + file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + let mcdc_params = mcdc::Parameters::decision(decision_info.into()); + + Self { + counter: Counter::ZERO, + false_counter: Counter::ZERO, + mcdc_params, + file_id, + expanded_file_id: 0, + start_line, + start_col, + end_line, + end_col, + kind: RegionKind::MCDCDecisionRegion, + } + } + // This function might be used in the future; the LLVM API is still evolving, as is coverage // support. 
#[allow(dead_code)] @@ -233,6 +386,7 @@ impl CounterMappingRegion { Self { counter: Counter::ZERO, false_counter: Counter::ZERO, + mcdc_params: mcdc::Parameters::none(), file_id, expanded_file_id, start_line, @@ -256,6 +410,7 @@ impl CounterMappingRegion { Self { counter: Counter::ZERO, false_counter: Counter::ZERO, + mcdc_params: mcdc::Parameters::none(), file_id, expanded_file_id: 0, start_line, @@ -280,6 +435,7 @@ impl CounterMappingRegion { Self { counter, false_counter: Counter::ZERO, + mcdc_params: mcdc::Parameters::none(), file_id, expanded_file_id: 0, start_line, diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 140566e8da9..085ce15d81f 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -13,7 +13,7 @@ use rustc_codegen_ssa::traits::{ use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_llvm::RustString; use rustc_middle::bug; -use rustc_middle::mir::coverage::CoverageKind; +use rustc_middle::mir::coverage::{CoverageKind, FunctionCoverageInfo}; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::Instance; use rustc_target::abi::Align; @@ -30,6 +30,7 @@ pub struct CrateCoverageContext<'ll, 'tcx> { pub(crate) function_coverage_map: RefCell<FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>, pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>, + pub(crate) mcdc_condition_bitmap_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>, } impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> { @@ -37,6 +38,7 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> { Self { function_coverage_map: Default::default(), pgo_func_name_var_map: Default::default(), + mcdc_condition_bitmap_map: Default::default(), } } @@ -45,6 +47,12 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> { ) -> FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>> { self.function_coverage_map.replace(FxIndexMap::default()) } + + /// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is called condition bitmap. + /// This value is named `mcdc.addr` (same as clang) and is a 32-bit integer. + fn try_get_mcdc_condition_bitmap(&self, instance: &Instance<'tcx>) -> Option<&'ll llvm::Value> { + self.mcdc_condition_bitmap_map.borrow().get(instance).copied() + } } // These methods used to be part of trait `CoverageInfoMethods`, which no longer @@ -90,6 +98,10 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { return; }; + if function_coverage_info.mcdc_bitmap_bytes > 0 { + ensure_mcdc_parameters(bx, instance, function_coverage_info); + } + let Some(coverage_context) = bx.coverage_context() else { return }; let mut coverage_map = coverage_context.function_coverage_map.borrow_mut(); let func_coverage = coverage_map @@ -131,10 +143,66 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { CoverageKind::ExpressionUsed { id } => { func_coverage.mark_expression_id_seen(id); } + CoverageKind::CondBitmapUpdate { id, value, .. 
} => { + drop(coverage_map); + assert_ne!( + id.as_u32(), + 0, + "ConditionId of evaluated conditions should never be zero" + ); + let cond_bitmap = coverage_context + .try_get_mcdc_condition_bitmap(&instance) + .expect("mcdc cond bitmap should have been allocated for updating"); + let cond_loc = bx.const_i32(id.as_u32() as i32 - 1); + let bool_value = bx.const_bool(value); + let fn_name = bx.get_pgo_func_name_var(instance); + let hash = bx.const_u64(function_coverage_info.function_source_hash); + bx.mcdc_condbitmap_update(fn_name, hash, cond_loc, cond_bitmap, bool_value); + } + CoverageKind::TestVectorBitmapUpdate { bitmap_idx } => { + drop(coverage_map); + let cond_bitmap = coverage_context + .try_get_mcdc_condition_bitmap(&instance) + .expect("mcdc cond bitmap should have been allocated for merging into the global bitmap"); + let bitmap_bytes = bx.tcx().coverage_ids_info(instance.def).mcdc_bitmap_bytes; + assert!(bitmap_idx < bitmap_bytes, "bitmap index of the decision out of range"); + assert!( + bitmap_bytes <= function_coverage_info.mcdc_bitmap_bytes, + "bitmap length disagreement: query says {bitmap_bytes} but function info only has {}", + function_coverage_info.mcdc_bitmap_bytes + ); + + let fn_name = bx.get_pgo_func_name_var(instance); + let hash = bx.const_u64(function_coverage_info.function_source_hash); + let bitmap_bytes = bx.const_u32(bitmap_bytes); + let bitmap_index = bx.const_u32(bitmap_idx); + bx.mcdc_tvbitmap_update(fn_name, hash, bitmap_bytes, bitmap_index, cond_bitmap); + } } } } +fn ensure_mcdc_parameters<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + instance: Instance<'tcx>, + function_coverage_info: &FunctionCoverageInfo, +) { + let Some(cx) = bx.coverage_context() else { return }; + if cx.mcdc_condition_bitmap_map.borrow().contains_key(&instance) { + return; + } + + let fn_name = bx.get_pgo_func_name_var(instance); + let hash = bx.const_u64(function_coverage_info.function_source_hash); + let bitmap_bytes = bx.const_u32(function_coverage_info.mcdc_bitmap_bytes); + let cond_bitmap = bx.mcdc_parameters(fn_name, hash, bitmap_bytes); + bx.coverage_context() + .expect("already checked above") + .mcdc_condition_bitmap_map + .borrow_mut() + .insert(instance, cond_bitmap); +} + /// Calls llvm::createPGOFuncNameVar() with the given function instance's /// mangled function name. 
The LLVM API returns an llvm::GlobalVariable /// containing the function name, with the specific variable name and linkage diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index dc52dd156b7..41347333ea6 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -18,7 +18,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf}; use rustc_middle::ty::{self, GenericArgsRef, Ty}; use rustc_middle::{bug, span_bug}; use rustc_span::{sym, Span, Symbol}; -use rustc_target::abi::{self, Align, HasDataLayout, Primitive}; +use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size}; use rustc_target::spec::{HasTargetSpec, PanicStrategy}; use std::cmp::Ordering; @@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { llvm::LLVMSetAlignment(load, align); } if !result.layout.is_zst() { - self.store(load, result.llval, result.align); + self.store_to_place(load, result.val); } return Ok(()); } @@ -315,25 +315,32 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { Some((width, signed)) => match name { sym::ctlz | sym::cttz => { let y = self.const_bool(false); - self.call_intrinsic( + let ret = self.call_intrinsic( &format!("llvm.{name}.i{width}"), &[args[0].immediate(), y], - ) + ); + + self.intcast(ret, llret_ty, false) } sym::ctlz_nonzero => { let y = self.const_bool(true); let llvm_name = &format!("llvm.ctlz.i{width}"); - self.call_intrinsic(llvm_name, &[args[0].immediate(), y]) + let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]); + self.intcast(ret, llret_ty, false) } sym::cttz_nonzero => { let y = self.const_bool(true); let llvm_name = &format!("llvm.cttz.i{width}"); - self.call_intrinsic(llvm_name, &[args[0].immediate(), y]) + let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]); + self.intcast(ret, llret_ty, false) + } + sym::ctpop => { + let ret = self.call_intrinsic( + &format!("llvm.ctpop.i{width}"), + &[args[0].immediate()], + ); + self.intcast(ret, llret_ty, false) } - sym::ctpop => self.call_intrinsic( - &format!("llvm.ctpop.i{width}"), - &[args[0].immediate()], - ), sym::bswap => { if width == 8 { args[0].immediate() // byte swap a u8/i8 is just a no-op @@ -355,6 +362,10 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { // rotate = funnel shift with first two args the same let llvm_name = &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width); + + // llvm expects shift to be the same type as the values, but rust always uses `u32` + let raw_shift = self.intcast(raw_shift, self.val_ty(val), false); + self.call_intrinsic(llvm_name, &[val, val, raw_shift]) } sym::saturating_add | sym::saturating_sub => { @@ -428,7 +439,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::black_box => { args[0].val.store(self, result); - let result_val_span = [result.llval]; + let result_val_span = [result.val.llval]; // We need to "use" the argument in some way LLVM can't introspect, and on // targets that support it we can typically leverage inline assembly to do // this. LLVM's interpretation of inline assembly is that it's, well, a black @@ -482,7 +493,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { if !fn_abi.ret.is_ignore() { if let PassMode::Cast { .. 
} = &fn_abi.ret.mode { - self.store(llval, result.llval, result.align); + self.store(llval, result.val.llval, result.val.align); } else { OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val @@ -638,8 +649,9 @@ fn codegen_msvc_try<'ll>( // } // // More information can be found in libstd's seh.rs implementation. + let ptr_size = bx.tcx().data_layout.pointer_size; let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(bx.type_ptr(), ptr_align); + let slot = bx.alloca(ptr_size, ptr_align); let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None); @@ -909,15 +921,14 @@ fn codegen_emcc_try<'ll>( // We need to pass two values to catch_func (ptr and is_rust_panic), so // create an alloca and pass a pointer to that. + let ptr_size = bx.tcx().data_layout.pointer_size; let ptr_align = bx.tcx().data_layout.pointer_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi; - let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false); - let catch_data = bx.alloca(catch_data_type, ptr_align); - let catch_data_0 = - bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]); - bx.store(ptr, catch_data_0, ptr_align); - let catch_data_1 = - bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]); + // Required in order for there to be no padding between the fields. + assert!(i8_align <= ptr_align); + let catch_data = bx.alloca(2 * ptr_size, ptr_align); + bx.store(ptr, catch_data, ptr_align); + let catch_data_1 = bx.inbounds_ptradd(catch_data, bx.const_usize(ptr_size.bytes())); bx.store(is_rust_panic, catch_data_1, i8_align); let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); @@ -1065,7 +1076,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let place = PlaceRef::alloca(bx, args[0].layout); args[0].val.store(bx, place); let int_ty = bx.type_ix(expected_bytes * 8); - bx.load(int_ty, place.llval, Align::ONE) + bx.load(int_ty, place.val.llval, Align::ONE) } _ => return_error!(InvalidMonomorphization::InvalidBitmask { span, @@ -1363,7 +1374,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8)); // Convert the integer to a byte array - let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); + let ptr = bx.alloca(Size::from_bytes(expected_bytes), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); return Ok(bx.load(array_ty, ptr, Align::ONE)); diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 96265043a1c..7a34e21628d 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1633,6 +1633,10 @@ extern "C" { // Miscellaneous instructions pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value; + pub fn LLVMRustGetInstrProfMCDCParametersIntrinsic(M: &Module) -> &Value; + pub fn LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(M: &Module) -> &Value; + pub fn LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(M: &Module) -> &Value; + pub fn LLVMRustBuildCall<'a>( B: &Builder<'a>, Ty: &'a Type, @@ -2305,6 +2309,7 @@ extern "C" { Members: *const &RustArchiveMember<'_>, WriteSymbtab: bool, Kind: ArchiveKind, + isEC: bool, ) -> LLVMRustResult; pub fn LLVMRustArchiveMemberNew<'a>( Filename: *const c_char, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs 
b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index c9e62e504ae..5552b381025 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -270,9 +270,10 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> {
             "sve2-bitperm",
             TargetFeatureFoldStrength::EnableOnly("neon"),
         ),
-        // The unaligned-scalar-mem feature was renamed to fast-unaligned-access.
-        ("riscv32" | "riscv64", "fast-unaligned-access") if get_version().0 <= 17 => {
-            LLVMFeature::new("unaligned-scalar-mem")
+        // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single feature called
+        // `fast-unaligned-access`. In LLVM 19, it was split back out.
+        ("riscv32" | "riscv64", "unaligned-scalar-mem") if get_version().0 == 18 => {
+            LLVMFeature::new("fast-unaligned-access")
         }
         // For LLVM 18, enable the evex512 target feature if a avx512 target feature is enabled.
         ("x86", s) if get_version().0 >= 18 && s.starts_with("avx512") => {
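
Note on the MC/DC changes above: the builder gains three profiling hooks. `mcdc_parameters` allocates a per-function i32 scratch slot named `mcdc.addr`, `mcdc_condbitmap_update` records how a single condition of a decision evaluated (the condition index is `ConditionId - 1`, since MIR IDs are 1-based), and `mcdc_tvbitmap_update` folds the accumulated value into the function's global test-vector bitmap and resets the scratch slot. The sketch below is not compiler code; it is a rough runtime model of what the lowered intrinsics do, assuming LLVM 18's lowering in which the decision's `bitmap_idx` is a byte offset into the bitmap.

    /// Rough model (not compiler code) of the runtime effect of the MC/DC
    /// instrumentation emitted above, for a single decision.
    struct McdcDecision {
        /// The scratch value backing `mcdc.addr`; starts at 0 and is cleared
        /// again after every `tv_bitmap_update`.
        cond_bitmap: u32,
        /// The function's global test-vector bitmap (`mcdc_bitmap_bytes` long).
        global_bitmap: Vec<u8>,
    }

    impl McdcDecision {
        /// Models `llvm.instrprof.mcdc.condbitmap.update`: set bit `cond_loc`
        /// if the condition evaluated to true.
        fn cond_bitmap_update(&mut self, cond_loc: u32, value: bool) {
            self.cond_bitmap |= (value as u32) << cond_loc;
        }

        /// Models `llvm.instrprof.mcdc.tvbitmap.update`: mark the observed test
        /// vector in the global bitmap at the decision's byte offset
        /// `bitmap_idx`, then clear the scratch value for the next execution.
        fn tv_bitmap_update(&mut self, bitmap_idx: u32) {
            let byte = (bitmap_idx + self.cond_bitmap / 8) as usize;
            self.global_bitmap[byte] |= 1u8 << (self.cond_bitmap % 8);
            self.cond_bitmap = 0;
        }
    }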
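
The `common.rs`/`consts.rs` changes stop emitting empty LLVM allocations for references to zero-sized constants: `const_alloc_to_llvm` now asserts that non-static allocations are non-empty, and a `&ZST` constant is lowered directly to an aligned integer address. A minimal standalone model of that value (not compiler code): the alignment is always at least 1, so the address is non-null and suitably aligned without any backing storage.

    /// Model of the constant a reference to a zero-sized type now lowers to:
    /// the type's alignment, reinterpreted as a pointer. No global is emitted.
    fn zst_ref_addr<T>() -> *const T {
        // e.g. 1 for `&()`, 8 for a ZST with `align(8)`.
        std::mem::align_of::<T>() as *const T
    }

    fn main() {
        // Usage: a dangling-but-valid-for-ZST pointer, in the spirit of
        // `NonNull::dangling`.
        let p: *const () = zst_ref_addr::<()>();
        assert_eq!(p as usize, 1);
    }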
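
The `intrinsic.rs` changes add an `intcast` after `llvm.ctlz`/`llvm.cttz`/`llvm.ctpop` and before `llvm.fshl`/`llvm.fshr`. The reason is a width mismatch: the LLVM intrinsics operate entirely at the width of their operand, while the Rust-side intrinsics return (or, for rotates, take) a `u32`. A small standalone illustration of the two casts, not compiler code:

    /// `llvm.ctpop.i64` yields an i64; the Rust `ctpop` intrinsic is declared
    /// to return u32, so the result needs a truncating cast (the added intcast).
    fn ctpop_u64(x: u64) -> u32 {
        (x.count_ones() as u64 /* width of the llvm.ctpop.i64 result */) as u32
    }

    /// `llvm.fshl.i64` expects the shift amount at the value's width, but
    /// `rotate_left` always takes u32, so the shift is widened before the call.
    fn rotate_left_u64(val: u64, raw_shift: u32) -> u64 {
        // The widening cast plus fshl's shift-modulo-bitwidth semantics.
        let shift = (raw_shift as u64) % 64;
        if shift == 0 { val } else { (val << shift) | (val >> (64 - shift)) }
    }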
