Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs                 | 104
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs    |  12
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs |  13
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs    |   4
4 files changed, 72 insertions, 61 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 147939d3a52..e5f5146fac8 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -203,57 +203,63 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
         val: &'ll Value,
         dst: PlaceRef<'tcx, &'ll Value>,
     ) {
-        if self.is_ignore() {
-            return;
-        }
-        if self.is_sized_indirect() {
-            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
-        } else if self.is_unsized_indirect() {
-            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode {
-            // FIXME(eddyb): Figure out when the simpler Store is safe, clang
-            // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
-            let can_store_through_cast_ptr = false;
-            if can_store_through_cast_ptr {
-                bx.store(val, dst.llval, self.layout.align.abi);
-            } else {
-                // The actual return type is a struct, but the ABI
-                // adaptation code has cast it into some scalar type. The
-                // code that follows is the only reliable way I have
-                // found to do a transform like i64 -> {i32,i32}.
-                // Basically we dump the data onto the stack then memcpy it.
-                //
-                // Other approaches I tried:
-                // - Casting rust ret pointer to the foreign type and using Store
-                //   is (a) unsafe if size of foreign type > size of rust type and
-                //   (b) runs afoul of strict aliasing rules, yielding invalid
-                //   assembly under -O (specifically, the store gets removed).
-                // - Truncating foreign type to correct integral type and then
-                //   bitcasting to the struct type yields invalid cast errors.
-
-                // We instead thus allocate some scratch space...
-                let scratch_size = cast.size(bx);
-                let scratch_align = cast.align(bx);
-                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
-                bx.lifetime_start(llscratch, scratch_size);
-
-                // ... where we first store the value...
-                bx.store(val, llscratch, scratch_align);
-
-                // ... and then memcpy it to the intended destination.
-                bx.memcpy(
-                    dst.llval,
-                    self.layout.align.abi,
-                    llscratch,
-                    scratch_align,
-                    bx.const_usize(self.layout.size.bytes()),
-                    MemFlags::empty(),
-                );
+        match &self.mode {
+            PassMode::Ignore => {}
+            // Sized indirect arguments
+            PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
+                let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
+                OperandValue::Ref(val, None, align).store(bx, dst);
+            }
+            // Unsized indirect arguments
+            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
+                bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+            }
+            PassMode::Cast { cast, pad_i32: _ } => {
+                // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+                // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+                let can_store_through_cast_ptr = false;
+                if can_store_through_cast_ptr {
+                    bx.store(val, dst.llval, self.layout.align.abi);
+                } else {
+                    // The actual return type is a struct, but the ABI
+                    // adaptation code has cast it into some scalar type. The
+                    // code that follows is the only reliable way I have
+                    // found to do a transform like i64 -> {i32,i32}.
+                    // Basically we dump the data onto the stack then memcpy it.
+                    //
+                    // Other approaches I tried:
+                    // - Casting rust ret pointer to the foreign type and using Store
+                    //   is (a) unsafe if size of foreign type > size of rust type and
+                    //   (b) runs afoul of strict aliasing rules, yielding invalid
+                    //   assembly under -O (specifically, the store gets removed).
+                    // - Truncating foreign type to correct integral type and then
+                    //   bitcasting to the struct type yields invalid cast errors.
+
+                    // We instead thus allocate some scratch space...
+                    let scratch_size = cast.size(bx);
+                    let scratch_align = cast.align(bx);
+                    let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+                    bx.lifetime_start(llscratch, scratch_size);
+
+                    // ... where we first store the value...
+                    bx.store(val, llscratch, scratch_align);
+
+                    // ... and then memcpy it to the intended destination.
+                    bx.memcpy(
+                        dst.llval,
+                        self.layout.align.abi,
+                        llscratch,
+                        scratch_align,
+                        bx.const_usize(self.layout.size.bytes()),
+                        MemFlags::empty(),
+                    );
 
-                bx.lifetime_end(llscratch, scratch_size);
+                    bx.lifetime_end(llscratch, scratch_size);
+                }
+            }
+            _ => {
+                OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
             }
-        } else {
-            OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
         }
     }
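Note: the long comment in the `PassMode::Cast` arm describes the trick in LLVM-builder terms. As a rough plain-Rust analogy of the same "spill to scratch, then memcpy" transform for i64 -> {i32, i32} (the `Pair` struct and `store_via_scratch` helper below are hypothetical, for illustration only, and are not rustc code):

// Hypothetical illustration, not rustc code: reinterpret a u64 as a
// two-field struct by staging the bytes in a scratch buffer and then
// copying them, mirroring the alloca + store + memcpy sequence above.
#[repr(C)]
#[derive(Debug)]
struct Pair {
    lo: u32,
    hi: u32,
}

fn store_via_scratch(val: u64, dst: &mut Pair) {
    // "We instead thus allocate some scratch space..." -- here, a stack
    // array with the source type's size, filled in native byte order.
    let scratch: [u8; 8] = val.to_ne_bytes();

    // "... and then memcpy it to the intended destination."
    unsafe {
        std::ptr::copy_nonoverlapping(
            scratch.as_ptr(),
            (dst as *mut Pair).cast::<u8>(),
            std::mem::size_of::<Pair>(),
        );
    }
}

fn main() {
    let mut p = Pair { lo: 0, hi: 0 };
    store_via_scratch(0x1122_3344_5566_7788, &mut p);
    // On a little-endian target: Pair { lo: 0x55667788, hi: 0x11223344 }
    println!("{p:?}");
}

The point mirrored here is that the destination is written through a byte copy sized to the destination, rather than by storing the scalar through a pointer cast to an unrelated type.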
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
index 017843c7e7d..2af28146a51 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs
@@ -164,6 +164,15 @@ impl CounterMappingRegion {
                 end_line,
                 end_col,
             ),
+            MappingKind::Branch { true_term, false_term } => Self::branch_region(
+                Counter::from_term(true_term),
+                Counter::from_term(false_term),
+                local_file_id,
+                start_line,
+                start_col,
+                end_line,
+                end_col,
+            ),
         }
     }
 
@@ -188,9 +197,6 @@ impl CounterMappingRegion {
         }
     }
 
-    // This function might be used in the future; the LLVM API is still evolving, as is coverage
-    // support.
-    #[allow(dead_code)]
     pub(crate) fn branch_region(
         counter: Counter,
         false_counter: Counter,
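Note: the new `MappingKind::Branch` arm simply forwards the two outcome terms to the existing `branch_region` constructor, which is what lets the `#[allow(dead_code)]` above it be removed. A much-simplified sketch of that dispatch shape (`Counter`, `MappingKind`, and `Region` below are stand-ins for illustration, not the real coverage FFI types):

// Simplified stand-ins for the coverage FFI types; only the shape of
// the dispatch is meant to match the new match arm.
#[derive(Clone, Copy, Debug)]
struct Counter(u32);

enum MappingKind {
    Code(Counter),
    Branch { true_term: Counter, false_term: Counter },
}

#[derive(Debug)]
enum Region {
    Code { counter: Counter },
    Branch { counter: Counter, false_counter: Counter },
}

fn from_mapping_kind(kind: &MappingKind) -> Region {
    match *kind {
        MappingKind::Code(counter) => Region::Code { counter },
        // Like the added arm: both outcome counters are forwarded to
        // the branch-region constructor.
        MappingKind::Branch { true_term, false_term } => {
            Region::Branch { counter: true_term, false_counter: false_term }
        }
    }
}

fn main() {
    let kind = MappingKind::Branch { true_term: Counter(1), false_term: Counter(2) };
    println!("{:?}", from_mapping_kind(&kind));
}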
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index c45787f35aa..ee7ea342301 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -355,21 +355,20 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
     let tcx = cx.tcx;
 
-    let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
-
     let eligible_def_ids = tcx.mir_keys(()).iter().filter_map(|local_def_id| {
         let def_id = local_def_id.to_def_id();
         let kind = tcx.def_kind(def_id);
         // `mir_keys` will give us `DefId`s for all kinds of things, not
         // just "functions", like consts, statics, etc. Filter those out.
-        // If `ignore_unused_generics` was specified, filter out any
-        // generic functions from consideration as well.
         if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) {
             return None;
         }
-        if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) {
-            return None;
-        }
+
+        // FIXME(79651): Consider trying to filter out dummy instantiations of
+        // unused generic functions from library crates, because they can produce
+        // "unused instantiation" in coverage reports even when they are actually
+        // used by some downstream crate in the same binary.
+
         Some(local_def_id.to_def_id())
     });
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 733a77d24c2..133084b7c12 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -88,7 +88,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
         match coverage.kind {
             // Marker statements have no effect during codegen,
             // so return early and don't create `func_coverage`.
-            CoverageKind::SpanMarker => return,
+            CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => return,
             // Match exhaustively to ensure that newly-added kinds are classified correctly.
             CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. } => {}
         }
@@ -108,7 +108,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
 
         let Coverage { kind } = coverage;
         match *kind {
-            CoverageKind::SpanMarker => unreachable!(
+            CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => unreachable!(
                 "unexpected marker statement {kind:?} should have caused an early return"
             ),
             CoverageKind::CounterIncrement { id } => {
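Note: the two mod.rs hunks keep both match sites in sync; marker statements are filtered out by the early return in the first match, so reaching one in the second match is a bug. A toy model of that invariant (the `Kind` enum and functions below are hypothetical, not the real `CoverageKind` API):

// Toy model of the two match sites: markers are dropped by the early
// return in `add_coverage`, so `codegen` treats them as unreachable.
#[derive(Debug)]
enum Kind {
    SpanMarker,
    BlockMarker { id: u32 },
    CounterIncrement { id: u32 },
}

fn add_coverage(kind: &Kind) -> Option<&Kind> {
    match kind {
        // Marker statements have no effect during codegen.
        Kind::SpanMarker | Kind::BlockMarker { .. } => return None,
        Kind::CounterIncrement { .. } => {}
    }
    Some(kind)
}

fn codegen(kind: &Kind) {
    match kind {
        Kind::SpanMarker | Kind::BlockMarker { .. } => unreachable!(
            "unexpected marker statement {kind:?} should have caused an early return"
        ),
        Kind::CounterIncrement { id } => println!("increment counter {id}"),
    }
}

fn main() {
    let kinds = [Kind::SpanMarker, Kind::BlockMarker { id: 0 }, Kind::CounterIncrement { id: 1 }];
    for kind in &kinds {
        if let Some(k) = add_coverage(kind) {
            codegen(k);
        }
    }
}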
