Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs                    |   9
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs                 |  10
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs                |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs  |  72
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs              | 149
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs               |   1
6 files changed, 99 insertions(+), 144 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index 9e834b83df4..6e3a4cae2f6 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -362,9 +362,14 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                         // currently use this mode so we have to allow it -- but we absolutely
                         // shouldn't let any more targets do that.
                         // (Also see <https://github.com/rust-lang/rust/issues/115666>.)
+                        //
+                        // The unstable abi `PtxKernel` also uses Direct for now.
+                        // It needs to switch to something else before stabilization can happen.
+                        // (See issue: https://github.com/rust-lang/rust/issues/117271)
                         assert!(
-                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64"),
-                            "`PassMode::Direct` for aggregates only allowed on wasm targets\nProblematic type: {:#?}",
+                            matches!(&*cx.tcx.sess.target.arch, "wasm32" | "wasm64")
+                                || self.conv == Conv::PtxKernel,
+                            "`PassMode::Direct` for aggregates only allowed on wasm and `extern \"ptx-kernel\"` fns\nProblematic type: {:#?}",
                             arg.layout,
                         );
                     }
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 73821b1685d..307c1264dc1 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -374,15 +374,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {

             let g = self.get_static(def_id);

-            // boolean SSA values are i1, but they have to be stored in i8 slots,
-            // otherwise some LLVM optimization passes don't work as expected
-            let mut val_llty = self.val_ty(v);
-            let v = if val_llty == self.type_i1() {
-                val_llty = self.type_i8();
-                llvm::LLVMConstZExt(v, val_llty)
-            } else {
-                v
-            };
+            let val_llty = self.val_ty(v);

             let instance = Instance::mono(self.tcx, def_id);
             let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index b4b2ab1e1f8..4dd6372b5e0 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -26,8 +26,8 @@ use rustc_middle::{bug, span_bug};
 use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
 use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
 use rustc_session::Session;
-use rustc_span::source_map::Span;
 use rustc_span::source_map::Spanned;
+use rustc_span::Span;
 use rustc_target::abi::{
     call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
 };
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
index 93a8a4b1d5e..cd67fafb8e4 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs
@@ -102,7 +102,7 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
         // have zero as both of their operands, and will therefore always have
         // a value of zero. Other expressions that refer to these as operands
         // can have those operands replaced with `CovTerm::Zero`.
-        let mut zero_expressions = FxIndexSet::default();
+        let mut zero_expressions = ZeroExpressions::default();

         // Simplify a copy of each expression based on lower-numbered expressions,
         // and then update the set of always-zero expressions if necessary.
@@ -131,16 +131,16 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
                 )
             };

-            // If an operand refers to an expression that is always zero, then
-            // that operand can be replaced with `CovTerm::Zero`.
-            let maybe_set_operand_to_zero = |operand: &mut CovTerm| match *operand {
-                CovTerm::Expression(id) => {
+            // If an operand refers to a counter or expression that is always
+            // zero, then that operand can be replaced with `CovTerm::Zero`.
+            let maybe_set_operand_to_zero = |operand: &mut CovTerm| {
+                if let CovTerm::Expression(id) = *operand {
                     assert_operand_expression_is_lower(id);
-                    if zero_expressions.contains(&id) {
-                        *operand = CovTerm::Zero;
-                    }
                 }
-                _ => (),
+
+                if is_zero_term(&self.counters_seen, &zero_expressions, *operand) {
+                    *operand = CovTerm::Zero;
+                }
             };
             maybe_set_operand_to_zero(&mut lhs);
             maybe_set_operand_to_zero(&mut rhs);
@@ -159,7 +159,7 @@ impl<'tcx> FunctionCoverageCollector<'tcx> {
             }
         }

-        ZeroExpressions(zero_expressions)
+        zero_expressions
     }

     pub(crate) fn into_finished(self) -> FunctionCoverage<'tcx> {
@@ -205,19 +205,14 @@ impl<'tcx> FunctionCoverage<'tcx> {
         // thing on the Rust side unless we're confident we can do much better.
         // (See `CounterExpressionsMinimizer` in `CoverageMappingWriter.cpp`.)

-        let counter_from_operand = |operand: CovTerm| match operand {
-            CovTerm::Expression(id) if self.zero_expressions.contains(id) => Counter::ZERO,
-            _ => Counter::from_term(operand),
-        };
-
         self.function_coverage_info.expressions.iter().map(move |&Expression { lhs, op, rhs }| {
             CounterExpression {
-                lhs: counter_from_operand(lhs),
+                lhs: self.counter_for_term(lhs),
                 kind: match op {
                     Op::Add => ExprKind::Add,
                     Op::Subtract => ExprKind::Subtract,
                 },
-                rhs: counter_from_operand(rhs),
+                rhs: self.counter_for_term(rhs),
             }
         })
     }
@@ -227,34 +222,49 @@ impl<'tcx> FunctionCoverage<'tcx> {
     pub(crate) fn counter_regions(
         &self,
     ) -> impl Iterator<Item = (Counter, &CodeRegion)> + ExactSizeIterator {
-        // Historically, mappings were stored directly in counter/expression
-        // statements in MIR, and MIR optimizations would sometimes remove them.
-        // That's mostly no longer true, so now we detect cases where that would
-        // have happened, and zero out the corresponding mappings here instead.
-        let counter_for_term = move |term: CovTerm| {
-            let force_to_zero = match term {
-                CovTerm::Counter(id) => !self.counters_seen.contains(id),
-                CovTerm::Expression(id) => self.zero_expressions.contains(id),
-                CovTerm::Zero => false,
-            };
-            if force_to_zero { Counter::ZERO } else { Counter::from_term(term) }
-        };
-
         self.function_coverage_info.mappings.iter().map(move |mapping| {
             let &Mapping { term, ref code_region } = mapping;
-            let counter = counter_for_term(term);
+            let counter = self.counter_for_term(term);
             (counter, code_region)
         })
     }
+
+    fn counter_for_term(&self, term: CovTerm) -> Counter {
+        if is_zero_term(&self.counters_seen, &self.zero_expressions, term) {
+            Counter::ZERO
+        } else {
+            Counter::from_term(term)
+        }
+    }
 }

 /// Set of expression IDs that are known to always evaluate to zero.
 /// Any mapping or expression operand that refers to these expressions can have
 /// that reference replaced with a constant zero value.
+#[derive(Default)]
 struct ZeroExpressions(FxIndexSet<ExpressionId>);

 impl ZeroExpressions {
+    fn insert(&mut self, id: ExpressionId) {
+        self.0.insert(id);
+    }
+
     fn contains(&self, id: ExpressionId) -> bool {
         self.0.contains(&id)
     }
 }
+
+/// Returns `true` if the given term is known to have a value of zero, taking
+/// into account knowledge of which counters are unused and which expressions
+/// are always zero.
+fn is_zero_term(
+    counters_seen: &BitSet<CounterId>,
+    zero_expressions: &ZeroExpressions,
+    term: CovTerm,
+) -> bool {
+    match term {
+        CovTerm::Zero => true,
+        CovTerm::Counter(id) => !counters_seen.contains(id),
+        CovTerm::Expression(id) => zero_expressions.contains(id),
+    }
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a97b803fc64..cc7e78b9c62 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -935,9 +935,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }

     macro_rules! require_simd {
-        ($ty: expr, $diag: expr) => {
-            require!($ty.is_simd(), $diag)
-        };
+        ($ty: expr, $variant:ident) => {{
+            require!($ty.is_simd(), InvalidMonomorphization::$variant { span, name, ty: $ty });
+            $ty.simd_size_and_type(bx.tcx())
+        }};
     }

     let tcx = bx.tcx();
@@ -946,12 +947,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     let arg_tys = sig.inputs();

     if name == sym::simd_select_bitmask {
-        require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
-        );
-
-        let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (len, _) = require_simd!(arg_tys[1], SimdArgument);

         let expected_int_bits = (len.max(8) - 1).next_power_of_two();
         let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
@@ -988,7 +984,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }

     // every intrinsic below takes a SIMD vector as its first argument
-    require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] });
+    let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
     let in_ty = arg_tys[0];

     let comparison = match name {
@@ -1001,11 +997,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         _ => None,
     };

-    let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
     if let Some(cmp_op) = comparison {
-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-
-        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);

         require!(
             in_len == out_len,
@@ -1041,8 +1034,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             .unwrap_branch();
         let n = idx.len() as u64;

-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
         require!(
             out_len == n,
             InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
@@ -1099,8 +1091,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }),
         };

-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-        let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn);
         require!(
             out_len == n,
             InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len }
@@ -1179,11 +1170,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_select {
         let m_elem_ty = in_elem;
         let m_len = in_len;
-        require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] }
-        );
-        let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
         require!(
             m_len == v_len,
             InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@@ -1401,20 +1388,16 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // * M: any integer width is supported, will be truncated to i1

         // All types must be simd vector types
-        require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
-        require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
-        );
-        require_simd!(
-            arg_tys[2],
-            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
-        );
-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
+
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
+        let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+        // The element type of the third argument must be a signed integer type of any width:
+        let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+        require_simd!(ret_ty, SimdReturn);

         // Of the same length:
-        let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
         require!(
             in_len == out_len,
             InvalidMonomorphization::SecondArgumentLength {
@@ -1444,11 +1427,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
         );

-        // The second argument must be a simd vector with an element type that's a pointer
-        // to the element type of the first argument
-        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
-        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-
         require!(
             matches!(
                 element_ty1.kind(),
@@ -1465,20 +1443,15 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         );

-        // The element type of the third argument must be a signed integer type of any width:
-        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
         match element_ty2.kind() {
             ty::Int(_) => (),
             _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ThirdArgElementType {
-                        span,
-                        name,
-                        expected_element: element_ty2,
-                        third_arg: arg_tys[2]
-                    }
-                );
+                return_error!(InvalidMonomorphization::ThirdArgElementType {
+                    span,
+                    name,
+                    expected_element: element_ty2,
+                    third_arg: arg_tys[2]
+                });
             }
         }

@@ -1527,19 +1500,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // * M: any integer width is supported, will be truncated to i1

         // All types must be simd vector types
-        require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty });
-        require_simd!(
-            arg_tys[1],
-            InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] }
-        );
-        require_simd!(
-            arg_tys[2],
-            InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] }
-        );
+        // The second argument must be a simd vector with an element type that's a pointer
+        // to the element type of the first argument
+        let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
+        let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+        let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);

         // Of the same length:
-        let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
         require!(
             in_len == element_len1,
             InvalidMonomorphization::SecondArgumentLength {
@@ -1563,12 +1530,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         );

-        // The second argument must be a simd vector with an element type that's a pointer
-        // to the element type of the first argument
-        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
-        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-
         require!(
             matches!(
                 element_ty1.kind(),
@@ -1590,15 +1551,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         match element_ty2.kind() {
             ty::Int(_) => (),
             _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ThirdArgElementType {
-                        span,
-                        name,
-                        expected_element: element_ty2,
-                        third_arg: arg_tys[2]
-                    }
-                );
+                return_error!(InvalidMonomorphization::ThirdArgElementType {
+                    span,
+                    name,
+                    expected_element: element_ty2,
+                    third_arg: arg_tys[2]
+                });
             }
         }

@@ -1794,8 +1752,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     bitwise_red!(simd_reduce_any: vector_reduce_or, true);

     if name == sym::simd_cast_ptr {
-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
             InvalidMonomorphization::ReturnLengthInputType {
@@ -1843,8 +1800,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }

     if name == sym::simd_expose_addr {
-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
             InvalidMonomorphization::ReturnLengthInputType {
@@ -1872,8 +1828,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }

     if name == sym::simd_from_exposed_addr {
-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
             InvalidMonomorphization::ReturnLengthInputType {
@@ -1901,8 +1856,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }

     if name == sym::simd_cast || name == sym::simd_as {
-        require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty });
-        let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+        let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
             InvalidMonomorphization::ReturnLengthInputType {
@@ -1989,17 +1943,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
             _ => { /* Unsupported. Fallthrough. */ }
         }
-        require!(
-            false,
-            InvalidMonomorphization::UnsupportedCast {
-                span,
-                name,
-                in_ty,
-                in_elem,
-                ret_ty,
-                out_elem
-            }
-        );
+        return_error!(InvalidMonomorphization::UnsupportedCast {
+            span,
+            name,
+            in_ty,
+            in_elem,
+            ret_ty,
+            out_elem
+        });
     }
     macro_rules! arith_binary {
         ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
@@ -2010,8 +1961,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                     })*
                     _ => {},
                 }
-                require!(
-                    false,
+                return_error!(
                     InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
                 );
             })*
@@ -2041,8 +1991,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                     })*
                     _ => {},
                 }
-                require!(
-                    false,
+                return_error!(
                     InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
                 );
             })*
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index a038b3af03d..7fc02a95be0 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -969,7 +969,6 @@ extern "C" {
         ConstantIndices: *const &'a Value,
         NumIndices: c_uint,
     ) -> &'a Value;
-    pub fn LLVMConstZExt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
    pub fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
    pub fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
    pub fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
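
For reference, here is a minimal standalone sketch (not part of the commit, and not the rustc source) of the zero-term check that the map_data.rs change centralizes in `is_zero_term`. It assumes plain `HashSet<u32>` in place of rustc's `BitSet<CounterId>` and `ZeroExpressions(FxIndexSet<ExpressionId>)`, so it compiles outside the compiler; everything except the `is_zero_term`/`CovTerm` names is illustrative.

// Standalone sketch of the consolidated zero-term check, under the
// assumptions stated above.
use std::collections::HashSet;

#[derive(Clone, Copy)]
enum CovTerm {
    Zero,
    Counter(u32),
    Expression(u32),
}

/// A term is zero if it is literally `Zero`, refers to a counter that was
/// never seen during codegen, or refers to an always-zero expression.
fn is_zero_term(
    counters_seen: &HashSet<u32>,
    zero_expressions: &HashSet<u32>,
    term: CovTerm,
) -> bool {
    match term {
        CovTerm::Zero => true,
        CovTerm::Counter(id) => !counters_seen.contains(&id),
        CovTerm::Expression(id) => zero_expressions.contains(&id),
    }
}

fn main() {
    let counters_seen: HashSet<u32> = HashSet::from([0]);
    let zero_expressions: HashSet<u32> = HashSet::from([1]);

    // Counter 1 was never seen, so its mappings collapse to a constant zero.
    assert!(is_zero_term(&counters_seen, &zero_expressions, CovTerm::Counter(1)));
    // Expression 0 is not in the always-zero set, so it keeps its real counter.
    assert!(!is_zero_term(&counters_seen, &zero_expressions, CovTerm::Expression(0)));
}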
