Diffstat (limited to 'compiler/rustc_codegen_llvm/src/builder.rs')
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs  373
1 file changed, 217 insertions(+), 156 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 4a4cccb490d..b4aa001547c 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1,34 +1,32 @@
+use crate::abi::FnAbiLlvmExt;
 use crate::attributes;
 use crate::common::Funclet;
 use crate::context::CodegenCx;
-use crate::llvm::{self, BasicBlock, False};
-use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
-use crate::llvm_util;
+use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True};
 use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
-use cstr::cstr;
 use libc::{c_char, c_uint};
-use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind};
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
 };
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::Span;
+use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions};
 use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
-use rustc_target::spec::{HasTargetSpec, Target};
+use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
 use std::borrow::Cow;
-use std::ffi::CStr;
 use std::iter;
 use std::ops::Deref;
 use std::ptr;
-use tracing::{debug, instrument};

 // All Builders must have an llfn associated with them
 #[must_use]
@@ -45,13 +43,10 @@ impl Drop for Builder<'_, '_, '_> {
     }
 }

-// FIXME(eddyb) use a checked constructor when they become `const fn`.
-const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };
-
 /// Empty string, to be used where LLVM expects an instruction name, indicating
 /// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
 // FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
-const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
+const UNNAMED: *const c_char = c"".as_ptr();

 impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
     type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
@@ -217,6 +212,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn invoke(
         &mut self,
         llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
         args: &[&'ll Value],
         then: &'ll BasicBlock,
@@ -226,10 +223,20 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         debug!("invoke {:?} with args ({:?})", llfn, args);

         let args = self.check_call("invoke", llty, llfn, args);
-        let bundle = funclet.map(|funclet| funclet.bundle());
-        let bundle = bundle.as_ref().map(|b| &*b.raw);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles = vec![funclet_bundle];

-        unsafe {
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        bundles.push(kcfi_bundle);
+
+        bundles.retain(|bundle| bundle.is_some());
+        let invoke = unsafe {
             llvm::LLVMRustBuildInvoke(
                 self.llbuilder,
                 llty,
@@ -238,10 +245,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 args.len() as c_uint,
                 then,
                 catch,
-                bundle,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
                 UNNAMED,
             )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, invoke);
         }
+        invoke
     }

     fn unreachable(&mut self) {
@@ -362,11 +374,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     Int(I64) => "llvm.ssub.with.overflow.i64",
                     Int(I128) => "llvm.ssub.with.overflow.i128",

-                    Uint(U8) => "llvm.usub.with.overflow.i8",
-                    Uint(U16) => "llvm.usub.with.overflow.i16",
-                    Uint(U32) => "llvm.usub.with.overflow.i32",
-                    Uint(U64) => "llvm.usub.with.overflow.i64",
-                    Uint(U128) => "llvm.usub.with.overflow.i128",
+                    Uint(_) => {
+                        // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these
+                        // to be the canonical form. It will attempt to reform llvm.usub.with.overflow
+                        // in the backend if profitable.
+                        let sub = self.sub(lhs, rhs);
+                        let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs);
+                        return (sub, cmp);
+                    }

                     _ => unreachable!(),
                 },
@@ -408,20 +423,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
         let mut bx = Builder::with_cx(self.cx);
         bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
-        bx.dynamic_alloca(ty, align)
-    }
-
-    fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
         unsafe {
-            let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
+            let alloca = llvm::LLVMBuildAlloca(bx.llbuilder, ty, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
     }

-    fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
+    fn byte_array_alloca(&mut self, len: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
+            let alloca =
+                llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), len, UNNAMED);
             llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
             alloca
         }
@@ -469,7 +481,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert_eq!(place.llextra.is_some(), place.layout.is_unsized());

         if place.layout.is_zst() {
-            return OperandRef::new_zst(self, place.layout);
+            return OperandRef::zero_sized(place.layout);
         }

         #[instrument(level = "trace", skip(bx))]
@@ -480,7 +492,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             layout: TyAndLayout<'tcx>,
             offset: Size,
         ) {
-            if !scalar.is_always_valid(bx) {
+            if !scalar.is_uninit_valid() {
                 bx.noundef_metadata(load);
             }

@@ -490,7 +502,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                         bx.range_metadata(load, scalar.valid_range(bx));
                     }
                 }
-                abi::Pointer => {
+                abi::Pointer(_) => {
                     if !scalar.valid_range(bx).contains(0) {
                         bx.nonnull_metadata(load);
                     }
@@ -553,15 +565,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn write_operand_repeatedly(
-        mut self,
+        &mut self,
         cg_elem: OperandRef<'tcx, &'ll Value>,
         count: u64,
         dest: PlaceRef<'tcx, &'ll Value>,
-    ) -> Self {
+    ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let start = dest.project_index(self, zero).llval;
+        let end = dest.project_index(self, count).llval;

         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -589,7 +601,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         body_bx.br(header_bb);
         header_bx.add_incoming_to_phi(current, next, body_bb);

-        Self::build(self.cx, next_bb)
+        *self = Self::build(self.cx, next_bb);
     }

     fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
@@ -626,32 +638,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn type_metadata(&mut self, function: &'ll Value, typeid: String) {
-        let typeid_metadata = self.typeid_metadata(typeid);
-        let v = [self.const_usize(0), typeid_metadata];
-        unsafe {
-            llvm::LLVMGlobalSetMetadata(
-                function,
-                llvm::MD_type as c_uint,
-                llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
-                    self.cx.llcx,
-                    v.as_ptr(),
-                    v.len() as c_uint,
-                )),
-            )
-        }
-    }
-
-    fn typeid_metadata(&mut self, typeid: String) -> Self::Value {
-        unsafe {
-            llvm::LLVMMDStringInContext(
-                self.cx.llcx,
-                typeid.as_ptr() as *const c_char,
-                typeid.as_bytes().len() as c_uint,
-            )
-        }
-    }
-
     fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
         self.store_with_flags(val, ptr, align, MemFlags::empty())
     }
@@ -752,11 +738,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
     }

-    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+    fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         self.fptoint_sat(false, val, dest_ty)
     }

-    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+    fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         self.fptoint_sat(true, val, dest_ty)
     }

@@ -846,7 +832,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
-        unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
+        unsafe {
+            llvm::LLVMBuildIntCast2(
+                self.llbuilder,
+                val,
+                dest_ty,
+                if is_signed { True } else { False },
+                UNNAMED,
+            )
+        }
     }

     fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -984,29 +978,40 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn cleanup_landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value) -> &'ll Value {
-        let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
+    fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let landing_pad = self.landing_pad(ty, pers_fn, 0);
         unsafe {
             llvm::LLVMSetCleanup(landing_pad, llvm::True);
         }
-        landing_pad
+        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
     }

-    fn resume(&mut self, exn: &'ll Value) {
+    fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
+        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let landing_pad = self.landing_pad(ty, pers_fn, 1);
+        self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[]));
+        (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
+    }
+
+    fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
+        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let mut exn = self.const_poison(ty);
+        exn = self.insert_value(exn, exn0, 0);
+        exn = self.insert_value(exn, exn1, 1);
         unsafe {
             llvm::LLVMBuildResume(self.llbuilder, exn);
         }
     }

     fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
-        let name = cstr!("cleanuppad");
         let ret = unsafe {
-            llvm::LLVMRustBuildCleanupPad(
+            llvm::LLVMBuildCleanupPad(
                 self.llbuilder,
                 parent,
-                args.len() as c_uint,
                 args.as_ptr(),
-                name.as_ptr(),
+                args.len() as c_uint,
+                c"cleanuppad".as_ptr(),
             )
         };
         Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
@@ -1014,20 +1019,19 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

     fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
         unsafe {
-            llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
+            llvm::LLVMBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
                 .expect("LLVM does not have support for cleanupret");
         }
     }

     fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
-        let name = cstr!("catchpad");
         let ret = unsafe {
-            llvm::LLVMRustBuildCatchPad(
+            llvm::LLVMBuildCatchPad(
                 self.llbuilder,
                 parent,
-                args.len() as c_uint,
                 args.as_ptr(),
-                name.as_ptr(),
+                args.len() as c_uint,
+                c"catchpad".as_ptr(),
             )
         };
         Funclet::new(ret.expect("LLVM does not have support for catchpad"))
@@ -1039,20 +1043,19 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unwind: Option<&'ll BasicBlock>,
         handlers: &[&'ll BasicBlock],
     ) -> &'ll Value {
-        let name = cstr!("catchswitch");
         let ret = unsafe {
-            llvm::LLVMRustBuildCatchSwitch(
+            llvm::LLVMBuildCatchSwitch(
                 self.llbuilder,
                 parent,
                 unwind,
                 handlers.len() as c_uint,
-                name.as_ptr(),
+                c"catchswitch".as_ptr(),
             )
         };
         let ret = ret.expect("LLVM does not have support for catchswitch");
         for handler in handlers {
             unsafe {
-                llvm::LLVMRustAddHandler(ret, handler);
+                llvm::LLVMAddHandler(ret, handler);
             }
         }
         ret
@@ -1064,35 +1067,23 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         dst: &'ll Value,
         cmp: &'ll Value,
         src: &'ll Value,
-        mut order: rustc_codegen_ssa::common::AtomicOrdering,
+        order: rustc_codegen_ssa::common::AtomicOrdering,
         failure_order: rustc_codegen_ssa::common::AtomicOrdering,
         weak: bool,
     ) -> &'ll Value {
         let weak = if weak { llvm::True } else { llvm::False };
-        if llvm_util::get_version() < (13, 0, 0) {
-            use rustc_codegen_ssa::common::AtomicOrdering::*;
-            // Older llvm has the pre-C++17 restriction on
-            // success and failure memory ordering,
-            // requiring the former to be at least as strong as the latter.
-            // So, for llvm 12, we upgrade the success ordering to a stronger
-            // one if necessary.
-            match (order, failure_order) {
-                (Relaxed, Acquire) => order = Acquire,
-                (Release, Acquire) => order = AcquireRelease,
-                (_, SequentiallyConsistent) => order = SequentiallyConsistent,
-                _ => {}
-            }
-        }
         unsafe {
-            llvm::LLVMRustBuildAtomicCmpXchg(
+            let value = llvm::LLVMBuildAtomicCmpXchg(
                 self.llbuilder,
                 dst,
                 cmp,
                 src,
                 AtomicOrdering::from_generic(order),
                 AtomicOrdering::from_generic(failure_order),
-                weak,
-            )
+                llvm::False, // SingleThreaded
+            );
+            llvm::LLVMSetWeak(value, weak);
+            value
         }
     }
     fn atomic_rmw(
@@ -1109,7 +1100,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 dst,
                 src,
                 AtomicOrdering::from_generic(order),
-                False,
+                llvm::False, // SingleThreaded
             )
         }
     }
@@ -1117,13 +1108,18 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn atomic_fence(
         &mut self,
         order: rustc_codegen_ssa::common::AtomicOrdering,
-        scope: rustc_codegen_ssa::common::SynchronizationScope,
+        scope: SynchronizationScope,
     ) {
+        let single_threaded = match scope {
+            SynchronizationScope::SingleThread => llvm::True,
+            SynchronizationScope::CrossThread => llvm::False,
+        };
         unsafe {
-            llvm::LLVMRustBuildAtomicFence(
+            llvm::LLVMBuildFence(
                 self.llbuilder,
                 AtomicOrdering::from_generic(order),
-                SynchronizationScope::from_generic(scope),
+                single_threaded,
+                UNNAMED,
             );
         }
     }
@@ -1173,7 +1169,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 llfn,
                 args.as_ptr() as *const &llvm::Value,
                 args.len() as c_uint,
-                None,
+                [].as_ptr(),
+                0 as c_uint,
             );
         }
     }
@@ -1181,6 +1178,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn call(
         &mut self,
         llty: &'ll Type,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         llfn: &'ll Value,
         args: &[&'ll Value],
         funclet: Option<&Funclet<'ll>>,
@@ -1188,19 +1187,34 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         debug!("call {:?} with args ({:?})", llfn, args);

         let args = self.check_call("call", llty, llfn, args);
-        let bundle = funclet.map(|funclet| funclet.bundle());
-        let bundle = bundle.as_ref().map(|b| &*b.raw);
+        let funclet_bundle = funclet.map(|funclet| funclet.bundle());
+        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
+        let mut bundles = vec![funclet_bundle];

-        unsafe {
+        // Emit CFI pointer type membership test
+        self.cfi_type_test(fn_attrs, fn_abi, llfn);
+
+        // Emit KCFI operand bundle
+        let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn);
+        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
+        bundles.push(kcfi_bundle);
+
+        bundles.retain(|bundle| bundle.is_some());
+        let call = unsafe {
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
                 llty,
                 llfn,
                 args.as_ptr() as *const &llvm::Value,
                 args.len() as c_uint,
-                bundle,
+                bundles.as_ptr(),
+                bundles.len() as c_uint,
             )
+        };
+        if let Some(fn_abi) = fn_abi {
+            fn_abi.apply_attrs_callsite(self, call);
         }
+        call
     }

     fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
@@ -1360,8 +1374,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
     }

     pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
-        let ret =
-            unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+        let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
        ret.expect("LLVM does not have support for catchret")
     }

@@ -1433,7 +1446,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {

     pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
         let (ty, f) = self.cx.get_intrinsic(intrinsic);
-        self.call(ty, f, args, None)
+        self.call(ty, None, None, f, args, None)
     }

     fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
@@ -1470,51 +1483,32 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         }
     }

-    fn fptoint_sat_broken_in_llvm(&self) -> bool {
-        match self.tcx.sess.target.arch.as_ref() {
-            // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
-            "riscv64" => llvm_util::get_version() < (13, 0, 0),
-            _ => false,
-        }
-    }
-
-    fn fptoint_sat(
-        &mut self,
-        signed: bool,
-        val: &'ll Value,
-        dest_ty: &'ll Type,
-    ) -> Option<&'ll Value> {
-        if !self.fptoint_sat_broken_in_llvm() {
-            let src_ty = self.cx.val_ty(val);
-            let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector
-            {
-                assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
-                (
-                    self.cx.element_type(src_ty),
-                    self.cx.element_type(dest_ty),
-                    Some(self.cx.vector_length(src_ty)),
-                )
-            } else {
-                (src_ty, dest_ty, None)
-            };
-            let float_width = self.cx.float_width(float_ty);
-            let int_width = self.cx.int_width(int_ty);
-
-            let instr = if signed { "fptosi" } else { "fptoui" };
-            let name = if let Some(vector_length) = vector_length {
-                format!(
-                    "llvm.{}.sat.v{}i{}.v{}f{}",
-                    instr, vector_length, int_width, vector_length, float_width
-                )
-            } else {
-                format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
-            };
-            let f =
-                self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
-            Some(self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None))
+    fn fptoint_sat(&mut self, signed: bool, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        let src_ty = self.cx.val_ty(val);
+        let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
+            assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+            (
+                self.cx.element_type(src_ty),
+                self.cx.element_type(dest_ty),
+                Some(self.cx.vector_length(src_ty)),
+            )
         } else {
-            None
-        }
+            (src_ty, dest_ty, None)
+        };
+        let float_width = self.cx.float_width(float_ty);
+        let int_width = self.cx.int_width(int_ty);
+
+        let instr = if signed { "fptosi" } else { "fptoui" };
+        let name = if let Some(vector_length) = vector_length {
+            format!(
+                "llvm.{}.sat.v{}i{}.v{}f{}",
+                instr, vector_length, int_width, vector_length, float_width
+            )
+        } else {
+            format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+        };
+        let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
+        self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None)
     }

     pub(crate) fn landing_pad(
@@ -1531,4 +1525,71 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
         }
     }
+
+    // Emits CFI pointer type membership tests.
+    fn cfi_type_test(
+        &mut self,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+    ) {
+        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
+        if is_indirect_call && fn_abi.is_some() && self.tcx.sess.is_sanitizer_cfi_enabled() {
+            if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::CFI) {
+                return;
+            }
+
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            let typeid = typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
+            let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
+
+            // Test whether the function pointer is associated with the type identifier.
+            let cond = self.type_test(llfn, typeid_metadata);
+            let bb_pass = self.append_sibling_block("type_test.pass");
+            let bb_fail = self.append_sibling_block("type_test.fail");
+            self.cond_br(cond, bb_pass, bb_fail);
+
+            self.switch_to_block(bb_fail);
+            self.abort();
+            self.unreachable();
+
+            self.switch_to_block(bb_pass);
+        }
+    }
+
+    // Emits KCFI operand bundles.
+    fn kcfi_operand_bundle(
+        &mut self,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        llfn: &'ll Value,
+    ) -> Option<llvm::OperandBundleDef<'ll>> {
+        let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() };
+        let kcfi_bundle = if is_indirect_call && self.tcx.sess.is_sanitizer_kcfi_enabled() {
+            if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::KCFI) {
+                return None;
+            }
+
+            let mut options = TypeIdOptions::empty();
+            if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
+                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            }
+            if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
+                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            }
+
+            let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options);
+            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+        } else {
+            None
+        };
+        kcfi_bundle
+    }
 }
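A note on the `Uint(_)` arm in the checked-subtraction hunk: the lowering works because unsigned `lhs - rhs` wraps exactly when `lhs < rhs`. A minimal plain-Rust sketch of the same identity (the helper name is ours, not part of the patch):

```rust
/// Plain-Rust model of the new unsigned checked-sub lowering: a wrapping
/// subtraction plus an unsigned less-than compare, the pair LLVM treats as
/// the canonical form of `llvm.usub.with.overflow`.
fn checked_usub(lhs: u32, rhs: u32) -> (u32, bool) {
    let sub = lhs.wrapping_sub(rhs); // corresponds to `self.sub(lhs, rhs)`
    let overflowed = lhs < rhs;      // corresponds to `icmp ult lhs, rhs`
    (sub, overflowed)
}

fn main() {
    assert_eq!(checked_usub(7, 5), (2, false));
    assert_eq!(checked_usub(5, 7), (u32::MAX - 1, true));
}
```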
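The `call`/`invoke` rework switches the FFI from a single optional operand bundle to a pointer-and-length pair, so the funclet and KCFI bundles are gathered as `Option`s and the `None`s dropped before the call. A standalone sketch of that collection pattern (a generic stand-in type, not the real `OperandBundleDef`):

```rust
/// Models how `call`/`invoke` now assemble operand bundles: each bundle is
/// optional, and only the present ones are handed to LLVM as a
/// (pointer, length) pair.
fn collect_bundles<T>(funclet: Option<T>, kcfi: Option<T>) -> Vec<T> {
    let mut bundles = vec![funclet, kcfi];
    bundles.retain(|b| b.is_some());        // same `retain` as in the patch
    bundles.into_iter().flatten().collect() // keep only the surviving bundles
}

fn main() {
    assert_eq!(collect_bundles(Some("funclet"), Some("kcfi")).len(), 2);
    assert_eq!(collect_bundles(Some("funclet"), None), vec!["funclet"]);
    assert!(collect_bundles::<&str>(None, None).is_empty());
}
```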
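`cleanup_landing_pad`, the new `filter_landing_pad`, and `resume` now pass the `{ i8*, i32 }` exception value around as its two components (exception pointer and type selector) instead of as one aggregate, which `resume` reassembles from a poison value. A rough model of that split and rejoin, with a plain struct standing in for the LLVM aggregate:

```rust
/// Stand-in for LLVM's `{ i8*, i32 }` landing-pad aggregate.
#[derive(Clone, Copy, Debug, PartialEq)]
struct ExceptionPair {
    exn_ptr: usize, // models the i8* exception pointer
    selector: i32,  // models the i32 type selector
}

/// Like `cleanup_landing_pad`: hand both fields back to the caller
/// (extract_value(pad, 0) and extract_value(pad, 1) in the patch).
fn split(pad: ExceptionPair) -> (usize, i32) {
    (pad.exn_ptr, pad.selector)
}

/// Like `resume`: rebuild the aggregate (from a poison value in the patch)
/// before emitting the resume instruction.
fn rejoin(exn0: usize, exn1: i32) -> ExceptionPair {
    ExceptionPair { exn_ptr: exn0, selector: exn1 }
}

fn main() {
    let pad = ExceptionPair { exn_ptr: 0xdead_beef, selector: 0 };
    let (p, s) = split(pad);
    assert_eq!(rejoin(p, s), pad);
}
```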
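On the atomics side, `LLVMBuildAtomicCmpXchg` takes no weak flag, hence the separate `LLVMSetWeak` call, and the pre-LLVM-13 workaround that strengthened the success ordering is gone now that LLVM follows the C++17 ordering rules. At the language level a weak compare-exchange may fail spuriously, which is why it normally sits in a retry loop:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Typical use of a weak compare-exchange: spurious failures are harmless
/// because the loop simply retries with the freshly observed value.
fn fetch_increment(counter: &AtomicU32) -> u32 {
    let mut cur = counter.load(Ordering::Relaxed);
    loop {
        match counter.compare_exchange_weak(
            cur,
            cur.wrapping_add(1),
            Ordering::AcqRel,  // success ordering
            Ordering::Relaxed, // failure ordering
        ) {
            Ok(prev) => return prev,
            Err(seen) => cur = seen, // spurious or real failure: retry
        }
    }
}

fn main() {
    let c = AtomicU32::new(41);
    assert_eq!(fetch_increment(&c), 41);
    assert_eq!(c.load(Ordering::Relaxed), 42);
}
```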
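The simplified `fptoint_sat` (the riscv64 fallback and its `Option` return are gone now that the minimum supported LLVM has the fix for the saturating-conversion bug) builds intrinsic names following LLVM's naming scheme: `llvm.fptosi.sat.i32.f32` for a scalar signed conversion, `llvm.fptoui.sat.v4i64.v4f32` for a 4-lane vector. A self-contained check of the same format strings:

```rust
/// Mirrors the name construction in `fptoint_sat`: scalar conversions use
/// `llvm.fpto{si,ui}.sat.iN.fM`; vector ones prefix both types with `v<lanes>`.
fn sat_intrinsic_name(signed: bool, int_width: u64, float_width: u64, lanes: Option<u64>) -> String {
    let instr = if signed { "fptosi" } else { "fptoui" };
    match lanes {
        Some(n) => format!("llvm.{instr}.sat.v{n}i{int_width}.v{n}f{float_width}"),
        None => format!("llvm.{instr}.sat.i{int_width}.f{float_width}"),
    }
}

fn main() {
    assert_eq!(sat_intrinsic_name(true, 32, 32, None), "llvm.fptosi.sat.i32.f32");
    assert_eq!(sat_intrinsic_name(false, 64, 32, Some(4)), "llvm.fptoui.sat.v4i64.v4f32");
}
```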
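Finally, the two new helpers at the bottom apply only to indirect calls (`LLVMIsAFunction` finds no function for the callee) and honor the per-function `no_sanitize` opt-out; `cfi_type_test` additionally requires a known `FnAbi`. A condensed model of that shared gating, with plain bools standing in for the session queries and attribute set:

```rust
/// Condensed model of the guards shared by `cfi_type_test` and
/// `kcfi_operand_bundle`: a check is emitted only for indirect calls,
/// only when the sanitizer is enabled, and only if the function's
/// attributes do not opt out via `no_sanitize`.
fn should_emit_check(is_indirect_call: bool, sanitizer_enabled: bool, no_sanitize: bool) -> bool {
    is_indirect_call && sanitizer_enabled && !no_sanitize
}

fn main() {
    assert!(should_emit_check(true, true, false));
    assert!(!should_emit_check(false, true, false)); // direct call: skip
    assert!(!should_emit_check(true, true, true));   // opted out: skip
}
```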
