Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/lto.rs        |  4
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs         | 40
-rw-r--r--  compiler/rustc_codegen_gcc/src/int.rs             |  2
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs   | 22
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs             |  2
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_of.rs         | 13
6 files changed, 29 insertions, 54 deletions
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
index e419bd18099..cb4caec8c32 100644
--- a/compiler/rustc_codegen_gcc/src/back/lto.rs
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -710,10 +710,6 @@ pub struct ThinBuffer {
     context: Arc<SyncContext>,
 }
 
-// TODO: check if this makes sense to make ThinBuffer Send and Sync.
-unsafe impl Send for ThinBuffer {}
-unsafe impl Sync for ThinBuffer {}
-
 impl ThinBuffer {
     pub(crate) fn new(context: &Arc<SyncContext>) -> Self {
         Self { context: Arc::clone(context) }
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 1c3d6cc899a..c8b7616e645 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -665,6 +665,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         a + b
     }
 
+    // TODO(antoyo): should we also override the `unchecked_` versions?
     fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         self.gcc_sub(a, b)
     }
@@ -832,31 +833,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         set_rvalue_location(self, self.gcc_not(a))
     }
 
-    fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        set_rvalue_location(self, self.gcc_add(a, b))
-    }
-
-    fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        set_rvalue_location(self, self.gcc_add(a, b))
-    }
-
-    fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        set_rvalue_location(self, self.gcc_sub(a, b))
-    }
-
-    fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        // TODO(antoyo): should generate poison value?
-        set_rvalue_location(self, self.gcc_sub(a, b))
-    }
-
-    fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        set_rvalue_location(self, self.gcc_mul(a, b))
-    }
-
-    fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        set_rvalue_location(self, self.gcc_mul(a, b))
-    }
-
     fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
         set_rvalue_location(self, lhs + rhs)
@@ -1013,10 +989,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             OperandValue::Ref(place.val)
         } else if place.layout.is_gcc_immediate() {
             let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
-            if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
-                scalar_load_metadata(self, load, scalar);
-            }
-            OperandValue::Immediate(self.to_immediate(load, place.layout))
+            OperandValue::Immediate(
+                if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr {
+                    scalar_load_metadata(self, load, scalar);
+                    self.to_immediate_scalar(load, *scalar)
+                } else {
+                    load
+                },
+            )
         } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);
 
@@ -1718,7 +1698,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
-            return self.trunc(val, self.cx().type_i1());
+            return self.unchecked_utrunc(val, self.cx().type_i1());
         }
         val
     }
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
index 4a1db8d662a..f3552d9b12f 100644
--- a/compiler/rustc_codegen_gcc/src/int.rs
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -400,7 +400,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             conv: Conv::C,
             can_unwind: false,
         };
-        fn_abi.adjust_for_foreign_abi(self.cx, ExternAbi::C { unwind: false }).unwrap();
+        fn_abi.adjust_for_foreign_abi(self.cx, ExternAbi::C { unwind: false });
 
         let ret_indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
 
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index a1123fafe2f..f8672c07299 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -9,7 +9,7 @@ use gccjit::FunctionType;
 use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
 #[cfg(feature = "master")]
 use rustc_abi::ExternAbi;
-use rustc_abi::HasDataLayout;
+use rustc_abi::{BackendRepr, HasDataLayout};
 use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::common::IntPredicate;
@@ -84,14 +84,11 @@
         sym::ceilf64 => "ceil",
         sym::truncf32 => "truncf",
         sym::truncf64 => "trunc",
-        sym::rintf32 => "rintf",
-        sym::rintf64 => "rint",
-        sym::nearbyintf32 => "nearbyintf",
-        sym::nearbyintf64 => "nearbyint",
+        // We match the LLVM backend and lower this to `rint`.
+        sym::round_ties_even_f32 => "rintf",
+        sym::round_ties_even_f64 => "rint",
         sym::roundf32 => "roundf",
         sym::roundf64 => "round",
-        sym::roundevenf32 => "roundevenf",
-        sym::roundevenf64 => "roundeven",
         sym::abort => "abort",
         _ => return None,
     };
@@ -181,14 +178,19 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = fn_args.type_at(0);
                 let ptr = args[0].immediate();
+                let layout = self.layout_of(tp_ty);
                 let load = if let PassMode::Cast { cast: ref ty, pad_i32: _ } = fn_abi.ret.mode {
                     let gcc_ty = ty.gcc_type(self);
                     self.volatile_load(gcc_ty, ptr)
                 } else {
-                    self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
+                    self.volatile_load(layout.gcc_type(self), ptr)
                 };
                 // TODO(antoyo): set alignment.
-                self.to_immediate(load, self.layout_of(tp_ty))
+                if let BackendRepr::Scalar(scalar) = layout.backend_repr {
+                    self.to_immediate_scalar(load, scalar)
+                } else {
+                    load
+                }
             }
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
@@ -310,7 +312,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 let layout = self.layout_of(tp_ty).layout;
                 let _use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
-                    Uninhabited | Vector { .. } => false,
+                    Vector { .. } => false,
                     Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index ce88ac39021..6455bcec685 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -476,7 +476,7 @@ fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
         Some(level) => match level {
             OptLevel::No => OptimizationLevel::None,
             OptLevel::Less => OptimizationLevel::Limited,
-            OptLevel::Default => OptimizationLevel::Standard,
+            OptLevel::More => OptimizationLevel::Standard,
             OptLevel::Aggressive => OptimizationLevel::Aggressive,
             OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
         },
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index 8b8b54753e7..bac4fc51300 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -84,7 +84,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
                 false,
             );
         }
-        BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {}
+        BackendRepr::Memory { .. } => {}
     }
 
     let name = match *layout.ty.kind() {
@@ -179,19 +179,16 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     fn is_gcc_immediate(&self) -> bool {
         match self.backend_repr {
             BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
-            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
-                false
-            }
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false,
         }
     }
 
     fn is_gcc_scalar_pair(&self) -> bool {
         match self.backend_repr {
             BackendRepr::ScalarPair(..) => true,
-            BackendRepr::Uninhabited
-            | BackendRepr::Scalar(_)
-            | BackendRepr::Vector { .. }
-            | BackendRepr::Memory { .. } => false,
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }
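The recurring pattern in the builder.rs and intrinsic/mod.rs hunks above is that the generic `to_immediate(load, layout)` call is replaced by matching on `BackendRepr` and calling `to_immediate_scalar` only for scalar layouts, while bools are narrowed to i1 with `unchecked_utrunc`: a loaded bool is already 0 or 1, so the truncation cannot lose information. The following is a minimal standalone sketch of that control flow; the types here are simplified stand-ins invented for illustration, not the real rustc/gccjit APIs.

    // Standalone model of the load-to-immediate pattern in this diff.
    // `BackendRepr` and `Value` are simplified stand-ins.

    #[derive(Clone, Copy)]
    enum BackendRepr {
        Scalar { is_bool: bool },
        Memory,
    }

    #[derive(Debug)]
    struct Value {
        bits: u64,
    }

    // Models `to_immediate_scalar`: a loaded bool is guaranteed to be 0 or 1,
    // so narrowing it to an i1-style immediate cannot overflow; that invariant
    // is why the diff can use `unchecked_utrunc` instead of a plain `trunc`.
    fn to_immediate_scalar(val: Value, is_bool: bool) -> Value {
        if is_bool {
            Value { bits: val.bits & 1 } // unchecked narrowing: relies on the 0/1 invariant
        } else {
            val
        }
    }

    // Models the new load path: only `Scalar` layouts are converted to an
    // immediate; every other representation is returned as loaded.
    fn load_to_immediate(load: Value, repr: BackendRepr) -> Value {
        match repr {
            BackendRepr::Scalar { is_bool } => to_immediate_scalar(load, is_bool),
            BackendRepr::Memory => load,
        }
    }

    fn main() {
        let imm = load_to_immediate(Value { bits: 1 }, BackendRepr::Scalar { is_bool: true });
        println!("{imm:?}"); // Value { bits: 1 }
    }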

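The lib.rs hunk tracks a rename on the rustc side: `OptLevel::Default` became `OptLevel::More` (rustc's -O2), so the mapping to GCC's optimization levels is unchanged in behavior. Below is a self-contained sketch of that mapping with stand-in enums; the flag correspondences in the comments are assumptions about the usual rustc meanings, not taken from this diff.

    // `OptLevel` and `OptimizationLevel` are simplified stand-ins for the
    // rustc and gccjit types of the same names.

    #[derive(Clone, Copy)]
    enum OptLevel {
        No,         // -O0
        Less,       // -O1
        More,       // -O2 (this variant was previously named `Default`)
        Aggressive, // -O3
        Size,       // -Os
        SizeMin,    // -Oz
    }

    #[derive(Debug)]
    enum OptimizationLevel {
        None,
        Limited,
        Standard,
        Aggressive,
    }

    fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
        match optlevel {
            None => OptimizationLevel::None,
            Some(level) => match level {
                OptLevel::No => OptimizationLevel::None,
                OptLevel::Less => OptimizationLevel::Limited,
                OptLevel::More => OptimizationLevel::Standard,
                OptLevel::Aggressive => OptimizationLevel::Aggressive,
                // No dedicated size level on the GCC side here, so size
                // optimization falls back to `Limited`.
                OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
            },
        }
    }

    fn main() {
        // `OptLevel::More` (the renamed `Default`) still maps to GCC's -O2.
        println!("{:?}", to_gcc_opt_level(Some(OptLevel::More)));
    }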