Diffstat (limited to 'compiler/rustc_codegen_gcc/src/builder.rs')
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs | 58
1 file changed, 46 insertions(+), 12 deletions(-)
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index b1785af444a..f7a7a3f8c7e 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -34,6 +34,7 @@ use rustc_target::spec::{HasTargetSpec, HasX86AbiOpt, Target, X86Abi};
 
 use crate::common::{SignType, TypeReflection, type_is_pointer};
 use crate::context::CodegenCx;
+use crate::errors;
 use crate::intrinsic::llvm;
 use crate::type_of::LayoutGccExt;
 
@@ -539,9 +540,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn ret(&mut self, mut value: RValue<'gcc>) {
         let expected_return_type = self.current_func().get_return_type();
-        if !expected_return_type.is_compatible_with(value.get_type()) {
-            // NOTE: due to opaque pointers now being used, we need to cast here.
-            value = self.context.new_cast(self.location, value, expected_return_type);
+        let value_type = value.get_type();
+        if !expected_return_type.is_compatible_with(value_type) {
+            // NOTE: due to opaque pointers now being used, we need to (bit)cast here.
+            if self.is_native_int_type(value_type) && self.is_native_int_type(expected_return_type)
+            {
+                value = self.context.new_cast(self.location, value, expected_return_type);
+            } else {
+                value = self.context.new_bitcast(self.location, value, expected_return_type);
+            }
         }
         self.llbb().end_with_return(self.location, value);
     }
 
@@ -926,10 +933,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             .get_address(self.location)
     }
 
-    fn dynamic_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
-        unimplemented!();
-    }
-
     fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
         let block = self.llbb();
         let function = block.get_function();
@@ -975,7 +978,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     fn volatile_load(&mut self, ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
         let ptr = self.context.new_cast(self.location, ptr, ty.make_volatile().make_pointer());
-        ptr.dereference(self.location).to_rvalue()
+        // (FractalFir): We insert a local here, to ensure this volatile load can't move across
+        // blocks.
+        let local = self.current_func().new_local(self.location, ty, "volatile_tmp");
+        self.block.add_assignment(self.location, local, ptr.dereference(self.location).to_rvalue());
+        local.to_rvalue()
     }
 
     fn atomic_load(
@@ -1278,11 +1285,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn intcast(
         &mut self,
-        value: RValue<'gcc>,
+        mut value: RValue<'gcc>,
         dest_typ: Type<'gcc>,
-        _is_signed: bool,
+        is_signed: bool,
     ) -> RValue<'gcc> {
-        // NOTE: is_signed is for value, not dest_typ.
+        let value_type = value.get_type();
+        if is_signed && !value_type.is_signed(self.cx) {
+            let signed_type = value_type.to_signed(self.cx);
+            value = self.gcc_int_cast(value, signed_type);
+        } else if !is_signed && value_type.is_signed(self.cx) {
+            let unsigned_type = value_type.to_unsigned(self.cx);
+            value = self.gcc_int_cast(value, unsigned_type);
+        }
         self.gcc_int_cast(value, dest_typ)
     }
 
@@ -1656,6 +1671,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         dst: RValue<'gcc>,
         src: RValue<'gcc>,
         order: AtomicOrdering,
+        ret_ptr: bool,
     ) -> RValue<'gcc> {
         let size = get_maybe_pointer_size(src);
         let name = match op {
@@ -1683,6 +1699,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let atomic_function = self.context.get_builtin_function(name);
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 
+        // FIXME: If `ret_ptr` is true and `src` is an integer, we should really tell GCC
+        // that this is a pointer operation that needs to preserve provenance -- but like LLVM,
+        // GCC does not currently seem to support that.
         let void_ptr_type = self.context.new_type::<*mut ()>();
         let volatile_void_ptr_type = void_ptr_type.make_volatile();
         let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
@@ -1690,7 +1709,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
         let src = self.context.new_bitcast(self.location, src, new_src_type);
         let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
-        self.context.new_cast(self.location, res, src.get_type())
+        let res_type = if ret_ptr { void_ptr_type } else { src.get_type() };
+        self.context.new_cast(self.location, res, res_type)
     }
 
     fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
@@ -1742,6 +1762,20 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         call
     }
 
+    fn tail_call(
+        &mut self,
+        _llty: Self::Type,
+        _fn_attrs: Option<&CodegenFnAttrs>,
+        _fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        _llfn: Self::Value,
+        _args: &[Self::Value],
+        _funclet: Option<&Self::Funclet>,
+        _instance: Option<Instance<'tcx>>,
+    ) {
+        // FIXME: implement support for explicit tail calls like rustc_codegen_llvm.
+        self.tcx.dcx().emit_fatal(errors::ExplicitTailCallsUnsupported);
+    }
+
     fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
         // FIXME(antoyo): this does not zero-extend.
         self.gcc_int_cast(value, dest_typ)
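Note on the `ret` change: gccjit distinguishes a value cast (`new_cast`, which converts the value, e.g. widens an integer) from a bitcast (`new_bitcast`, which reinterprets the same bits at a new type), so only native integer pairs go through the plain cast. A rough plain-Rust analogy of the two operations (standard-library stand-ins, not the gccjit API):

    fn main() {
        let x: f32 = 1.0;
        // A value cast converts the value: 1.0f32 becomes the integer 1.
        assert_eq!(x as u32, 1);
        // A bitcast reinterprets the bits: the IEEE-754 encoding of 1.0f32.
        assert_eq!(x.to_bits(), 0x3F80_0000);
    }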
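Note on the `volatile_load` change: per the in-diff comment, assigning the dereference to a fresh local pins the side-effecting read to the current block instead of leaving a free-floating rvalue. A loose Rust-level analogy (using `std::ptr::read_volatile`, not the backend API) of why each volatile read must stay where it is written:

    use std::ptr;

    unsafe fn read_twice(p: *const u32) -> (u32, u32) {
        // Each volatile read is a distinct side effect; binding the result
        // to a local fixes the point at which the read happens, which is
        // the property the backend change enforces in GCC's IR.
        let first = ptr::read_volatile(p);
        let second = ptr::read_volatile(p);
        (first, second)
    }

    fn main() {
        let v = 7u32;
        let (a, b) = unsafe { read_twice(&v) };
        assert_eq!((a, b), (7, 7));
    }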
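Note on the `intcast` change: widening extends according to the source type's signedness, so when `is_signed` disagrees with the GCC type of `value`, the value is first recast to the matching signedness and only then widened to `dest_typ`. A self-contained plain-Rust sketch of that rule:

    fn main() {
        let bits: u8 = 0xFF;
        // Zero-extension: the source type is unsigned.
        assert_eq!(bits as u32, 0x0000_00FF);
        // Sign-extension: reinterpret as signed first, then widen.
        assert_eq!((bits as i8) as i32, -1);
        assert_eq!(((bits as i8) as i32) as u32, 0xFFFF_FFFF);
    }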
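Note on the new `ret_ptr` parameter of the atomic RMW path: some atomic read-modify-write operations produce a pointer rather than an integer, and the flag tells the backend to cast the builtin's result back to `void *` instead of the integer source type. A minimal surface-level example of such an operation (standard-library usage, independent of this backend):

    use std::sync::atomic::{AtomicPtr, Ordering};

    fn main() {
        let mut a = 1;
        let mut b = 2;
        let slot = AtomicPtr::new(&mut a);
        // `swap` is an atomic RMW whose result is the *old pointer*:
        // exactly the case where the RMW result must come back as a
        // pointer type instead of an integer.
        let old = slot.swap(&mut b, Ordering::SeqCst);
        assert_eq!(unsafe { *old }, 1);
    }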
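Note on `tail_call`: the stub rejects explicit tail calls with a fatal diagnostic instead of panicking. The definition of `errors::ExplicitTailCallsUnsupported` is not part of this diff; in rustc backends such errors are typically unit structs deriving `Diagnostic`, so it plausibly looks like the following sketch (the Fluent slug name is an assumption):

    use rustc_macros::Diagnostic;

    // Assumed shape, mirroring other rustc_codegen_gcc diagnostics; the
    // actual struct lives in the crate's errors module and is not shown
    // in this diff.
    #[derive(Diagnostic)]
    #[diag(codegen_gcc_explicit_tail_calls_unsupported)]
    pub(crate) struct ExplicitTailCallsUnsupported;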
