Diffstat (limited to 'compiler/rustc_codegen_gcc')
-rw-r--r--  compiler/rustc_codegen_gcc/example/mini_core.rs              |  1
-rw-r--r--  compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs  |  4
-rw-r--r--  compiler/rustc_codegen_gcc/src/archive.rs                    |  2
-rw-r--r--  compiler/rustc_codegen_gcc/src/asm.rs                        | 31
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs                    | 63
-rw-r--r--  compiler/rustc_codegen_gcc/src/errors.rs                     | 17
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs              | 22
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/simd.rs             |  7
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs                        |  8
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_.rs                      | 10
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_of.rs                    | 27
11 files changed, 100 insertions, 92 deletions
diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs
index 230009741dc..cc3d647c8c8 100644
--- a/compiler/rustc_codegen_gcc/example/mini_core.rs
+++ b/compiler/rustc_codegen_gcc/example/mini_core.rs
@@ -472,6 +472,7 @@ pub trait Allocator {
 
 impl Allocator for () {}
 
+#[lang = "global_alloc_ty"]
 pub struct Global;
 
 impl Allocator for Global {}
diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
index 9827e299f2a..add77880716 100644
--- a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
+++ b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
@@ -99,8 +99,8 @@ fn start<T: Termination + 'static>(
 
 static mut NUM: u8 = 6 * 7;
 
-// FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_ref` lint
-#[allow(static_mut_ref)]
+// FIXME: Use `SyncUnsafeCell` instead of allowing `static_mut_refs` lint
+#[allow(static_mut_refs)]
 static NUM_REF: &'static u8 = unsafe { &NUM };
 
 macro_rules! assert {
diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs
index 11fa074f5ac..73ff0c37b66 100644
--- a/compiler/rustc_codegen_gcc/src/archive.rs
+++ b/compiler/rustc_codegen_gcc/src/archive.rs
@@ -10,7 +10,7 @@ use rustc_session::cstore::DllImport;
 pub(crate) struct ArArchiveBuilderBuilder;
 
 impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
-    fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+    fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder + 'a> {
         Box::new(ArArchiveBuilder::new(sess, get_native_object_symbols))
     }
 
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 78e8e32b972..07edd26e27a 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -107,7 +107,7 @@ enum ConstraintOrRegister {
 
 
 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, dest: Option<Self::BasicBlock>, _catch_funclet: Option<(Self::BasicBlock, Option<&Self::Funclet>)>) {
         if options.contains(InlineAsmOptions::MAY_UNWIND) {
             self.sess().dcx()
                 .create_err(UnwindingInlineAsm { span: span[0] })
@@ -126,6 +126,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         // added to `outputs.len()`
         let mut inputs = vec![];
 
+        // GCC index of a label equals its position in the array added to
+        // `outputs.len() + inputs.len()`.
+        let mut labels = vec![];
+
         // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _`
         let mut clobbers = vec![];
 
@@ -269,6 +273,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     // some targets to add a leading underscore (Mach-O).
                    constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
                 }
+
+                InlineAsmOperandRef::Label { label } => {
+                    labels.push(label);
+                }
             }
         }
 
@@ -368,6 +376,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 InlineAsmOperandRef::Const { .. } => {
                     // processed in the previous pass
                 }
+
+                InlineAsmOperandRef::Label { .. } => {
+                    // processed in the previous pass
+                }
             }
         }
 
@@ -454,6 +466,14 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 InlineAsmOperandRef::Const { ref string } => {
                     template_str.push_str(string);
                 }
+
+                InlineAsmOperandRef::Label { label } => {
+                    let label_gcc_index = labels.iter()
+                        .position(|&l| l == label)
+                        .expect("wrong rust index");
+                    let gcc_index = label_gcc_index + outputs.len() + inputs.len();
+                    push_to_template(Some('l'), gcc_index);
+                }
             }
         }
     }
@@ -466,7 +486,12 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
         // 4. Generate Extended Asm block
         let block = self.llbb();
-        let extended_asm = block.add_extended_asm(None, &template_str);
+        let extended_asm = if let Some(dest) = dest {
+            assert!(!labels.is_empty());
+            block.end_with_extended_asm_goto(None, &template_str, &labels, Some(dest))
+        } else {
+            block.add_extended_asm(None, &template_str)
+        };
 
         for op in &outputs {
             extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var);
@@ -494,7 +519,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         if !options.contains(InlineAsmOptions::NOSTACK) {
             // TODO(@Commeownist): figure out how to align stack
         }
-        if options.contains(InlineAsmOptions::NORETURN) {
+        if dest.is_none() && options.contains(InlineAsmOptions::NORETURN) {
             let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
             let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
             self.call(self.type_void(), None, None, builtin_unreachable, &[], None);
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 42e61b3ccb5..71a0a4c2e96 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -705,6 +705,31 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.frem(lhs, rhs)
     }
 
+    fn fadd_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs + rhs
+    }
+
+    fn fsub_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs - rhs
+    }
+
+    fn fmul_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs * rhs
+    }
+
+    fn fdiv_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        lhs / rhs
+    }
+
+    fn frem_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+        // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
+        self.frem(lhs, rhs)
+    }
+
     fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
         self.gcc_checked_binop(oop, typ, lhs, rhs)
     }
@@ -809,10 +834,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         }
         else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
             let b_offset = a.size(self).align_to(b.align(self).abi);
-            let pair_type = place.layout.gcc_type(self);
 
             let mut load = |i, scalar: &abi::Scalar, align| {
-                let llptr = self.struct_gep(pair_type, place.llval, i as u64);
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                };
                 let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                 let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
@@ -946,33 +974,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         result.get_address(None)
     }
 
-    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
-        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
-        assert_eq!(idx as usize as u64, idx);
-        let value = ptr.dereference(None).to_rvalue();
-
-        if value_type.dyncast_array().is_some() {
-            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-            let element = self.context.new_array_access(None, value, index);
-            element.get_address(None)
-        }
-        else if let Some(vector_type) = value_type.dyncast_vector() {
-            let array_type = vector_type.get_element_type().make_pointer();
-            let array = self.bitcast(ptr, array_type);
-            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-            let element = self.context.new_array_access(None, array, index);
-            element.get_address(None)
-        }
-        else if let Some(struct_type) = value_type.is_struct() {
-            // NOTE: due to opaque pointers now being used, we need to bitcast here.
-            let ptr = self.bitcast_if_needed(ptr, value_type.make_pointer());
-            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
-        }
-        else {
-            panic!("Unexpected type {:?}", value_type);
-        }
-    }
-
     /* Casts */
     fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): check that it indeed truncate the value.
@@ -1727,7 +1728,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
     }
 
-    pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_reduce_fadd_reassoc(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
 
@@ -1747,7 +1748,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         unimplemented!();
     }
 
-    pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_reduce_fmul_reassoc(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
 
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
index cc0fbe46dcc..988a7e1033e 100644
--- a/compiler/rustc_codegen_gcc/src/errors.rs
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -1,6 +1,5 @@
 use rustc_errors::{
-    DiagCtxt, DiagnosticArgValue, DiagnosticBuilder, EmissionGuarantee, IntoDiagnostic,
-    IntoDiagnosticArg, Level,
+    DiagCtxt, DiagArgValue, Diag, EmissionGuarantee, IntoDiagnostic, IntoDiagnosticArg, Level,
 };
 use rustc_macros::{Diagnostic, Subdiagnostic};
 use rustc_span::Span;
@@ -35,11 +34,11 @@ pub(crate) enum PossibleFeature<'a> {
 struct ExitCode(Option<i32>);
 
 impl IntoDiagnosticArg for ExitCode {
-    fn into_diagnostic_arg(self) -> DiagnosticArgValue {
+    fn into_diagnostic_arg(self) -> DiagArgValue {
         let ExitCode(exit_code) = self;
         match exit_code {
             Some(t) => t.into_diagnostic_arg(),
-            None => DiagnosticArgValue::Str(Cow::Borrowed("<signal>")),
+            None => DiagArgValue::Str(Cow::Borrowed("<signal>")),
         }
     }
 }
@@ -112,17 +111,13 @@ pub(crate) struct TargetFeatureDisableOrEnable<'a> {
 pub(crate) struct MissingFeatures;
 
 impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> {
-    fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> DiagnosticBuilder<'_, G> {
-        let mut diag = DiagnosticBuilder::new(
-            dcx,
-            level,
-            fluent::codegen_gcc_target_feature_disable_or_enable
-        );
+    fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> {
+        let mut diag = Diag::new(dcx, level, fluent::codegen_gcc_target_feature_disable_or_enable);
         if let Some(span) = self.span {
             diag.span(span);
         };
         if let Some(missing_features) = self.missing_features {
-            diag.subdiagnostic(missing_features);
+            diag.subdiagnostic(dcx, missing_features);
         }
         diag.arg("features", self.features.join(", "));
         diag
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index eac8cb43779..d43f5d74757 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -21,7 +21,7 @@ use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::ty::layout::LayoutOf;
 #[cfg(feature="master")]
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_span::{Span, Symbol, symbol::kw, sym};
+use rustc_span::{Span, Symbol, sym};
 use rustc_target::abi::HasDataLayout;
 use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
 use rustc_target::spec::PanicStrategy;
@@ -90,7 +90,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) ->
 }
 
 impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) -> Result<(), Instance<'tcx>> {
         let tcx = self.tcx;
         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
 
@@ -129,7 +129,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 let res = self.context.new_call(None, builtin, &[a]);
                 self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
             }
-            kw::Try => {
+            sym::catch_unwind => {
                 try_intrinsic(
                     self,
                     args[0].immediate(),
@@ -137,7 +137,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     args[2].immediate(),
                     llresult,
                 );
-                return;
+                return Ok(());
             }
             sym::breakpoint => {
                 unimplemented!();
             }
@@ -166,12 +166,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
                 args[1].val.volatile_store(self, dst);
-                return;
+                return Ok(());
             }
             sym::unaligned_volatile_store => {
                 let dst = args[0].deref(self.cx());
                 args[1].val.unaligned_volatile_store(self, dst);
-                return;
+                return Ok(());
             }
             sym::prefetch_read_data
             | sym::prefetch_write_data
@@ -269,7 +269,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     },
                     None => {
                         tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
-                        return;
+                        return Ok(());
                     }
                 }
             }
@@ -339,7 +339,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 extended_asm.set_volatile_flag(true);
 
                 // We have copied the value to `result` already.
-                return;
+                return Ok(());
             }
 
             sym::ptr_mask => {
@@ -357,11 +357,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             _ if name_str.starts_with("simd_") => {
                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                     Ok(llval) => llval,
-                    Err(()) => return,
+                    Err(()) => return Ok(()),
                 }
             }
 
-            _ => bug!("unknown intrinsic '{}'", name),
+            // Fall back to default body
+            _ => return Err(Instance::new(instance.def_id(), instance.args)),
         };
 
         if !fn_abi.ret.is_ignore() {
@@ -376,6 +377,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 .store(self, result);
             }
         }
+        Ok(())
     }
 
     fn abort(&mut self) {
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 9fa978cd2ef..d8091724d86 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -989,14 +989,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
     arith_red!(
         simd_reduce_add_unordered: BinaryOp::Plus,
-        vector_reduce_fadd_fast,
+        vector_reduce_fadd_reassoc,
         false,
         add,
         0.0 // TODO: Use this argument.
     );
     arith_red!(
         simd_reduce_mul_unordered: BinaryOp::Mult,
-        vector_reduce_fmul_fast,
+        vector_reduce_fmul_reassoc,
        false,
        mul,
        1.0
@@ -1041,9 +1041,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
     minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
 
-    // TODO(sadlerap): revisit these intrinsics to generate more optimal reductions
-    minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin);
-    minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax);
 
     macro_rules! bitwise_red {
         ($name:ident : $op:expr, $boolean:expr) => {
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 863b6333bcc..09ce059476e 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -217,13 +217,11 @@ impl CodegenBackend for GccCodegenBackend {
         Box::new(res)
     }
 
-    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
-        let (codegen_results, work_products) = ongoing_codegen
+    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
+        ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
             .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
-            .join(sess);
-
-        Ok((codegen_results, work_products))
+            .join(sess)
     }
 
     fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> {
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
index 7a89fe81d38..7e5aa1c1766 100644
--- a/compiler/rustc_codegen_gcc/src/type_.rs
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -83,8 +83,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
     pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
         match t {
+            ty::FloatTy::F16 => self.type_f16(),
             ty::FloatTy::F32 => self.type_f32(),
             ty::FloatTy::F64 => self.type_f64(),
+            ty::FloatTy::F128 => self.type_f128(),
         }
     }
 }
@@ -118,6 +120,10 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         self.isize_type
     }
 
+    fn type_f16(&self) -> Type<'gcc> {
+        unimplemented!("f16_f128")
+    }
+
     fn type_f32(&self) -> Type<'gcc> {
         self.float_type
     }
@@ -125,6 +131,10 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn type_f64(&self) -> Type<'gcc> {
         self.double_type
     }
+
+    fn type_f128(&self) -> Type<'gcc> {
+        unimplemented!("f16_f128")
+    }
 
     fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
         self.context.new_function_pointer_type(None, return_type, params, false)
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index 25149b80201..5a9212762b7 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -6,7 +6,7 @@ use rustc_middle::bug;
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
+use rustc_target::abi::{self, Abi, Align, F16, F128, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
 use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
 
 use crate::abi::{FnAbiGcc, FnAbiGccExt, GccType};
@@ -151,7 +151,6 @@ pub trait LayoutGccExt<'tcx> {
     fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
     fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
     fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
-    fn gcc_field_index(&self, index: usize) -> u64;
     fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
 }
 
@@ -257,8 +256,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         match scalar.primitive() {
             Int(i, true) => cx.type_from_integer(i),
             Int(i, false) => cx.type_from_unsigned_integer(i),
+            F16 => cx.type_f16(),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
+            F128 => cx.type_f128(),
             Pointer(address_space) => {
                 // If we know the alignment, pick something better than i8.
                 let pointee =
@@ -304,24 +305,6 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         self.scalar_gcc_type_at(cx, scalar, offset)
     }
 
-    fn gcc_field_index(&self, index: usize) -> u64 {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) => {
-                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
-            }
-            _ => {}
-        }
-        match self.fields {
-            FieldsShape::Primitive | FieldsShape::Union(_) => {
-                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
-            }
-
-            FieldsShape::Array { .. } => index as u64,
-
-            FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
-        }
-    }
-
     fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
         if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
             return pointee;
@@ -351,10 +334,6 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         layout.is_gcc_scalar_pair()
     }
 
-    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
-        layout.gcc_field_index(index)
-    }
-
     fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
         layout.scalar_pair_element_gcc_type(self, index)
     }
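
Note (not part of the diff above): the new InlineAsmOperandRef::Label handling and the end_with_extended_asm_goto call in src/asm.rs are the backend side of the nightly asm_goto feature. A minimal usage sketch, assuming a nightly toolchain and an x86_64 target; the feature name and the label { ... } operand syntax come from nightly Rust, not from this diff:

#![feature(asm_goto)]

fn main() {
    #[cfg(target_arch = "x86_64")]
    unsafe {
        // `{}` expands to the GCC label of the `label` block below, which is
        // what `push_to_template(Some('l'), gcc_index)` emits in the diff.
        std::arch::asm!(
            "jmp {}",
            label {
                println!("jumped to the Rust label block");
            },
        );
    }
}

When a fallthrough block `dest` exists, the asm is emitted with end_with_extended_asm_goto; __builtin_unreachable is only generated for NORETURN asm with no fallthrough destination, matching the `dest.is_none()` check in the diff.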

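Note (also not part of the diff): in src/builder.rs, ScalarPair loads no longer build a pair struct type and struct_gep into it; element 0 is loaded from `place.llval` directly and element 1 from `place.llval` advanced by `b_offset` bytes. A small self-contained sketch of that offset computation; the helper name is made up for illustration:

fn scalar_pair_b_offset(a_size_bytes: u64, b_align_bytes: u64) -> u64 {
    // Round the first element's size up to the second element's alignment,
    // mirroring `a.size(self).align_to(b.align(self).abi)` in the diff.
    assert!(b_align_bytes.is_power_of_two());
    (a_size_bytes + b_align_bytes - 1) & !(b_align_bytes - 1)
}

fn main() {
    // A pair laid out like (u8, u32): the second element starts at byte 4.
    assert_eq!(scalar_pair_b_offset(1, 4), 4);
    // A pair laid out like (u64, u32): the second element starts at byte 8.
    assert_eq!(scalar_pair_b_offset(8, 4), 8);
    println!("offsets match");
}

With opaque pointers there is no meaningful struct type left to index into, so a plain inbounds_ptradd by the computed byte offset is sufficient.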