Diffstat (limited to 'compiler/rustc_codegen_gcc')
5 files changed, 35 insertions, 39 deletions
diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
index 5f66ca67f2d..89661918d05 100644
--- a/compiler/rustc_codegen_gcc/example/alloc_system.rs
+++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs
@@ -156,7 +156,7 @@ mod platform {
     struct Header(*mut u8);
     const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
     unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
-        &mut *(ptr as *mut Header).offset(-1)
+        &mut *(ptr as *mut Header).sub(1)
     }
     unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
         let aligned = ptr.add(align - (ptr as usize & (align - 1)));
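A note on the alloc_system.rs hunk above: for a `*mut Header`, `ptr.sub(1)` computes the same address as `ptr.offset(-1)`; `sub` takes an unsigned element count, so the signed cast and negative literal disappear. A minimal standalone sketch (not part of this commit) using only the core raw-pointer API:

    // Both calls step back one element of the pointee type.
    fn main() {
        let mut data = [10u8, 20, 30];
        let base: *mut u8 = data.as_mut_ptr();
        unsafe {
            let last = base.add(2); // points at data[2]
            // offset(-1) and sub(1) yield the same pointer, here to data[1]:
            assert_eq!(*last.offset(-1), *last.sub(1));
        }
    }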
diff --git a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
index d5fa1cec061..c59a40df039 100644
--- a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
+++ b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
@@ -14,7 +14,6 @@ index 06c7be0..359e2e7 100644
 @@ -75,7 +75,6 @@
  #![feature(never_type)]
  #![feature(unwrap_infallible)]
- #![feature(result_into_ok_or_err)]
 -#![feature(portable_simd)]
  #![feature(ptr_metadata)]
  #![feature(once_cell)]
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index 0ed3e1fbe93..848c34211ff 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -107,45 +107,24 @@ pub trait FnAbiGccExt<'gcc, 'tcx> {
 impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
         let mut on_stack_param_indices = FxHashSet::default();
-        let args_capacity: usize = self.args.iter().map(|arg|
-            if arg.pad.is_some() {
-                1
-            }
-            else {
-                0
-            } +
-            if let PassMode::Pair(_, _) = arg.mode {
-                2
-            } else {
-                1
-            }
-        ).sum();
+
+        // This capacity calculation is approximate.
         let mut argument_tys = Vec::with_capacity(
-            if let PassMode::Indirect { .. } = self.ret.mode {
-                1
-            }
-            else {
-                0
-            } + args_capacity,
+            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }
         );
 
         let return_ty = match self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
-            PassMode::Cast(cast) => cast.gcc_type(cx),
+            PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
             PassMode::Indirect { .. } => {
                 argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                 cx.type_void()
             }
         };
 
-        for arg in &self.args {
-            // add padding
-            if let Some(ty) = arg.pad {
-                argument_tys.push(ty.gcc_type(cx));
-            }
-
+        for arg in self.args.iter() {
             let arg_ty = match arg.mode {
                 PassMode::Ignore => continue,
                 PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
@@ -157,7 +136,13 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Indirect { extra_attrs: Some(_), .. } => {
                     unimplemented!();
                 }
-                PassMode::Cast(cast) => cast.gcc_type(cx),
+                PassMode::Cast(ref cast, pad_i32) => {
+                    // add padding
+                    if pad_i32 {
+                        argument_tys.push(Reg::i32().gcc_type(cx));
+                    }
+                    cast.gcc_type(cx)
+                }
                 PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
                     on_stack_param_indices.insert(argument_tys.len());
                     arg.memory_ty(cx)
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
index c0b8d21818f..356c03ee3c1 100644
--- a/compiler/rustc_codegen_gcc/src/consts.rs
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -127,7 +127,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
         //
         // We could remove this hack whenever we decide to drop macOS 10.10 support.
         if self.tcx.sess.target.options.is_like_osx {
-            // The `inspect` method is okay here because we checked relocations, and
+            // The `inspect` method is okay here because we checked for provenance, and
             // because we are doing this access to inspect the final interpreter state
             // (not as part of the interpreter execution).
             //
@@ -296,17 +296,17 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
     let alloc = alloc.inner();
-    let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+    let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1);
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;
 
     let mut next_offset = 0;
-    for &(offset, alloc_id) in alloc.relocations().iter() {
+    for &(offset, alloc_id) in alloc.provenance().iter() {
         let offset = offset.bytes();
         assert_eq!(offset as usize as u64, offset);
         let offset = offset as usize;
         if offset > next_offset {
-            // This `inspect` is okay since we have checked that it is not within a relocation, it
+            // This `inspect` is okay since we have checked that it is not within a pointer with provenance, it
             // is within the bounds of the allocation, and it doesn't affect interpreter execution
             // (we inspect the result after interpreter execution). Any undef byte is replaced with
             // some arbitrary byte value.
@@ -319,7 +319,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
             read_target_uint(
                 dl.endian,
                 // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
                 // affect interpreter execution (we inspect the result after interpreter execution),
-                // and we properly interpret the relocation as a relocation pointer offset.
+                // and we properly interpret the provenance as a relocation pointer offset.
                 alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
             )
             .expect("const_alloc_to_llvm: could not read relocation pointer")
@@ -336,7 +336,7 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
     }
     if alloc.len() >= next_offset {
         let range = next_offset..alloc.len();
-        // This `inspect` is okay since we have check that it is after all relocations, it is
+        // This `inspect` is okay since we have check that it is after all provenance, it is
         // within the bounds of the allocation, and it doesn't affect interpreter execution (we
         // inspect the result after interpreter execution). Any undef byte is replaced with some
         // arbitrary byte value.
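A note on the PassMode changes in abi.rs above and intrinsic/mod.rs below: upstream folded the old per-argument `pad` field into the `Cast` variant itself, so `PassMode::Cast` now carries a `bool` (`pad_i32`) that requests one i32 padding argument before the cast value. The sketch below is a hypothetical, simplified mirror of the real `rustc_target` types (the `PassMode` and `CastTarget` here are stand-ins, and argument types are modeled as strings) meant only to show the shape of the new match:

    struct CastTarget; // stand-in for rustc_target's CastTarget

    enum PassMode {
        Ignore,
        Cast(CastTarget, /* pad_i32 */ bool),
    }

    fn push_arg_tys(mode: &PassMode, argument_tys: &mut Vec<&'static str>) {
        match mode {
            PassMode::Ignore => {}
            PassMode::Cast(_cast, pad_i32) => {
                // Padding is now decided where the cast type is emitted,
                // mirroring the `Reg::i32().gcc_type(cx)` push in the diff.
                if *pad_i32 {
                    argument_tys.push("i32 (padding)");
                }
                argument_tys.push("cast value");
            }
        }
    }

    fn main() {
        let mut tys = Vec::new();
        push_arg_tys(&PassMode::Cast(CastTarget, true), &mut tys);
        assert_eq!(tys, ["i32 (padding)", "cast value"]);
    }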
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index 5fbdedac0c4..02cedd4646b 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -130,7 +130,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = substs.type_at(0);
                 let mut ptr = args[0].immediate();
-                if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                     ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
                 }
                 let load = self.volatile_load(ptr.get_type(), ptr);
@@ -309,6 +309,18 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 return;
             }
 
+            sym::ptr_mask => {
+                let usize_type = self.context.new_type::<usize>();
+                let void_ptr_type = self.context.new_type::<*const ()>();
+
+                let ptr = args[0].immediate();
+                let mask = args[1].immediate();
+
+                let addr = self.bitcast(ptr, usize_type);
+                let masked = self.and(addr, mask);
+                self.bitcast(masked, void_ptr_type)
+            },
+
             _ if name_str.starts_with("simd_") => {
                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                     Ok(llval) => llval,
@@ -320,7 +332,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         };
 
         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty) = fn_abi.ret.mode {
+            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                 let ptr = self.pointercast(result.llval, ptr_llty);
                 self.store(llval, ptr, result.align);
@@ -416,7 +428,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
         else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
         }
-        else if let PassMode::Cast(cast) = self.mode {
+        else if let PassMode::Cast(ref cast, _) = self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -481,7 +493,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             PassMode::Indirect { extra_attrs: Some(_), .. } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             },
-            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
+            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             },
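A note on the new `ptr_mask` arm above: the lowering is three steps, a pointer-to-integer bitcast, a bitwise AND with the mask, and a cast back to a void pointer. A standalone sketch of the same computation in plain Rust (the usual use is aligning an address down; the integer round-trip in this sketch discards pointer provenance, which the real intrinsic is careful to preserve):

    // Compute what the ptr_mask lowering computes: address & mask.
    fn mask_ptr(ptr: *const u8, mask: usize) -> *const u8 {
        // Same three steps as the GCC lowering: pointer -> usize, AND, back.
        ((ptr as usize) & mask) as *const u8
    }

    fn main() {
        let buf = [0u8; 32];
        let p: *const u8 = &buf[5];
        // Mask off the low bits to align the address down to 16 bytes.
        let aligned = mask_ptr(p, !15usize);
        assert_eq!(aligned as usize % 16, 0);
        assert!(aligned as usize <= p as usize);
    }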
