Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs                                |   4
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs                                | 112
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs                           |   4
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs                         |   3
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs                               |  28
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs                            |   8
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs                             |  10
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs                             |  12
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs                            |   8
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs  |   1
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs       |   6
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs        |   8
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs                          |  23
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs                           |   2
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs                            |   5
15 files changed, 153 insertions, 81 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index b14a4f28c75..b9baa87bac7 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -510,9 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             // If the value is a boolean, the range is 0..2 and that ultimately
             // becomes 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
-            if let Int(..) = scalar.value {
+            if let Int(..) = scalar.primitive() {
                 if !scalar.is_bool() && !scalar.is_always_valid(bx) {
-                    bx.range_metadata(callsite, scalar.valid_range);
+                    bx.range_metadata(callsite, scalar.valid_range(bx));
                 }
             }
         }
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 205b77e9862..e994001f96f 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -290,6 +290,11 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
         attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });

+        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
+        if let Some((dest, _, _)) = dest_catch_funclet {
+            self.switch_to_block(dest);
+        }
+
         // Write results to outputs
         for (idx, op) in operands.iter().enumerate() {
             if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
@@ -307,11 +312,11 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     }
 }

-impl AsmMethods for CodegenCx<'_, '_> {
+impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
     fn codegen_global_asm(
         &self,
         template: &[InlineAsmTemplatePiece],
-        operands: &[GlobalAsmOperandRef],
+        operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
         _line_spans: &[Span],
     ) {
@@ -337,6 +342,29 @@ impl AsmMethods for CodegenCx<'_, '_> {
                             // here unlike normal inline assembly.
                             template_str.push_str(string);
                         }
+                        GlobalAsmOperandRef::SymFn { instance } => {
+                            let llval = self.get_fn(instance);
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
+                        GlobalAsmOperandRef::SymStatic { def_id } => {
+                            let llval = self
+                                .renamed_statics
+                                .borrow()
+                                .get(&def_id)
+                                .copied()
+                                .unwrap_or_else(|| self.get_static(def_id));
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
                     }
                 }
             }
@@ -763,7 +791,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
 fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
-    match scalar.value {
+    match scalar.primitive() {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
         Primitive::Int(Integer::I32, _) => cx.type_i32(),
@@ -784,7 +812,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
             } else {
@@ -795,7 +823,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.ptrtoint(value, bx.cx.type_isize());
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -810,7 +838,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
@@ -822,7 +850,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
                 value
@@ -836,19 +864,21 @@ fn llvm_fixup_input<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -862,7 +892,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
@@ -870,7 +900,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
             value = bx.extract_element(value, bx.const_i32(0));
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
             }
             value
@@ -885,7 +915,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
@@ -897,7 +927,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
                 value
@@ -911,20 +941,22 @@ fn llvm_fixup_output<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
-            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -937,7 +969,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
 ) -> &'ll Type {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
@@ -956,7 +988,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             cx.type_vector(elem_ty, count * 2)
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             cx.type_i64()
         }
@@ -968,7 +1000,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
                 layout.llvm_type(cx)
@@ -982,19 +1014,21 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
-            Primitive::F32 => cx.type_i32(),
-            Primitive::F64 => cx.type_i64(),
-            _ => layout.llvm_type(cx),
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+                Primitive::F32 => cx.type_i32(),
+                Primitive::F64 => cx.type_i64(),
+                _ => layout.llvm_type(cx),
+            }
+        }
         _ => layout.llvm_type(cx),
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 0f5b1c08ec2..7a747a9cdee 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -313,7 +313,9 @@ fn fat_lto(
         for (bc_decoded, name) in serialized_modules {
             let _timer = cgcx
                 .prof
-                .generic_activity_with_arg("LLVM_fat_lto_link_module", format!("{:?}", name));
+                .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
+                    recorder.record_arg(format!("{:?}", name))
+                });
             info!("linking {:?}", name);
             let data = bc_decoded.data();
             linker.add(data).map_err(|()| {
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index c18719d4ad7..7ef3b12cd08 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -721,8 +721,7 @@ pub(crate) fn link(

     let mut linker = Linker::new(first.module_llvm.llmod());
     for module in elements {
-        let _timer =
-            cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
+        let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
         let buffer = ModuleBuffer::new(module.module_llvm.llmod());
         linker.add(buffer.data()).map_err(|()| {
             let msg = format!("failed to serialize module {:?}", module.name);
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index dd3ada44389..86f92dc0239 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -74,10 +74,11 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
     fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
         let cgu = tcx.codegen_unit(cgu_name);
-        let _prof_timer = tcx.prof.generic_activity_with_args(
-            "codegen_module",
-            &[cgu_name.to_string(), cgu.size_estimate().to_string()],
-        );
+        let _prof_timer =
+            tcx.prof.generic_activity_with_arg_recorder("codegen_module", |recorder| {
+                recorder.record_arg(cgu_name.to_string());
+                recorder.record_arg(cgu.size_estimate().to_string());
+            });

         // Instantiate monomorphizations without filling out definitions yet...
         let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
         {
@@ -99,15 +100,6 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
                 attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
             }

-            // Run replace-all-uses-with for statics that need it
-            for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
-                unsafe {
-                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
-                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
-                    llvm::LLVMDeleteGlobal(old_g);
-                }
-            }
-
             // Finalize code coverage by injecting the coverage map. Note, the coverage map will
             // also be added to the `llvm.compiler.used` variable, created next.
             if cx.sess().instrument_coverage() {
@@ -122,6 +114,16 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
                 cx.create_compiler_used_variable()
             }

+            // Run replace-all-uses-with for statics that need it. This must
+            // happen after the llvm.used variables are created.
+            for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
+                unsafe {
+                    let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
+                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                    llvm::LLVMDeleteGlobal(old_g);
+                }
+            }
+
             // Finalize debuginfo
             if cx.sess().opts.debuginfo != DebugInfo::None {
                 cx.debuginfo_finalize();
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 1bbfc13e05e..88b87951ecd 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -484,14 +484,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 bx.noundef_metadata(load);
             }

-            match scalar.value {
+            match scalar.primitive() {
                 abi::Int(..) => {
                     if !scalar.is_always_valid(bx) {
-                        bx.range_metadata(load, scalar.valid_range);
+                        bx.range_metadata(load, scalar.valid_range(bx));
                     }
                 }
                 abi::Pointer => {
-                    if !scalar.valid_range.contains(0) {
+                    if !scalar.valid_range(bx).contains(0) {
                         bx.nonnull_metadata(load);
                     }

@@ -525,7 +525,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
-            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+            let b_offset = a.size(self).align_to(b.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);

             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index a85b2e6141b..b69d7a000ee 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -221,16 +221,16 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }

     fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
-        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
-                assert_eq!(0, layout.value.size(self).bytes());
+                assert_eq!(0, layout.size(self).bytes());
                 self.const_undef(self.type_ix(0))
             }
             Scalar::Int(int) => {
-                let data = int.assert_bits(layout.value.size(self));
+                let data = int.assert_bits(layout.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
-                if layout.value == Pointer {
+                if layout.primitive() == Pointer {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
@@ -269,7 +269,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         1,
                     )
                 };
-                if layout.value != Pointer {
+                if layout.primitive() != Pointer {
                     unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 413ef0ba764..4d3f3f318b8 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -109,7 +109,10 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                 Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                 &cx.tcx,
             ),
-            Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+            Scalar::Initialized {
+                value: Primitive::Pointer,
+                valid_range: WrappingRange::full(dl.pointer_size),
+            },
             cx.type_i8p_ext(address_space),
         ));
         next_offset = offset + pointer_size;
@@ -409,6 +412,13 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
                 llvm::LLVMRustSetLinkage(new_g, linkage);
                 llvm::LLVMRustSetVisibility(new_g, visibility);

+                // The old global has had its name removed but is returned by
+                // get_static since it is in the instance cache. Provide an
+                // alternative lookup that points to the new global so that
+                // global_asm! can compute the correct mangled symbol name
+                // for the global.
+                self.renamed_statics.borrow_mut().insert(def_id, new_g);
+
                 // To avoid breaking any invariants, we leave around the old
                 // global for the moment; we'll replace all references to it
                 // with the new global later. (See base::codegen_backend.)
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 98cf873ebbd..d296ee3b42c 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -14,6 +14,7 @@ use rustc_codegen_ssa::traits::*;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
 use rustc_middle::mir::mono::CodegenUnit;
 use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
@@ -105,6 +106,12 @@ pub struct CodegenCx<'ll, 'tcx> {

     /// A counter that is used for generating local symbol names
     local_gen_sym_counter: Cell<usize>,
+
+    /// `codegen_static` will sometimes create a second global variable with a
+    /// different type and clear the symbol name of the original global.
+    /// `global_asm!` needs to be able to find this new global so that it can
+    /// compute the correct mangled symbol name to insert into the asm.
+    pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
 }

 pub struct TypeLowering<'ll> {
@@ -436,6 +443,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             rust_try_fn: Cell::new(None),
             intrinsics: Default::default(),
             local_gen_sym_counter: Cell::new(0),
+            renamed_statics: Default::default(),
         }
     }

diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index 31bb9ed3185..d6e2c8ccdf4 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -372,7 +372,6 @@ fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(

     // Build the type node for each field.
     let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
-        .clone()
         .map(|variant_index| {
             let variant_struct_type_di_node = super::build_generator_variant_struct_type_di_node(
                 cx,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
index 1eafa9501c4..73e01d0453b 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -118,7 +118,7 @@ fn tag_base_type<'ll, 'tcx>(

         Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
             // Niche tags are always normalized to unsized integers of the correct size.
-            match tag.value {
+            match tag.primitive() {
                 Primitive::Int(t, _) => t,
                 Primitive::F32 => Integer::I32,
                 Primitive::F64 => Integer::I64,
@@ -136,7 +136,7 @@

         Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
             // Direct tags preserve the sign.
-            tag.value.to_ty(cx.tcx)
+            tag.primitive().to_ty(cx.tcx)
         }
     }
 }
@@ -425,7 +425,7 @@ fn compute_discriminant_value<'ll, 'tcx>(
             let value = (variant_index.as_u32() as u128)
                 .wrapping_sub(niche_variants.start().as_u32() as u128)
                 .wrapping_add(niche_start);
-            let value = tag.value.size(cx).truncate(value);
+            let value = tag.size(cx).truncate(value);
             // NOTE(eddyb) do *NOT* remove this assert, until
             // we pass the full 128-bit value to LLVM, otherwise
             // truncation will be silent and remain undetected.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
index 8ce44ada887..87fbb737ea8 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -3,7 +3,7 @@ use std::cell::RefCell;
 use rustc_data_structures::{
     fingerprint::Fingerprint,
     fx::FxHashMap,
-    stable_hasher::{HashStable, NodeIdHashingMode, StableHasher},
+    stable_hasher::{HashStable, StableHasher},
 };
 use rustc_middle::{
     bug,
@@ -94,11 +94,7 @@ impl<'tcx> UniqueTypeId<'tcx> {
     pub fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String {
         let mut hasher = StableHasher::new();
         let mut hcx = tcx.create_stable_hashing_context();
-        hcx.while_hashing_spans(false, |hcx| {
-            hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
-                self.hash_stable(hcx, &mut hasher);
-            });
-        });
+        hcx.while_hashing_spans(false, |hcx| self.hash_stable(hcx, &mut hasher));
         hasher.finish::<Fingerprint>().to_hex()
     }

diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index f4dc33452d1..cf9cf1b70aa 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -134,7 +134,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
                     abi::Abi::Scalar(scalar) => {
-                        match scalar.value {
+                        match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
                                     // `va_arg` should not be called on an integer type
@@ -1839,6 +1839,27 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
         simd_neg: Int => neg, Float => fneg;
     }

+    if name == sym::simd_arith_offset {
+        // This also checks that the first operand is a ptr type.
+        let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
+            span_bug!(span, "must be called with a vector of pointer types as first argument")
+        });
+        let layout = bx.layout_of(pointee.ty);
+        let ptrs = args[0].immediate();
+        // The second argument must be a ptr-sized integer.
+        // (We don't care about the signedness, this is wrapping anyway.)
+        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+        if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
+            span_bug!(
+                span,
+                "must be called with a vector of pointer-sized integers as second argument"
+            );
+        }
+        let offsets = args[1].immediate();
+
+        return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
+    }
+
     if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
         let lhs = args[0].immediate();
         let rhs = args[1].immediate();
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 375b9927c86..7f533b0552a 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2537,4 +2537,6 @@ extern "C" {
         remark_passes_len: usize,
     );

+    #[allow(improper_ctypes)]
+    pub fn LLVMRustGetMangledName(V: &Value, out: &RustString);
 }
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 757aa9a1011..86280523631 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -309,7 +309,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         scalar: Scalar,
         offset: Size,
     ) -> &'a Type {
-        match scalar.value {
+        match scalar.primitive() {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
@@ -362,8 +362,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return cx.type_i1();
         }

-        let offset =
-            if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
+        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
         self.scalar_llvm_type_at(cx, scalar, offset)
     }
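
Context: most of the mechanical churn in this diff comes from rustc_target::abi::Scalar changing from a struct with public `value` and `valid_range` fields into an enum reached through accessor methods, visible in the `Scalar::Initialized { .. }` construction in consts.rs. A minimal sketch of that shape, simplified from the real rustc definition (the method body and the exact form of the `Union` variant here are illustrative, not copied from the patch):

    // Simplified model of the post-patch Scalar type: field access such as
    // `scalar.value` becomes `scalar.primitive()`, because a union scalar
    // has a primitive type but no meaningful valid range.
    #[derive(Clone, Copy, PartialEq)]
    enum Primitive {
        Int,
        F32,
        F64,
        Pointer,
    }

    #[derive(Clone, Copy)]
    struct WrappingRange {
        start: u128,
        end: u128,
    }

    #[derive(Clone, Copy)]
    enum Scalar {
        // The value is known to be initialized and inside `valid_range`.
        Initialized { value: Primitive, valid_range: WrappingRange },
        // The value may contain uninitialized bits (e.g. a union field), so
        // no range or nonnull metadata may be emitted for loads of it.
        Union { value: Primitive },
    }

    impl Scalar {
        fn primitive(self) -> Primitive {
            match self {
                Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
            }
        }
    }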
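The `GlobalAsmOperandRef::SymFn`/`SymStatic` arms in asm.rs, together with `LLVMRustGetMangledName` in ffi.rs and the `renamed_statics` map threaded through context.rs and consts.rs, are the codegen half of `sym` operand support in `global_asm!`. A user-side sketch of what this enables (nightly-only at the time of the patch; the trampoline, function names, and x86-64 AT&T syntax below are illustrative, not from the patch):

    #![feature(asm_sym)]

    use std::arch::global_asm;

    // The mangled name of this function is what the SymFn arm splices
    // into the assembled template string.
    extern "C" fn rust_callee() -> u64 {
        42
    }

    // A hand-written trampoline that tail-calls the Rust function; `sym`
    // avoids hard-coding the mangled symbol name in the asm body.
    global_asm!(
        ".globl asm_trampoline",
        "asm_trampoline:",
        "jmp {func}",
        func = sym rust_callee,
    );

    extern "C" {
        fn asm_trampoline() -> u64;
    }

    fn main() {
        // Calling the asm trampoline lands in `rust_callee`.
        assert_eq!(unsafe { asm_trampoline() }, 42);
    }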
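Likewise, the new `simd_arith_offset` lowering in intrinsic.rs emits a single non-inbounds vector GEP, i.e. an element-wise wrapping pointer offset. A scalar model of those semantics, with plain arrays standing in for SIMD vectors (the function name is illustrative):

    // Element-wise `wrapping_offset`: what the single `bx.gep(...)` on a
    // vector of pointers computes, shown one lane at a time.
    fn simd_arith_offset_model(ptrs: [*const u32; 4], offsets: [isize; 4]) -> [*const u32; 4] {
        let mut out = ptrs;
        for i in 0..4 {
            // Wrapping (non-inbounds) GEP semantics, matching `bx.gep`.
            out[i] = ptrs[i].wrapping_offset(offsets[i]);
        }
        out
    }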
