Diffstat (limited to 'compiler/rustc_codegen_llvm/src/asm.rs')
-rw-r--r--   compiler/rustc_codegen_llvm/src/asm.rs | 112
1 file changed, 73 insertions, 39 deletions
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 205b77e9862..e994001f96f 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -290,6 +290,11 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
         attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
 
+        // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
+        if let Some((dest, _, _)) = dest_catch_funclet {
+            self.switch_to_block(dest);
+        }
+
         // Write results to outputs
         for (idx, op) in operands.iter().enumerate() {
             if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
@@ -307,11 +312,11 @@
     }
 }
 
-impl AsmMethods for CodegenCx<'_, '_> {
+impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
     fn codegen_global_asm(
         &self,
         template: &[InlineAsmTemplatePiece],
-        operands: &[GlobalAsmOperandRef],
+        operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
         _line_spans: &[Span],
     ) {
@@ -337,6 +342,29 @@ impl AsmMethods for CodegenCx<'_, '_> {
                             // here unlike normal inline assembly.
                             template_str.push_str(string);
                         }
+                        GlobalAsmOperandRef::SymFn { instance } => {
+                            let llval = self.get_fn(instance);
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
+                        GlobalAsmOperandRef::SymStatic { def_id } => {
+                            let llval = self
+                                .renamed_statics
+                                .borrow()
+                                .get(&def_id)
+                                .copied()
+                                .unwrap_or_else(|| self.get_static(def_id));
+                            self.add_compiler_used_global(llval);
+                            let symbol = llvm::build_string(|s| unsafe {
+                                llvm::LLVMRustGetMangledName(llval, s);
+                            })
+                            .expect("symbol is not valid UTF-8");
+                            template_str.push_str(&symbol);
+                        }
                     }
                 }
             }
@@ -763,7 +791,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
 fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
-    match scalar.value {
+    match scalar.primitive() {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
         Primitive::Int(Integer::I32, _) => cx.type_i32(),
@@ -784,7 +812,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
             } else {
@@ -795,7 +823,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.ptrtoint(value, bx.cx.type_isize());
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -810,7 +838,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
@@ -822,7 +850,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
                 value
@@ -836,19 +864,21 @@ fn llvm_fixup_input<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -862,7 +892,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
@@ -870,7 +900,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
             value = bx.extract_element(value, bx.const_i32(0));
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
             }
             value
@@ -885,7 +915,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
@@ -897,7 +927,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
                 value
@@ -911,20 +941,22 @@ fn llvm_fixup_output<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
-            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -937,7 +969,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
 ) -> &'ll Type {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
@@ -956,7 +988,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             cx.type_vector(elem_ty, count * 2)
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             cx.type_i64()
         }
@@ -968,7 +1000,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
                 layout.llvm_type(cx)
@@ -982,19 +1014,21 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
             } else {
                 layout.llvm_type(cx)
            }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
-            Primitive::F32 => cx.type_i32(),
-            Primitive::F64 => cx.type_i64(),
-            _ => layout.llvm_type(cx),
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+                Primitive::F32 => cx.type_i32(),
+                Primitive::F64 => cx.type_i64(),
+                _ => layout.llvm_type(cx),
+            }
+        }
         _ => layout.llvm_type(cx),
     }
 }
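A note on the first hunk: when an asm! block is allowed to unwind, the backend emits an LLVM `invoke` rather than a plain `call`, and the builder must continue emitting IR (including the writes back to output places) in the invoke's 'normal' destination block; that is what the added `switch_to_block(dest)` call does. Below is a minimal, hypothetical sketch of source that can take this path, not code from this commit. It assumes an x86_64 target and a nightly toolchain with the unstable `asm_unwind` feature; all names are made up.

#![feature(asm_unwind)] // `options(may_unwind)` was unstable when this change landed

use std::arch::asm;

struct Guard;
impl Drop for Guard {
    fn drop(&mut self) {
        println!("cleanup ran");
    }
}

fn main() {
    // A live value with a Drop impl gives LLVM a cleanup to run if the asm
    // unwinds, which is the situation where an `invoke` (rather than a plain
    // `call`) is emitted and the new `switch_to_block(dest)` matters.
    let _guard = Guard;
    let out: u64;
    unsafe {
        asm!("mov {0}, 42", out(reg) out, options(may_unwind));
    }
    assert_eq!(out, 42);
}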
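The SymFn and SymStatic arms added to codegen_global_asm resolve a `sym` operand to its LLVM value, mark it as compiler-used so it is not discarded, and splice its mangled name into the assembly template. Below is a minimal, hypothetical sketch of what that enables at the source level, not code from this commit. It assumes an x86_64 Linux target and a toolchain where `sym` operands are accepted in global_asm!; the symbol and label names are made up.

use std::arch::global_asm;

// A Rust function we want to reach from hand-written assembly. No #[no_mangle]
// is needed: the `sym` operand substitutes the *mangled* symbol name.
pub extern "C" fn rust_callee() -> i32 {
    42
}

// The codegen path in the diff resolves `rust_callee` to its LLVM value,
// marks it as used, and pastes its mangled name where `{callee}` appears.
global_asm!(
    ".globl asm_entry",
    "asm_entry:",
    "jmp {callee}",
    callee = sym rust_callee,
);

extern "C" {
    fn asm_entry() -> i32;
}

fn main() {
    // Control flow: main -> asm_entry (assembly stub) -> rust_callee.
    assert_eq!(unsafe { asm_entry() }, 42);
}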
