Diffstat (limited to 'compiler/rustc_codegen_ssa/src/mir')
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs      |  94
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs  |   8
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs        |  31
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/naked_asm.rs  |  40
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs    | 131
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs      |   4
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs     |  57
7 files changed, 196 insertions, 169 deletions
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 8c571558717..e2a9b540d30 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -4,9 +4,7 @@ use rustc_abi::{BackendRepr, ExternAbi, HasDataLayout, Reg, WrappingRange};
 use rustc_ast as ast;
 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_hir::lang_items::LangItem;
-use rustc_middle::mir::{
-    self, AssertKind, BasicBlock, InlineAsmMacro, SwitchTargets, UnwindTerminateReason,
-};
+use rustc_middle::mir::{self, AssertKind, InlineAsmMacro, SwitchTargets, UnwindTerminateReason};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Instance, Ty};
@@ -167,9 +165,13 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
         if let Some(instance) = instance {
             if is_call_from_compiler_builtins_to_upstream_monomorphization(tcx, instance) {
                 if destination.is_some() {
-                    let caller = with_no_trimmed_paths!(tcx.def_path_str(fx.instance.def_id()));
-                    let callee = with_no_trimmed_paths!(tcx.def_path_str(instance.def_id()));
-                    tcx.dcx().emit_err(CompilerBuiltinsCannotCall { caller, callee });
+                    let caller_def = fx.instance.def_id();
+                    let e = CompilerBuiltinsCannotCall {
+                        span: tcx.def_span(caller_def),
+                        caller: with_no_trimmed_paths!(tcx.def_path_str(caller_def)),
+                        callee: with_no_trimmed_paths!(tcx.def_path_str(instance.def_id())),
+                    };
+                    tcx.dcx().emit_err(e);
                 } else {
                     info!(
                         "compiler_builtins call to diverging function {:?} replaced with abort",
@@ -429,11 +431,34 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let cmp = bx.icmp(IntPredicate::IntEQ, discr_value, llval);
             bx.cond_br(cmp, ll1, ll2);
         } else {
-            bx.switch(
-                discr_value,
-                helper.llbb_with_cleanup(self, targets.otherwise()),
-                target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
-            );
+            let otherwise = targets.otherwise();
+            let otherwise_cold = self.cold_blocks[otherwise];
+            let otherwise_unreachable = self.mir[otherwise].is_empty_unreachable();
+            let cold_count = targets.iter().filter(|(_, target)| self.cold_blocks[*target]).count();
+            let none_cold = cold_count == 0;
+            let all_cold = cold_count == targets.iter().len();
+            if (none_cold && (!otherwise_cold || otherwise_unreachable))
+                || (all_cold && (otherwise_cold || otherwise_unreachable))
+            {
+                // All targets have the same weight,
+                // or `otherwise` is unreachable and it's the only target with a different weight.
+                bx.switch(
+                    discr_value,
+                    helper.llbb_with_cleanup(self, targets.otherwise()),
+                    target_iter
+                        .map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
+                );
+            } else {
+                // Targets have different weights
+                bx.switch_with_weights(
+                    discr_value,
+                    helper.llbb_with_cleanup(self, targets.otherwise()),
+                    otherwise_cold,
+                    target_iter.map(|(value, target)| {
+                        (value, helper.llbb_with_cleanup(self, target), self.cold_blocks[target])
+                    }),
+                );
+            }
         }
     }
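[Illustrative aside, not part of the patch] The hunk above lets `codegen_switchint_terminator` emit branch weights when only some `SwitchInt` targets are cold. A minimal sketch of source that produces such a switch, assuming only the standard library (`fail_path` and `check` are made-up names): the `Err` arm ends in a call to a `#[cold]` function, so its block is marked cold while the `Ok` arm stays hot.

    // The `Err` arm calls a `#[cold]` function, so its MIR block is cold and
    // the switch on the `Result` discriminant can carry branch weights.
    #[cold]
    fn fail_path(code: u32) -> ! {
        panic!("failed with code {code}");
    }

    pub fn check(r: Result<u32, u32>) -> u32 {
        match r {
            Ok(v) => v + 1,
            Err(code) => fail_path(code),
        }
    }

    fn main() {
        println!("{}", check(Ok(41)));
    }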
@@ -699,14 +724,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // Put together the arguments to the panic entry point.
         let (lang_item, args) = match msg {
-            AssertKind::BoundsCheck { ref len, ref index } => {
+            AssertKind::BoundsCheck { len, index } => {
                 let len = self.codegen_operand(bx, len).immediate();
                 let index = self.codegen_operand(bx, index).immediate();
                 // It's `fn panic_bounds_check(index: usize, len: usize)`,
                 // and `#[track_caller]` adds an implicit third argument.
                 (LangItem::PanicBoundsCheck, vec![index, len, location])
             }
-            AssertKind::MisalignedPointerDereference { ref required, ref found } => {
+            AssertKind::MisalignedPointerDereference { required, found } => {
                 let required = self.codegen_operand(bx, required).immediate();
                 let found = self.codegen_operand(bx, found).immediate();
                 // It's `fn panic_misaligned_pointer_dereference(required: usize, found: usize)`,
@@ -919,7 +944,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     &fn_abi.ret,
                     &mut llargs,
                     Some(intrinsic),
-                    target,
                 );
                 let dest = match ret_dest {
                     _ if fn_abi.ret.is_indirect() => llargs[0],
@@ -975,23 +999,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };
         let mut llargs = Vec::with_capacity(arg_count);
-        let destination = target.as_ref().map(|&target| {
-            (
-                self.make_return_dest(
-                    bx,
-                    destination,
-                    &fn_abi.ret,
-                    &mut llargs,
-                    None,
-                    Some(target),
-                ),
-                target,
-            )
-        });
+
+        // We still need to call `make_return_dest` even if there's no `target`, since
+        // `fn_abi.ret` could be `PassMode::Indirect`, even if it is uninhabited,
+        // and `make_return_dest` adds the return-place indirect pointer to `llargs`.
+        let return_dest = self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, None);
+        let destination = target.map(|target| (return_dest, target));
 
         // Split the rust-call tupled arguments off.
-        let (first_args, untuple) = if abi == ExternAbi::RustCall && !args.is_empty() {
-            let (tup, args) = args.split_last().unwrap();
+        let (first_args, untuple) = if abi == ExternAbi::RustCall
+            && let Some((tup, args)) = args.split_last()
+        {
             (args, Some(tup))
         } else {
             (args, None)
@@ -1013,11 +1031,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     //
                     // This is also relevant for `Pin<&mut Self>`, where we need to peel the
                     // `Pin`.
-                    while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() {
+                    while !op.layout.ty.is_raw_ptr() && !op.layout.ty.is_ref() {
                         let (idx, _) = op.layout.non_1zst_field(bx).expect(
                             "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                         );
-                        op = op.extract_field(bx, idx);
+                        op = op.extract_field(self, bx, idx);
                     }
 
                     // Now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
@@ -1045,11 +1063,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
                 Immediate(_) => {
                     // See comment above explaining why we peel these newtypes
-                    while !op.layout.ty.is_unsafe_ptr() && !op.layout.ty.is_ref() {
+                    while !op.layout.ty.is_raw_ptr() && !op.layout.ty.is_ref() {
                         let (idx, _) = op.layout.non_1zst_field(bx).expect(
                             "not exactly one non-1-ZST field in a `DispatchFromDyn` type",
                         );
-                        op = op.extract_field(bx, idx);
+                        op = op.extract_field(self, bx, idx);
                     }
 
                     // Make sure that we've actually unwrapped the rcvr down
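[Illustrative aside] The two hunks above peel `DispatchFromDyn` receiver newtypes (for example `Pin<&mut Self>`) down to the underlying pointer before a virtual call, now threading `self` into `extract_field`. A small sketch of the kind of call site this handles, using only the standard library (`Tick` and `Counter` are made-up names):

    use std::pin::Pin;

    trait Tick {
        // A `Pin<&mut Self>` receiver: a newtype wrapped around `&mut Self`.
        fn tick(self: Pin<&mut Self>) -> u32;
    }

    struct Counter(u32);

    impl Tick for Counter {
        fn tick(self: Pin<&mut Self>) -> u32 {
            let this = self.get_mut();
            this.0 += 1;
            this.0
        }
    }

    fn main() {
        let mut c = Counter(0);
        // Dynamic dispatch through `Pin<&mut dyn Tick>`: codegen peels the `Pin`
        // wrapper to reach the `&mut dyn Tick` pointer-plus-vtable pair.
        let mut pinned: Pin<&mut dyn Tick> = Pin::new(&mut c);
        println!("{}", pinned.as_mut().tick());
    }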
@@ -1549,9 +1567,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                     }
+                    // We store bools as `i8` so we need to truncate to `i1`.
+                    llval = bx.to_immediate_scalar(llval, scalar);
                 }
-                // We store bools as `i8` so we need to truncate to `i1`.
-                llval = bx.to_immediate(llval, arg.layout);
             }
         }
@@ -1581,7 +1599,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         } else {
             // If the tuple is immediate, the elements are as well.
             for i in 0..tuple.layout.fields.count() {
-                let op = tuple.extract_field(bx, i);
+                let op = tuple.extract_field(self, bx, i);
                 self.codegen_argument(bx, op, llargs, &args[i]);
             }
         }
@@ -1790,11 +1808,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
         llargs: &mut Vec<Bx::Value>,
         intrinsic: Option<ty::IntrinsicDef>,
-        target: Option<BasicBlock>,
     ) -> ReturnDest<'tcx, Bx::Value> {
-        if target.is_none() {
-            return ReturnDest::Nothing;
-        }
         // If the return is ignored, we can just return a do-nothing `ReturnDest`.
         if fn_ret.is_ignore() {
             return ReturnDest::Nothing;
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 6e7fbe62c8d..b34e966ba6c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -367,7 +367,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     bx.sess().dcx().emit_fatal(errors::AtomicCompareExchange);
                 };
                 let ty = fn_args.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                     let weak = instruction == "cxchgweak";
                     let dst = args[0].immediate();
                     let cmp = args[1].immediate();
@@ -395,7 +395,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             "load" => {
                 let ty = fn_args.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                     let layout = bx.layout_of(ty);
                     let size = layout.size;
                     let source = args[0].immediate();
@@ -413,7 +413,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
             "store" => {
                 let ty = fn_args.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                     let size = bx.layout_of(ty).size;
                     let val = args[1].immediate();
                     let ptr = args[0].immediate();
@@ -458,7 +458,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 };
 
                 let ty = fn_args.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
                     let ptr = args[0].immediate();
                     let val = args[1].immediate();
                     bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
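[Illustrative aside] The intrinsic.rs hunks only rename `is_unsafe_ptr()` to `is_raw_ptr()`; the check still gates the atomic intrinsics on integer and raw-pointer types, which is what backs `AtomicPtr` in the standard library. A minimal sketch:

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    static SLOT: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

    fn main() {
        let mut value = 42u32;
        // `store`, `load`, and `compare_exchange` on `AtomicPtr` bottom out in
        // the pointer-typed atomic intrinsics guarded by `is_raw_ptr()` above.
        SLOT.store(&mut value, Ordering::Release);
        let p = SLOT.load(Ordering::Acquire);
        assert!(!p.is_null());
        let _ = SLOT.compare_exchange(p, ptr::null_mut(), Ordering::AcqRel, Ordering::Acquire);
    }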
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 3a896071bc6..0758e5d0456 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -3,7 +3,7 @@ use std::iter;
 use rustc_index::IndexVec;
 use rustc_index::bit_set::DenseBitSet;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use rustc_middle::mir::{UnwindTerminateReason, traversal};
+use rustc_middle::mir::{Local, UnwindTerminateReason, traversal};
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, TyAndLayout};
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
 use rustc_middle::{bug, mir, span_bug};
@@ -240,7 +240,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     let local_values = {
         let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals);
 
-        let mut allocate_local = |local| {
+        let mut allocate_local = |local: Local| {
             let decl = &mir.local_decls[local];
             let layout = start_bx.layout_of(fx.monomorphize(decl.ty));
             assert!(!layout.ty.has_erasable_regions());
@@ -502,14 +502,25 @@ fn find_cold_blocks<'tcx>(
     for (bb, bb_data) in traversal::postorder(mir) {
         let terminator = bb_data.terminator();
 
-        // If a BB ends with a call to a cold function, mark it as cold.
-        if let mir::TerminatorKind::Call { ref func, .. } = terminator.kind
-            && let ty::FnDef(def_id, ..) = *func.ty(local_decls, tcx).kind()
-            && let attrs = tcx.codegen_fn_attrs(def_id)
-            && attrs.flags.contains(CodegenFnAttrFlags::COLD)
-        {
-            cold_blocks[bb] = true;
-            continue;
+        match terminator.kind {
+            // If a BB ends with a call to a cold function, mark it as cold.
+            mir::TerminatorKind::Call { ref func, .. }
+            | mir::TerminatorKind::TailCall { ref func, .. }
+                if let ty::FnDef(def_id, ..) = *func.ty(local_decls, tcx).kind()
+                    && let attrs = tcx.codegen_fn_attrs(def_id)
+                    && attrs.flags.contains(CodegenFnAttrFlags::COLD) =>
+            {
+                cold_blocks[bb] = true;
+                continue;
+            }
+
+            // If a BB ends with an `unreachable`, also mark it as cold.
+            mir::TerminatorKind::Unreachable => {
+                cold_blocks[bb] = true;
+                continue;
+            }
+
+            _ => {}
         }
 
         // If all successors of a BB are cold and there's at least one of them, mark this BB as cold
diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
index eb0711dbb32..0593fb420c3 100644
--- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs
@@ -8,7 +8,7 @@ use rustc_middle::ty::{Instance, Ty, TyCtxt};
 use rustc_middle::{bug, span_bug, ty};
 use rustc_span::sym;
 use rustc_target::callconv::{ArgAbi, FnAbi, PassMode};
-use rustc_target::spec::WasmCAbi;
+use rustc_target::spec::{BinaryFormat, WasmCAbi};
 
 use crate::common;
 use crate::traits::{AsmCodegenMethods, BuilderMethods, GlobalAsmOperandRef, MiscCodegenMethods};
@@ -104,27 +104,6 @@ fn inline_to_global_operand<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     }
 }
 
-enum AsmBinaryFormat {
-    Elf,
-    Macho,
-    Coff,
-    Wasm,
-}
-
-impl AsmBinaryFormat {
-    fn from_target(target: &rustc_target::spec::Target) -> Self {
-        if target.is_like_windows {
-            Self::Coff
-        } else if target.is_like_osx {
-            Self::Macho
-        } else if target.is_like_wasm {
-            Self::Wasm
-        } else {
-            Self::Elf
-        }
-    }
-}
-
 fn prefix_and_suffix<'tcx>(
     tcx: TyCtxt<'tcx>,
     instance: Instance<'tcx>,
@@ -134,7 +113,7 @@ fn prefix_and_suffix<'tcx>(
 ) -> (String, String) {
     use std::fmt::Write;
 
-    let asm_binary_format = AsmBinaryFormat::from_target(&tcx.sess.target);
+    let asm_binary_format = &tcx.sess.target.binary_format;
 
     let is_arm = tcx.sess.target.arch == "arm";
     let is_thumb = tcx.sess.unstable_target_features.contains(&sym::thumb_mode);
@@ -178,10 +157,13 @@ fn prefix_and_suffix<'tcx>(
             }
             Linkage::LinkOnceAny | Linkage::LinkOnceODR | Linkage::WeakAny | Linkage::WeakODR => {
                 match asm_binary_format {
-                    AsmBinaryFormat::Elf | AsmBinaryFormat::Coff | AsmBinaryFormat::Wasm => {
+                    BinaryFormat::Elf
+                    | BinaryFormat::Coff
+                    | BinaryFormat::Wasm
+                    | BinaryFormat::Xcoff => {
                         writeln!(w, ".weak {asm_name}")?;
                     }
-                    AsmBinaryFormat::Macho => {
+                    BinaryFormat::MachO => {
                         writeln!(w, ".globl {asm_name}")?;
                         writeln!(w, ".weak_definition {asm_name}")?;
                     }
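[Illustrative aside] `prefix_and_suffix` builds the assembler directives emitted around the body of a naked function, and the hunks above switch it from the local `AsmBinaryFormat` enum to the target's `BinaryFormat` (gaining an XCOFF case). A hedged sketch of a function that goes through this path; it assumes an x86-64 System V target and a toolchain where `#[unsafe(naked)]` and `naked_asm!` are available, so it is deliberately not portable:

    use std::arch::naked_asm;

    #[unsafe(naked)]
    extern "C" fn add_one(_x: u64) -> u64 {
        // The compiler wraps exactly this instruction sequence in the
        // section/alignment/linkage directives produced by `prefix_and_suffix`.
        naked_asm!("lea rax, [rdi + 1]", "ret")
    }

    fn main() {
        assert_eq!(add_one(41), 42);
    }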
@@ -207,7 +189,7 @@ fn prefix_and_suffix<'tcx>(
     let mut begin = String::new();
     let mut end = String::new();
     match asm_binary_format {
-        AsmBinaryFormat::Elf => {
+        BinaryFormat::Elf | BinaryFormat::Xcoff => {
             let section = link_section.unwrap_or(format!(".text.{asm_name}"));
 
             let progbits = match is_arm {
@@ -239,7 +221,7 @@ fn prefix_and_suffix<'tcx>(
                 writeln!(end, "{}", arch_suffix).unwrap();
             }
         }
-        AsmBinaryFormat::Macho => {
+        BinaryFormat::MachO => {
             let section = link_section.unwrap_or("__TEXT,__text".to_string());
             writeln!(begin, ".pushsection {},regular,pure_instructions", section).unwrap();
             writeln!(begin, ".balign {align}").unwrap();
@@ -255,7 +237,7 @@ fn prefix_and_suffix<'tcx>(
                 writeln!(end, "{}", arch_suffix).unwrap();
             }
         }
-        AsmBinaryFormat::Coff => {
+        BinaryFormat::Coff => {
             let section = link_section.unwrap_or(format!(".text.{asm_name}"));
             writeln!(begin, ".pushsection {},\"xr\"", section).unwrap();
             writeln!(begin, ".balign {align}").unwrap();
@@ -272,7 +254,7 @@ fn prefix_and_suffix<'tcx>(
                 writeln!(end, "{}", arch_suffix).unwrap();
             }
         }
-        AsmBinaryFormat::Wasm => {
+        BinaryFormat::Wasm => {
             let section = link_section.unwrap_or(format!(".text.{asm_name}"));
             writeln!(begin, ".section {section},\"\",@").unwrap();
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 9ca7d4f8f00..461cf1b8acd 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -1,15 +1,14 @@
-use std::assert_matches::assert_matches;
 use std::fmt;
 
 use arrayvec::ArrayVec;
 use either::Either;
 use rustc_abi as abi;
 use rustc_abi::{Align, BackendRepr, Size};
-use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
 use rustc_middle::mir::{self, ConstValue};
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::{bug, span_bug};
 use tracing::debug;
 
 use super::place::{PlaceRef, PlaceValue};
@@ -352,79 +351,83 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 
     pub(crate) fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         &self,
+        fx: &mut FunctionCx<'a, 'tcx, Bx>,
         bx: &mut Bx,
         i: usize,
     ) -> Self {
         let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);
 
-        let mut val = match (self.val, self.layout.backend_repr) {
-            // If the field is ZST, it has no data.
-            _ if field.is_zst() => OperandValue::ZeroSized,
-
-            // Newtype of a scalar, scalar pair or vector.
-            (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
-                if field.size == self.layout.size =>
+        if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) {
+            if let BackendRepr::Vector { count, .. } = self.layout.backend_repr
+                && let BackendRepr::Memory { sized: true } = field.backend_repr
+                && count.is_power_of_two()
             {
-                assert_eq!(offset.bytes(), 0);
-                self.val
+                assert_eq!(field.size, self.layout.size);
+                // This is being deprecated, but for now stdarch still needs it for
+                // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
+                let place = PlaceRef::alloca(bx, field);
+                self.val.store(bx, place.val.with_type(self.layout));
+                return bx.load_operand(place);
+            } else {
+                // Part of https://github.com/rust-lang/compiler-team/issues/838
+                bug!("Non-ref type {self:?} cannot project to ref field type {field:?}");
             }
+        }
 
-            // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
-                if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.size(bx.cx()));
-                    OperandValue::Immediate(a_llval)
-                } else {
-                    assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
-                    assert_eq!(field.size, b.size(bx.cx()));
-                    OperandValue::Immediate(b_llval)
+        let val = if field.is_zst() {
+            OperandValue::ZeroSized
+        } else if field.size == self.layout.size {
+            assert_eq!(offset.bytes(), 0);
+            fx.codegen_transmute_operand(bx, *self, field).unwrap_or_else(|| {
+                bug!(
+                    "Expected `codegen_transmute_operand` to handle equal-size \
+                      field {i:?} projection from {self:?} to {field:?}"
+                )
+            })
+        } else {
+            let (in_scalar, imm) = match (self.val, self.layout.backend_repr) {
+                // Extract a scalar component from a pair.
+                (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
+                    if offset.bytes() == 0 {
+                        assert_eq!(field.size, a.size(bx.cx()));
+                        (Some(a), a_llval)
+                    } else {
+                        assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
+                        assert_eq!(field.size, b.size(bx.cx()));
+                        (Some(b), b_llval)
+                    }
                 }
-            }
-
-            // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
-                OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
-            }
-            _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
+                _ => {
+                    span_bug!(fx.mir.span, "OperandRef::extract_field({:?}): not applicable", self)
+                }
+            };
+            OperandValue::Immediate(match field.backend_repr {
+                BackendRepr::Vector { .. } => imm,
+                BackendRepr::Scalar(out_scalar) => {
+                    let Some(in_scalar) = in_scalar else {
+                        span_bug!(
+                            fx.mir.span,
+                            "OperandRef::extract_field({:?}): missing input scalar for output scalar",
+                            self
+                        )
+                    };
+                    if in_scalar != out_scalar {
+                        // If the backend and backend_immediate types might differ,
+                        // flip back to the backend type then to the new immediate.
+                        // This avoids nop truncations, but still handles things like
+                        // Bools in union fields needs to be truncated.
+                        let backend = bx.from_immediate(imm);
+                        bx.to_immediate_scalar(backend, out_scalar)
+                    } else {
+                        imm
+                    }
+                }
+                BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(),
+            })
         };
 
-        match (&mut val, field.backend_repr) {
-            (OperandValue::ZeroSized, _) => {}
-            (
-                OperandValue::Immediate(llval),
-                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
-            ) => {
-                // Bools in union fields needs to be truncated.
-                *llval = bx.to_immediate(*llval, field);
-            }
-            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
-                // Bools in union fields needs to be truncated.
-                *a = bx.to_immediate_scalar(*a, a_abi);
-                *b = bx.to_immediate_scalar(*b, b_abi);
-            }
-            // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
-            (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
-                assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });
-
-                let llfield_ty = bx.cx().backend_type(field);
-
-                // Can't bitcast an aggregate, so round trip through memory.
-                let llptr = bx.alloca(field.size, field.align.abi);
-                bx.store(*llval, llptr, field.align.abi);
-                *llval = bx.load(llfield_ty, llptr, field.align.abi);
-            }
-            (
-                OperandValue::Immediate(_),
-                BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
-            ) => {
-                bug!()
-            }
-            (OperandValue::Pair(..), _) => bug!(),
-            (OperandValue::Ref(..), _) => bug!(),
-        }
-
         OperandRef { val, layout: field }
     }
 }
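[Illustrative aside] The rewritten `extract_field` keeps one deprecated special case that stdarch still relies on: projecting the array field out of a `#[repr(simd)]` newtype spills the vector to a stack slot and reloads it. A nightly-only sketch of exactly the shape named in the comment above (`S` is a made-up name, and `#![feature(repr_simd)]` is required):

    #![feature(repr_simd)]

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct S([i32; 4]);

    fn first_lane(s: S) -> i32 {
        // Reading `s.0` projects a memory-repr `[i32; 4]` field out of a
        // vector-repr operand, the case handled by the alloca round trip above.
        let arr: [i32; 4] = s.0;
        arr[0]
    }

    fn main() {
        println!("{}", first_lane(S([1, 2, 3, 4])));
    }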
@@ -581,13 +584,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Moves out of scalar and scalar pair fields are trivial.
                 for elem in place_ref.projection.iter() {
                     match elem {
-                        mir::ProjectionElem::Field(ref f, _) => {
+                        mir::ProjectionElem::Field(f, _) => {
                             assert!(
                                 !o.layout.ty.is_any_ptr(),
                                 "Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
                                  but tried to access field {f:?} of pointer {o:?}",
                             );
-                            o = o.extract_field(bx, f.index());
+                            o = o.extract_field(self, bx, f.index());
                         }
                         mir::ProjectionElem::Index(_)
                         | mir::ProjectionElem::ConstantIndex { .. } => {
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index eb4270ffe80..edd09b9c3c5 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -1,7 +1,7 @@
 use rustc_abi::Primitive::{Int, Pointer};
 use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants};
+use rustc_middle::mir::PlaceTy;
 use rustc_middle::mir::interpret::Scalar;
-use rustc_middle::mir::tcx::PlaceTy;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::{bug, mir};
@@ -423,7 +423,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             layout.size
         };
 
-        let llval = bx.inbounds_gep(bx.cx().backend_type(layout), self.val.llval, &[llindex]);
+        let llval = bx.inbounds_nuw_gep(bx.cx().backend_type(layout), self.val.llval, &[llindex]);
         let align = self.val.align.restrict_for_offset(offset);
         PlaceValue::new_sized(llval, align).with_type(layout)
     }
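[Illustrative aside] The place.rs hunk switches `project_index` from `inbounds_gep` to `inbounds_nuw_gep`: the index is a `usize`, so the offset added to the base pointer cannot be negative and the GEP may also carry "no unsigned wrap". A trivial source pattern that lowers through this path:

    fn nth(xs: &[u64], i: usize) -> u64 {
        // Indexing with an unsigned `i` is what justifies the `nuw` flag on the
        // address computation in optimized builds.
        xs[i]
    }

    fn main() {
        let data = [10u64, 20, 30];
        println!("{}", nth(&data, 2));
    }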
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 27cb7883b9a..1eebe04225b 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -231,7 +231,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ///
     /// Returns `None` for cases that can't work in that framework, such as for
     /// `Immediate`->`Ref` that needs an `alloc` to get the location.
-    fn codegen_transmute_operand(
+    pub(crate) fn codegen_transmute_operand(
         &mut self,
         bx: &mut Bx,
         operand: OperandRef<'tcx, Bx::Value>,
@@ -260,6 +260,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Ref(source_place_val) => {
                 assert_eq!(source_place_val.llextra, None);
                 assert_matches!(operand_kind, OperandValueKind::Ref);
+                // The existing alignment is part of `source_place_val`,
+                // so that alignment will be used, not `cast`'s.
                 Some(bx.load_operand(source_place_val.with_type(cast)).val)
             }
             OperandValue::ZeroSized => {
@@ -435,18 +437,22 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         scalar: abi::Scalar,
         backend_ty: Bx::Type,
     ) {
-        if matches!(self.cx.sess().opts.optimize, OptLevel::No)
-            // For now, the critical niches are all over `Int`eger values.
-            // Should floating-point values or pointers ever get more complex
-            // niches, then this code will probably want to handle them too.
-            || !matches!(scalar.primitive(), abi::Primitive::Int(..))
-            || scalar.is_always_valid(self.cx)
-        {
+        if matches!(self.cx.sess().opts.optimize, OptLevel::No) || scalar.is_always_valid(self.cx) {
             return;
         }
 
-        let range = scalar.valid_range(self.cx);
-        bx.assume_integer_range(imm, backend_ty, range);
+        match scalar.primitive() {
+            abi::Primitive::Int(..) => {
+                let range = scalar.valid_range(self.cx);
+                bx.assume_integer_range(imm, backend_ty, range);
+            }
+            abi::Primitive::Pointer(abi::AddressSpace::DATA)
+                if !scalar.valid_range(self.cx).contains(0) =>
+            {
+                bx.assume_nonnull(imm);
+            }
+            abi::Primitive::Pointer(..) | abi::Primitive::Float(..) => {}
+        }
     }
 
     pub(crate) fn codegen_rvalue_unsized(
@@ -660,9 +666,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     lhs.layout.ty,
                 ),
 
-            (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
-                self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
-            }
+            (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => self
+                .codegen_scalar_binop(
+                    bx,
+                    op,
+                    lhs_val,
+                    rhs_val,
+                    lhs.layout.ty,
+                    rhs.layout.ty,
+                ),
 
             _ => bug!(),
         };
@@ -689,7 +701,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (OperandValue::Immediate(llval), operand.layout)
             }
             mir::UnOp::PtrMetadata => {
-                assert!(operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),);
+                assert!(operand.layout.ty.is_raw_ptr() || operand.layout.ty.is_ref(),);
                 let (_, meta) = operand.val.pointer_parts();
                 assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                 if let Some(meta) = meta {
@@ -883,10 +895,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         op: mir::BinOp,
         lhs: Bx::Value,
         rhs: Bx::Value,
-        input_ty: Ty<'tcx>,
+        lhs_ty: Ty<'tcx>,
+        rhs_ty: Ty<'tcx>,
     ) -> Bx::Value {
-        let is_float = input_ty.is_floating_point();
-        let is_signed = input_ty.is_signed();
+        let is_float = lhs_ty.is_floating_point();
+        let is_signed = lhs_ty.is_signed();
         match op {
             mir::BinOp::Add => {
                 if is_float {
@@ -952,9 +965,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::BinOp::BitAnd => bx.and(lhs, rhs),
             mir::BinOp::BitXor => bx.xor(lhs, rhs),
             mir::BinOp::Offset => {
-                let pointee_type = input_ty
+                let pointee_type = lhs_ty
                     .builtin_deref(true)
-                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty));
+                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", lhs_ty));
                 let pointee_layout = bx.cx().layout_of(pointee_type);
                 if pointee_layout.is_zst() {
                     // `Offset` works in terms of the size of pointee,
@@ -962,7 +975,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     lhs
                 } else {
                     let llty = bx.cx().backend_type(pointee_layout);
-                    bx.inbounds_gep(llty, lhs, &[rhs])
+                    if !rhs_ty.is_signed() {
+                        bx.inbounds_nuw_gep(llty, lhs, &[rhs])
+                    } else {
+                        bx.inbounds_gep(llty, lhs, &[rhs])
+                    }
                 }
             }
             mir::BinOp::Shl | mir::BinOp::ShlUnchecked => {
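[Illustrative aside] The `@@ -435` hunk above extends niche `assume`s from integers to data pointers whose valid range excludes zero, such as `NonNull<T>`. A small sketch of code that can benefit, using only the standard library; whether the backend actually folds the check depends on optimization level:

    use std::ptr::NonNull;

    fn read(p: NonNull<u32>) -> u32 {
        // `NonNull<u32>`'s scalar can never be 0, so optimized builds may now
        // assume non-null here and fold the branch away.
        if p.as_ptr().is_null() { 0 } else { unsafe { *p.as_ptr() } }
    }

    fn main() {
        let mut x = 7u32;
        println!("{}", read(NonNull::from(&mut x)));
    }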
