Diffstat (limited to 'compiler/rustc_target/src')
 compiler/rustc_target/src/abi/call/mips64.rs  |   6
 compiler/rustc_target/src/abi/call/mod.rs     |   6
 compiler/rustc_target/src/abi/call/riscv.rs   |   4
 compiler/rustc_target/src/abi/call/sparc64.rs |  26
 compiler/rustc_target/src/abi/call/x86_64.rs  |   2
 compiler/rustc_target/src/abi/mod.rs          | 102
 6 files changed, 102 insertions(+), 44 deletions(-)
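
Taken together, the six files implement one API change: `Scalar` in `rustc_target::abi` goes from a struct with public `value` and `valid_range` fields to a two-variant enum, `Initialized` and `Union`, and every field access `scalar.value` becomes a call to the new `scalar.primitive()` accessor. A minimal standalone sketch of the shape change, with `Primitive` and `WrappingRange` reduced to stand-ins (the real definitions carry more variants and methods):

    #![allow(dead_code)]

    // Stand-ins for the real rustc_target::abi types; illustrative only.
    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Primitive {
        Int,
        F32,
        F64,
        Pointer,
    }

    #[derive(Clone, Copy, Debug)]
    struct WrappingRange {
        start: u128,
        end: u128,
    }

    // After this commit the union case is a separate variant with no
    // `valid_range`: unions never have niches and even allow undef.
    enum Scalar {
        Initialized { value: Primitive, valid_range: WrappingRange },
        Union { value: Primitive },
    }

    impl Scalar {
        // Call sites that read the old `value` field now go through this
        // accessor, which works uniformly for both variants.
        fn primitive(&self) -> Primitive {
            match *self {
                Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
            }
        }
    }

    fn main() {
        let s = Scalar::Union { value: Primitive::F32 };
        // The replacement pattern repeated across the call-ABI files below:
        assert_eq!(s.primitive(), Primitive::F32);
    }

Making the union case its own variant means no caller can read a `valid_range` that is meaningless for a union; code that needs a range must go through the new `valid_range(cx)` accessor, which synthesizes the full range for unions.
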
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
index 1ac454be5e9..cd54167aa7f 100644
--- a/compiler/rustc_target/src/abi/call/mips64.rs
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -6,7 +6,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
     if let abi::Abi::Scalar(scalar) = arg.layout.abi {
-        if let abi::Int(i, signed) = scalar.value {
+        if let abi::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
                     attrs.ext(ArgExtension::Sext);
@@ -25,7 +25,7 @@ where
     C: HasDataLayout,
 {
     match ret.layout.field(cx, i).abi {
-        abi::Abi::Scalar(scalar) => match scalar.value {
+        abi::Abi::Scalar(scalar) => match scalar.primitive() {
             abi::F32 => Some(Reg::f32()),
             abi::F64 => Some(Reg::f64()),
             _ => None,
@@ -110,7 +110,7 @@ where
 
                 // We only care about aligned doubles
                 if let abi::Abi::Scalar(scalar) = field.abi {
-                    if let abi::F64 = scalar.value {
+                    if let abi::F64 = scalar.primitive() {
                         if offset.is_aligned(dl.f64_align.abi) {
                             // Insert enough integers to cover [last_offset, offset)
                             assert!(last_offset.is_aligned(dl.f64_align.abi));
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
index 34324a58297..ce564d1455b 100644
--- a/compiler/rustc_target/src/abi/call/mod.rs
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -348,7 +348,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
 
             // The primitive for this algorithm.
             Abi::Scalar(scalar) => {
-                let kind = match scalar.value {
+                let kind = match scalar.primitive() {
                     abi::Int(..) | abi::Pointer => RegKind::Integer,
                     abi::F32 | abi::F64 => RegKind::Float,
                 };
@@ -482,7 +482,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
             Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
             Abi::ScalarPair(a, b) => PassMode::Pair(
                 scalar_attrs(&layout, a, Size::ZERO),
-                scalar_attrs(&layout, b, a.value.size(cx).align_to(b.value.align(cx).abi)),
+                scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
             Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
             Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
@@ -534,7 +534,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
         if let Abi::Scalar(scalar) = self.layout.abi {
-            if let abi::Int(i, signed) = scalar.value {
+            if let abi::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
                         if signed {
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
index bbefc73a076..752b44f6434 100644
--- a/compiler/rustc_target/src/abi/call/riscv.rs
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -44,7 +44,7 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
     match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.value {
+        Abi::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -298,7 +298,7 @@ fn classify_arg<'a, Ty, C>(
 
 fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
     if let Abi::Scalar(scalar) = arg.layout.abi {
-        if let abi::Int(i, _) = scalar.value {
+        if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
index 72709d31021..cc3a0a69999 100644
--- a/compiler/rustc_target/src/abi/call/sparc64.rs
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -20,7 +20,7 @@ where
 {
     let dl = cx.data_layout();
 
-    if scalar.value != abi::F32 && scalar.value != abi::F64 {
+    if !scalar.primitive().is_float() {
         return data;
     }
 
@@ -56,7 +56,7 @@ where
         return data;
     }
 
-    if scalar.value == abi::F32 {
+    if scalar.primitive() == abi::F32 {
         data.arg_attribute = ArgAttribute::InReg;
         data.prefix[data.prefix_index] = Some(Reg::f32());
         data.last_offset = offset + Reg::f32().size;
@@ -79,17 +79,15 @@ where
     C: HasDataLayout,
 {
     data = arg_scalar(cx, &scalar1, offset, data);
-    if scalar1.value == abi::F32 {
-        offset += Reg::f32().size;
-    } else if scalar2.value == abi::F64 {
-        offset += Reg::f64().size;
-    } else if let abi::Int(i, _signed) = scalar1.value {
-        offset += i.size();
-    } else if scalar1.value == abi::Pointer {
-        offset = offset + Reg::i64().size;
+    match (scalar1.primitive(), scalar2.primitive()) {
+        (abi::F32, _) => offset += Reg::f32().size,
+        (_, abi::F64) => offset += Reg::f64().size,
+        (abi::Int(i, _signed), _) => offset += i.size(),
+        (abi::Pointer, _) => offset += Reg::i64().size,
+        _ => {}
     }
 
-    if (offset.raw % 4) != 0 && (scalar2.value == abi::F32 || scalar2.value == abi::F64) {
+    if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
         offset.raw += 4 - (offset.raw % 4);
     }
     data = arg_scalar(cx, &scalar2, offset, data);
@@ -115,11 +113,11 @@ where
             data = arg_scalar(cx, &scalar, offset, data);
         }
         abi::Abi::Aggregate { .. } => {
-            for i in 0..layout.fields.count().clone() {
+            for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
                 }
-                data = parse_structure(cx, layout.field(cx, i).clone(), data.clone(), offset);
+                data = parse_structure(cx, layout.field(cx, i), data.clone(), offset);
             }
         }
         _ => {
@@ -163,7 +161,7 @@ where
 
             let mut data = parse_structure(
                 cx,
-                arg.layout.clone(),
+                arg.layout,
                 Sdata {
                     prefix: [None; 8],
                     prefix_index: 0,
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
index fae3c3af61b..a52e01a495a 100644
--- a/compiler/rustc_target/src/abi/call/x86_64.rs
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -49,7 +49,7 @@ where
         let mut c = match layout.abi {
             Abi::Uninhabited => return Ok(()),
 
-            Abi::Scalar(scalar) => match scalar.value {
+            Abi::Scalar(scalar) => match scalar.primitive() {
                 abi::Int(..) | abi::Pointer => Class::Int,
                 abi::F32 | abi::F64 => Class::Sse,
             },
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
index 52fce7c0553..169167f69bf 100644
--- a/compiler/rustc_target/src/abi/mod.rs
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -752,6 +752,10 @@ pub struct WrappingRange {
 }
 
 impl WrappingRange {
+    pub fn full(size: Size) -> Self {
+        Self { start: 0, end: size.unsigned_int_max() }
+    }
+
     /// Returns `true` if `v` is contained in the range.
     #[inline(always)]
     pub fn contains(&self, v: u128) -> bool {
@@ -799,13 +803,23 @@ impl fmt::Debug for WrappingRange {
 /// Information about one scalar component of a Rust type.
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 #[derive(HashStable_Generic)]
-pub struct Scalar {
-    pub value: Primitive,
-
-    // FIXME(eddyb) always use the shortest range, e.g., by finding
-    // the largest space between two consecutive valid values and
-    // taking everything else as the (shortest) valid range.
-    pub valid_range: WrappingRange,
+pub enum Scalar {
+    Initialized {
+        value: Primitive,
+
+        // FIXME(eddyb) always use the shortest range, e.g., by finding
+        // the largest space between two consecutive valid values and
+        // taking everything else as the (shortest) valid range.
+        valid_range: WrappingRange,
+    },
+    Union {
+        /// Even for unions, we need to use the correct registers for the kind of
+        /// values inside the union, so we keep the `Primitive` type around. We
+        /// also use it to compute the size of the scalar.
+        /// However, unions never have niches and even allow undef,
+        /// so there is no `valid_range`.
+        value: Primitive,
+    },
 }
 
 impl Scalar {
@@ -813,14 +827,58 @@ impl Scalar {
     pub fn is_bool(&self) -> bool {
         matches!(
             self,
-            Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } }
+            Scalar::Initialized {
+                value: Int(I8, false),
+                valid_range: WrappingRange { start: 0, end: 1 }
+            }
         )
     }
 
+    /// Get the primitive representation of this type, ignoring the valid range and whether the
+    /// value is allowed to be undefined (due to being a union).
+    pub fn primitive(&self) -> Primitive {
+        match *self {
+            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
+        }
+    }
+
+    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
+        self.primitive().align(cx)
+    }
+
+    pub fn size(self, cx: &impl HasDataLayout) -> Size {
+        self.primitive().size(cx)
+    }
+
+    #[inline]
+    pub fn to_union(&self) -> Self {
+        Self::Union { value: self.primitive() }
+    }
+
+    #[inline]
+    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
+        }
+    }
+
+    #[inline]
+    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
+        match self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
+        }
+    }
+
     /// Returns `true` if all possible numbers are valid, i.e `valid_range` covers the whole layout
     #[inline]
     pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
-        self.valid_range.is_full_for(self.value.size(cx))
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
+            Scalar::Union { .. } => true,
+        }
     }
 }
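
The accessors above pin down the behavior for the `Union` variant: `valid_range(cx)` synthesizes the full range for the scalar's size, `is_always_valid` is unconditionally true, and only `valid_range_mut` refuses unions outright. A hedged standalone sketch of that split, with sizes passed as plain bit widths instead of going through `HasDataLayout`:

    #[derive(Clone, Copy, Debug, PartialEq)]
    struct WrappingRange {
        start: u128,
        end: u128,
    }

    impl WrappingRange {
        // Mirrors the new `WrappingRange::full`: every value of the given
        // width (1..=128 bits) is valid.
        fn full(bits: u32) -> Self {
            Self { start: 0, end: u128::MAX >> (128 - bits) }
        }
    }

    enum Scalar {
        Initialized { valid_range: WrappingRange },
        Union,
    }

    impl Scalar {
        // A union reports the full range for its size, as in the real
        // `Scalar::valid_range`; there is no stored range to return.
        fn valid_range(&self, bits: u32) -> WrappingRange {
            match *self {
                Scalar::Initialized { valid_range } => valid_range,
                Scalar::Union => WrappingRange::full(bits),
            }
        }

        // Mutation is only possible for initialized scalars; the real
        // `valid_range_mut` panics with the same message.
        fn valid_range_mut(&mut self) -> &mut WrappingRange {
            match self {
                Scalar::Initialized { valid_range } => valid_range,
                Scalar::Union => panic!("cannot change the valid range of a union"),
            }
        }
    }

    fn main() {
        let u = Scalar::Union;
        assert_eq!(u.valid_range(8), WrappingRange { start: 0, end: 0xff });

        let mut b = Scalar::Initialized { valid_range: WrappingRange { start: 0, end: 1 } };
        b.valid_range_mut().end = 1; // fine for initialized scalars
        // Scalar::Union.valid_range_mut() would panic instead.
    }
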
 
@@ -988,7 +1046,7 @@ impl Abi {
     #[inline]
     pub fn is_signed(&self) -> bool {
         match self {
-            Abi::Scalar(scal) => match scal.value {
+            Abi::Scalar(scal) => match scal.primitive() {
                 Primitive::Int(_, signed) => signed,
                 _ => false,
             },
@@ -1060,17 +1118,19 @@ pub enum TagEncoding {
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
 pub struct Niche {
     pub offset: Size,
-    pub scalar: Scalar,
+    pub value: Primitive,
+    pub valid_range: WrappingRange,
 }
 
 impl Niche {
     pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
-        let niche = Niche { offset, scalar };
+        let Scalar::Initialized { value, valid_range } = scalar else { return None };
+        let niche = Niche { offset, value, valid_range };
         if niche.available(cx) > 0 { Some(niche) } else { None }
     }
 
     pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
-        let Scalar { value, valid_range: v } = self.scalar;
+        let Self { value, valid_range: v, .. } = *self;
         let size = value.size(cx);
         assert!(size.bits() <= 128);
         let max_value = size.unsigned_int_max();
@@ -1083,7 +1143,7 @@ impl Niche {
     pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
         assert!(count > 0);
 
-        let Scalar { value, valid_range: v } = self.scalar;
+        let Self { value, valid_range: v, .. } = *self;
         let size = value.size(cx);
         assert!(size.bits() <= 128);
         let max_value = size.unsigned_int_max();
@@ -1107,12 +1167,12 @@ impl Niche {
         // If niche zero is already reserved, the selection of bounds are of little interest.
         let move_start = |v: WrappingRange| {
             let start = v.start.wrapping_sub(count) & max_value;
-            Some((start, Scalar { value, valid_range: v.with_start(start) }))
+            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
         };
         let move_end = |v: WrappingRange| {
             let start = v.end.wrapping_add(1) & max_value;
             let end = v.end.wrapping_add(count) & max_value;
-            Some((start, Scalar { value, valid_range: v.with_end(end) }))
+            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
         };
         let distance_end_zero = max_value - v.end;
         if v.start > v.end {
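
The `Niche` changes above follow from the enum split: a niche is carved out of a scalar's valid range, and a union has none, so `from_scalar` now rejects `Scalar::Union` up front with a `let`-`else`. A reduced sketch of that flow (the real method additionally requires `available(cx) > 0`):

    #![allow(dead_code)]

    #[derive(Clone, Copy)]
    struct WrappingRange {
        start: u128,
        end: u128,
    }

    #[derive(Clone, Copy)]
    enum Primitive {
        Int,
    }

    #[derive(Clone, Copy)]
    enum Scalar {
        Initialized { value: Primitive, valid_range: WrappingRange },
        Union { value: Primitive },
    }

    // `Niche` now stores the primitive and range directly, rather than a
    // `Scalar` that might turn out to be a union.
    struct Niche {
        value: Primitive,
        valid_range: WrappingRange,
    }

    impl Niche {
        fn from_scalar(scalar: Scalar) -> Option<Self> {
            // Only initialized scalars can yield a niche; unions bail out here.
            let Scalar::Initialized { value, valid_range } = scalar else { return None };
            Some(Niche { value, valid_range })
        }
    }

    fn main() {
        assert!(Niche::from_scalar(Scalar::Union { value: Primitive::Int }).is_none());
    }
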
@@ -1172,8 +1232,8 @@ pub struct LayoutS<'a> {
 impl<'a> LayoutS<'a> {
     pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
         let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
-        let size = scalar.value.size(cx);
-        let align = scalar.value.align(cx);
+        let size = scalar.size(cx);
+        let align = scalar.align(cx);
         LayoutS {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
@@ -1202,7 +1262,7 @@ impl<'a> fmt::Debug for LayoutS<'a> {
 }
 
 #[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
-#[cfg_attr(not(bootstrap), rustc_pass_by_value)]
+#[rustc_pass_by_value]
 pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
 
 impl<'a> fmt::Debug for Layout<'a> {
@@ -1325,7 +1385,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         C: HasDataLayout,
     {
         match self.abi {
-            Abi::Scalar(scalar) => scalar.value.is_float(),
+            Abi::Scalar(scalar) => scalar.primitive().is_float(),
             Abi::Aggregate { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_fp_element(cx)
@@ -1371,7 +1431,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         let scalar_allows_raw_init = move |s: Scalar| -> bool {
             if zero {
                 // The range must contain 0.
-                s.valid_range.contains(0)
+                s.valid_range(cx).contains(0)
             } else {
                 // The range must include all values.
                 s.is_always_valid(cx)
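
This last hunk is the consumer that motivated `valid_range(cx)` returning an owned range: the raw-initialization check must also answer for unions. A hedged sketch of the closure's logic, again with `HasDataLayout` replaced by an explicit bit width:

    #[derive(Clone, Copy)]
    struct WrappingRange {
        start: u128,
        end: u128,
    }

    impl WrappingRange {
        // Wrapping containment: when start > end the range wraps around the
        // top of the integer, as in `rustc_target::abi::WrappingRange::contains`.
        fn contains(&self, v: u128) -> bool {
            if self.start <= self.end {
                self.start <= v && v <= self.end
            } else {
                self.start <= v || v <= self.end
            }
        }

        // Full when the end wraps around to meet the start (1..=128 bits).
        fn is_full_for(&self, bits: u32) -> bool {
            let max = u128::MAX >> (128 - bits);
            self.start == (self.end.wrapping_add(1) & max)
        }
    }

    enum Scalar {
        Initialized { valid_range: WrappingRange },
        Union,
    }

    // Sketch of the `scalar_allows_raw_init` closure from the hunk above.
    fn scalar_allows_raw_init(s: &Scalar, bits: u32, zero: bool) -> bool {
        match (s, zero) {
            // Zero-init only needs 0 to be a valid value.
            (Scalar::Initialized { valid_range }, true) => valid_range.contains(0),
            // Arbitrary-init needs every bit pattern to be valid.
            (Scalar::Initialized { valid_range }, false) => valid_range.is_full_for(bits),
            // Unions accept anything, including undef.
            (Scalar::Union, _) => true,
        }
    }

    fn main() {
        let bool_like = Scalar::Initialized { valid_range: WrappingRange { start: 0, end: 1 } };
        assert!(scalar_allows_raw_init(&bool_like, 8, true)); // 0 is a valid bool
        assert!(!scalar_allows_raw_init(&bool_like, 8, false)); // 2..=255 are not
        assert!(scalar_allows_raw_init(&Scalar::Union, 8, false));
    }
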