author:    Josh Stone <jistone@redhat.com>  2025-08-07 14:29:00 -0700
committer: Josh Stone <jistone@redhat.com>  2025-09-16 11:49:20 -0700
commit:    580b4891aa23c0625539bf5ee55270f27af09072
tree:      71fe7429078d373a1e3acf7d947ba751df5297d6
parent:    eec6bd9d69832f57341c6de6a93fa7b9f47e2111
Update the minimum external LLVM to 20
Diffstat (limited to 'compiler/rustc_codegen_ssa')
 compiler/rustc_codegen_ssa/src/mir/rvalue.rs      | 30
 compiler/rustc_codegen_ssa/src/traits/builder.rs  | 38
 2 files changed, 32 insertions(+), 36 deletions(-)
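
Note: mir::BinOp::Cmp is the MIR operation that integer `Ord::cmp` lowers to, and the codegen below works in `const_i8` values because `std::cmp::Ordering` is repr(i8) with Less = -1, Equal = 0, Greater = 1. A minimal standalone check of that contract:

use std::cmp::Ordering;

fn main() {
    // Ordering's repr(i8) discriminants, matching the doc comment on
    // three_way_compare in the second diff below.
    assert_eq!(Ordering::Less as i8, -1);
    assert_eq!(Ordering::Equal as i8, 0);
    assert_eq!(Ordering::Greater as i8, 1);
    // Integer Ord::cmp is what reaches codegen as mir::BinOp::Cmp.
    assert_eq!(5i32.cmp(&9), Ordering::Less);
}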
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index f8755874014..4c9bb7cf8a8 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -901,36 +901,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
             mir::BinOp::Cmp => {
-                use std::cmp::Ordering;
                 assert!(!is_float);
-                if let Some(value) = bx.three_way_compare(lhs_ty, lhs, rhs) {
-                    return value;
-                }
-                let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
-                if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
-                    // FIXME: This actually generates tighter assembly, and is a classic trick
-                    // <https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>
-                    // However, as of 2023-11 it optimizes worse in things like derived
-                    // `PartialOrd`, so only use it in debug for now. Once LLVM can handle it
-                    // better (see <https://github.com/llvm/llvm-project/issues/73417>), it'll
-                    // be worth trying it in optimized builds as well.
-                    let is_gt = bx.icmp(pred(mir::BinOp::Gt), lhs, rhs);
-                    let gtext = bx.zext(is_gt, bx.type_i8());
-                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
-                    let ltext = bx.zext(is_lt, bx.type_i8());
-                    bx.unchecked_ssub(gtext, ltext)
-                } else {
-                    // These operations are those expected by `tests/codegen-llvm/integer-cmp.rs`,
-                    // from <https://github.com/rust-lang/rust/pull/63767>.
-                    let is_lt = bx.icmp(pred(mir::BinOp::Lt), lhs, rhs);
-                    let is_ne = bx.icmp(pred(mir::BinOp::Ne), lhs, rhs);
-                    let ge = bx.select(
-                        is_ne,
-                        bx.cx().const_i8(Ordering::Greater as i8),
-                        bx.cx().const_i8(Ordering::Equal as i8),
-                    );
-                    bx.select(is_lt, bx.cx().const_i8(Ordering::Less as i8), ge)
-                }
+                bx.three_way_compare(lhs_ty, lhs, rhs)
             }
             mir::BinOp::AddWithOverflow
             | mir::BinOp::SubWithOverflow
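
Note: the branch deleted above is not lost; it moves verbatim into the trait default in the next diff. A minimal plain-Rust sketch (standalone functions, not compiler code) of the two lowerings it chooses between, both returning -1/0/1 as an i8:

/// Debug-build strategy: the "copy integer sign" bithack,
/// (lhs > rhs) as i8 - (lhs < rhs) as i8, i.e. icmp + zext, twice, then sub.
fn cmp_sub(lhs: i32, rhs: i32) -> i8 {
    (lhs > rhs) as i8 - (lhs < rhs) as i8
}

/// Optimized-build strategy: two icmps feeding two selects, the shape
/// expected by tests/codegen-llvm/integer-cmp.rs.
fn cmp_select(lhs: i32, rhs: i32) -> i8 {
    use std::cmp::Ordering;
    let ge = if lhs != rhs { Ordering::Greater as i8 } else { Ordering::Equal as i8 };
    if lhs < rhs { Ordering::Less as i8 } else { ge }
}

fn main() {
    // Both strategies agree with each other and with Ord::cmp.
    for &(a, b) in &[(1, 2), (2, 2), (3, 2)] {
        assert_eq!(cmp_sub(a, b), cmp_select(a, b));
        assert_eq!(cmp_sub(a, b), a.cmp(&b) as i8);
    }
}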
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index f417d1a7bf7..422fa715de1 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -3,6 +3,7 @@ use std::ops::Deref;
 
 use rustc_abi::{Align, Scalar, Size, WrappingRange};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::mir;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{AtomicOrdering, Instance, Ty};
 use rustc_session::config::OptLevel;
@@ -405,15 +406,38 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
 
     /// Returns `-1` if `lhs < rhs`, `0` if `lhs == rhs`, and `1` if `lhs > rhs`.
-    // FIXME: Move the default implementation from `codegen_scalar_binop` into this method and
-    // remove the `Option` return once LLVM 20 is the minimum version.
     fn three_way_compare(
         &mut self,
-        _ty: Ty<'tcx>,
-        _lhs: Self::Value,
-        _rhs: Self::Value,
-    ) -> Option<Self::Value> {
-        None
+        ty: Ty<'tcx>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> Self::Value {
+        use std::cmp::Ordering;
+        let pred = |op| crate::base::bin_op_to_icmp_predicate(op, ty.is_signed());
+        if self.cx().sess().opts.optimize == OptLevel::No {
+            // FIXME: This actually generates tighter assembly, and is a classic trick
+            // <https://graphics.stanford.edu/~seander/bithacks.html#CopyIntegerSign>
+            // However, as of 2023-11 it optimizes worse in things like derived
+            // `PartialOrd`, so only use it in debug for now. Once LLVM can handle it
+            // better (see <https://github.com/llvm/llvm-project/issues/73417>), it'll
+            // be worth trying it in optimized builds as well.
+            let is_gt = self.icmp(pred(mir::BinOp::Gt), lhs, rhs);
+            let gtext = self.zext(is_gt, self.type_i8());
+            let is_lt = self.icmp(pred(mir::BinOp::Lt), lhs, rhs);
+            let ltext = self.zext(is_lt, self.type_i8());
+            self.unchecked_ssub(gtext, ltext)
+        } else {
+            // These operations are those expected by `tests/codegen-llvm/integer-cmp.rs`,
+            // from <https://github.com/rust-lang/rust/pull/63767>.
+            let is_lt = self.icmp(pred(mir::BinOp::Lt), lhs, rhs);
+            let is_ne = self.icmp(pred(mir::BinOp::Ne), lhs, rhs);
+            let ge = self.select(
+                is_ne,
+                self.cx().const_i8(Ordering::Greater as i8),
+                self.cx().const_i8(Ordering::Equal as i8),
+            );
+            self.select(is_lt, self.cx().const_i8(Ordering::Less as i8), ge)
+        }
     }
 
     fn memcpy(
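
Note: with LLVM 20 as the minimum, the Option-returning fallback is gone: the trait method now carries a complete default body, and a backend with a native three-way compare simply overrides it. A self-contained sketch of that pattern (hypothetical `Builder` trait and `Value` type, not rustc's real interfaces):

#[derive(Clone, Copy, Debug, PartialEq)]
struct Value(i64); // stand-in for Self::Value

trait Builder {
    /// Returns -1, 0, or 1, like the real three_way_compare.
    fn three_way_compare(&mut self, lhs: Value, rhs: Value) -> Value {
        // Default body: the portable select-based lowering shown above.
        let ge = if lhs.0 != rhs.0 { 1 } else { 0 };
        Value(if lhs.0 < rhs.0 { -1 } else { ge })
    }
}

struct GenericBackend;
impl Builder for GenericBackend {} // takes the default lowering

struct NativeCmpBackend;
impl Builder for NativeCmpBackend {
    // A backend with a native three-way-compare primitive overrides the
    // default; i64::cmp stands in for that primitive here.
    fn three_way_compare(&mut self, lhs: Value, rhs: Value) -> Value {
        Value(lhs.0.cmp(&rhs.0) as i64)
    }
}

fn main() {
    let mut g = GenericBackend;
    let mut n = NativeCmpBackend;
    assert_eq!(g.three_way_compare(Value(3), Value(7)), Value(-1));
    assert_eq!(n.three_way_compare(Value(3), Value(7)), Value(-1));
}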