diff options
| author | Scott McMurray <scottmcm@users.noreply.github.com> | 2025-03-06 15:09:14 -0800 |
|---|---|---|
| committer | Scott McMurray <scottmcm@users.noreply.github.com> | 2025-03-19 09:13:41 -0700 |
| commit | b54ca0e433e9cf5ef139aeebb36133be8ddee111 (patch) | |
| tree | a0dbc724a52bffc994c56970bf14e930735c1e94 | |
| parent | a7fc463dd8fbeca800d4b3efc501069502cffe64 (diff) | |
| download | rust-b54ca0e433e9cf5ef139aeebb36133be8ddee111.tar.gz rust-b54ca0e433e9cf5ef139aeebb36133be8ddee111.zip | |
Add a MIR pre-codegen test for tuple comparisons
We have codegen ones, but it looks like we could make those less flaky by just doing something better in the first place...
3 files changed, 398 insertions, 0 deletions
diff --git a/tests/mir-opt/pre-codegen/tuple_ord.demo_ge_partial.PreCodegen.after.mir b/tests/mir-opt/pre-codegen/tuple_ord.demo_ge_partial.PreCodegen.after.mir new file mode 100644 index 00000000000..696a4511777 --- /dev/null +++ b/tests/mir-opt/pre-codegen/tuple_ord.demo_ge_partial.PreCodegen.after.mir @@ -0,0 +1,237 @@ +// MIR for `demo_ge_partial` after PreCodegen + +fn demo_ge_partial(_1: &(f32, f32), _2: &(f32, f32)) -> bool { + debug a => _1; + debug b => _2; + let mut _0: bool; + scope 1 (inlined std::cmp::impls::<impl PartialOrd for &(f32, f32)>::le) { + scope 2 (inlined core::tuple::<impl PartialOrd for (f32, f32)>::le) { + let mut _12: bool; + let _15: std::option::Option<std::cmp::Ordering>; + let _19: &f32; + let _20: &f32; + scope 3 { + let mut _9: &std::option::Option<std::cmp::Ordering>; + let mut _13: &std::option::Option<std::cmp::Ordering>; + scope 4 (inlined <Option<std::cmp::Ordering> as PartialEq>::ne) { + let mut _11: bool; + scope 5 (inlined <Option<std::cmp::Ordering> as PartialEq>::eq) { + let mut _10: isize; + let mut _16: isize; + scope 6 { + scope 7 (inlined <std::cmp::Ordering as PartialEq>::eq) { + let _17: i8; + scope 8 { + let _18: i8; + scope 9 { + } + } + } + } + } + } + scope 10 (inlined <Option<std::cmp::Ordering> as PartialEq>::eq) { + let mut _14: isize; + let mut _21: isize; + scope 11 { + scope 12 (inlined <std::cmp::Ordering as PartialEq>::eq) { + let _22: i8; + scope 13 { + let _23: i8; + scope 14 { + } + } + } + } + } + } + scope 15 (inlined std::cmp::impls::<impl PartialOrd for f32>::partial_cmp) { + let mut _3: f32; + let mut _4: f32; + let mut _5: bool; + let mut _6: f32; + let mut _7: f32; + let mut _8: bool; + } + } + } + + bb0: { + StorageLive(_19); + StorageLive(_20); + StorageLive(_13); + StorageLive(_9); + StorageLive(_15); + StorageLive(_5); + StorageLive(_8); + StorageLive(_3); + _3 = copy ((*_1).0: f32); + StorageLive(_4); + _4 = copy ((*_2).0: f32); + _5 = Le(move _3, move _4); + StorageDead(_4); + 
StorageDead(_3); + StorageLive(_6); + _6 = copy ((*_1).0: f32); + StorageLive(_7); + _7 = copy ((*_2).0: f32); + _8 = Ge(move _6, move _7); + StorageDead(_7); + StorageDead(_6); + switchInt(copy _5) -> [0: bb1, otherwise: bb5]; + } + + bb1: { + switchInt(copy _8) -> [0: bb2, otherwise: bb4]; + } + + bb2: { + StorageDead(_8); + StorageDead(_5); + StorageLive(_12); + _9 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[1]; + StorageLive(_11); + StorageLive(_16); + StorageLive(_10); + _10 = discriminant((*_9)); + _11 = Eq(copy _10, const 0_isize); + StorageDead(_10); + StorageDead(_16); + _12 = Not(move _11); + StorageDead(_11); + switchInt(move _12) -> [0: bb11, otherwise: bb3]; + } + + bb3: { + _13 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[0]; + StorageLive(_21); + StorageLive(_14); + _14 = discriminant((*_13)); + _0 = Eq(copy _14, const 0_isize); + goto -> bb16; + } + + bb4: { + _15 = const Option::<std::cmp::Ordering>::Some(Greater); + StorageDead(_8); + StorageDead(_5); + StorageLive(_12); + _9 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[1]; + StorageLive(_11); + StorageLive(_16); + StorageLive(_10); + goto -> bb8; + } + + bb5: { + switchInt(copy _8) -> [0: bb6, otherwise: bb7]; + } + + bb6: { + _15 = const Option::<std::cmp::Ordering>::Some(Less); + StorageDead(_8); + StorageDead(_5); + StorageLive(_12); + _9 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[1]; + StorageLive(_11); + StorageLive(_16); + StorageLive(_10); + goto -> bb8; + } + + bb7: { + _15 = const Option::<std::cmp::Ordering>::Some(Equal); + StorageDead(_8); + StorageDead(_5); + StorageLive(_12); + _9 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[1]; + StorageLive(_11); + StorageLive(_16); + StorageLive(_10); + goto -> bb8; + } + + bb8: { + _16 = discriminant((*_9)); + switchInt(move _16) -> [0: bb9, 1: bb10, otherwise: bb18]; + } + + bb9: 
{ + StorageDead(_10); + StorageDead(_16); + StorageDead(_11); + _13 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[0]; + StorageLive(_21); + StorageLive(_14); + goto -> bb13; + } + + bb10: { + StorageLive(_17); + StorageLive(_18); + _17 = discriminant(((_15 as Some).0: std::cmp::Ordering)); + _18 = discriminant((((*_9) as Some).0: std::cmp::Ordering)); + _11 = Eq(copy _17, copy _18); + StorageDead(_18); + StorageDead(_17); + StorageDead(_10); + StorageDead(_16); + _12 = Not(move _11); + StorageDead(_11); + switchInt(move _12) -> [0: bb11, otherwise: bb12]; + } + + bb11: { + _19 = &((*_1).1: f32); + _20 = &((*_2).1: f32); + _0 = <f32 as PartialOrd>::le(move _19, move _20) -> [return: bb17, unwind continue]; + } + + bb12: { + _13 = const core::tuple::<impl std::cmp::PartialOrd for (f32, f32)>::le::promoted[0]; + StorageLive(_21); + StorageLive(_14); + goto -> bb13; + } + + bb13: { + _21 = discriminant((*_13)); + switchInt(move _21) -> [0: bb14, 1: bb15, otherwise: bb18]; + } + + bb14: { + _0 = const false; + goto -> bb16; + } + + bb15: { + StorageLive(_22); + StorageLive(_23); + _22 = discriminant(((_15 as Some).0: std::cmp::Ordering)); + _23 = discriminant((((*_13) as Some).0: std::cmp::Ordering)); + _0 = Eq(copy _22, copy _23); + StorageDead(_23); + StorageDead(_22); + goto -> bb16; + } + + bb16: { + StorageDead(_14); + StorageDead(_21); + goto -> bb17; + } + + bb17: { + StorageDead(_12); + StorageDead(_15); + StorageDead(_9); + StorageDead(_13); + StorageDead(_20); + StorageDead(_19); + return; + } + + bb18: { + unreachable; + } +} diff --git a/tests/mir-opt/pre-codegen/tuple_ord.demo_le_total.PreCodegen.after.mir b/tests/mir-opt/pre-codegen/tuple_ord.demo_le_total.PreCodegen.after.mir new file mode 100644 index 00000000000..15c3ae76ae9 --- /dev/null +++ b/tests/mir-opt/pre-codegen/tuple_ord.demo_le_total.PreCodegen.after.mir @@ -0,0 +1,145 @@ +// MIR for `demo_le_total` after PreCodegen + +fn demo_le_total(_1: &(u16, i16), _2: 
&(u16, i16)) -> bool { + debug a => _1; + debug b => _2; + let mut _0: bool; + scope 1 (inlined std::cmp::impls::<impl PartialOrd for &(u16, i16)>::le) { + scope 2 (inlined core::tuple::<impl PartialOrd for (u16, i16)>::le) { + let mut _12: bool; + let _13: &i16; + let _14: &i16; + scope 3 { + let mut _6: &std::option::Option<std::cmp::Ordering>; + let mut _8: &std::option::Option<std::cmp::Ordering>; + scope 4 (inlined <Option<std::cmp::Ordering> as PartialEq>::ne) { + let mut _11: bool; + scope 5 (inlined <Option<std::cmp::Ordering> as PartialEq>::eq) { + let mut _7: isize; + scope 6 { + scope 7 (inlined <std::cmp::Ordering as PartialEq>::eq) { + let _9: i8; + scope 8 { + let _10: i8; + scope 9 { + } + } + } + } + } + } + scope 10 (inlined <Option<std::cmp::Ordering> as PartialEq>::eq) { + let mut _15: isize; + scope 11 { + scope 12 (inlined <std::cmp::Ordering as PartialEq>::eq) { + let _16: i8; + scope 13 { + let _17: i8; + scope 14 { + } + } + } + } + } + } + scope 15 (inlined std::cmp::impls::<impl PartialOrd for u16>::partial_cmp) { + let mut _3: u16; + let mut _4: u16; + let mut _5: std::cmp::Ordering; + } + } + } + + bb0: { + StorageLive(_13); + StorageLive(_14); + StorageLive(_8); + StorageLive(_6); + StorageLive(_3); + _3 = copy ((*_1).0: u16); + StorageLive(_4); + _4 = copy ((*_2).0: u16); + _5 = Cmp(move _3, move _4); + StorageDead(_4); + StorageDead(_3); + StorageLive(_12); + _6 = const core::tuple::<impl std::cmp::PartialOrd for (u16, i16)>::le::promoted[1]; + StorageLive(_11); + StorageLive(_7); + _7 = discriminant((*_6)); + switchInt(move _7) -> [0: bb1, 1: bb2, otherwise: bb10]; + } + + bb1: { + StorageDead(_7); + StorageDead(_11); + _8 = const core::tuple::<impl std::cmp::PartialOrd for (u16, i16)>::le::promoted[0]; + StorageLive(_15); + goto -> bb5; + } + + bb2: { + StorageLive(_9); + StorageLive(_10); + _9 = discriminant(_5); + _10 = discriminant((((*_6) as Some).0: std::cmp::Ordering)); + _11 = Eq(copy _9, copy _10); + StorageDead(_10); + 
StorageDead(_9); + StorageDead(_7); + _12 = Not(move _11); + StorageDead(_11); + switchInt(move _12) -> [0: bb3, otherwise: bb4]; + } + + bb3: { + _13 = &((*_1).1: i16); + _14 = &((*_2).1: i16); + _0 = <i16 as PartialOrd>::le(move _13, move _14) -> [return: bb9, unwind continue]; + } + + bb4: { + _8 = const core::tuple::<impl std::cmp::PartialOrd for (u16, i16)>::le::promoted[0]; + StorageLive(_15); + goto -> bb5; + } + + bb5: { + _15 = discriminant((*_8)); + switchInt(move _15) -> [0: bb6, 1: bb7, otherwise: bb10]; + } + + bb6: { + _0 = const false; + goto -> bb8; + } + + bb7: { + StorageLive(_16); + StorageLive(_17); + _16 = discriminant(_5); + _17 = discriminant((((*_8) as Some).0: std::cmp::Ordering)); + _0 = Eq(copy _16, copy _17); + StorageDead(_17); + StorageDead(_16); + goto -> bb8; + } + + bb8: { + StorageDead(_15); + goto -> bb9; + } + + bb9: { + StorageDead(_12); + StorageDead(_6); + StorageDead(_8); + StorageDead(_14); + StorageDead(_13); + return; + } + + bb10: { + unreachable; + } +} diff --git a/tests/mir-opt/pre-codegen/tuple_ord.rs b/tests/mir-opt/pre-codegen/tuple_ord.rs new file mode 100644 index 00000000000..435ac7a5be9 --- /dev/null +++ b/tests/mir-opt/pre-codegen/tuple_ord.rs @@ -0,0 +1,16 @@ +//@ compile-flags: -O -Zmir-opt-level=2 -Cdebuginfo=0 -Z inline-mir-hint-threshold=9999 +//@ needs-unwind + +#![crate_type = "lib"] + +// EMIT_MIR tuple_ord.demo_le_total.PreCodegen.after.mir +pub fn demo_le_total(a: &(u16, i16), b: &(u16, i16)) -> bool { + // CHECK-LABEL: demo_le_total + a <= b +} + +// EMIT_MIR tuple_ord.demo_ge_partial.PreCodegen.after.mir +pub fn demo_ge_partial(a: &(f32, f32), b: &(f32, f32)) -> bool { + // CHECK-LABEL: demo_ge_partial + a <= b +} |
