author     bors <bors@rust-lang.org>  2017-05-10 08:54:50 +0000
committer  bors <bors@rust-lang.org>  2017-05-10 08:54:50 +0000
commit     2b97174ada7fb1854269558ed2cf3b089e58beee (patch)
tree       880576d0244be04f587f845d2cb96936d213f3ff /src
parent     58b33ad70cdd11f9ce7b5874c6effab9627e51aa (diff)
parent     da91361d2a8ea86a42cbe2a23a7ff816cc5500af (diff)
Auto merge of #41764 - scottmcm:faster-reverse, r=brson
Make [u8]::reverse() 5x faster

Since LLVM doesn't vectorize the loop for us, do unaligned reads of a larger type and use LLVM's bswap intrinsic to reverse the actual bytes. cfg!-restricted to x86 and x86_64, as I assume the trick wouldn't help on things like ARMv5.
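
In case it helps to see the shape of the trick outside the diff, here's a minimal stand-alone sketch (not the committed code; the function name and the `u8`-only framing are mine):

```rust
use std::{mem, ptr};

// Swap usize-sized chunks from the two ends of the slice, using
// `swap_bytes` (LLVM's bswap) to reverse the bytes within each chunk.
fn reverse_bytes_chunked(v: &mut [u8]) {
    let chunk = mem::size_of::<usize>();
    let len = v.len();
    let mut i = 0;
    while i + chunk - 1 < len / 2 {
        unsafe {
            let pa = v.as_mut_ptr().add(i);
            let pb = v.as_mut_ptr().add(len - i - chunk);
            // Unaligned loads/stores, cheap on x86/x86_64.
            let a = ptr::read_unaligned(pa as *const usize);
            let b = ptr::read_unaligned(pb as *const usize);
            // Cross-store the byte-swapped chunks.
            ptr::write_unaligned(pa as *mut usize, b.swap_bytes());
            ptr::write_unaligned(pb as *mut usize, a.swap_bytes());
        }
        i += chunk;
    }
    // Plain element swaps finish the unchunked middle.
    v[i..len - i].reverse();
}
```

The committed version (in the `src/libcore/slice/mod.rs` hunk below) does the same thing generically inside `reverse`, gated on `size_of::<T>() == 1`.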

Also makes [u16]::reverse() a more modest 1.5x faster by loading/storing u32 and swapping the u16s with ROT16.
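
That works because a 16-bit rotation of a `u32` just swaps its two halves; a tiny illustration (values mine):

```rust
fn main() {
    // Two adjacent u16s read as one little-endian u32: the first element
    // lands in the low half, the second in the high half.
    let w: u32 = (0x2222u32 << 16) | 0x1111; // memory order: [0x1111, 0x2222]
    // ROT16 swaps the halves, so writing it back stores [0x2222, 0x1111].
    assert_eq!(w.rotate_left(16), (0x1111u32 << 16) | 0x2222);
}
```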

Thank you ptr::*_unaligned for making this easy :)

Benchmark results (from my i5-2500K):
```text
# Before
test slice::reverse_u8      ... bench:  273,836 ns/iter (+/- 15,592) =  3829 MB/s
test slice::reverse_u16     ... bench:  139,793 ns/iter (+/- 17,748) =  7500 MB/s
test slice::reverse_u32     ... bench:   74,997 ns/iter  (+/- 5,130) = 13981 MB/s
test slice::reverse_u64     ... bench:   47,452 ns/iter  (+/- 2,213) = 22097 MB/s

# After
test slice::reverse_u8      ... bench:   52,170 ns/iter (+/- 3,962) = 20099 MB/s
test slice::reverse_u16     ... bench:   93,330 ns/iter (+/- 4,412) = 11235 MB/s
test slice::reverse_u32     ... bench:   74,731 ns/iter (+/- 1,425) = 14031 MB/s
test slice::reverse_u64     ... bench:   47,556 ns/iter (+/- 3,025) = 22049 MB/s
```
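
(Working the ratios out: 273,836 / 52,170 ≈ 5.2x for `u8` and 139,793 / 93,330 ≈ 1.5x for `u16`, matching the claims above; `u32` and `u64` are within noise, as their code path is untouched.)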

If you're curious about the assembly, instead of doing this
```asm
movzx	eax, byte ptr [rdi]
movzx	ecx, byte ptr [rsi]
mov	byte ptr [rdi], cl
mov	byte ptr [rsi], al
```
it does this
```asm
mov	rax, qword ptr [rdx]
mov	rbx, qword ptr [r11 + rcx - 8]
bswap	rbx
mov	qword ptr [rdx], rbx
bswap	rax
mov	qword ptr [r11 + rcx - 8], rax
```
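
If you'd like to reproduce that (a sketch, not part of the PR; the file name is arbitrary), compiling a trivial wrapper with optimizations and emitting assembly should show the `bswap`s on x86_64:

```rust
// rev.rs -- build with `rustc -O --crate-type=lib --emit=asm rev.rs`
// and search the generated rev.s for `bswap`.
#[no_mangle]
pub fn reverse_bytes(v: &mut [u8]) {
    v.reverse();
}
```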
Diffstat (limited to 'src')
-rw-r--r--  src/libcollections/benches/lib.rs    |  2
-rw-r--r--  src/libcollections/benches/slice.rs  | 25
-rw-r--r--  src/libcollections/tests/slice.rs    | 10
-rw-r--r--  src/libcore/slice/mod.rs             | 49
4 files changed, 86 insertions, 0 deletions
diff --git a/src/libcollections/benches/lib.rs b/src/libcollections/benches/lib.rs
index 42064e9ca57..9f356e4b579 100644
--- a/src/libcollections/benches/lib.rs
+++ b/src/libcollections/benches/lib.rs
@@ -10,7 +10,9 @@
 
 #![deny(warnings)]
 
+#![feature(i128_type)]
 #![feature(rand)]
+#![feature(repr_simd)]
 #![feature(sort_unstable)]
 #![feature(test)]
 
diff --git a/src/libcollections/benches/slice.rs b/src/libcollections/benches/slice.rs
index 7195a9f9bf2..0079f2d0103 100644
--- a/src/libcollections/benches/slice.rs
+++ b/src/libcollections/benches/slice.rs
@@ -290,3 +290,28 @@ sort!(sort_unstable, sort_unstable_large_random, gen_random, 10000);
 sort!(sort_unstable, sort_unstable_large_big_random, gen_big_random, 10000);
 sort!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000);
 sort_expensive!(sort_unstable_by, sort_unstable_large_random_expensive, gen_random, 10000);
+
+macro_rules! reverse {
+    ($name:ident, $ty:ty, $f:expr) => {
+        #[bench]
+        fn $name(b: &mut Bencher) {
+            // odd length and offset by 1 to be as unaligned as possible
+            let n = 0xFFFFF;
+            let mut v: Vec<_> =
+                (0..1+(n / mem::size_of::<$ty>() as u64))
+                .map($f)
+                .collect();
+            b.iter(|| black_box(&mut v[1..]).reverse());
+            b.bytes = n;
+        }
+    }
+}
+
+reverse!(reverse_u8, u8, |x| x as u8);
+reverse!(reverse_u16, u16, |x| x as u16);
+reverse!(reverse_u8x3, [u8;3], |x| [x as u8, (x>>8) as u8, (x>>16) as u8]);
+reverse!(reverse_u32, u32, |x| x as u32);
+reverse!(reverse_u64, u64, |x| x as u64);
+reverse!(reverse_u128, u128, |x| x as u128);
+#[repr(simd)] struct F64x4(f64, f64, f64, f64);
+reverse!(reverse_simd_f64x4, F64x4, |x| { let x = x as f64; F64x4(x,x,x,x) });
diff --git a/src/libcollections/tests/slice.rs b/src/libcollections/tests/slice.rs
index c3e5304fb2b..1708f98b7ee 100644
--- a/src/libcollections/tests/slice.rs
+++ b/src/libcollections/tests/slice.rs
@@ -379,6 +379,16 @@ fn test_reverse() {
     let mut v3 = Vec::<i32>::new();
     v3.reverse();
     assert!(v3.is_empty());
+
+    // check the 1-byte-types path
+    let mut v = (-50..51i8).collect::<Vec<_>>();
+    v.reverse();
+    assert_eq!(v, (-50..51i8).rev().collect::<Vec<_>>());
+
+    // check the 2-byte-types path
+    let mut v = (-50..51i16).collect::<Vec<_>>();
+    v.reverse();
+    assert_eq!(v, (-50..51i16).rev().collect::<Vec<_>>());
 }
 
 #[test]
diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs
index 9e3bd911546..e15eb8f2444 100644
--- a/src/libcore/slice/mod.rs
+++ b/src/libcore/slice/mod.rs
@@ -539,6 +539,55 @@ impl<T> SliceExt for [T] {
     fn reverse(&mut self) {
         let mut i: usize = 0;
         let ln = self.len();
+
+        // For very small types, all the individual reads in the normal
+        // path perform poorly.  We can do better, given efficient unaligned
+        // load/store, by loading a larger chunk and reversing a register.
+
+        // Ideally LLVM would do this for us, as it knows better than we do
+        // whether unaligned reads are efficient (since that changes between
+        // different ARM versions, for example) and what the best chunk size
+        // would be.  Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls
+        // the loop, so we need to do this ourselves.  (Hypothesis: reverse
+        // is troublesome because the sides can be aligned differently --
+        // will be, when the length is odd -- so there's no way of emitting
+        // pre- and postludes to use fully-aligned SIMD in the middle.)
+
+        let fast_unaligned =
+            cfg!(any(target_arch = "x86", target_arch = "x86_64"));
+
+        if fast_unaligned && mem::size_of::<T>() == 1 {
+            // Use the llvm.bswap intrinsic to reverse u8s in a usize
+            let chunk = mem::size_of::<usize>();
+            while i + chunk - 1 < ln / 2 {
+                unsafe {
+                    let pa: *mut T = self.get_unchecked_mut(i);
+                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
+                    let va = ptr::read_unaligned(pa as *mut usize);
+                    let vb = ptr::read_unaligned(pb as *mut usize);
+                    ptr::write_unaligned(pa as *mut usize, vb.swap_bytes());
+                    ptr::write_unaligned(pb as *mut usize, va.swap_bytes());
+                }
+                i += chunk;
+            }
+        }
+
+        if fast_unaligned && mem::size_of::<T>() == 2 {
+            // Use rotate-by-16 to reverse u16s in a u32
+            let chunk = mem::size_of::<u32>() / 2;
+            while i + chunk - 1 < ln / 2 {
+                unsafe {
+                    let pa: *mut T = self.get_unchecked_mut(i);
+                    let pb: *mut T = self.get_unchecked_mut(ln - i - chunk);
+                    let va = ptr::read_unaligned(pa as *mut u32);
+                    let vb = ptr::read_unaligned(pb as *mut u32);
+                    ptr::write_unaligned(pa as *mut u32, vb.rotate_left(16));
+                    ptr::write_unaligned(pb as *mut u32, va.rotate_left(16));
+                }
+                i += chunk;
+            }
+        }
+
         while i < ln / 2 {
             // Unsafe swap to avoid the bounds check in safe swap.
             unsafe {
                 let pa: *mut T = self.get_unchecked_mut(i);
                 let pb: *mut T = self.get_unchecked_mut(ln - i - 1);
                 ptr::swap(pa, pb);
             }
             i += 1;
         }
     }