about summary refs log tree commit diff
path: root/library/alloc/src
diff options
context:
space:
mode:
authorSoveu <marx.tomasz@gmail.com>2021-03-15 20:36:29 +0100
committerSoveu <marx.tomasz@gmail.com>2021-03-15 20:36:29 +0100
commitafdbc9ece176ccac7b1d156efcb397d089d88b5a (patch)
treed8480cfdae421942750ad22160eda06c9a2216a3 /library/alloc/src
parent2abab1f688fe0d4a740b216b298f32fbb48b653b (diff)
downloadrust-afdbc9ece176ccac7b1d156efcb397d089d88b5a.tar.gz
rust-afdbc9ece176ccac7b1d156efcb397d089d88b5a.zip
Vec::dedup optimization - finishing polishes
Diffstat (limited to 'library/alloc/src')
-rw-r--r--library/alloc/src/vec/mod.rs18
1 file changed, 7 insertions, 11 deletions
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index e65adb6c77e..19198d4eeef 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -1538,13 +1538,9 @@ impl<T, A: Allocator> Vec<T, A> {
 
         impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
             fn drop(&mut self) {
-                /* This code gets executed either at the end of `dedup_by` or
-                 * when `same_bucket` panics */
+                /* This code gets executed when `same_bucket` panics */
 
-                /* SAFETY (if finishing successfully): self.read == len, so
-                 * no data is copied and length is set correctly */
-
-                /* SAFETY (if panicing): invariant guarantees that `read - write`
+                /* SAFETY: invariant guarantees that `read - write`
                  * and `len - read` never overflow and that the copy is always
                  * in-bounds. */
                 unsafe {
@@ -1553,7 +1549,7 @@ impl<T, A: Allocator> Vec<T, A> {
 
                     /* How many items were left when `same_bucket` paniced.
                     * Basically vec[read..].len() */
-                    let items_left = len - self.read;
+                    let items_left = len.wrapping_sub(self.read);
 
                     /* Pointer to first item in vec[write..write+items_left] slice */
                     let dropped_ptr = ptr.add(self.write);
@@ -1566,7 +1562,7 @@ impl<T, A: Allocator> Vec<T, A> {
 
                     /* How many items have been already dropped
                      * Basically vec[read..write].len() */
-                    let dropped = self.read - self.write;
+                    let dropped = self.read.wrapping_sub(self.write);
 
                     self.vec.set_len(len - dropped);
                 }
@@ -1574,7 +1570,6 @@ impl<T, A: Allocator> Vec<T, A> {
         }
 
         let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
-
         let ptr = gap.vec.as_mut_ptr();
 
         /* Drop items while going through Vec, it should be more efficient than
@@ -1593,8 +1588,9 @@ impl<T, A: Allocator> Vec<T, A> {
                 } else {
                     let write_ptr = ptr.add(gap.write);
 
-                    /* Looks like doing just `copy` can be faster than
-                     * conditional `copy_nonoverlapping` */
+                    /* Because `read_ptr` can be equal to `write_ptr`, we either
+                     * have to use `copy` or conditional `copy_nonoverlapping`.
+                     * Looks like the first option is faster. */
                     ptr::copy(read_ptr, write_ptr, 1);
 
                     /* We have filled that place, so go further */