about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMarkus Everling <markuseverling@gmail.com>2024-04-11 15:31:10 +0000
committerMarkus Everling <markuseverling@gmail.com>2024-05-07 19:30:34 +0000
commitffe8510e3d989f35163ac5406af82e1c9dd8f769 (patch)
tree0e380e2ea672c0ab4b134e65dc5c51aba3620fff
parent0f40f14b61430792cc0ea316f424685041e8443e (diff)
downloadrust-ffe8510e3d989f35163ac5406af82e1c9dd8f769.tar.gz
rust-ffe8510e3d989f35163ac5406af82e1c9dd8f769.zip
Fix `VecDeque::shrink_to` UB when `handle_alloc_error` unwinds.
Luckily it's comparatively simple to just restore the `VecDeque` to a valid state when the unwind occurs.
-rw-r--r--library/alloc/src/collections/vec_deque/mod.rs66
-rw-r--r--library/alloc/src/collections/vec_deque/tests.rs55
-rw-r--r--library/alloc/src/lib.rs1
3 files changed, 120 insertions, 2 deletions
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4643a6bbe2e..61d05afccfd 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -982,6 +982,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
         // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
         // overflow.
         let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+        // Used in the drop guard below.
+        let old_head = self.head;
 
         if self.len == 0 {
             self.head = 0;
@@ -1034,12 +1036,74 @@ impl<T, A: Allocator> VecDeque<T, A> {
             }
             self.head = new_head;
         }
-        self.buf.shrink_to_fit(target_cap);
+
+        struct Guard<'a, T, A: Allocator> {
+            deque: &'a mut VecDeque<T, A>,
+            old_head: usize,
+            target_cap: usize,
+        }
+
+        impl<T, A: Allocator> Drop for Guard<'_, T, A> {
+            #[cold]
+            fn drop(&mut self) {
+                unsafe {
+                    // SAFETY: This is only called if `buf.shrink_to_fit` unwinds,
+                    // which is the only time it's safe to call `abort_shrink`.
+                    self.deque.abort_shrink(self.old_head, self.target_cap)
+                }
+            }
+        }
+
+        let guard = Guard { deque: self, old_head, target_cap };
+
+        guard.deque.buf.shrink_to_fit(target_cap);
+
+        // Don't drop the guard if we didn't unwind.
+        mem::forget(guard);
 
         debug_assert!(self.head < self.capacity() || self.capacity() == 0);
         debug_assert!(self.len <= self.capacity());
     }
 
+    /// Reverts the deque back into a consistent state in case `shrink_to` failed.
+    /// This is necessary to prevent UB if the backing allocator returns an error
+    /// from `shrink` and `handle_alloc_error` subsequently unwinds (see #123369).
+    ///
+    /// `old_head` refers to the head index before `shrink_to` was called. `target_cap`
+    /// is the capacity that it was trying to shrink to.
+    unsafe fn abort_shrink(&mut self, old_head: usize, target_cap: usize) {
+        // Moral equivalent of self.head + self.len <= target_cap. Won't overflow
+        // because `self.len <= target_cap`.
+        if self.head <= target_cap - self.len {
+            // The deque's buffer is contiguous, so no need to copy anything around.
+            return;
+        }
+
+        // `shrink_to` already copied the head to fit into the new capacity, so this won't overflow.
+        let head_len = target_cap - self.head;
+        // `self.head > target_cap - self.len` => `self.len > target_cap - self.head =: head_len` so this must be positive.
+        let tail_len = self.len - head_len;
+
+        if tail_len <= cmp::min(head_len, self.capacity() - target_cap) {
+            // There's enough spare capacity to copy the tail to the back (because `tail_len < self.capacity() - target_cap`),
+            // and copying the tail should be cheaper than copying the head (because `tail_len <= head_len`).
+
+            unsafe {
+                // The old tail and the new tail can't overlap because the head slice lies between them. The
+                // head slice ends at `target_cap`, so that's where we copy to.
+                self.copy_nonoverlapping(0, target_cap, tail_len);
+            }
+        } else {
+            // Either there's not enough spare capacity to make the deque contiguous, or the head is shorter than the tail
+            // (and therefore hopefully cheaper to copy).
+            unsafe {
+                // The old and the new head slice can overlap, so we can't use `copy_nonoverlapping` here.
+                self.copy(self.head, old_head, head_len);
+                self.head = old_head;
+            }
+        }
+    }
+
     /// Shortens the deque, keeping the first `len` elements and dropping
     /// the rest.
     ///
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index f8ce4ca9788..5329ad1aed5 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -1,4 +1,11 @@
-use core::iter::TrustedLen;
+#![feature(alloc_error_hook)]
+
+use crate::alloc::{AllocError, Layout};
+use core::{iter::TrustedLen, ptr::NonNull};
+use std::{
+    alloc::{set_alloc_error_hook, take_alloc_error_hook, System},
+    panic::{catch_unwind, AssertUnwindSafe},
+};
 
 use super::*;
 
@@ -791,6 +798,52 @@ fn test_shrink_to() {
 }
 
 #[test]
+fn test_shrink_to_unwind() {
+    // This tests that `shrink_to` leaves the deque in a consistent state when
+    // the call to `RawVec::shrink_to_fit` unwinds. The code is adapted from #123369
+    // but changed to hopefully not have any UB even if the test fails.
+
+    struct BadAlloc;
+
+    unsafe impl Allocator for BadAlloc {
+        fn allocate(&self, l: Layout) -> Result<NonNull<[u8]>, AllocError> {
+            // We allocate zeroed here so that the whole buffer of the deque
+            // is always initialized. That way, even if the deque is left in
+            // an inconsistent state, no uninitialized memory should be accessed.
+            System.allocate_zeroed(l)
+        }
+
+        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+            unsafe { System.deallocate(ptr, layout) }
+        }
+
+        unsafe fn shrink(
+            &self,
+            _ptr: NonNull<u8>,
+            _old_layout: Layout,
+            _new_layout: Layout,
+        ) -> Result<NonNull<[u8]>, AllocError> {
+            Err(AllocError)
+        }
+    }
+
+    // preserve the old error hook just in case.
+    let old_error_hook = take_alloc_error_hook();
+    set_alloc_error_hook(|_| panic!("alloc error"));
+
+    let mut v = VecDeque::with_capacity_in(15, BadAlloc);
+    v.push_back(1);
+    v.push_front(2);
+    // This should unwind because it calls `BadAlloc::shrink` and then `handle_alloc_error` which unwinds.
+    assert!(catch_unwind(AssertUnwindSafe(|| v.shrink_to_fit())).is_err());
+    // This should only pass if the deque is left in a consistent state.
+    assert_eq!(v, [2, 1]);
+
+    // restore the old error hook.
+    set_alloc_error_hook(old_error_hook);
+}
+
+#[test]
 fn test_shrink_to_fit() {
     // This test checks that every single combination of head and tail position,
     // is tested. Capacity 15 should be large enough to cover every case.
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 91b83cfe011..d607926f8d4 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -92,6 +92,7 @@
 // tidy-alphabetical-start
 #![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
 #![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(alloc_error_hook))]
 #![cfg_attr(test, feature(is_sorted))]
 #![cfg_attr(test, feature(new_uninit))]
 #![feature(alloc_layout_extra)]