diff options
| author | bors <bors@rust-lang.org> | 2018-05-30 11:35:00 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2018-05-30 11:35:00 +0000 |
| commit | 4f99f37b7e213d69a489884f651adfc6d217cef5 (patch) | |
| tree | 8b12fd25064a7c3df77c522bdff475e83aff8e23 /src/libstd | |
| parent | 20af72b943527d584df4b99e157262f9b297b3e4 (diff) | |
| parent | a4d899b4a1248f885563e241fa56fe9f69616dc2 (diff) | |
| download | rust-4f99f37b7e213d69a489884f651adfc6d217cef5.tar.gz rust-4f99f37b7e213d69a489884f651adfc6d217cef5.zip | |
Auto merge of #50880 - glandium:oom, r=SimonSapin
OOM handling changes As discussed in https://github.com/rust-lang/rust/issues/49668#issuecomment-384893456 and subsequent. This does have codegen implications. Even without the hooks, and with a handler that ignores the arguments, the compiler doesn't eliminate calling `rust_oom` with the `Layout`. Even if it managed to eliminate that, with the hooks, I don't know if the compiler would be able to figure out it can skip it if the hook is never set. A couple implementation notes: - I went with explicit enums rather than bools because it makes it clearer in callers what is being requested. - I didn't know what `feature` to put the hook setting functions behind. (and surprisingly, the compilation went through without any annotation on the functions) - There's probably some bikeshedding to do on the naming. Cc: @SimonSapin, @sfackler
Diffstat (limited to 'src/libstd')
| -rw-r--r-- | src/libstd/alloc.rs | 50 | ||||
| -rw-r--r-- | src/libstd/collections/hash/map.rs | 39 | ||||
| -rw-r--r-- | src/libstd/collections/hash/table.rs | 43 |
3 files changed, 108 insertions, 24 deletions
diff --git a/src/libstd/alloc.rs b/src/libstd/alloc.rs index 78d3d6d5e60..4f9dffc7c95 100644 --- a/src/libstd/alloc.rs +++ b/src/libstd/alloc.rs @@ -13,15 +13,59 @@ #![unstable(issue = "32838", feature = "allocator_api")] #[doc(inline)] #[allow(deprecated)] pub use alloc_crate::alloc::Heap; -#[doc(inline)] pub use alloc_crate::alloc::{Global, oom}; +#[doc(inline)] pub use alloc_crate::alloc::{Global, Layout, oom}; #[doc(inline)] pub use alloc_system::System; #[doc(inline)] pub use core::alloc::*; +use core::sync::atomic::{AtomicPtr, Ordering}; +use core::{mem, ptr}; + +static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut()); + +/// Registers a custom OOM hook, replacing any that was previously registered. +/// +/// The OOM hook is invoked when an infallible memory allocation fails. +/// The default hook prints a message to standard error and aborts the +/// execution, but this behavior can be customized with the [`set_oom_hook`] +/// and [`take_oom_hook`] functions. +/// +/// The hook is provided with a `Layout` struct which contains information +/// about the allocation that failed. +/// +/// The OOM hook is a global resource. +pub fn set_oom_hook(hook: fn(Layout) -> !) { + HOOK.store(hook as *mut (), Ordering::SeqCst); +} + +/// Unregisters the current OOM hook, returning it. +/// +/// *See also the function [`set_oom_hook`].* +/// +/// If no custom hook is registered, the default hook will be returned. +pub fn take_oom_hook() -> fn(Layout) -> ! { + let hook = HOOK.swap(ptr::null_mut(), Ordering::SeqCst); + if hook.is_null() { + default_oom_hook + } else { + unsafe { mem::transmute(hook) } + } +} + +fn default_oom_hook(layout: Layout) -> ! { + rtabort!("memory allocation of {} bytes failed", layout.size()) +} + #[cfg(not(test))] #[doc(hidden)] #[lang = "oom"] -pub extern fn rust_oom() -> ! { - rtabort!("memory allocation failed"); +pub extern fn rust_oom(layout: Layout) -> ! { + let hook = HOOK.load(Ordering::SeqCst); + let hook: fn(Layout) -> ! 
= if hook.is_null() { + default_oom_hook + } else { + unsafe { mem::transmute(hook) } + }; + hook(layout) } #[cfg(not(test))] diff --git a/src/libstd/collections/hash/map.rs b/src/libstd/collections/hash/map.rs index a7eb002d5a1..935ea4b62b5 100644 --- a/src/libstd/collections/hash/map.rs +++ b/src/libstd/collections/hash/map.rs @@ -11,7 +11,7 @@ use self::Entry::*; use self::VacantEntryState::*; -use alloc::{CollectionAllocErr, oom}; +use alloc::CollectionAllocErr; use cell::Cell; use borrow::Borrow; use cmp::max; @@ -23,8 +23,10 @@ use mem::{self, replace}; use ops::{Deref, Index}; use sys; -use super::table::{self, Bucket, EmptyBucket, FullBucket, FullBucketMut, RawTable, SafeHash}; +use super::table::{self, Bucket, EmptyBucket, Fallibility, FullBucket, FullBucketMut, RawTable, + SafeHash}; use super::table::BucketState::{Empty, Full}; +use super::table::Fallibility::{Fallible, Infallible}; const MIN_NONZERO_RAW_CAPACITY: usize = 32; // must be a power of two @@ -783,11 +785,11 @@ impl<K, V, S> HashMap<K, V, S> /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn reserve(&mut self, additional: usize) { - match self.try_reserve(additional) { + match self.reserve_internal(additional, Infallible) { Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"), - Err(CollectionAllocErr::AllocErr) => oom(), + Err(CollectionAllocErr::AllocErr) => unreachable!(), Ok(()) => { /* yay */ } - } + } } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -809,17 +811,24 @@ impl<K, V, S> HashMap<K, V, S> /// ``` #[unstable(feature = "try_reserve", reason = "new API", issue="48043")] pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> { + self.reserve_internal(additional, Fallible) + } + + fn reserve_internal(&mut self, additional: usize, fallibility: Fallibility) + -> Result<(), CollectionAllocErr> { + let remaining = self.capacity() - self.len(); // this can't overflow if remaining < 
additional { - let min_cap = self.len().checked_add(additional) + let min_cap = self.len() + .checked_add(additional) .ok_or(CollectionAllocErr::CapacityOverflow)?; let raw_cap = self.resize_policy.try_raw_capacity(min_cap)?; - self.try_resize(raw_cap)?; + self.try_resize(raw_cap, fallibility)?; } else if self.table.tag() && remaining <= self.len() { // Probe sequence is too long and table is half full, // resize early to reduce probing length. let new_capacity = self.table.capacity() * 2; - self.try_resize(new_capacity)?; + self.try_resize(new_capacity, fallibility)?; } Ok(()) } @@ -831,11 +840,21 @@ impl<K, V, S> HashMap<K, V, S> /// 2) Ensure `new_raw_cap` is a power of two or zero. #[inline(never)] #[cold] - fn try_resize(&mut self, new_raw_cap: usize) -> Result<(), CollectionAllocErr> { + fn try_resize( + &mut self, + new_raw_cap: usize, + fallibility: Fallibility, + ) -> Result<(), CollectionAllocErr> { assert!(self.table.size() <= new_raw_cap); assert!(new_raw_cap.is_power_of_two() || new_raw_cap == 0); - let mut old_table = replace(&mut self.table, RawTable::try_new(new_raw_cap)?); + let mut old_table = replace( + &mut self.table, + match fallibility { + Infallible => RawTable::new(new_raw_cap), + Fallible => RawTable::try_new(new_raw_cap)?, + } + ); let old_size = old_table.size(); if old_table.size() == 0 { diff --git a/src/libstd/collections/hash/table.rs b/src/libstd/collections/hash/table.rs index b50652ed6b5..eed2debcaa2 100644 --- a/src/libstd/collections/hash/table.rs +++ b/src/libstd/collections/hash/table.rs @@ -711,11 +711,21 @@ fn test_offset_calculation() { assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false)); } +pub(crate) enum Fallibility { + Fallible, + Infallible, +} + +use self::Fallibility::*; + impl<K, V> RawTable<K, V> { /// Does not initialize the buckets. The caller should ensure they, /// at the very least, set every hash to EMPTY_BUCKET. /// Returns an error if it cannot allocate or capacity overflows. 
- unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> { + unsafe fn new_uninitialized_internal( + capacity: usize, + fallibility: Fallibility, + ) -> Result<RawTable<K, V>, CollectionAllocErr> { if capacity == 0 { return Ok(RawTable { size: 0, @@ -754,8 +764,12 @@ impl<K, V> RawTable<K, V> { return Err(CollectionAllocErr::CapacityOverflow); } - let buffer = Global.alloc(Layout::from_size_align(size, alignment) - .map_err(|_| CollectionAllocErr::CapacityOverflow)?)?; + let layout = Layout::from_size_align(size, alignment) + .map_err(|_| CollectionAllocErr::CapacityOverflow)?; + let buffer = Global.alloc(layout).map_err(|e| match fallibility { + Infallible => oom(layout), + Fallible => e, + })?; Ok(RawTable { capacity_mask: capacity.wrapping_sub(1), @@ -768,9 +782,9 @@ impl<K, V> RawTable<K, V> { /// Does not initialize the buckets. The caller should ensure they, /// at the very least, set every hash to EMPTY_BUCKET. unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> { - match Self::try_new_uninitialized(capacity) { + match Self::new_uninitialized_internal(capacity, Infallible) { Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"), - Err(CollectionAllocErr::AllocErr) => oom(), + Err(CollectionAllocErr::AllocErr) => unreachable!(), Ok(table) => { table } } } @@ -794,22 +808,29 @@ impl<K, V> RawTable<K, V> { } } - /// Tries to create a new raw table from a given capacity. If it cannot allocate, - /// it returns with AllocErr. - pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> { + fn new_internal( + capacity: usize, + fallibility: Fallibility, + ) -> Result<RawTable<K, V>, CollectionAllocErr> { unsafe { - let ret = RawTable::try_new_uninitialized(capacity)?; + let ret = RawTable::new_uninitialized_internal(capacity, fallibility)?; ptr::write_bytes(ret.hashes.ptr(), 0, capacity); Ok(ret) } } + /// Tries to create a new raw table from a given capacity. 
If it cannot allocate, + /// it returns with AllocErr. + pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> { + Self::new_internal(capacity, Fallible) + } + /// Creates a new raw table from a given capacity. All buckets are /// initially empty. pub fn new(capacity: usize) -> RawTable<K, V> { - match Self::try_new(capacity) { + match Self::new_internal(capacity, Infallible) { Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"), - Err(CollectionAllocErr::AllocErr) => oom(), + Err(CollectionAllocErr::AllocErr) => unreachable!(), Ok(table) => { table } } } |
