| author | Mark Rousskov <mark.simulacrum@gmail.com> | 2024-04-29 09:27:34 -0400 |
|---|---|---|
| committer | Mark Rousskov <mark.simulacrum@gmail.com> | 2024-05-01 22:19:11 -0400 |
| commit | a64f94161130944f8fda1f6dcc00f37860f43ecf | |
| tree | 73ba98774f02b750916f1d71b428e038f7f0f8fe | /library/core/src |
| parent | 377c518bce25ebd2d1bf80568940787ffa397013 | |
Step bootstrap cfgs
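This is the routine cfg step that follows a stage0 (bootstrap) compiler bump: blocks guarded by `#[cfg(bootstrap)]`, kept only so the library still builds with the previous compiler, are deleted, and `#[cfg_attr(not(bootstrap), ...)]` wrappers collapse to the plain attribute, e.g. `#[cfg_attr(not(bootstrap), lang = "Ordering")]` becomes `#[lang = "Ordering"]`. The sketch below illustrates the pattern outside of rustc, using a hypothetical `old_compiler` cfg as a stand-in for the internal `bootstrap` cfg; the function and names are illustrative, not taken from this diff.

```rust
use std::cmp::Ordering;

// Hypothetical stand-in for rustc's internal `bootstrap` cfg: picture a crate
// that is built either by the previous compiler (`--cfg old_compiler`) or by
// the freshly built one (no cfg set).

#[cfg(old_compiler)]
fn three_way(a: i32, b: i32) -> Ordering {
    // Fallback kept only while the previous compiler is in use; this is the
    // kind of branch a "step bootstrap cfgs" commit deletes.
    if a < b {
        Ordering::Less
    } else if a == b {
        Ordering::Equal
    } else {
        Ordering::Greater
    }
}

#[cfg(not(old_compiler))]
fn three_way(a: i32, b: i32) -> Ordering {
    // Branch that relies on the newer compiler; after the step it loses its
    // cfg attribute and becomes the only implementation.
    a.cmp(&b)
}

fn main() {
    assert_eq!(three_way(1, 2), Ordering::Less);
    assert_eq!(three_way(2, 2), Ordering::Equal);
}
```

A step like this commit simply deletes the first function and drops the `cfg` attribute from the second.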
Diffstat (limited to 'library/core/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | library/core/src/cmp.rs | 38 |
| -rw-r--r-- | library/core/src/default.rs | 2 |
| -rw-r--r-- | library/core/src/future/mod.rs | 2 |
| -rw-r--r-- | library/core/src/intrinsics.rs | 91 |
| -rw-r--r-- | library/core/src/intrinsics/simd.rs | 13 |
| -rw-r--r-- | library/core/src/iter/sources/from_coroutine.rs | 2 |
| -rw-r--r-- | library/core/src/iter/traits/marker.rs | 2 |
| -rw-r--r-- | library/core/src/lib.rs | 5 |
| -rw-r--r-- | library/core/src/macros/mod.rs | 1 |
| -rw-r--r-- | library/core/src/num/int_macros.rs | 24 |
| -rw-r--r-- | library/core/src/num/nonzero.rs | 10 |
| -rw-r--r-- | library/core/src/num/uint_macros.rs | 39 |
| -rw-r--r-- | library/core/src/ops/coroutine.rs | 2 |
| -rw-r--r-- | library/core/src/ops/deref.rs | 2 |
| -rw-r--r-- | library/core/src/ops/drop.rs | 1 |
| -rw-r--r-- | library/core/src/ops/mod.rs | 1 |
| -rw-r--r-- | library/core/src/panicking.rs | 1 |
| -rw-r--r-- | library/core/src/pin.rs | 2 |
| -rw-r--r-- | library/core/src/prelude/common.rs | 1 |
| -rw-r--r-- | library/core/src/ptr/metadata.rs | 25 |
| -rw-r--r-- | library/core/src/ptr/mod.rs | 9 |
| -rw-r--r-- | library/core/src/sync/atomic.rs | 3 |
22 files changed, 28 insertions, 248 deletions
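A large share of the 248 deleted lines are duplicate intrinsic declarations: under the bootstrap compiler, `ctpop`, `ctlz`, `cttz`, `ctlz_nonzero` and `cttz_nonzero` returned `T` and `rotate_left`/`rotate_right` took the shift amount as `T`, so the public wrappers needed `as u32`/`as $SelfT` casts. With only the new signatures left (`u32` for counts and shift amounts), those casts go away. The stabilized surface is unchanged, as this small stable-Rust usage example (not code from the diff) shows:

```rust
fn main() {
    let x: u8 = 0b1011_0010;

    // The stable wrappers over ctpop/ctlz/cttz return u32 for every integer
    // width, which now matches the intrinsic signatures directly.
    assert_eq!(x.count_ones(), 4u32);
    assert_eq!(x.leading_zeros(), 0u32);
    assert_eq!(x.trailing_zeros(), 1u32);

    // rotate_left/rotate_right likewise take the rotation amount as u32.
    assert_eq!(x.rotate_left(3), 0b1001_0101);
    assert_eq!(x.rotate_right(3), 0b0101_0110);
}
```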
diff --git a/library/core/src/cmp.rs b/library/core/src/cmp.rs index 2fd9e17c994..fc6022ab753 100644 --- a/library/core/src/cmp.rs +++ b/library/core/src/cmp.rs @@ -379,7 +379,7 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> { // This is a lang item only so that `BinOp::Cmp` in MIR can return it. // It has no special behaviour, but does require that the three variants // `Less`/`Equal`/`Greater` remain `-1_i8`/`0_i8`/`+1_i8` respectively. -#[cfg_attr(not(bootstrap), lang = "Ordering")] +#[lang = "Ordering"] #[repr(i8)] pub enum Ordering { /// An ordering where a compared value is less than another. @@ -852,7 +852,7 @@ pub trait Ord: Eq + PartialOrd<Self> { #[stable(feature = "ord_max_min", since = "1.21.0")] #[inline] #[must_use] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_ord_max")] + #[rustc_diagnostic_item = "cmp_ord_max"] fn max(self, other: Self) -> Self where Self: Sized, @@ -873,7 +873,7 @@ pub trait Ord: Eq + PartialOrd<Self> { #[stable(feature = "ord_max_min", since = "1.21.0")] #[inline] #[must_use] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_ord_min")] + #[rustc_diagnostic_item = "cmp_ord_min"] fn min(self, other: Self) -> Self where Self: Sized, @@ -1160,7 +1160,7 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> { /// ``` #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialord_cmp")] + #[rustc_diagnostic_item = "cmp_partialord_cmp"] fn partial_cmp(&self, other: &Rhs) -> Option<Ordering>; /// This method tests less than (for `self` and `other`) and is used by the `<` operator. @@ -1175,7 +1175,7 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> { #[inline] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialord_lt")] + #[rustc_diagnostic_item = "cmp_partialord_lt"] fn lt(&self, other: &Rhs) -> bool { matches!(self.partial_cmp(other), Some(Less)) } @@ -1193,7 +1193,7 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> { #[inline] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialord_le")] + #[rustc_diagnostic_item = "cmp_partialord_le"] fn le(&self, other: &Rhs) -> bool { matches!(self.partial_cmp(other), Some(Less | Equal)) } @@ -1210,7 +1210,7 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> { #[inline] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialord_gt")] + #[rustc_diagnostic_item = "cmp_partialord_gt"] fn gt(&self, other: &Rhs) -> bool { matches!(self.partial_cmp(other), Some(Greater)) } @@ -1228,7 +1228,7 @@ pub trait PartialOrd<Rhs: ?Sized = Self>: PartialEq<Rhs> { #[inline] #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - #[cfg_attr(not(bootstrap), rustc_diagnostic_item = "cmp_partialord_ge")] + #[rustc_diagnostic_item = "cmp_partialord_ge"] fn ge(&self, other: &Rhs) -> bool { matches!(self.partial_cmp(other), Some(Greater | Equal)) } @@ -1558,14 +1558,7 @@ mod impls { impl PartialOrd for $t { #[inline] fn partial_cmp(&self, other: &$t) -> Option<Ordering> { - #[cfg(bootstrap)] - { - Some(self.cmp(other)) - } - #[cfg(not(bootstrap))] - { - Some(crate::intrinsics::three_way_compare(*self, *other)) - } + Some(crate::intrinsics::three_way_compare(*self, *other)) } #[inline(always)] fn lt(&self, other: &$t) -> bool { (*self) < (*other) } @@ -1581,18 +1574,7 @@ mod impls { impl Ord for $t { #[inline] fn 
cmp(&self, other: &$t) -> Ordering { - #[cfg(bootstrap)] - { - // The order here is important to generate more optimal assembly. - // See <https://github.com/rust-lang/rust/issues/63758> for more info. - if *self < *other { Less } - else if *self == *other { Equal } - else { Greater } - } - #[cfg(not(bootstrap))] - { - crate::intrinsics::three_way_compare(*self, *other) - } + crate::intrinsics::three_way_compare(*self, *other) } } )*) diff --git a/library/core/src/default.rs b/library/core/src/default.rs index e717a8d022f..4524b352ec8 100644 --- a/library/core/src/default.rs +++ b/library/core/src/default.rs @@ -178,9 +178,7 @@ default_impl! { i32, 0, "Returns the default value of `0`" } default_impl! { i64, 0, "Returns the default value of `0`" } default_impl! { i128, 0, "Returns the default value of `0`" } -#[cfg(not(bootstrap))] default_impl! { f16, 0.0f16, "Returns the default value of `0.0`" } default_impl! { f32, 0.0f32, "Returns the default value of `0.0`" } default_impl! { f64, 0.0f64, "Returns the default value of `0.0`" } -#[cfg(not(bootstrap))] default_impl! { f128, 0.0f128, "Returns the default value of `0.0`" } diff --git a/library/core/src/future/mod.rs b/library/core/src/future/mod.rs index c3bd18e30aa..873cccc7e96 100644 --- a/library/core/src/future/mod.rs +++ b/library/core/src/future/mod.rs @@ -12,7 +12,6 @@ use crate::ptr::NonNull; use crate::task::Context; -#[cfg(not(bootstrap))] mod async_drop; mod future; mod into_future; @@ -38,7 +37,6 @@ pub use ready::{ready, Ready}; #[stable(feature = "future_poll_fn", since = "1.64.0")] pub use poll_fn::{poll_fn, PollFn}; -#[cfg(not(bootstrap))] #[unstable(feature = "async_drop", issue = "none")] pub use async_drop::{async_drop, async_drop_in_place, AsyncDrop, AsyncDropInPlace}; diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs index dc41fb5a778..3318a8ffa55 100644 --- a/library/core/src/intrinsics.rs +++ b/library/core/src/intrinsics.rs @@ -1987,18 +1987,11 @@ extern "rust-intrinsic" { /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `count_ones` method. For example, /// [`u32::count_ones`] - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")] #[rustc_safe_intrinsic] #[rustc_nounwind] pub fn ctpop<T: Copy>(x: T) -> u32; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "const_ctpop", since = "1.40.0")] - #[rustc_safe_intrinsic] - #[rustc_nounwind] - pub fn ctpop<T: Copy>(x: T) -> T; - /// Returns the number of leading unset bits (zeroes) in an integer type `T`. /// /// Note that, unlike most intrinsics, this is safe to call; @@ -2035,18 +2028,11 @@ extern "rust-intrinsic" { /// let num_leading = ctlz(x); /// assert_eq!(num_leading, 16); /// ``` - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")] #[rustc_safe_intrinsic] #[rustc_nounwind] pub fn ctlz<T: Copy>(x: T) -> u32; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "const_ctlz", since = "1.40.0")] - #[rustc_safe_intrinsic] - #[rustc_nounwind] - pub fn ctlz<T: Copy>(x: T) -> T; - /// Like `ctlz`, but extra-unsafe as it returns `undef` when /// given an `x` with value `0`. 
/// @@ -2064,16 +2050,10 @@ extern "rust-intrinsic" { /// let num_leading = unsafe { ctlz_nonzero(x) }; /// assert_eq!(num_leading, 3); /// ``` - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "constctlz", since = "1.50.0")] #[rustc_nounwind] pub fn ctlz_nonzero<T: Copy>(x: T) -> u32; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "constctlz", since = "1.50.0")] - #[rustc_nounwind] - pub fn ctlz_nonzero<T: Copy>(x: T) -> T; - /// Returns the number of trailing unset bits (zeroes) in an integer type `T`. /// /// Note that, unlike most intrinsics, this is safe to call; @@ -2110,18 +2090,11 @@ extern "rust-intrinsic" { /// let num_trailing = cttz(x); /// assert_eq!(num_trailing, 16); /// ``` - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")] #[rustc_safe_intrinsic] #[rustc_nounwind] pub fn cttz<T: Copy>(x: T) -> u32; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "const_cttz", since = "1.40.0")] - #[rustc_safe_intrinsic] - #[rustc_nounwind] - pub fn cttz<T: Copy>(x: T) -> T; - /// Like `cttz`, but extra-unsafe as it returns `undef` when /// given an `x` with value `0`. /// @@ -2139,16 +2112,10 @@ extern "rust-intrinsic" { /// let num_trailing = unsafe { cttz_nonzero(x) }; /// assert_eq!(num_trailing, 3); /// ``` - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_cttz_nonzero", since = "1.53.0")] #[rustc_nounwind] pub fn cttz_nonzero<T: Copy>(x: T) -> u32; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "const_cttz_nonzero", since = "1.53.0")] - #[rustc_nounwind] - pub fn cttz_nonzero<T: Copy>(x: T) -> T; - /// Reverses the bytes in an integer type `T`. /// /// Note that, unlike most intrinsics, this is safe to call; @@ -2186,7 +2153,6 @@ extern "rust-intrinsic" { /// large and difficult to optimize. /// /// The stabilized version of this intrinsic is [`Ord::cmp`]. - #[cfg(not(bootstrap))] #[rustc_const_unstable(feature = "const_three_way_compare", issue = "none")] #[rustc_safe_intrinsic] pub fn three_way_compare<T: Copy>(lhs: T, rhs: T) -> crate::cmp::Ordering; @@ -2269,7 +2235,6 @@ extern "rust-intrinsic" { /// Safe wrappers for this intrinsic are available on the integer /// primitives via the `checked_shl` method. For example, /// [`u32::checked_shl`] - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")] #[rustc_nounwind] pub fn unchecked_shl<T: Copy, U: Copy>(x: T, y: U) -> T; @@ -2279,7 +2244,6 @@ extern "rust-intrinsic" { /// Safe wrappers for this intrinsic are available on the integer /// primitives via the `checked_shr` method. For example, /// [`u32::checked_shr`] - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_int_unchecked", since = "1.40.0")] #[rustc_nounwind] pub fn unchecked_shr<T: Copy, U: Copy>(x: T, y: U) -> T; @@ -2321,18 +2285,11 @@ extern "rust-intrinsic" { /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `rotate_left` method. For example, /// [`u32::rotate_left`] - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] #[rustc_safe_intrinsic] #[rustc_nounwind] pub fn rotate_left<T: Copy>(x: T, shift: u32) -> T; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] - #[rustc_safe_intrinsic] - #[rustc_nounwind] - pub fn rotate_left<T: Copy>(x: T, y: T) -> T; - /// Performs rotate right. 
/// /// Note that, unlike most intrinsics, this is safe to call; @@ -2343,18 +2300,11 @@ extern "rust-intrinsic" { /// The stabilized versions of this intrinsic are available on the integer /// primitives via the `rotate_right` method. For example, /// [`u32::rotate_right`] - #[cfg(not(bootstrap))] #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] #[rustc_safe_intrinsic] #[rustc_nounwind] pub fn rotate_right<T: Copy>(x: T, shift: u32) -> T; - #[cfg(bootstrap)] - #[rustc_const_stable(feature = "const_int_rotate", since = "1.40.0")] - #[rustc_safe_intrinsic] - #[rustc_nounwind] - pub fn rotate_right<T: Copy>(x: T, y: T) -> T; - /// Returns (a + b) mod 2<sup>N</sup>, where N is the width of T in bits. /// /// Note that, unlike most intrinsics, this is safe to call; @@ -2507,12 +2457,6 @@ extern "rust-intrinsic" { #[rustc_const_unstable(feature = "const_ptr_sub_ptr", issue = "95892")] #[rustc_nounwind] pub fn ptr_offset_from_unsigned<T>(ptr: *const T, base: *const T) -> usize; - - #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] - #[rustc_safe_intrinsic] - #[rustc_nounwind] - #[cfg(bootstrap)] - pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8; } /// See documentation of `<*const T>::guaranteed_eq` for details. @@ -2522,7 +2466,6 @@ extern "rust-intrinsic" { #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")] #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic] -#[cfg(not(bootstrap))] #[rustc_nounwind] #[rustc_do_not_const_check] #[inline] @@ -2581,28 +2524,6 @@ extern "rust-intrinsic" { #[rustc_safe_intrinsic] #[rustc_nounwind] pub fn black_box<T>(dummy: T) -> T; - - #[rustc_nounwind] - #[cfg(bootstrap)] - pub fn vtable_size(ptr: *const ()) -> usize; - - /// `ptr` must point to a vtable. - /// The intrinsic will return the alignment stored in that vtable. - #[rustc_nounwind] - #[cfg(bootstrap)] - pub fn vtable_align(ptr: *const ()) -> usize; - - #[rustc_const_unstable(feature = "const_eval_select", issue = "none")] - #[rustc_safe_intrinsic] - #[cfg(bootstrap)] - pub fn const_eval_select<ARG: Tuple, F, G, RET>( - arg: ARG, - called_in_const: F, - called_at_rt: G, - ) -> RET - where - G: FnOnce<ARG, Output = RET>, - F: FnOnce<ARG, Output = RET>; } /// Selects which function to call depending on the context. @@ -2637,11 +2558,9 @@ extern "rust-intrinsic" { /// #![feature(const_eval_select)] /// #![feature(core_intrinsics)] /// # #![allow(internal_features)] -/// # #![cfg_attr(bootstrap, allow(unused))] /// use std::intrinsics::const_eval_select; /// /// // Standard library -/// # #[cfg(not(bootstrap))] /// pub const fn inconsistent() -> i32 { /// fn runtime() -> i32 { 1 } /// const fn compiletime() -> i32 { 2 } @@ -2650,8 +2569,6 @@ extern "rust-intrinsic" { /// // and `runtime`. /// const_eval_select((), compiletime, runtime) /// } -/// # #[cfg(bootstrap)] -/// # pub const fn inconsistent() -> i32 { 0 } /// /// // User Crate /// const X: i32 = inconsistent(); @@ -2663,7 +2580,6 @@ extern "rust-intrinsic" { /// otherwise, that principle should not be violated. #[rustc_const_unstable(feature = "const_eval_select", issue = "none")] #[unstable(feature = "core_intrinsics", issue = "none")] -#[cfg(not(bootstrap))] #[rustc_intrinsic] #[rustc_intrinsic_must_be_overridden] pub const fn const_eval_select<ARG: Tuple, F, G, RET>( @@ -2776,7 +2692,7 @@ pub const fn is_val_statically_known<T: Copy>(_arg: T) -> bool { /// `x` and `y` are readable and writable as `T`, and non-overlapping. 
#[rustc_nounwind] #[inline] -#[cfg_attr(not(bootstrap), rustc_intrinsic)] +#[rustc_intrinsic] // This has fallback `const fn` MIR, so shouldn't need stability, see #122652 #[rustc_const_unstable(feature = "const_typed_swap", issue = "none")] pub const unsafe fn typed_swap<T>(x: *mut T, y: *mut T) { @@ -2801,7 +2717,7 @@ pub const unsafe fn typed_swap<T>(x: *mut T, y: *mut T) { #[rustc_const_unstable(feature = "const_ub_checks", issue = "none")] #[unstable(feature = "core_intrinsics", issue = "none")] #[inline(always)] -#[cfg_attr(not(bootstrap), rustc_intrinsic)] // just make it a regular fn in bootstrap +#[rustc_intrinsic] pub const fn ub_checks() -> bool { cfg!(debug_assertions) } @@ -2845,7 +2761,6 @@ pub const unsafe fn const_deallocate(_ptr: *mut u8, _size: usize, _align: usize) #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic] #[rustc_intrinsic_must_be_overridden] -#[cfg(not(bootstrap))] pub unsafe fn vtable_size(_ptr: *const ()) -> usize { unreachable!() } @@ -2856,7 +2771,6 @@ pub unsafe fn vtable_size(_ptr: *const ()) -> usize { #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic] #[rustc_intrinsic_must_be_overridden] -#[cfg(not(bootstrap))] pub unsafe fn vtable_align(_ptr: *const ()) -> usize { unreachable!() } @@ -2871,7 +2785,6 @@ pub unsafe fn vtable_align(_ptr: *const ()) -> usize { #[rustc_const_unstable(feature = "ptr_metadata", issue = "81513")] #[rustc_intrinsic] #[rustc_intrinsic_must_be_overridden] -#[cfg(not(bootstrap))] pub const fn aggregate_raw_ptr<P: AggregateRawPtr<D, Metadata = M>, D, M>(_data: D, _meta: M) -> P { // To implement a fallback we'd have to assume the layout of the pointer, // but the whole point of this intrinsic is that we shouldn't do that. diff --git a/library/core/src/intrinsics/simd.rs b/library/core/src/intrinsics/simd.rs index eeff4ec609a..ceea6790129 100644 --- a/library/core/src/intrinsics/simd.rs +++ b/library/core/src/intrinsics/simd.rs @@ -540,12 +540,8 @@ extern "rust-intrinsic" { /// `T` must be a vector of pointers. /// /// `U` must be a vector of `usize` with the same length as `T`. - #[cfg(not(bootstrap))] #[rustc_nounwind] pub fn simd_expose_provenance<T, U>(ptr: T) -> U; - #[cfg(bootstrap)] - #[rustc_nounwind] - pub fn simd_expose_addr<T, U>(ptr: T) -> U; /// Create a vector of pointers from a vector of addresses. /// @@ -553,11 +549,7 @@ extern "rust-intrinsic" { /// /// `U` must be a vector of pointers, with the same length as `T`. #[rustc_nounwind] - #[cfg(not(bootstrap))] pub fn simd_with_exposed_provenance<T, U>(addr: T) -> U; - #[rustc_nounwind] - #[cfg(bootstrap)] - pub fn simd_from_exposed_addr<T, U>(addr: T) -> U; /// Swap bytes of each element. 
/// @@ -663,8 +655,3 @@ extern "rust-intrinsic" { #[rustc_nounwind] pub fn simd_flog<T>(a: T) -> T; } - -#[cfg(bootstrap)] -pub use simd_expose_addr as simd_expose_provenance; -#[cfg(bootstrap)] -pub use simd_from_exposed_addr as simd_with_exposed_provenance; diff --git a/library/core/src/iter/sources/from_coroutine.rs b/library/core/src/iter/sources/from_coroutine.rs index 9bac9037a02..710ba504ded 100644 --- a/library/core/src/iter/sources/from_coroutine.rs +++ b/library/core/src/iter/sources/from_coroutine.rs @@ -14,7 +14,7 @@ use crate::pin::Pin; /// #![feature(coroutines)] /// #![feature(iter_from_coroutine)] /// -/// let it = std::iter::from_coroutine(#[cfg_attr(not(bootstrap), coroutine)] || { +/// let it = std::iter::from_coroutine(#[coroutine] || { /// yield 1; /// yield 2; /// yield 3; diff --git a/library/core/src/iter/traits/marker.rs b/library/core/src/iter/traits/marker.rs index ad4d63d83b5..2e756a6dd67 100644 --- a/library/core/src/iter/traits/marker.rs +++ b/library/core/src/iter/traits/marker.rs @@ -28,7 +28,7 @@ pub unsafe trait TrustedFused {} #[rustc_unsafe_specialization_marker] // FIXME: this should be a #[marker] and have another blanket impl for T: TrustedFused // but that ICEs iter::Fuse specializations. -#[cfg_attr(not(bootstrap), lang = "fused_iterator")] +#[lang = "fused_iterator"] pub trait FusedIterator: Iterator {} #[stable(feature = "fused", since = "1.26.0")] diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs index cee3870c629..5dcef638954 100644 --- a/library/core/src/lib.rs +++ b/library/core/src/lib.rs @@ -90,7 +90,7 @@ ))] #![no_core] #![rustc_coherence_is_core] -#![cfg_attr(not(bootstrap), rustc_preserve_ub_checks)] +#![rustc_preserve_ub_checks] // // Lints: #![deny(rust_2021_incompatible_or_patterns)] @@ -109,7 +109,6 @@ // // Library features: // tidy-alphabetical-start -#![cfg_attr(bootstrap, feature(associated_type_bounds))] #![feature(array_ptr_get)] #![feature(asm_experimental_arch)] #![feature(char_indices_offset)] @@ -201,7 +200,6 @@ // // Language features: // tidy-alphabetical-start -#![cfg_attr(bootstrap, feature(inline_const))] #![feature(abi_unadjusted)] #![feature(adt_const_params)] #![feature(allow_internal_unsafe)] @@ -402,7 +400,6 @@ pub mod net; pub mod option; pub mod panic; pub mod panicking; -#[cfg(not(bootstrap))] #[unstable(feature = "core_pattern_types", issue = "none")] pub mod pat; pub mod pin; diff --git a/library/core/src/macros/mod.rs b/library/core/src/macros/mod.rs index 6da05a1ca86..40a91689014 100644 --- a/library/core/src/macros/mod.rs +++ b/library/core/src/macros/mod.rs @@ -1715,7 +1715,6 @@ pub(crate) mod builtin { builtin # type_ascribe($expr, $ty) } - #[cfg(not(bootstrap))] /// Unstable placeholder for deref patterns. #[allow_internal_unstable(builtin_syntax)] #[unstable( diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index f50419dad20..77b1039039b 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -1234,18 +1234,9 @@ macro_rules! int_impl { #[inline(always)] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self { - #[cfg(bootstrap)] - { - // For bootstrapping, just use built-in primitive shift. - // panicking is a legal manifestation of UB - self << rhs - } - #[cfg(not(bootstrap))] - { // SAFETY: the caller must uphold the safety contract for // `unchecked_shl`. 
unsafe { intrinsics::unchecked_shl(self, rhs) } - } } /// Checked shift right. Computes `self >> rhs`, returning `None` if `rhs` is @@ -1332,18 +1323,9 @@ macro_rules! int_impl { #[inline(always)] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self { - #[cfg(bootstrap)] - { - // For bootstrapping, just use built-in primitive shift. - // panicking is a legal manifestation of UB - self >> rhs - } - #[cfg(not(bootstrap))] - { - // SAFETY: the caller must uphold the safety contract for - // `unchecked_shr`. - unsafe { intrinsics::unchecked_shr(self, rhs) } - } + // SAFETY: the caller must uphold the safety contract for + // `unchecked_shr`. + unsafe { intrinsics::unchecked_shr(self, rhs) } } /// Checked absolute value. Computes `self.abs()`, returning `None` if diff --git a/library/core/src/num/nonzero.rs b/library/core/src/num/nonzero.rs index fca36787659..5d3ae7316b1 100644 --- a/library/core/src/num/nonzero.rs +++ b/library/core/src/num/nonzero.rs @@ -528,10 +528,7 @@ macro_rules! nonzero_integer { pub const fn leading_zeros(self) -> u32 { // SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`. unsafe { - #[cfg(not(bootstrap))] - return intrinsics::ctlz_nonzero(self.get() as $UnsignedPrimitive); - #[cfg(bootstrap)] - return intrinsics::ctlz_nonzero(self.get() as $UnsignedPrimitive) as u32; + intrinsics::ctlz_nonzero(self.get() as $UnsignedPrimitive) } } @@ -557,10 +554,7 @@ macro_rules! nonzero_integer { pub const fn trailing_zeros(self) -> u32 { // SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`. unsafe { - #[cfg(not(bootstrap))] - return intrinsics::cttz_nonzero(self.get() as $UnsignedPrimitive); - #[cfg(bootstrap)] - return intrinsics::cttz_nonzero(self.get() as $UnsignedPrimitive) as u32; + intrinsics::cttz_nonzero(self.get() as $UnsignedPrimitive) } } diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index bb5113577c2..9effa279b48 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -77,10 +77,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn count_ones(self) -> u32 { - #[cfg(not(bootstrap))] return intrinsics::ctpop(self as $ActualT); - #[cfg(bootstrap)] - return intrinsics::ctpop(self as $ActualT) as u32; } /// Returns the number of zeros in the binary representation of `self`. @@ -122,10 +119,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn leading_zeros(self) -> u32 { - #[cfg(not(bootstrap))] return intrinsics::ctlz(self as $ActualT); - #[cfg(bootstrap)] - return intrinsics::ctlz(self as $ActualT) as u32; } /// Returns the number of trailing zeros in the binary representation @@ -146,10 +140,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn trailing_zeros(self) -> u32 { - #[cfg(not(bootstrap))] return intrinsics::cttz(self); - #[cfg(bootstrap)] - return intrinsics::cttz(self) as u32; } /// Returns the number of leading ones in the binary representation of `self`. @@ -214,10 +205,7 @@ macro_rules! uint_impl { without modifying the original"] #[inline(always)] pub const fn rotate_left(self, n: u32) -> Self { - #[cfg(not(bootstrap))] return intrinsics::rotate_left(self, n); - #[cfg(bootstrap)] - return intrinsics::rotate_left(self, n as $SelfT); } /// Shifts the bits to the right by a specified amount, `n`, @@ -242,10 +230,7 @@ macro_rules! 
uint_impl { without modifying the original"] #[inline(always)] pub const fn rotate_right(self, n: u32) -> Self { - #[cfg(not(bootstrap))] return intrinsics::rotate_right(self, n); - #[cfg(bootstrap)] - return intrinsics::rotate_right(self, n as $SelfT); } /// Reverses the byte order of the integer. @@ -1324,18 +1309,9 @@ macro_rules! uint_impl { #[inline(always)] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self { - #[cfg(bootstrap)] - { - // For bootstrapping, just use built-in primitive shift. - // panicking is a legal manifestation of UB - self << rhs - } - #[cfg(not(bootstrap))] - { - // SAFETY: the caller must uphold the safety contract for - // `unchecked_shl`. - unsafe { intrinsics::unchecked_shl(self, rhs) } - } + // SAFETY: the caller must uphold the safety contract for + // `unchecked_shl`. + unsafe { intrinsics::unchecked_shl(self, rhs) } } /// Checked shift right. Computes `self >> rhs`, returning `None` @@ -1422,18 +1398,9 @@ macro_rules! uint_impl { #[inline(always)] #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self { - #[cfg(bootstrap)] - { - // For bootstrapping, just use built-in primitive shift. - // panicking is a legal manifestation of UB - self >> rhs - } - #[cfg(not(bootstrap))] - { // SAFETY: the caller must uphold the safety contract for // `unchecked_shr`. unsafe { intrinsics::unchecked_shr(self, rhs) } - } } /// Checked exponentiation. Computes `self.pow(exp)`, returning `None` if diff --git a/library/core/src/ops/coroutine.rs b/library/core/src/ops/coroutine.rs index 5250be15fe4..6a6c5db1ab1 100644 --- a/library/core/src/ops/coroutine.rs +++ b/library/core/src/ops/coroutine.rs @@ -46,7 +46,7 @@ pub enum CoroutineState<Y, R> { /// use std::pin::Pin; /// /// fn main() { -/// let mut coroutine = #[cfg_attr(not(bootstrap), coroutine)] || { +/// let mut coroutine = #[coroutine] || { /// yield 1; /// "foo" /// }; diff --git a/library/core/src/ops/deref.rs b/library/core/src/ops/deref.rs index 3795a81c2c1..9849410d484 100644 --- a/library/core/src/ops/deref.rs +++ b/library/core/src/ops/deref.rs @@ -285,7 +285,7 @@ impl<T: ?Sized> DerefMut for &mut T { /// is concerned. Calls to `deref`/`deref_mut`` must leave the pointer itself likewise /// unchanged. 
#[unstable(feature = "deref_pure_trait", issue = "87121")] -#[cfg_attr(not(bootstrap), lang = "deref_pure")] +#[lang = "deref_pure"] pub unsafe trait DerefPure {} #[unstable(feature = "deref_pure_trait", issue = "87121")] diff --git a/library/core/src/ops/drop.rs b/library/core/src/ops/drop.rs index 1325d90e4f3..36ae581e3f7 100644 --- a/library/core/src/ops/drop.rs +++ b/library/core/src/ops/drop.rs @@ -240,7 +240,6 @@ pub trait Drop { } /// Fallback function to call surface level `Drop::drop` function -#[cfg(not(bootstrap))] #[allow(drop_bounds)] #[lang = "fallback_surface_drop"] pub(crate) fn fallback_surface_drop<T: Drop + ?Sized>(x: &mut T) { diff --git a/library/core/src/ops/mod.rs b/library/core/src/ops/mod.rs index 81d5e5c949e..7bcfaadbe37 100644 --- a/library/core/src/ops/mod.rs +++ b/library/core/src/ops/mod.rs @@ -174,7 +174,6 @@ pub use self::deref::Receiver; #[stable(feature = "rust1", since = "1.0.0")] pub use self::drop::Drop; -#[cfg(not(bootstrap))] pub(crate) use self::drop::fallback_surface_drop; #[stable(feature = "rust1", since = "1.0.0")] diff --git a/library/core/src/panicking.rs b/library/core/src/panicking.rs index 3ee56e6c579..ca06e059b75 100644 --- a/library/core/src/panicking.rs +++ b/library/core/src/panicking.rs @@ -156,7 +156,6 @@ pub const fn panic(expr: &'static str) -> ! { // reducing binary size impact. macro_rules! panic_const { ($($lang:ident = $message:expr,)+) => { - #[cfg(not(bootstrap))] pub mod panic_const { use super::*; diff --git a/library/core/src/pin.rs b/library/core/src/pin.rs index a11c6c742d7..f7463d170fd 100644 --- a/library/core/src/pin.rs +++ b/library/core/src/pin.rs @@ -1809,7 +1809,7 @@ impl<Ptr, U> DispatchFromDyn<Pin<U>> for Pin<Ptr> where Ptr: DispatchFromDyn<U> /// fn coroutine_fn() -> impl Coroutine<Yield = usize, Return = ()> /* not Unpin */ { /// // Allow coroutine to be self-referential (not `Unpin`) /// // vvvvvv so that locals can cross yield points. -/// #[cfg_attr(not(bootstrap), coroutine)] static || { +/// #[coroutine] static || { /// let foo = String::from("foo"); /// let foo_ref = &foo; // ------+ /// yield 0; // | <- crosses yield point! diff --git a/library/core/src/prelude/common.rs b/library/core/src/prelude/common.rs index b98f3a4659b..afc6817aa1d 100644 --- a/library/core/src/prelude/common.rs +++ b/library/core/src/prelude/common.rs @@ -97,7 +97,6 @@ pub use crate::macros::builtin::cfg_eval; )] pub use crate::macros::builtin::type_ascribe; -#[cfg(not(bootstrap))] #[unstable( feature = "deref_patterns", issue = "87121", diff --git a/library/core/src/ptr/metadata.rs b/library/core/src/ptr/metadata.rs index 1226c8e2419..eb815b6d822 100644 --- a/library/core/src/ptr/metadata.rs +++ b/library/core/src/ptr/metadata.rs @@ -2,7 +2,6 @@ use crate::fmt; use crate::hash::{Hash, Hasher}; -#[cfg(not(bootstrap))] use crate::intrinsics::aggregate_raw_ptr; use crate::marker::Freeze; @@ -115,17 +114,7 @@ pub const fn from_raw_parts<T: ?Sized>( data_pointer: *const (), metadata: <T as Pointee>::Metadata, ) -> *const T { - #[cfg(bootstrap)] - { - // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T - // and PtrComponents<T> have the same memory layouts. Only std can make this - // guarantee. 
- unsafe { PtrRepr { components: PtrComponents { data_pointer, metadata } }.const_ptr } - } - #[cfg(not(bootstrap))] - { - aggregate_raw_ptr(data_pointer, metadata) - } + aggregate_raw_ptr(data_pointer, metadata) } /// Performs the same functionality as [`from_raw_parts`], except that a @@ -139,17 +128,7 @@ pub const fn from_raw_parts_mut<T: ?Sized>( data_pointer: *mut (), metadata: <T as Pointee>::Metadata, ) -> *mut T { - #[cfg(bootstrap)] - { - // SAFETY: Accessing the value from the `PtrRepr` union is safe since *const T - // and PtrComponents<T> have the same memory layouts. Only std can make this - // guarantee. - unsafe { PtrRepr { components: PtrComponents { data_pointer, metadata } }.mut_ptr } - } - #[cfg(not(bootstrap))] - { - aggregate_raw_ptr(data_pointer, metadata) - } + aggregate_raw_ptr(data_pointer, metadata) } #[repr(C)] diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index f12ab3d50cd..5f3815859c8 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -1784,15 +1784,6 @@ pub(crate) const unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usiz assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub, }; - #[cfg(bootstrap)] - const unsafe fn unchecked_shl(value: usize, shift: usize) -> usize { - value << shift - } - #[cfg(bootstrap)] - const unsafe fn unchecked_shr(value: usize, shift: usize) -> usize { - value >> shift - } - #[cfg(not(bootstrap))] use intrinsics::{unchecked_shl, unchecked_shr}; /// Calculate multiplicative modular inverse of `x` modulo `m`. diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs index 249b8ccb437..232ec589093 100644 --- a/library/core/src/sync/atomic.rs +++ b/library/core/src/sync/atomic.rs @@ -512,7 +512,6 @@ impl AtomicBool { /// /// ``` /// #![feature(atomic_from_mut)] - /// # #![cfg_attr(bootstrap, feature(inline_const))] /// use std::sync::atomic::{AtomicBool, Ordering}; /// /// let mut some_bools = [const { AtomicBool::new(false) }; 10]; @@ -1315,7 +1314,6 @@ impl<T> AtomicPtr<T> { /// /// ``` /// #![feature(atomic_from_mut)] - /// # #![cfg_attr(bootstrap, feature(inline_const))] /// use std::ptr::null_mut; /// use std::sync::atomic::{AtomicPtr, Ordering}; /// @@ -2306,7 +2304,6 @@ macro_rules! atomic_int { /// /// ``` /// #![feature(atomic_from_mut)] - /// # #![cfg_attr(bootstrap, feature(inline_const))] #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")] /// #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")] |
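The primitive comparison impls follow the same pattern: the `#[cfg(bootstrap)]` fallback, an explicit `<`/`==` chain kept for codegen quality per the removed comment's reference to issue #63758, is gone, and both `cmp` and `partial_cmp` now call `intrinsics::three_way_compare` unconditionally. The intrinsic itself is unstable and internal to `core`, so the sketch below only exercises the stabilized surface to show that the observable results are unchanged:

```rust
use std::cmp::Ordering;

fn main() {
    // Primitive integer comparisons still yield the same Ordering values;
    // only the route inside core (now intrinsics::three_way_compare) changed.
    assert_eq!(3_i32.cmp(&5), Ordering::Less);
    assert_eq!(5_i32.cmp(&5), Ordering::Equal);
    assert_eq!((-1_i64).cmp(&-7), Ordering::Greater);

    // For integers, partial_cmp always agrees with cmp and is never None.
    assert_eq!(2_u8.partial_cmp(&7), Some(Ordering::Less));
}
```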
