| Age | Commit message (Collapse) | Author | Lines |
|
|
|
|
|
|
|
Also adds debug_asserts in Drop for Weak/Arc that the shared static is not being "dropped"/"deallocated".
|
|
Fix #124275: Implemented Default for `Arc<str>`
With added implementations.
```
GOOD Arc<CStr>
BROKEN Arc<OsStr> // removed
GOOD Rc<str>
GOOD Rc<CStr>
BROKEN Rc<OsStr> // removed
GOOD Rc<[T]>
GOOD Arc<[T]>
```
For discussion of https://github.com/rust-lang/rust/pull/124367#issuecomment-2091940137.
Key pain points currently:
> I've had a guess at the best locations/feature attrs for them but they might not be correct.
> However I'm unclear how to get the OsStr impl to compile, which file should they go in to avoid the error below? Is it possible, perhaps with some special std rust lib magic?
|
|
alloc: implement FromIterator for Box<str>
`Box<[T]>` implements `FromIterator<T>` using `Vec<T>` + `into_boxed_slice()`.
Add analogous `FromIterator` implementations for `Box<str>`
matching the current implementations for `String`.
Remove the `Global` allocator requirement for `FromIterator<Box<str>>` too.
ACP: https://github.com/rust-lang/libs-team/issues/196
|
|
|
|
LLVM currently adds a redundant check for the returned option, in addition
to the `self.ptr != self.end` check when using the default
`Iterator::fold` method that calls `vec::IntoIter::next` in a loop.
|
|
`Iterator::try_fold` gets called on the underlying Iterator in
`SpecInPlaceCollect::collect_in_place` whenever it does not implement
`TrustedRandomAccess`. For types that impl `Drop`, LLVM currently can't
tell that the drop can never occur, when using the default
`Iterator::try_fold` implementation.
For example, the asm from the `unwrap_clone` method is currently:
```
unwrap_clone:
push rbp
push r15
push r14
push r13
push r12
push rbx
push rax
mov rbx, rdi
mov r12, qword ptr [rsi]
mov rdi, qword ptr [rsi + 8]
mov rax, qword ptr [rsi + 16]
movabs rsi, -6148914691236517205
mov r14, r12
test rax, rax
je .LBB0_10
lea rcx, [rax + 2*rax]
lea r14, [r12 + 8*rcx]
shl rax, 3
lea rax, [rax + 2*rax]
xor ecx, ecx
.LBB0_2:
cmp qword ptr [r12 + rcx], 0
je .LBB0_4
add rcx, 24
cmp rax, rcx
jne .LBB0_2
jmp .LBB0_10
.LBB0_4:
lea rdx, [rax - 24]
lea r14, [r12 + rcx]
cmp rdx, rcx
je .LBB0_10
mov qword ptr [rsp], rdi
sub rax, rcx
add rax, -24
mul rsi
mov r15, rdx
lea rbp, [r12 + rcx]
add rbp, 32
shr r15, 4
mov r13, qword ptr [rip + __rust_dealloc@GOTPCREL]
jmp .LBB0_6
.LBB0_8:
add rbp, 24
dec r15
je .LBB0_9
.LBB0_6:
mov rsi, qword ptr [rbp]
test rsi, rsi
je .LBB0_8
mov rdi, qword ptr [rbp - 8]
mov edx, 1
call r13
jmp .LBB0_8
.LBB0_9:
mov rdi, qword ptr [rsp]
movabs rsi, -6148914691236517205
.LBB0_10:
sub r14, r12
mov rax, r14
mul rsi
shr rdx, 4
mov qword ptr [rbx], r12
mov qword ptr [rbx + 8], rdi
mov qword ptr [rbx + 16], rdx
mov rax, rbx
add rsp, 8
pop rbx
pop r12
pop r13
pop r14
pop r15
pop rbp
ret
```
After this PR:
```
unwrap_clone:
mov rax, rdi
movups xmm0, xmmword ptr [rsi]
mov rcx, qword ptr [rsi + 16]
movups xmmword ptr [rdi], xmm0
mov qword ptr [rdi + 16], rcx
ret
```
Fixes #120493
|
|
LLVM does not know that the multiplication never overflows, which causes
it to generate unnecessary instructions. Use `usize::unchecked_mul`, so
that it can fold the `dst_cap` calculation when `size_of::<I::SRC>() ==
size_of::<T>()`.
Running:
```
rustc -C llvm-args=-x86-asm-syntax=intel -O src/lib.rs --emit asm
```
```rust
pub struct Foo([usize; 3]);
pub fn unwrap_copy(v: Vec<Foo>) -> Vec<[usize; 3]> {
v.into_iter().map(|f| f.0).collect()
}
```
Before this commit:
```
define void @unwrap_copy(ptr noalias nocapture noundef writeonly sret([24 x i8]) align 8 dereferenceable(24) %_0, ptr noalias nocapture noundef readonly align 8 dereferenceable(24) %iter) {
start:
%me.sroa.0.0.copyload.i = load i64, ptr %iter, align 8
%me.sroa.4.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 8
%me.sroa.4.0.copyload.i = load ptr, ptr %me.sroa.4.0.self.sroa_idx.i, align 8
%me.sroa.5.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 16
%me.sroa.5.0.copyload.i = load i64, ptr %me.sroa.5.0.self.sroa_idx.i, align 8
%_19.i.idx = mul nsw i64 %me.sroa.5.0.copyload.i, 24
%0 = udiv i64 %_19.i.idx, 24
%_16.i.i = mul i64 %me.sroa.0.0.copyload.i, 24
%dst_cap.i.i = udiv i64 %_16.i.i, 24
store i64 %dst_cap.i.i, ptr %_0, align 8
%1 = getelementptr inbounds i8, ptr %_0, i64 8
store ptr %me.sroa.4.0.copyload.i, ptr %1, align 8
%2 = getelementptr inbounds i8, ptr %_0, i64 16
store i64 %0, ptr %2, align 8
ret void
}
```
After:
```
define void @unwrap_copy(ptr noalias nocapture noundef writeonly sret([24 x i8]) align 8 dereferenceable(24) %_0, ptr noalias nocapture noundef readonly align 8 dereferenceable(24) %iter) {
start:
%me.sroa.0.0.copyload.i = load i64, ptr %iter, align 8
%me.sroa.4.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 8
%me.sroa.4.0.copyload.i = load ptr, ptr %me.sroa.4.0.self.sroa_idx.i, align 8
%me.sroa.5.0.self.sroa_idx.i = getelementptr inbounds i8, ptr %iter, i64 16
%me.sroa.5.0.copyload.i = load i64, ptr %me.sroa.5.0.self.sroa_idx.i, align 8
%_19.i.idx = mul nsw i64 %me.sroa.5.0.copyload.i, 24
%0 = udiv i64 %_19.i.idx, 24
store i64 %me.sroa.0.0.copyload.i, ptr %_0, align 8
%1 = getelementptr inbounds i8, ptr %_0, i64 8
store ptr %me.sroa.4.0.copyload.i, ptr %1, align 8
%2 = getelementptr inbounds i8, ptr %_0, i64 16
store i64 %0, ptr %2, align 8, !alias.scope !9, !noalias !14
ret void
}
```
Note that there is still one more `mul,udiv` pair that I couldn't get
rid of. The root cause is the same issue as #121239, the `nuw` gets
stripped off of `ptr::sub_ptr`.
|
|
|
|
... since fn allocator doesn't exist yet.
|
|
Also includes small doc fixes.
|
|
|
|
- `slice::sort` -> driftsort
https://github.com/Voultapher/sort-research-rs/blob/main/writeup/driftsort_introduction/text.md
- `slice::sort_unstable` -> ipnsort
https://github.com/Voultapher/sort-research-rs/blob/main/writeup/ipnsort_introduction/text.md
Replaces the sort implementations with tailor made ones that strike a
balance of run-time, compile-time and binary-size, yielding run-time and
compile-time improvements. Regressing binary-size for `slice::sort`
while improving it for `slice::sort_unstable`. All while upholding the
existing soft and hard safety guarantees, and even extending the soft
guarantees, detecting strict weak ordering violations with a high chance
and reporting them to users via a panic.
In addition the implementation of `select_nth_unstable` is also adapted
as it uses `slice::sort_unstable` internals.
|
|
|
|
Many, many projects use `size_of` to get the size of a type. However,
it's also often equally easy to hardcode a size (e.g. `8` instead of
`size_of::<u64>()`). Minimizing friction in the use of `size_of` helps
ensure that people use it and make code more self-documenting.
The name `size_of` is unambiguous: the name alone, without any prefix or
path, is self-explanatory and unmistakeable for any other functionality.
Adding it to the prelude cannot produce any name conflicts, as any local
definition will silently shadow the one from the prelude. Thus, we don't
need to wait for a new edition prelude to add it.
Add `size_of_val`, `align_of`, and `align_of_val` as well, with similar
justification: widely useful, self-explanatory, unmistakeable for
anything else, won't produce conflicts.
|
|
Arc<[T]>::default where alignof(T) <= 16.
|
|
|
|
reorganised attrs
removed OsStr impls
added backticks
|
|
io::Write::write_fmt: panic if the formatter fails when the stream does not fail
Follow-up to https://github.com/rust-lang/rust/pull/124954
|
|
Relax allocator requirements on some Rc/Arc APIs.
Split out from #119761
* Remove `A: Clone` bound from `Rc::assume_init`(s), `Rc::downcast`, and `Rc::downcast_unchecked` (`Arc` methods were already relaxed by #120445)
* Make `From<Rc<[T; N]>> for Rc<[T]>` allocator-aware (`Arc`'s already is).
* Remove `A: Clone` from `Rc/Arc::unwrap_or_clone`
Internal changes:
* Made `Arc::internal_into_inner_with_allocator` method into `Arc::into_inner_with_allocator` associated fn.
* Add private `Rc::into_inner_with_allocator` (to match Arc), so other fns don't have to juggle `ManuallyDrop`.
|
|
|
|
|
|
* Remove A: Clone bound from Rc::assume_init, Rc::downcast, and Rc::downcast_unchecked.
* Make From<Rc<[T; N]>> for Rc<[T]> allocator-aware.
Internal changes:
* Made Arc::internal_into_inner_with_allocator method into Arc::into_inner_with_allocator associated fn.
* Add private Rc::into_inner_with_allocator (to match Arc), so other fns don't have to juggle ManuallyDrop.
|
|
allow unsized T.
|
|
Documentation of these properties previously existed in a lone paragraph
in the `fmt` module's documentation:
<https://doc.rust-lang.org/1.78.0/std/fmt/index.html#formatting-traits>
However, users looking to implement a formatting trait won't necessarily
look there. Therefore, let's add the critical information (that
formatting per se is infallible) to all the involved items.
|
|
|
|
This way, no other test can be tripped up by `test_shrink_to_unwind` changing the alloc error hook.
|
|
Luckily it's comparatively simple to just restore the `VecDeque` into a valid state on unwinds.
|
|
Box<[T]> implements FromIterator<T> using Vec<T> + into_boxed_slice().
Add analogous FromIterator implementations for Box<str>
matching the current implementations for String.
Remove the Global allocator requirement for FromIterator<Box<str>> too.
|
|
Stabilize exclusive_range_pattern (v2)
This PR is identical to #124459, which was approved and merged but then removed from master by a force-push due to a [CI bug](https://rust-lang.zulipchat.com/#narrow/stream/242791-t-infra/topic/ci.20broken.3F).
r? ghost
Original PR description:
---
Stabilization report: https://github.com/rust-lang/rust/issues/37854#issuecomment-1842398130
FCP: https://github.com/rust-lang/rust/issues/37854#issuecomment-1872520294
Stabilization was blocked by a lint that was merged here: #118879
Documentation PR is here: rust-lang/reference#1484
`@rustbot` label +F-exclusive_range_pattern +T-lang
|
|
Describe and use CStr literals in CStr and CString docs
Mention CStr literals in the description of both types, and use them in some of the code samples for CStr. This is intended to make C string literals more discoverable.
Additionally, I don't think the orange "This example is not tested" warnings are very encouraging, so I have made the examples on `CStr` build.
|
|
String.truncate comment microfix (greater than or equal)
String.truncate calls Vec.truncate in turn, and that method's documentation states "is greater than or equal to". Besides, this matches common sense.
|
|
deref patterns: impl `DerefPure` for more std types
Context: [deref patterns](https://github.com/rust-lang/rust/issues/87121). The requirements of `DerefPure` aren't precise yet, but these types unambiguously satisfy them.
Interestingly, a hypothetical `impl DerefMut for Cow` that does a `Clone` would *not* be eligible for `DerefPure` if we allow mixing deref patterns with normal patterns. If the following is exhaustive then the `DerefMut` would cause UB:
```rust
match &mut Cow::Borrowed(&()) {
Cow::Owned(_) => ..., // Doesn't match
deref!(_x) if false => ..., // Causes the variant to switch to `Owned`
Cow::Borrowed(_) => ..., // Doesn't match
// We reach unreachable
}
```
|
|
|
|
|
|
|
|
|
|
Fully stabilize the following API, including const where applicable:
impl <T> NonNull<T> {
pub const unsafe fn offset(self, count: isize) -> Self;
pub const unsafe fn add(self, count: usize) -> Self;
pub const unsafe fn sub(self, count: usize) -> Self;
pub const unsafe fn offset_from(self, origin: NonNull<T>) -> isize;
pub const unsafe fn read(self) -> T;
pub unsafe fn read_volatile(self) -> T;
pub const unsafe fn read_unaligned(self) -> T;
pub unsafe fn write_volatile(self, val: T);
pub unsafe fn replace(self, src: T) -> T;
}
impl<T: ?Sized> NonNull<T> {
pub const unsafe fn byte_offset(self, count: isize) -> Self;
pub const unsafe fn byte_add(self, count: usize) -> Self;
pub const unsafe fn byte_sub(self, count: usize) -> Self;
pub const unsafe fn byte_offset_from<U: ?Sized>(self, origin: NonNull<U>) -> isize;
pub unsafe fn drop_in_place(self);
}
Stabilize the following without const:
impl <T> NonNull<T> {
// const under `const_intrinsic_copy`
pub const unsafe fn copy_to(self, dest: NonNull<T>, count: usize);
pub const unsafe fn copy_to_nonoverlapping(self, dest: NonNull<T>, count: usize);
pub const unsafe fn copy_from(self, src: NonNull<T>, count: usize);
pub const unsafe fn copy_from_nonoverlapping(self, src: NonNull<T>, count: usize);
// const under `const_ptr_write`
pub const unsafe fn write(self, val: T);
pub const unsafe fn write_bytes(self, val: u8, count: usize);
pub const unsafe fn write_unaligned(self, val: T);
// const under `const_swap`
pub const unsafe fn swap(self, with: NonNull<T>);
// const under `const_align_offset`
pub const fn align_offset(self, align: usize) -> usize;
// const under `const_pointer_is_aligned`
pub const fn is_aligned(self) -> bool;
}
Left the following unstable:
impl <T> NonNull<T> {
// moved gate to `ptr_sub_ptr`
pub const unsafe fn sub_ptr(self, subtracted: NonNull<T>) -> usize;
}
impl <T: ?Sized> NonNull<T> {
// moved gate to `pointer_is_aligned_to`
pub const fn is_aligned_to(self, align: usize) -> bool;
}
Fixes: https://github.com/rust-lang/rust/issues/117691
|
|
|
|
"is greater than or equal to". Besides, this matches common sense.
|
|
Relax `A: Clone` bound for `rc::Weak::into_raw_and_alloc`
Makes this method behave the same way as [`Box::into_raw_with_allocator`](https://doc.rust-lang.org/1.77.2/alloc/boxed/struct.Box.html#method.into_raw_with_allocator) and [`Vec::into_raw_parts_with_alloc`](https://doc.rust-lang.org/1.77.2/alloc/vec/struct.Vec.html#method.into_raw_parts_with_alloc).
I have also noticed the inconsistent presence and naming, should probably be addressed in the future.
|
|
|
|
Stabilize `Utf8Chunks`
Pending FCP in https://github.com/rust-lang/rust/issues/99543.
This PR includes the proposed modification in https://github.com/rust-lang/libs-team/issues/190 as agreed in https://github.com/rust-lang/rust/issues/99543#issuecomment-2050406568.
|
|
|
|
|
|
|
|
|
|
This saves an extra load from memory.
|