about summary refs log tree commit diff
path: root/src/libcore
diff options
context:
space:
mode:
authorManish Goregaokar <manishsmail@gmail.com>2020-07-02 00:16:28 -0700
committerGitHub <noreply@github.com>2020-07-02 00:16:28 -0700
commit500634bf1073248bb5b8561da3720a0820b09869 (patch)
tree8cf1714bfa8e19c1e1d78bd65bbc39ec141e74da /src/libcore
parent1c68bb6ec9c6fa3eab136a917778ec0625fbdd20 (diff)
parent6a7a6528f69ddb32574e486471c400fee6de8fd7 (diff)
downloadrust-500634bf1073248bb5b8561da3720a0820b09869.tar.gz
rust-500634bf1073248bb5b8561da3720a0820b09869.zip
Rollup merge of #73622 - LeSeulArtichaut:unsafe-libcore, r=nikomatsakis
Deny unsafe ops in unsafe fns in libcore

After `liballoc`, it's time for `libcore` :D

I planned to do this bit by bit to avoid having a big chunk of diffs, so as to make reviews easier, and to keep the unsafe blocks narrow and take the time to document them properly.

r? @nikomatsakis cc @RalfJung
Diffstat (limited to 'src/libcore')
-rw-r--r--src/libcore/alloc/global.rs22
-rw-r--r--src/libcore/alloc/layout.rs3
-rw-r--r--src/libcore/alloc/mod.rs58
-rw-r--r--src/libcore/cell.rs7
-rw-r--r--src/libcore/char/convert.rs3
-rw-r--r--src/libcore/char/methods.rs3
-rw-r--r--src/libcore/convert/num.rs3
-rw-r--r--src/libcore/ffi.rs8
-rw-r--r--src/libcore/future/mod.rs4
-rw-r--r--src/libcore/hash/sip.rs10
-rw-r--r--src/libcore/hint.rs4
-rw-r--r--src/libcore/intrinsics.rs13
-rw-r--r--src/libcore/iter/adapters/fuse.rs5
-rw-r--r--src/libcore/iter/adapters/mod.rs15
-rw-r--r--src/libcore/iter/adapters/zip.rs3
-rw-r--r--src/libcore/iter/range.rs30
-rw-r--r--src/libcore/lib.rs10
-rw-r--r--src/libcore/mem/manually_drop.rs9
-rw-r--r--src/libcore/mem/maybe_uninit.rs42
-rw-r--r--src/libcore/mem/mod.rs23
-rw-r--r--src/libcore/num/f32.rs4
-rw-r--r--src/libcore/num/f64.rs4
-rw-r--r--src/libcore/num/mod.rs27
-rw-r--r--src/libcore/pin.rs13
-rw-r--r--src/libcore/ptr/const_ptr.rs31
-rw-r--r--src/libcore/ptr/mod.rs126
-rw-r--r--src/libcore/ptr/mut_ptr.rs64
-rw-r--r--src/libcore/ptr/non_null.rs11
-rw-r--r--src/libcore/ptr/unique.rs11
-rw-r--r--src/libcore/slice/mod.rs143
-rw-r--r--src/libcore/slice/rotate.rs46
-rw-r--r--src/libcore/str/mod.rs99
-rw-r--r--src/libcore/sync/atomic.rs253
-rw-r--r--src/libcore/tests/lib.rs2
34 files changed, 772 insertions, 337 deletions
diff --git a/src/libcore/alloc/global.rs b/src/libcore/alloc/global.rs
index 147fe696ac0..c198797e650 100644
--- a/src/libcore/alloc/global.rs
+++ b/src/libcore/alloc/global.rs
@@ -127,9 +127,12 @@ pub unsafe trait GlobalAlloc {
     #[stable(feature = "global_alloc", since = "1.28.0")]
     unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
         let size = layout.size();
-        let ptr = self.alloc(layout);
+        // SAFETY: the safety contract for `alloc` must be upheld by the caller.
+        let ptr = unsafe { self.alloc(layout) };
         if !ptr.is_null() {
-            ptr::write_bytes(ptr, 0, size);
+            // SAFETY: as allocation succeeded, the region from `ptr`
+            // of size `size` is guaranteed to be valid for writes.
+            unsafe { ptr::write_bytes(ptr, 0, size) };
         }
         ptr
     }
@@ -187,11 +190,18 @@ pub unsafe trait GlobalAlloc {
     /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
     #[stable(feature = "global_alloc", since = "1.28.0")]
     unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
-        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
-        let new_ptr = self.alloc(new_layout);
+        // SAFETY: the caller must ensure that the `new_size` does not overflow.
+        // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
+        let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
+        // SAFETY: the caller must ensure that `new_layout` is greater than zero.
+        let new_ptr = unsafe { self.alloc(new_layout) };
         if !new_ptr.is_null() {
-            ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
-            self.dealloc(ptr, layout);
+            // SAFETY: the previously allocated block cannot overlap the newly allocated block.
+            // The safety contract for `dealloc` must be upheld by the caller.
+            unsafe {
+                ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
+                self.dealloc(ptr, layout);
+            }
         }
         new_ptr
     }
diff --git a/src/libcore/alloc/layout.rs b/src/libcore/alloc/layout.rs
index a09c2387d0d..ae7ae704465 100644
--- a/src/libcore/alloc/layout.rs
+++ b/src/libcore/alloc/layout.rs
@@ -90,7 +90,8 @@ impl Layout {
     #[rustc_const_stable(feature = "alloc_layout", since = "1.28.0")]
     #[inline]
     pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
-        Layout { size_: size, align_: NonZeroUsize::new_unchecked(align) }
+        // SAFETY: the caller must ensure that `align` is greater than zero.
+        Layout { size_: size, align_: unsafe { NonZeroUsize::new_unchecked(align) } }
     }
 
     /// The minimum size in bytes for a memory block of this layout.
diff --git a/src/libcore/alloc/mod.rs b/src/libcore/alloc/mod.rs
index 1346fbd4810..be4e051b1ca 100644
--- a/src/libcore/alloc/mod.rs
+++ b/src/libcore/alloc/mod.rs
@@ -54,7 +54,9 @@ impl AllocInit {
     #[inline]
     #[unstable(feature = "allocator_api", issue = "32838")]
     pub unsafe fn init(self, memory: MemoryBlock) {
-        self.init_offset(memory, 0)
+        // SAFETY: the safety contract for `init_offset` must be
+        // upheld by the caller.
+        unsafe { self.init_offset(memory, 0) }
     }
 
     /// Initialize the memory block like specified by `init` at the specified `offset`.
@@ -78,7 +80,10 @@ impl AllocInit {
         match self {
             AllocInit::Uninitialized => (),
             AllocInit::Zeroed => {
-                memory.ptr.as_ptr().add(offset).write_bytes(0, memory.size - offset)
+                // SAFETY: the caller must guarantee that `offset` is smaller than or equal to `memory.size`,
+                // so the memory from `memory.ptr + offset` of length `memory.size - offset`
+                // is guaranteed to be contained in `memory` and thus valid for writes.
+                unsafe { memory.ptr.as_ptr().add(offset).write_bytes(0, memory.size - offset) }
             }
         }
     }
@@ -281,11 +286,23 @@ pub unsafe trait AllocRef {
                     return Ok(MemoryBlock { ptr, size });
                 }
 
-                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                let new_layout =
+                    // SAFETY: the caller must ensure that the `new_size` does not overflow.
+                    // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid for a Layout.
+                    // The caller must ensure that `new_size` is greater than zero.
+                    unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
                 let new_memory = self.alloc(new_layout, init)?;
-                ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), size);
-                self.dealloc(ptr, layout);
-                Ok(new_memory)
+
+                // SAFETY: because `new_size` must be greater than or equal to `size`, both the old and new
+                // memory allocation are valid for reads and writes for `size` bytes. Also, because the old
+                // allocation wasn't yet deallocated, it cannot overlap `new_memory`. Thus, the call to
+                // `copy_nonoverlapping` is safe.
+                // The safety contract for `dealloc` must be upheld by the caller.
+                unsafe {
+                    ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), size);
+                    self.dealloc(ptr, layout);
+                    Ok(new_memory)
+                }
             }
         }
     }
@@ -356,11 +373,23 @@ pub unsafe trait AllocRef {
                     return Ok(MemoryBlock { ptr, size });
                 }
 
-                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                let new_layout =
+                // SAFETY: the caller must ensure that the `new_size` does not overflow.
+                // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid for a Layout.
+                // The caller must ensure that `new_size` is greater than zero.
+                    unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
                 let new_memory = self.alloc(new_layout, AllocInit::Uninitialized)?;
-                ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), new_size);
-                self.dealloc(ptr, layout);
-                Ok(new_memory)
+
+                // SAFETY: because `new_size` must be lower than or equal to `size`, both the old and new
+                // memory allocation are valid for reads and writes for `new_size` bytes. Also, because the
+                // old allocation wasn't yet deallocated, it cannot overlap `new_memory`. Thus, the call to
+                // `copy_nonoverlapping` is safe.
+                // The safety contract for `dealloc` must be upheld by the caller.
+                unsafe {
+                    ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), new_size);
+                    self.dealloc(ptr, layout);
+                    Ok(new_memory)
+                }
             }
         }
     }
@@ -386,7 +415,8 @@ where
 
     #[inline]
     unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
-        (**self).dealloc(ptr, layout)
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { (**self).dealloc(ptr, layout) }
     }
 
     #[inline]
@@ -398,7 +428,8 @@ where
         placement: ReallocPlacement,
         init: AllocInit,
     ) -> Result<MemoryBlock, AllocErr> {
-        (**self).grow(ptr, layout, new_size, placement, init)
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { (**self).grow(ptr, layout, new_size, placement, init) }
     }
 
     #[inline]
@@ -409,6 +440,7 @@ where
         new_size: usize,
         placement: ReallocPlacement,
     ) -> Result<MemoryBlock, AllocErr> {
-        (**self).shrink(ptr, layout, new_size, placement)
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { (**self).shrink(ptr, layout, new_size, placement) }
     }
 }
diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs
index c4293ed7bcf..51d9695687f 100644
--- a/src/libcore/cell.rs
+++ b/src/libcore/cell.rs
@@ -1005,7 +1005,12 @@ impl<T: ?Sized> RefCell<T> {
     #[inline]
     pub unsafe fn try_borrow_unguarded(&self) -> Result<&T, BorrowError> {
         if !is_writing(self.borrow.get()) {
-            Ok(&*self.value.get())
+            // SAFETY: We check that nobody is actively writing now, but it is
+            // the caller's responsibility to ensure that nobody writes until
+            // the returned reference is no longer in use.
+            // Also, `self.value.get()` refers to the value owned by `self`
+            // and is thus guaranteed to be valid for the lifetime of `self`.
+            Ok(unsafe { &*self.value.get() })
         } else {
             Err(BorrowError { _private: () })
         }
diff --git a/src/libcore/char/convert.rs b/src/libcore/char/convert.rs
index d7e39946148..c329eec76ac 100644
--- a/src/libcore/char/convert.rs
+++ b/src/libcore/char/convert.rs
@@ -99,7 +99,8 @@ pub fn from_u32(i: u32) -> Option<char> {
 #[inline]
 #[stable(feature = "char_from_unchecked", since = "1.5.0")]
 pub unsafe fn from_u32_unchecked(i: u32) -> char {
-    if cfg!(debug_assertions) { char::from_u32(i).unwrap() } else { transmute(i) }
+    // SAFETY: the caller must guarantee that `i` is a valid char value.
+    if cfg!(debug_assertions) { char::from_u32(i).unwrap() } else { unsafe { transmute(i) } }
 }
 
 #[stable(feature = "char_convert", since = "1.13.0")]
diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs
index dd2f01c679f..72555d781ed 100644
--- a/src/libcore/char/methods.rs
+++ b/src/libcore/char/methods.rs
@@ -183,7 +183,8 @@ impl char {
     #[unstable(feature = "assoc_char_funcs", reason = "recently added", issue = "71763")]
     #[inline]
     pub unsafe fn from_u32_unchecked(i: u32) -> char {
-        super::convert::from_u32_unchecked(i)
+        // SAFETY: the safety contract must be upheld by the caller.
+        unsafe { super::convert::from_u32_unchecked(i) }
     }
 
     /// Converts a digit in the given radix to a `char`.
diff --git a/src/libcore/convert/num.rs b/src/libcore/convert/num.rs
index 46ba0a279b7..336c0b26bc7 100644
--- a/src/libcore/convert/num.rs
+++ b/src/libcore/convert/num.rs
@@ -28,7 +28,8 @@ macro_rules! impl_float_to_int {
                 #[doc(hidden)]
                 #[inline]
                 unsafe fn to_int_unchecked(self) -> $Int {
-                    crate::intrinsics::float_to_int_unchecked(self)
+                    // SAFETY: the safety contract must be upheld by the caller.
+                    unsafe { crate::intrinsics::float_to_int_unchecked(self) }
                 }
             }
         )+
diff --git a/src/libcore/ffi.rs b/src/libcore/ffi.rs
index 7bc2866dc2e..ca463200650 100644
--- a/src/libcore/ffi.rs
+++ b/src/libcore/ffi.rs
@@ -333,7 +333,8 @@ impl<'f> VaListImpl<'f> {
     /// Advance to the next arg.
     #[inline]
     pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T {
-        va_arg(self)
+        // SAFETY: the caller must uphold the safety contract for `va_arg`.
+        unsafe { va_arg(self) }
     }
 
     /// Copies the `va_list` at the current location.
@@ -343,7 +344,10 @@ impl<'f> VaListImpl<'f> {
     {
         let mut ap = self.clone();
         let ret = f(ap.as_va_list());
-        va_end(&mut ap);
+        // SAFETY: the caller must uphold the safety contract for `va_end`.
+        unsafe {
+            va_end(&mut ap);
+        }
         ret
     }
 }
diff --git a/src/libcore/future/mod.rs b/src/libcore/future/mod.rs
index 9dbc23f5c04..2555d91ae8d 100644
--- a/src/libcore/future/mod.rs
+++ b/src/libcore/future/mod.rs
@@ -85,5 +85,7 @@ where
 #[unstable(feature = "gen_future", issue = "50547")]
 #[inline]
 pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
-    &mut *cx.0.as_ptr().cast()
+    // SAFETY: the caller must guarantee that `cx.0` is a valid pointer
+    // that fulfills all the requirements for a mutable reference.
+    unsafe { &mut *cx.0.as_ptr().cast() }
 }
diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs
index ac058609f45..f2bbf646f32 100644
--- a/src/libcore/hash/sip.rs
+++ b/src/libcore/hash/sip.rs
@@ -130,15 +130,19 @@ unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
     let mut i = 0; // current byte index (from LSB) in the output u64
     let mut out = 0;
     if i + 3 < len {
-        out = load_int_le!(buf, start + i, u32) as u64;
+        // SAFETY: `i` cannot be greater than `len`, and the caller must guarantee
+        // that the index start..start+len is in bounds.
+        out = unsafe { load_int_le!(buf, start + i, u32) } as u64;
         i += 4;
     }
     if i + 1 < len {
-        out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
+        // SAFETY: same as above.
+        out |= (unsafe { load_int_le!(buf, start + i, u16) } as u64) << (i * 8);
         i += 2
     }
     if i < len {
-        out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
+        // SAFETY: same as above.
+        out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
         i += 1;
     }
     debug_assert_eq!(i, len);
diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs
index 0d794de5fe8..9ebcde79b63 100644
--- a/src/libcore/hint.rs
+++ b/src/libcore/hint.rs
@@ -46,7 +46,9 @@ use crate::intrinsics;
 #[inline]
 #[stable(feature = "unreachable", since = "1.27.0")]
 pub unsafe fn unreachable_unchecked() -> ! {
-    intrinsics::unreachable()
+    // SAFETY: the safety contract for `intrinsics::unreachable` must
+    // be upheld by the caller.
+    unsafe { intrinsics::unreachable() }
 }
 
 /// Emits a machine instruction hinting to the processor that it is running in busy-wait
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs
index 57ffed19c00..7206cbd198f 100644
--- a/src/libcore/intrinsics.rs
+++ b/src/libcore/intrinsics.rs
@@ -2099,7 +2099,10 @@ pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    copy_nonoverlapping(src, dst, count)
+
+    // SAFETY: the safety contract for `copy_nonoverlapping` must be
+    // upheld by the caller.
+    unsafe { copy_nonoverlapping(src, dst, count) }
 }
 
 /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
@@ -2165,7 +2168,9 @@ pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    copy(src, dst, count)
+
+    // SAFETY: the safety contract for `copy` must be upheld by the caller.
+    unsafe { copy(src, dst, count) }
 }
 
 /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
@@ -2248,5 +2253,7 @@ pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
     }
 
     debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer");
-    write_bytes(dst, val, count)
+
+    // SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
+    unsafe { write_bytes(dst, val, count) }
 }
diff --git a/src/libcore/iter/adapters/fuse.rs b/src/libcore/iter/adapters/fuse.rs
index 502fc2e6315..d2e2fc04a2b 100644
--- a/src/libcore/iter/adapters/fuse.rs
+++ b/src/libcore/iter/adapters/fuse.rs
@@ -178,9 +178,10 @@ where
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
         match self.iter {
-            Some(ref mut iter) => iter.get_unchecked(i),
+            // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+            Some(ref mut iter) => unsafe { iter.get_unchecked(i) },
             // SAFETY: the caller asserts there is an item at `i`, so we're not exhausted.
-            None => intrinsics::unreachable(),
+            None => unsafe { intrinsics::unreachable() },
         }
     }
 
diff --git a/src/libcore/iter/adapters/mod.rs b/src/libcore/iter/adapters/mod.rs
index 00529f0e2d5..133643a0c7f 100644
--- a/src/libcore/iter/adapters/mod.rs
+++ b/src/libcore/iter/adapters/mod.rs
@@ -272,7 +272,8 @@ where
     T: Copy,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        *self.it.get_unchecked(i)
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { *self.it.get_unchecked(i) }
     }
 
     #[inline]
@@ -402,7 +403,8 @@ where
     T: Clone,
 {
     default unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        self.it.get_unchecked(i).clone()
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { self.it.get_unchecked(i) }.clone()
     }
 
     #[inline]
@@ -418,7 +420,8 @@ where
     T: Copy,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        *self.it.get_unchecked(i)
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { *self.it.get_unchecked(i) }
     }
 
     #[inline]
@@ -930,7 +933,8 @@ where
     F: FnMut(I::Item) -> B,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        (self.f)(self.iter.get_unchecked(i))
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        (self.f)(unsafe { self.iter.get_unchecked(i) })
     }
     #[inline]
     fn may_have_side_effect() -> bool {
@@ -1392,7 +1396,8 @@ where
     I: TrustedRandomAccess,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) {
-        (self.count + i, self.iter.get_unchecked(i))
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        (self.count + i, unsafe { self.iter.get_unchecked(i) })
     }
 
     fn may_have_side_effect() -> bool {
diff --git a/src/libcore/iter/adapters/zip.rs b/src/libcore/iter/adapters/zip.rs
index e83d36a580f..985e6561665 100644
--- a/src/libcore/iter/adapters/zip.rs
+++ b/src/libcore/iter/adapters/zip.rs
@@ -271,7 +271,8 @@ where
     B: TrustedRandomAccess,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) {
-        (self.a.get_unchecked(i), self.b.get_unchecked(i))
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { (self.a.get_unchecked(i), self.b.get_unchecked(i)) }
     }
 
     fn may_have_side_effect() -> bool {
diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs
index bd7e6cfa5a7..ee53b6a13f8 100644
--- a/src/libcore/iter/range.rs
+++ b/src/libcore/iter/range.rs
@@ -189,12 +189,14 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
-            start.unchecked_add(n as Self)
+            // SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
+            unsafe { start.unchecked_add(n as Self) }
         }
 
         #[inline]
         unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
-            start.unchecked_sub(n as Self)
+            // SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
+            unsafe { start.unchecked_sub(n as Self) }
         }
 
         #[inline]
@@ -450,21 +452,33 @@ unsafe impl Step for char {
     #[inline]
     unsafe fn forward_unchecked(start: char, count: usize) -> char {
         let start = start as u32;
-        let mut res = Step::forward_unchecked(start, count);
+        // SAFETY: the caller must guarantee that this doesn't overflow
+        // the range of values for a char.
+        let mut res = unsafe { Step::forward_unchecked(start, count) };
         if start < 0xD800 && 0xD800 <= res {
-            res = Step::forward_unchecked(res, 0x800);
+            // SAFETY: the caller must guarantee that this doesn't overflow
+            // the range of values for a char.
+            res = unsafe { Step::forward_unchecked(res, 0x800) };
         }
-        char::from_u32_unchecked(res)
+        // SAFETY: because of the previous contract, this is guaranteed
+        // by the caller to be a valid char.
+        unsafe { char::from_u32_unchecked(res) }
     }
 
     #[inline]
     unsafe fn backward_unchecked(start: char, count: usize) -> char {
         let start = start as u32;
-        let mut res = Step::backward_unchecked(start, count);
+        // SAFETY: the caller must guarantee that this doesn't overflow
+        // the range of values for a char.
+        let mut res = unsafe { Step::backward_unchecked(start, count) };
         if start >= 0xE000 && 0xE000 > res {
-            res = Step::backward_unchecked(res, 0x800);
+            // SAFETY: the caller must guarantee that this doesn't overflow
+            // the range of values for a char.
+            res = unsafe { Step::backward_unchecked(res, 0x800) };
         }
-        char::from_u32_unchecked(res)
+        // SAFETY: because of the previous contract, this is guaranteed
+        // by the caller to be a valid char.
+        unsafe { char::from_u32_unchecked(res) }
     }
 }
 
diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs
index 63ddd97eed3..50c56434fa9 100644
--- a/src/libcore/lib.rs
+++ b/src/libcore/lib.rs
@@ -149,6 +149,8 @@
 #![feature(const_type_id)]
 #![feature(const_caller_location)]
 #![feature(no_niche)] // rust-lang/rust#68303
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 #[prelude_import]
 #[allow(unused)]
@@ -279,7 +281,13 @@ pub mod primitive;
 // set up in such a way that directly pulling it here works such that the
 // crate uses the this crate as its libcore.
 #[path = "../stdarch/crates/core_arch/src/mod.rs"]
-#[allow(missing_docs, missing_debug_implementations, dead_code, unused_imports)]
+#[allow(
+    missing_docs,
+    missing_debug_implementations,
+    dead_code,
+    unused_imports,
+    unsafe_op_in_unsafe_fn
+)]
 // FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is
 // merged. It currently cannot because bootstrap fails as the lint hasn't been defined yet.
 #[cfg_attr(not(bootstrap), allow(clashing_extern_declarations))]
diff --git a/src/libcore/mem/manually_drop.rs b/src/libcore/mem/manually_drop.rs
index 18767c482c7..920f5e9c0bd 100644
--- a/src/libcore/mem/manually_drop.rs
+++ b/src/libcore/mem/manually_drop.rs
@@ -122,7 +122,9 @@ impl<T> ManuallyDrop<T> {
     #[stable(feature = "manually_drop_take", since = "1.42.0")]
     #[inline]
     pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
-        ptr::read(&slot.value)
+        // SAFETY: we are reading from a reference, which is guaranteed
+        // to be valid for reads.
+        unsafe { ptr::read(&slot.value) }
     }
 }
 
@@ -152,7 +154,10 @@ impl<T: ?Sized> ManuallyDrop<T> {
     #[stable(feature = "manually_drop", since = "1.20.0")]
     #[inline]
     pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
-        ptr::drop_in_place(&mut slot.value)
+        // SAFETY: we are dropping the value pointed to by a mutable reference
+        // which is guaranteed to be valid for writes.
+        // It is up to the caller to make sure that `slot` isn't dropped again.
+        unsafe { ptr::drop_in_place(&mut slot.value) }
     }
 }
 
diff --git a/src/libcore/mem/maybe_uninit.rs b/src/libcore/mem/maybe_uninit.rs
index 499016545e9..7732525a0fc 100644
--- a/src/libcore/mem/maybe_uninit.rs
+++ b/src/libcore/mem/maybe_uninit.rs
@@ -494,8 +494,12 @@ impl<T> MaybeUninit<T> {
     #[inline(always)]
     #[rustc_diagnostic_item = "assume_init"]
     pub unsafe fn assume_init(self) -> T {
-        intrinsics::assert_inhabited::<T>();
-        ManuallyDrop::into_inner(self.value)
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // This also means that `self` must be a `value` variant.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            ManuallyDrop::into_inner(self.value)
+        }
     }
 
     /// Reads the value from the `MaybeUninit<T>` container. The resulting `T` is subject
@@ -558,8 +562,12 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_extra", issue = "63567")]
     #[inline(always)]
     pub unsafe fn read(&self) -> T {
-        intrinsics::assert_inhabited::<T>();
-        self.as_ptr().read()
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // Reading from `self.as_ptr()` is safe since `self` should be initialized.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            self.as_ptr().read()
+        }
     }
 
     /// Gets a shared reference to the contained value.
@@ -620,8 +628,12 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_ref", issue = "63568")]
     #[inline(always)]
     pub unsafe fn get_ref(&self) -> &T {
-        intrinsics::assert_inhabited::<T>();
-        &*self.value
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // This also means that `self` must be a `value` variant.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            &*self.value
+        }
     }
 
     /// Gets a mutable (unique) reference to the contained value.
@@ -738,8 +750,12 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_ref", issue = "63568")]
     #[inline(always)]
     pub unsafe fn get_mut(&mut self) -> &mut T {
-        intrinsics::assert_inhabited::<T>();
-        &mut *self.value
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // This also means that `self` must be a `value` variant.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            &mut *self.value
+        }
     }
 
     /// Assuming all the elements are initialized, get a slice to them.
@@ -752,7 +768,11 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_slice_assume_init", issue = "none")]
     #[inline(always)]
     pub unsafe fn slice_get_ref(slice: &[Self]) -> &[T] {
-        &*(slice as *const [Self] as *const [T])
+        // SAFETY: casting slice to a `*const [T]` is safe since the caller guarantees that
+        // `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
+        // The pointer obtained is valid since it refers to memory owned by `slice` which is a
+        // reference and thus guaranteed to be valid for reads.
+        unsafe { &*(slice as *const [Self] as *const [T]) }
     }
 
     /// Assuming all the elements are initialized, get a mutable slice to them.
@@ -765,7 +785,9 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_slice_assume_init", issue = "none")]
     #[inline(always)]
     pub unsafe fn slice_get_mut(slice: &mut [Self]) -> &mut [T] {
-        &mut *(slice as *mut [Self] as *mut [T])
+        // SAFETY: similar to safety notes for `slice_get_ref`, but we have a
+        // mutable reference which is also guaranteed to be valid for writes.
+        unsafe { &mut *(slice as *mut [Self] as *mut [T]) }
     }
 
     /// Gets a pointer to the first element of the array.
diff --git a/src/libcore/mem/mod.rs b/src/libcore/mem/mod.rs
index 46e6ea7cd18..272088815ec 100644
--- a/src/libcore/mem/mod.rs
+++ b/src/libcore/mem/mod.rs
@@ -623,8 +623,11 @@ pub const fn needs_drop<T>() -> bool {
 #[allow(deprecated)]
 #[rustc_diagnostic_item = "mem_zeroed"]
 pub unsafe fn zeroed<T>() -> T {
-    intrinsics::assert_zero_valid::<T>();
-    MaybeUninit::zeroed().assume_init()
+    // SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
+    unsafe {
+        intrinsics::assert_zero_valid::<T>();
+        MaybeUninit::zeroed().assume_init()
+    }
 }
 
 /// Bypasses Rust's normal memory-initialization checks by pretending to
@@ -656,8 +659,11 @@ pub unsafe fn zeroed<T>() -> T {
 #[allow(deprecated)]
 #[rustc_diagnostic_item = "mem_uninitialized"]
 pub unsafe fn uninitialized<T>() -> T {
-    intrinsics::assert_uninit_valid::<T>();
-    MaybeUninit::uninit().assume_init()
+    // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`.
+    unsafe {
+        intrinsics::assert_uninit_valid::<T>();
+        MaybeUninit::uninit().assume_init()
+    }
 }
 
 /// Swaps the values at two mutable locations, without deinitializing either one.
@@ -922,9 +928,14 @@ pub fn drop<T>(_x: T) {}
 pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
     // If U has a higher alignment requirement, src may not be suitably aligned.
     if align_of::<U>() > align_of::<T>() {
-        ptr::read_unaligned(src as *const T as *const U)
+        // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+        // The caller must guarantee that the actual transmutation is safe.
+        unsafe { ptr::read_unaligned(src as *const T as *const U) }
     } else {
-        ptr::read(src as *const T as *const U)
+        // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+        // We just checked that `src as *const U` was properly aligned.
+        // The caller must guarantee that the actual transmutation is safe.
+        unsafe { ptr::read(src as *const T as *const U) }
     }
 }
 
diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs
index 6313de31ce4..061d1ea6b1c 100644
--- a/src/libcore/num/f32.rs
+++ b/src/libcore/num/f32.rs
@@ -629,7 +629,9 @@ impl f32 {
     where
         Self: FloatToInt<Int>,
     {
-        FloatToInt::<Int>::to_int_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for
+        // `FloatToInt::to_int_unchecked`.
+        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
     }
 
     /// Raw transmutation to `u32`.
diff --git a/src/libcore/num/f64.rs b/src/libcore/num/f64.rs
index d42e5392c58..b0df4d64f6e 100644
--- a/src/libcore/num/f64.rs
+++ b/src/libcore/num/f64.rs
@@ -643,7 +643,9 @@ impl f64 {
     where
         Self: FloatToInt<Int>,
     {
-        FloatToInt::<Int>::to_int_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for
+        // `FloatToInt::to_int_unchecked`.
+        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
     }
 
     /// Raw transmutation to `u64`.
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index d36da90f2ad..2ded2e9c086 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -74,7 +74,8 @@ assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", s
                 #[rustc_const_stable(feature = "nonzero", since = "1.34.0")]
                 #[inline]
                 pub const unsafe fn new_unchecked(n: $Int) -> Self {
-                    Self(n)
+                    // SAFETY: this is guaranteed to be safe by the caller.
+                    unsafe { Self(n) }
                 }
 
                 /// Creates a non-zero if the given value is not zero.
@@ -762,7 +763,9 @@ cannot occur. This results in undefined behavior when `self + rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
-                intrinsics::unchecked_add(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_add`.
+                unsafe { intrinsics::unchecked_add(self, rhs) }
             }
         }
 
@@ -804,7 +807,9 @@ cannot occur. This results in undefined behavior when `self - rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
-                intrinsics::unchecked_sub(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_sub`.
+                unsafe { intrinsics::unchecked_sub(self, rhs) }
             }
         }
 
@@ -846,7 +851,9 @@ cannot occur. This results in undefined behavior when `self * rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
-                intrinsics::unchecked_mul(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_mul`.
+                unsafe { intrinsics::unchecked_mul(self, rhs) }
             }
         }
 
@@ -2998,7 +3005,9 @@ cannot occur. This results in undefined behavior when `self + rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
-                intrinsics::unchecked_add(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_add`.
+                unsafe { intrinsics::unchecked_add(self, rhs) }
             }
         }
 
@@ -3038,7 +3047,9 @@ cannot occur. This results in undefined behavior when `self - rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
-                intrinsics::unchecked_sub(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_sub`.
+                unsafe { intrinsics::unchecked_sub(self, rhs) }
             }
         }
 
@@ -3078,7 +3089,9 @@ cannot occur. This results in undefined behavior when `self * rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
-                intrinsics::unchecked_mul(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_mul`.
+                unsafe { intrinsics::unchecked_mul(self, rhs) }
             }
         }
 
diff --git a/src/libcore/pin.rs b/src/libcore/pin.rs
index 6f5bf7ad9da..da299f026f8 100644
--- a/src/libcore/pin.rs
+++ b/src/libcore/pin.rs
@@ -679,7 +679,10 @@ impl<'a, T: ?Sized> Pin<&'a T> {
     {
         let pointer = &*self.pointer;
         let new_pointer = func(pointer);
-        Pin::new_unchecked(new_pointer)
+
+        // SAFETY: the safety contract for `new_unchecked` must be
+        // upheld by the caller.
+        unsafe { Pin::new_unchecked(new_pointer) }
     }
 
     /// Gets a shared reference out of a pin.
@@ -769,9 +772,13 @@ impl<'a, T: ?Sized> Pin<&'a mut T> {
         U: ?Sized,
         F: FnOnce(&mut T) -> &mut U,
     {
-        let pointer = Pin::get_unchecked_mut(self);
+        // SAFETY: the caller is responsible for not moving the
+        // value out of this reference.
+        let pointer = unsafe { Pin::get_unchecked_mut(self) };
         let new_pointer = func(pointer);
-        Pin::new_unchecked(new_pointer)
+        // SAFETY: as the value of `this` is guaranteed to not have
+        // been moved out, this call to `new_unchecked` is safe.
+        unsafe { Pin::new_unchecked(new_pointer) }
     }
 }
 
diff --git a/src/libcore/ptr/const_ptr.rs b/src/libcore/ptr/const_ptr.rs
index 64a506a6377..d1d7a715238 100644
--- a/src/libcore/ptr/const_ptr.rs
+++ b/src/libcore/ptr/const_ptr.rs
@@ -95,7 +95,9 @@ impl<T: ?Sized> *const T {
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
     pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
-        if self.is_null() { None } else { Some(&*self) }
+        // SAFETY: the caller must guarantee that `self` is valid
+        // for a reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&*self) } }
     }
 
     /// Calculates the offset from a pointer.
@@ -157,7 +159,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        intrinsics::offset(self, count)
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { intrinsics::offset(self, count) }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -292,7 +295,8 @@ impl<T: ?Sized> *const T {
     {
         let pointee_size = mem::size_of::<T>();
         assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
-        intrinsics::ptr_offset_from(self, origin)
+        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
+        unsafe { intrinsics::ptr_offset_from(self, origin) }
     }
 
     /// Returns whether two pointers are guaranteed to be equal.
@@ -471,7 +475,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        self.offset(count as isize)
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset(count as isize) }
     }
 
     /// Calculates the offset from a pointer (convenience for
@@ -534,7 +539,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        self.offset((count as isize).wrapping_neg())
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset((count as isize).wrapping_neg()) }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -663,7 +669,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        read(self)
+        // SAFETY: the caller must uphold the safety contract for `read`.
+        unsafe { read(self) }
     }
 
     /// Performs a volatile read of the value from `self` without moving it. This
@@ -682,7 +689,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        read_volatile(self)
+        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+        unsafe { read_volatile(self) }
     }
 
     /// Reads the value from `self` without moving it. This leaves the
@@ -699,7 +707,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        read_unaligned(self)
+        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+        unsafe { read_unaligned(self) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -716,7 +725,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        copy(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy`.
+        unsafe { copy(self, dest, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -733,7 +743,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        copy_nonoverlapping(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+        unsafe { copy_nonoverlapping(self, dest, count) }
     }
 
     /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
diff --git a/src/libcore/ptr/mod.rs b/src/libcore/ptr/mod.rs
index ca2b0c85ec1..5f028f9ea76 100644
--- a/src/libcore/ptr/mod.rs
+++ b/src/libcore/ptr/mod.rs
@@ -184,7 +184,9 @@ mod mut_ptr;
 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
     // Code here does not matter - this is replaced by the
     // real drop glue by the compiler.
-    drop_in_place(to_drop)
+
+    // SAFETY: see comment above
+    unsafe { drop_in_place(to_drop) }
 }
 
 /// Creates a null raw pointer.
@@ -374,9 +376,15 @@ pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
     let mut tmp = MaybeUninit::<T>::uninit();
 
     // Perform the swap
-    copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
-    copy(y, x, 1); // `x` and `y` may overlap
-    copy_nonoverlapping(tmp.as_ptr(), y, 1);
+    // SAFETY: the caller must guarantee that `x` and `y` are
+    // valid for writes and properly aligned. `tmp` cannot be
+    // overlapping either `x` or `y` because `tmp` was just allocated
+    // on the stack as a separate allocated object.
+    unsafe {
+        copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
+        copy(y, x, 1); // `x` and `y` may overlap
+        copy_nonoverlapping(tmp.as_ptr(), y, 1);
+    }
 }
 
 /// Swaps `count * size_of::<T>()` bytes between the two regions of memory
@@ -432,7 +440,9 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
     let len = mem::size_of::<T>() * count;
-    swap_nonoverlapping_bytes(x, y, len)
+    // SAFETY: the caller must guarantee that `x` and `y` are
+    // valid for writes and properly aligned.
+    unsafe { swap_nonoverlapping_bytes(x, y, len) }
 }
 
 #[inline]
@@ -440,11 +450,16 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
     if mem::size_of::<T>() < 32 {
-        let z = read(x);
-        copy_nonoverlapping(y, x, 1);
-        write(y, z);
+        // SAFETY: the caller must guarantee that `x` and `y` are valid
+        // for writes, properly aligned, and non-overlapping.
+        unsafe {
+            let z = read(x);
+            copy_nonoverlapping(y, x, 1);
+            write(y, z);
+        }
     } else {
-        swap_nonoverlapping(x, y, 1);
+        // SAFETY: the caller must uphold the safety contract for `swap_nonoverlapping`.
+        unsafe { swap_nonoverlapping(x, y, 1) };
     }
 }
 
@@ -471,14 +486,23 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         // Declaring `t` here avoids aligning the stack when this loop is unused
         let mut t = mem::MaybeUninit::<Block>::uninit();
         let t = t.as_mut_ptr() as *mut u8;
-        let x = x.add(i);
-        let y = y.add(i);
 
-        // Swap a block of bytes of x & y, using t as a temporary buffer
-        // This should be optimized into efficient SIMD operations where available
-        copy_nonoverlapping(x, t, block_size);
-        copy_nonoverlapping(y, x, block_size);
-        copy_nonoverlapping(t, y, block_size);
+        // SAFETY: As `i < len`, and as the caller must guarantee that `x` and `y` are valid
+        // for `len` bytes, `x + i` and `y + i` must be valid addresses, which fulfills the
+        // safety contract for `add`.
+        //
+        // Also, the caller must guarantee that `x` and `y` are valid for writes, properly aligned,
+        // and non-overlapping, which fulfills the safety contract for `copy_nonoverlapping`.
+        unsafe {
+            let x = x.add(i);
+            let y = y.add(i);
+
+            // Swap a block of bytes of x & y, using t as a temporary buffer
+            // This should be optimized into efficient SIMD operations where available
+            copy_nonoverlapping(x, t, block_size);
+            copy_nonoverlapping(y, x, block_size);
+            copy_nonoverlapping(t, y, block_size);
+        }
         i += block_size;
     }
 
@@ -488,12 +512,16 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         let rem = len - i;
 
         let t = t.as_mut_ptr() as *mut u8;
-        let x = x.add(i);
-        let y = y.add(i);
 
-        copy_nonoverlapping(x, t, rem);
-        copy_nonoverlapping(y, x, rem);
-        copy_nonoverlapping(t, y, rem);
+        // SAFETY: see previous safety comment.
+        unsafe {
+            let x = x.add(i);
+            let y = y.add(i);
+
+            copy_nonoverlapping(x, t, rem);
+            copy_nonoverlapping(y, x, rem);
+            copy_nonoverlapping(t, y, rem);
+        }
     }
 }
 
@@ -540,7 +568,13 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
-    mem::swap(&mut *dst, &mut src); // cannot overlap
+    // SAFETY: the caller must guarantee that `dst` is valid to be
+    // cast to a mutable reference (valid for writes, aligned, initialized),
+    // and cannot overlap `src` since `dst` must point to a distinct
+    // allocated object.
+    unsafe {
+        mem::swap(&mut *dst, &mut src); // cannot overlap
+    }
     src
 }
 
@@ -658,8 +692,16 @@ pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
 pub unsafe fn read<T>(src: *const T) -> T {
     // `copy_nonoverlapping` takes care of debug_assert.
     let mut tmp = MaybeUninit::<T>::uninit();
-    copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
-    tmp.assume_init()
+    // SAFETY: the caller must guarantee that `src` is valid for reads.
+    // `src` cannot overlap `tmp` because `tmp` was just allocated on
+    // the stack as a separate allocated object.
+    //
+    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
+    // to be properly initialized.
+    unsafe {
+        copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
+        tmp.assume_init()
+    }
 }
 
 /// Reads the value from `src` without moving it. This leaves the
@@ -752,8 +794,16 @@ pub unsafe fn read<T>(src: *const T) -> T {
 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
     // `copy_nonoverlapping` takes care of debug_assert.
     let mut tmp = MaybeUninit::<T>::uninit();
-    copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
-    tmp.assume_init()
+    // SAFETY: the caller must guarantee that `src` is valid for reads.
+    // `src` cannot overlap `tmp` because `tmp` was just allocated on
+    // the stack as a separate allocated object.
+    //
+    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
+    // to be properly initialized.
+    unsafe {
+        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
+        tmp.assume_init()
+    }
 }
 
 /// Overwrites a memory location with the given value without reading or
@@ -847,7 +897,8 @@ pub unsafe fn write<T>(dst: *mut T, src: T) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    intrinsics::move_val_init(&mut *dst, src)
+    // SAFETY: the caller must uphold the safety contract for `move_val_init`.
+    unsafe { intrinsics::move_val_init(&mut *dst, src) }
 }
 
 /// Overwrites a memory location with the given value without reading or
@@ -939,8 +990,13 @@ pub unsafe fn write<T>(dst: *mut T, src: T) {
 #[inline]
 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
-    // `copy_nonoverlapping` takes care of debug_assert.
-    copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+    // SAFETY: the caller must guarantee that `dst` is valid for writes.
+    // `dst` cannot overlap `src` because the caller has mutable access
+    // to `dst` while `src` is owned by this function.
+    unsafe {
+        // `copy_nonoverlapping` takes care of debug_assert.
+        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+    }
     mem::forget(src);
 }
 
@@ -1015,7 +1071,8 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    intrinsics::volatile_load(src)
+    // SAFETY: the caller must uphold the safety contract for `volatile_load`.
+    unsafe { intrinsics::volatile_load(src) }
 }
 
 /// Performs a volatile write of a memory location with the given value without
@@ -1087,7 +1144,10 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    intrinsics::volatile_store(dst, src);
+    // SAFETY: the caller must uphold the safety contract for `volatile_store`.
+    unsafe {
+        intrinsics::volatile_store(dst, src);
+    }
 }
 
 /// Align pointer `p`.
@@ -1173,8 +1233,8 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
     }
 
     let smoda = stride & a_minus_one;
-    // a is power-of-two so cannot be 0. stride = 0 is handled above.
-    let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
+    // SAFETY: a is power-of-two so cannot be 0. stride = 0 is handled above.
+    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
     let gcd = 1usize << gcdpow;
 
     if p as usize & (gcd.wrapping_sub(1)) == 0 {
diff --git a/src/libcore/ptr/mut_ptr.rs b/src/libcore/ptr/mut_ptr.rs
index 6b5cd9fdb85..7d4b6339b51 100644
--- a/src/libcore/ptr/mut_ptr.rs
+++ b/src/libcore/ptr/mut_ptr.rs
@@ -89,7 +89,9 @@ impl<T: ?Sized> *mut T {
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
     pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
-        if self.is_null() { None } else { Some(&*self) }
+        // SAFETY: the caller must guarantee that `self` is valid for a
+        // reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&*self) } }
     }
 
     /// Calculates the offset from a pointer.
@@ -151,7 +153,10 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        intrinsics::offset(self, count) as *mut T
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        // The obtained pointer is valid for writes since the caller must
+        // guarantee that it points to the same allocated object as `self`.
+        unsafe { intrinsics::offset(self, count) as *mut T }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -270,7 +275,9 @@ impl<T: ?Sized> *mut T {
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
     pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
-        if self.is_null() { None } else { Some(&mut *self) }
+        // SAFETY: the caller must guarantee that `self` is valid for
+        // a mutable reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&mut *self) } }
     }
 
     /// Returns whether two pointers are guaranteed to be equal.
@@ -406,7 +413,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        (self as *const T).offset_from(origin)
+        // SAFETY: the caller must uphold the safety contract for `offset_from`.
+        unsafe { (self as *const T).offset_from(origin) }
     }
 
     /// Calculates the distance between two pointers. The returned value is in
@@ -518,7 +526,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        self.offset(count as isize)
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset(count as isize) }
     }
 
     /// Calculates the offset from a pointer (convenience for
@@ -581,7 +590,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        self.offset((count as isize).wrapping_neg())
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset((count as isize).wrapping_neg()) }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -710,7 +720,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        read(self)
+        // SAFETY: the caller must uphold the safety contract for `read`.
+        unsafe { read(self) }
     }
 
     /// Performs a volatile read of the value from `self` without moving it. This
@@ -729,7 +740,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        read_volatile(self)
+        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+        unsafe { read_volatile(self) }
     }
 
     /// Reads the value from `self` without moving it. This leaves the
@@ -746,7 +758,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        read_unaligned(self)
+        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+        unsafe { read_unaligned(self) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -763,7 +776,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy`.
+        unsafe { copy(self, dest, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -780,7 +794,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy_nonoverlapping(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+        unsafe { copy_nonoverlapping(self, dest, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
@@ -797,7 +812,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy(src, self, count)
+        // SAFETY: the caller must uphold the safety contract for `copy`.
+        unsafe { copy(src, self, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
@@ -814,7 +830,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy_nonoverlapping(src, self, count)
+        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+        unsafe { copy_nonoverlapping(src, self, count) }
     }
 
     /// Executes the destructor (if any) of the pointed-to value.
@@ -825,7 +842,8 @@ impl<T: ?Sized> *mut T {
     #[stable(feature = "pointer_methods", since = "1.26.0")]
     #[inline]
     pub unsafe fn drop_in_place(self) {
-        drop_in_place(self)
+        // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+        unsafe { drop_in_place(self) }
     }
 
     /// Overwrites a memory location with the given value without reading or
@@ -840,7 +858,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write(self, val)
+        // SAFETY: the caller must uphold the safety contract for `write`.
+        unsafe { write(self, val) }
     }
 
     /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
@@ -855,7 +874,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write_bytes(self, val, count)
+        // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+        unsafe { write_bytes(self, val, count) }
     }
 
     /// Performs a volatile write of a memory location with the given value without
@@ -874,7 +894,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write_volatile(self, val)
+        // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+        unsafe { write_volatile(self, val) }
     }
 
     /// Overwrites a memory location with the given value without reading or
@@ -891,7 +912,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write_unaligned(self, val)
+        // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
+        unsafe { write_unaligned(self, val) }
     }
 
     /// Replaces the value at `self` with `src`, returning the old
@@ -906,7 +928,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        replace(self, src)
+        // SAFETY: the caller must uphold the safety contract for `replace`.
+        unsafe { replace(self, src) }
     }
 
     /// Swaps the values at two mutable locations of the same type, without
@@ -922,7 +945,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        swap(self, with)
+        // SAFETY: the caller must uphold the safety contract for `swap`.
+        unsafe { swap(self, with) }
     }
 
     /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
diff --git a/src/libcore/ptr/non_null.rs b/src/libcore/ptr/non_null.rs
index 870364a61dd..c2d31bfb6a4 100644
--- a/src/libcore/ptr/non_null.rs
+++ b/src/libcore/ptr/non_null.rs
@@ -87,7 +87,8 @@ impl<T: ?Sized> NonNull<T> {
     #[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.32.0")]
     #[inline]
     pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
-        NonNull { pointer: ptr as _ }
+        // SAFETY: the caller must guarantee that `ptr` is non-null.
+        unsafe { NonNull { pointer: ptr as _ } }
     }
 
     /// Creates a new `NonNull` if `ptr` is non-null.
@@ -118,7 +119,9 @@ impl<T: ?Sized> NonNull<T> {
     #[stable(feature = "nonnull", since = "1.25.0")]
     #[inline]
     pub unsafe fn as_ref(&self) -> &T {
-        &*self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a reference.
+        unsafe { &*self.as_ptr() }
     }
 
     /// Mutably dereferences the content.
@@ -129,7 +132,9 @@ impl<T: ?Sized> NonNull<T> {
     #[stable(feature = "nonnull", since = "1.25.0")]
     #[inline]
     pub unsafe fn as_mut(&mut self) -> &mut T {
-        &mut *self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a mutable reference.
+        unsafe { &mut *self.as_ptr() }
     }
 
     /// Casts to a pointer of another type.
diff --git a/src/libcore/ptr/unique.rs b/src/libcore/ptr/unique.rs
index f58d35f0613..78647eee338 100644
--- a/src/libcore/ptr/unique.rs
+++ b/src/libcore/ptr/unique.rs
@@ -87,7 +87,8 @@ impl<T: ?Sized> Unique<T> {
     /// `ptr` must be non-null.
     #[inline]
     pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
-        Unique { pointer: ptr as _, _marker: PhantomData }
+        // SAFETY: the caller must guarantee that `ptr` is non-null.
+        unsafe { Unique { pointer: ptr as _, _marker: PhantomData } }
     }
 
     /// Creates a new `Unique` if `ptr` is non-null.
@@ -114,7 +115,9 @@ impl<T: ?Sized> Unique<T> {
     /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
     #[inline]
     pub unsafe fn as_ref(&self) -> &T {
-        &*self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a reference.
+        unsafe { &*self.as_ptr() }
     }
 
     /// Mutably dereferences the content.
@@ -124,7 +127,9 @@ impl<T: ?Sized> Unique<T> {
     /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
     #[inline]
     pub unsafe fn as_mut(&mut self) -> &mut T {
-        &mut *self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a mutable reference.
+        unsafe { &mut *self.as_ptr() }
     }
 
     /// Casts to a pointer of another type.
diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs
index 81c1cb295e5..e7a2d7adede 100644
--- a/src/libcore/slice/mod.rs
+++ b/src/libcore/slice/mod.rs
@@ -310,7 +310,8 @@ impl<T> [T] {
     where
         I: SliceIndex<Self>,
     {
-        index.get_unchecked(self)
+        // SAFETY: the caller must uphold the safety requirements for `get_unchecked`.
+        unsafe { index.get_unchecked(self) }
     }
 
     /// Returns a mutable reference to an element or subslice, without doing
@@ -341,7 +342,8 @@ impl<T> [T] {
     where
         I: SliceIndex<Self>,
     {
-        index.get_unchecked_mut(self)
+        // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`.
+        unsafe { index.get_unchecked_mut(self) }
     }
 
     /// Returns a raw pointer to the slice's buffer.
@@ -2581,18 +2583,21 @@ impl<T> [T] {
         // First, find at what point do we split between the first and 2nd slice. Easy with
         // ptr.align_offset.
         let ptr = self.as_ptr();
-        let offset = crate::ptr::align_offset(ptr, mem::align_of::<U>());
+        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
         if offset > self.len() {
             (self, &[], &[])
         } else {
             let (left, rest) = self.split_at(offset);
-            // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay
             let (us_len, ts_len) = rest.align_to_offsets::<U>();
-            (
-                left,
-                from_raw_parts(rest.as_ptr() as *const U, us_len),
-                from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
-            )
+            // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
+            // since the caller guarantees that we can transmute `T` to `U` safely.
+            unsafe {
+                (
+                    left,
+                    from_raw_parts(rest.as_ptr() as *const U, us_len),
+                    from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
+                )
+            }
         }
     }
 
@@ -2637,21 +2642,23 @@ impl<T> [T] {
         // First, find at what point do we split between the first and 2nd slice. Easy with
         // ptr.align_offset.
         let ptr = self.as_ptr();
-        let offset = crate::ptr::align_offset(ptr, mem::align_of::<U>());
+        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
         if offset > self.len() {
             (self, &mut [], &mut [])
         } else {
             let (left, rest) = self.split_at_mut(offset);
-            // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay
             let (us_len, ts_len) = rest.align_to_offsets::<U>();
             let rest_len = rest.len();
             let mut_ptr = rest.as_mut_ptr();
             // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
-            (
-                left,
-                from_raw_parts_mut(mut_ptr as *mut U, us_len),
-                from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
-            )
+            // SAFETY: see comments for `align_to`.
+            unsafe {
+                (
+                    left,
+                    from_raw_parts_mut(mut_ptr as *mut U, us_len),
+                    from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
+                )
+            }
         }
     }
 
@@ -2976,12 +2983,18 @@ impl<T> SliceIndex<[T]> for usize {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &T {
-        &*slice.as_ptr().add(self)
+        // SAFETY: `slice` cannot be longer than `isize::MAX` and
+        // the caller guarantees that `self` is in bounds of `slice`
+        // so `self` cannot overflow an `isize`, so the call to `add` is safe.
+        // The obtained pointer comes from a reference which is guaranteed
+        // to be valid.
+        unsafe { &*slice.as_ptr().add(self) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut T {
-        &mut *slice.as_mut_ptr().add(self)
+        // SAFETY: see comments for `get_unchecked` above.
+        unsafe { &mut *slice.as_mut_ptr().add(self) }
     }
 
     #[inline]
@@ -3021,12 +3034,18 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
+        // SAFETY: `slice` cannot be longer than `isize::MAX` and
+        // the caller guarantees that `self` is in bounds of `slice`
+        // so `self` cannot overflow an `isize`, so the call to `add` is safe.
+        // Also, since the caller guarantees that `self` is in bounds of `slice`,
+        // `from_raw_parts` will give a subslice of `slice` which is always safe.
+        unsafe { from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
+        // SAFETY: see comments for `get_unchecked` above.
+        unsafe { from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start) }
     }
 
     #[inline]
@@ -3066,12 +3085,14 @@ impl<T> SliceIndex<[T]> for ops::RangeTo<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (0..self.end).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (0..self.end).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (0..self.end).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (0..self.end).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3101,12 +3122,14 @@ impl<T> SliceIndex<[T]> for ops::RangeFrom<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (self.start..slice.len()).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (self.start..slice.len()).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (self.start..slice.len()).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3175,12 +3198,14 @@ impl<T> SliceIndex<[T]> for ops::RangeInclusive<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (*self.start()..self.end() + 1).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (*self.start()..self.end() + 1).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (*self.start()..self.end() + 1).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (*self.start()..self.end() + 1).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3216,12 +3241,14 @@ impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (0..=self.end).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (0..=self.end).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (0..=self.end).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (0..=self.end).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3370,7 +3397,9 @@ macro_rules! iterator {
                     self.ptr.as_ptr()
                 } else {
                     let old = self.ptr.as_ptr();
-                    self.ptr = NonNull::new_unchecked(self.ptr.as_ptr().offset(offset));
+                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+                    // so this new pointer is inside `self` and thus guaranteed to be non-null.
+                    self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
                     old
                 }
             }
@@ -3384,7 +3413,10 @@ macro_rules! iterator {
                     zst_shrink!(self, offset);
                     self.ptr.as_ptr()
                 } else {
-                    self.end = self.end.offset(-offset);
+                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+                    // which is guaranteed to not overflow an `isize`. Also, the resulting pointer
+                    // is in bounds of `slice`, which fulfills the other requirements for `offset`.
+                    self.end = unsafe { self.end.offset(-offset) };
                     self.end
                 }
             }
@@ -4702,7 +4734,11 @@ impl<T> FusedIterator for Windows<'_, T> {}
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
-        from_raw_parts(self.v.as_ptr().add(i), self.size)
+        // SAFETY: the caller guarantees that `i` is in bounds,
+        // which means that `i` cannot overflow an `isize`. The
+        // slice created by `from_raw_parts` is a subslice of `self.v`
+        // and thus is guaranteed to be valid for the lifetime `'a` of `self.v`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(i), self.size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -4846,7 +4882,14 @@ unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
             None => self.v.len(),
             Some(end) => cmp::min(end, self.v.len()),
         };
-        from_raw_parts(self.v.as_ptr().add(start), end - start)
+        // SAFETY: the caller guarantees that `i` is in bounds,
+        // which means that `start` must be in bounds of the
+        // underlying `self.v` slice, and we made sure that `end`
+        // is also in bounds of `self.v`. Thus, `start` cannot overflow
+        // an `isize`, and the slice constructed by `from_raw_parts`
+        // is a subslice of `self.v` which is guaranteed to be valid
+        // for the lifetime `'a` of `self.v`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -4988,7 +5031,8 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
             None => self.v.len(),
             Some(end) => cmp::min(end, self.v.len()),
         };
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start)
+        // SAFETY: see comments for `Chunks::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5125,7 +5169,8 @@ impl<T> FusedIterator for ChunksExact<'_, T> {}
 unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
         let start = i * self.chunk_size;
-        from_raw_parts(self.v.as_ptr().add(start), self.chunk_size)
+        // SAFETY: mostly identical to `Chunks::get_unchecked`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5259,7 +5304,8 @@ impl<T> FusedIterator for ChunksExactMut<'_, T> {}
 unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
         let start = i * self.chunk_size;
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size)
+        // SAFETY: see comments for `ChunksExact::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5406,7 +5452,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {
             None => 0,
             Some(start) => start,
         };
-        from_raw_parts(self.v.as_ptr().add(start), end - start)
+        // SAFETY: mostly identical to `Chunks::get_unchecked`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5551,7 +5598,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {
             None => 0,
             Some(start) => start,
         };
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start)
+        // SAFETY: see comments for `RChunks::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5692,7 +5740,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
         let end = self.v.len() - i * self.chunk_size;
         let start = end - self.chunk_size;
-        from_raw_parts(self.v.as_ptr().add(start), self.chunk_size)
+        // SAFETY: mostly identical to `Chunks::get_unchecked`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5831,7 +5880,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
         let end = self.v.len() - i * self.chunk_size;
         let start = end - self.chunk_size;
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size)
+        // SAFETY: see comments for `RChunksExact::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5927,7 +5977,8 @@ pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
         mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
         "attempt to create slice covering at least half the address space"
     );
-    &*ptr::slice_from_raw_parts(data, len)
+    // SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
+    unsafe { &*ptr::slice_from_raw_parts(data, len) }
 }
 
 /// Performs the same functionality as [`from_raw_parts`], except that a
@@ -5967,7 +6018,8 @@ pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T]
         mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
         "attempt to create slice covering at least half the address space"
     );
-    &mut *ptr::slice_from_raw_parts_mut(data, len)
+    // SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
+    unsafe { &mut *ptr::slice_from_raw_parts_mut(data, len) }
 }
 
 /// Converts a reference to T into a slice of length 1 (without copying).
@@ -6243,7 +6295,11 @@ impl_marker_for!(BytewiseEquality,
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a T {
-        &*self.ptr.as_ptr().add(i)
+        // SAFETY: the caller must guarantee that `i` is in bounds
+        // of the underlying slice, so `i` cannot overflow an `isize`,
+        // and the returned reference is guaranteed to refer to an element
+        // of the slice and thus guaranteed to be valid.
+        unsafe { &*self.ptr.as_ptr().add(i) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -6253,7 +6309,8 @@ unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T {
-        &mut *self.ptr.as_ptr().add(i)
+        // SAFETY: see comments for `Iter::get_unchecked`.
+        unsafe { &mut *self.ptr.as_ptr().add(i) }
     }
     fn may_have_side_effect() -> bool {
         false
diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs
index f73e14f27e0..a89596b15ef 100644
--- a/src/libcore/slice/rotate.rs
+++ b/src/libcore/slice/rotate.rs
@@ -1,3 +1,5 @@
+// ignore-tidy-undocumented-unsafe
+
 use crate::cmp;
 use crate::mem::{self, MaybeUninit};
 use crate::ptr;
@@ -77,9 +79,9 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // the way until about `left + right == 32`, but the worst case performance breaks even
             // around 16. 24 was chosen as middle ground. If the size of `T` is larger than 4
             // `usize`s, this algorithm also outperforms other algorithms.
-            let x = mid.sub(left);
+            let x = unsafe { mid.sub(left) };
             // beginning of first round
-            let mut tmp: T = x.read();
+            let mut tmp: T = unsafe { x.read() };
             let mut i = right;
             // `gcd` can be found before hand by calculating `gcd(left + right, right)`,
             // but it is faster to do one loop which calculates the gcd as a side effect, then
@@ -90,7 +92,7 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // the very end. This is possibly due to the fact that swapping or replacing temporaries
             // uses only one memory address in the loop instead of needing to manage two.
             loop {
-                tmp = x.add(i).replace(tmp);
+                tmp = unsafe { x.add(i).replace(tmp) };
                 // instead of incrementing `i` and then checking if it is outside the bounds, we
                 // check if `i` will go outside the bounds on the next increment. This prevents
                 // any wrapping of pointers or `usize`.
@@ -98,7 +100,7 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
                     i -= left;
                     if i == 0 {
                         // end of first round
-                        x.write(tmp);
+                        unsafe { x.write(tmp) };
                         break;
                     }
                     // this conditional must be here if `left + right >= 15`
@@ -111,14 +113,14 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             }
             // finish the chunk with more rounds
             for start in 1..gcd {
-                tmp = x.add(start).read();
+                tmp = unsafe { x.add(start).read() };
                 i = start + right;
                 loop {
-                    tmp = x.add(i).replace(tmp);
+                    tmp = unsafe { x.add(i).replace(tmp) };
                     if i >= left {
                         i -= left;
                         if i == start {
-                            x.add(start).write(tmp);
+                            unsafe { x.add(start).write(tmp) };
                             break;
                         }
                     } else {
@@ -133,15 +135,19 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // The `[T; 0]` here is to ensure this is appropriately aligned for T
             let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
             let buf = rawarray.as_mut_ptr() as *mut T;
-            let dim = mid.sub(left).add(right);
+            let dim = unsafe { mid.sub(left).add(right) };
             if left <= right {
-                ptr::copy_nonoverlapping(mid.sub(left), buf, left);
-                ptr::copy(mid, mid.sub(left), right);
-                ptr::copy_nonoverlapping(buf, dim, left);
+                unsafe {
+                    ptr::copy_nonoverlapping(mid.sub(left), buf, left);
+                    ptr::copy(mid, mid.sub(left), right);
+                    ptr::copy_nonoverlapping(buf, dim, left);
+                }
             } else {
-                ptr::copy_nonoverlapping(mid, buf, right);
-                ptr::copy(mid.sub(left), dim, left);
-                ptr::copy_nonoverlapping(buf, mid.sub(left), right);
+                unsafe {
+                    ptr::copy_nonoverlapping(mid, buf, right);
+                    ptr::copy(mid.sub(left), dim, left);
+                    ptr::copy_nonoverlapping(buf, mid.sub(left), right);
+                }
             }
             return;
         } else if left >= right {
@@ -150,8 +156,10 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // of this algorithm would be, and swapping using that last chunk instead of swapping
             // adjacent chunks like this algorithm is doing, but this way is still faster.
             loop {
-                ptr::swap_nonoverlapping(mid.sub(right), mid, right);
-                mid = mid.sub(right);
+                unsafe {
+                    ptr::swap_nonoverlapping(mid.sub(right), mid, right);
+                    mid = mid.sub(right);
+                }
                 left -= right;
                 if left < right {
                     break;
@@ -160,8 +168,10 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
         } else {
             // Algorithm 3, `left < right`
             loop {
-                ptr::swap_nonoverlapping(mid.sub(left), mid, left);
-                mid = mid.add(left);
+                unsafe {
+                    ptr::swap_nonoverlapping(mid.sub(left), mid, left);
+                    mid = mid.add(left);
+                }
                 right -= left;
                 if right < left {
                     break;
diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs
index 6c4b28499a6..0014501d2c4 100644
--- a/src/libcore/str/mod.rs
+++ b/src/libcore/str/mod.rs
@@ -419,7 +419,11 @@ pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
-    &*(v as *const [u8] as *const str)
+    // SAFETY: the caller must guarantee that the bytes `v`
+    // are valid UTF-8, thus the cast to `*const str` is safe.
+    // Also, the pointer dereference is safe because that pointer
+    // comes from a reference which is guaranteed to be valid for reads.
+    unsafe { &*(v as *const [u8] as *const str) }
 }
 
 /// Converts a slice of bytes to a string slice without checking
@@ -444,7 +448,11 @@ pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
 #[inline]
 #[stable(feature = "str_mut_extras", since = "1.20.0")]
 pub unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
-    &mut *(v as *mut [u8] as *mut str)
+    // SAFETY: the caller must guarantee that the bytes `v`
+    // are valid UTF-8, thus the cast to `*mut str` is safe.
+    // Also, the pointer dereference is safe because that pointer
+    // comes from a reference which is guaranteed to be valid for writes.
+    unsafe { &mut *(v as *mut [u8] as *mut str) }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -867,7 +875,9 @@ unsafe impl TrustedLen for Bytes<'_> {}
 #[doc(hidden)]
 unsafe impl TrustedRandomAccess for Bytes<'_> {
     unsafe fn get_unchecked(&mut self, i: usize) -> u8 {
-        self.0.get_unchecked(i)
+        // SAFETY: the caller must uphold the safety contract
+        // for `TrustedRandomAccess::get_unchecked`.
+        unsafe { self.0.get_unchecked(i) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -1904,15 +1914,27 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            let ptr = slice.as_ptr().add(self.start);
+            // SAFETY: the caller guarantees that `self` is in bounds of `slice`
+            // which satisfies all the conditions for `add`.
+            let ptr = unsafe { slice.as_ptr().add(self.start) };
             let len = self.end - self.start;
-            super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
+            // SAFETY: as the caller guarantees that `self` is in bounds of `slice`,
+            // we can safely construct a subslice with `from_raw_parts` and use it
+            // since we return a shared thus immutable reference.
+            // The call to `from_utf8_unchecked` is safe since the data comes from
+            // a `str` which is guaranteed to be valid utf8, since the caller
+            // must guarantee that `self.start` and `self.end` are char boundaries.
+            unsafe { super::from_utf8_unchecked(slice::from_raw_parts(ptr, len)) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            let ptr = slice.as_mut_ptr().add(self.start);
+            // SAFETY: see comments for `get_unchecked`.
+            let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
             let len = self.end - self.start;
-            super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len))
+            // SAFETY: mostly identical to the comments for `get_unchecked`, except that we
+            // can return a mutable reference since the caller passed a mutable reference
+            // and is thus guaranteed to have exclusive write access to `slice`.
+            unsafe { super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len)) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -1974,12 +1996,21 @@ mod traits {
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
             let ptr = slice.as_ptr();
-            super::from_utf8_unchecked(slice::from_raw_parts(ptr, self.end))
+            // SAFETY: as the caller guarantees that `self` is in bounds of `slice`,
+            // we can safely construct a subslice with `from_raw_parts` and use it
+            // since we return a shared thus immutable reference.
+            // The call to `from_utf8_unchecked` is safe since the data comes from
+            // a `str` which is guaranteed to be valid utf8, since the caller
+            // must guarantee that `self.end` is a char boundary.
+            unsafe { super::from_utf8_unchecked(slice::from_raw_parts(ptr, self.end)) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
             let ptr = slice.as_mut_ptr();
-            super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, self.end))
+            // SAFETY: mostly identical to `get_unchecked`, except that we can safely
+            // return a mutable reference since the caller passed a mutable reference
+            // and is thus guaranteed to have exclusive write access to `slice`.
+            unsafe { super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, self.end)) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2036,15 +2067,27 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            let ptr = slice.as_ptr().add(self.start);
+            // SAFETY: the caller guarantees that `self` is in bounds of `slice`
+            // which satisfies all the conditions for `add`.
+            let ptr = unsafe { slice.as_ptr().add(self.start) };
             let len = slice.len() - self.start;
-            super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
+            // SAFETY: as the caller guarantees that `self` is in bounds of `slice`,
+            // we can safely construct a subslice with `from_raw_parts` and use it
+            // since we return a shared thus immutable reference.
+            // The call to `from_utf8_unchecked` is safe since the data comes from
+            // a `str` which is guaranteed to be valid utf8, since the caller
+            // must guarantee that `self.start` is a char boundary.
+            unsafe { super::from_utf8_unchecked(slice::from_raw_parts(ptr, len)) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            let ptr = slice.as_mut_ptr().add(self.start);
+            // SAFETY: identical to `get_unchecked`.
+            let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
             let len = slice.len() - self.start;
-            super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len))
+            // SAFETY: mostly identical to `get_unchecked`, except that we can safely
+            // return a mutable reference since the caller passed a mutable reference
+            // and is thus guaranteed to have exclusive write access to `slice`.
+            unsafe { super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len)) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2099,11 +2142,13 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            (*self.start()..self.end() + 1).get_unchecked(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+            unsafe { (*self.start()..self.end() + 1).get_unchecked(slice) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            (*self.start()..self.end() + 1).get_unchecked_mut(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+            unsafe { (*self.start()..self.end() + 1).get_unchecked_mut(slice) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2148,11 +2193,13 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            (..self.end + 1).get_unchecked(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+            unsafe { (..self.end + 1).get_unchecked(slice) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            (..self.end + 1).get_unchecked_mut(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+            unsafe { (..self.end + 1).get_unchecked_mut(slice) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2373,7 +2420,11 @@ impl str {
     #[stable(feature = "str_mut_extras", since = "1.20.0")]
     #[inline(always)]
     pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
-        &mut *(self as *mut str as *mut [u8])
+        // SAFETY: the cast from `&mut str` to `&mut [u8]` is safe since `str`
+        // has the same layout as `[u8]` (only libstd can make this guarantee).
+        // The pointer dereference is safe since it comes from a mutable reference which
+        // is guaranteed to be valid for writes.
+        unsafe { &mut *(self as *mut str as *mut [u8]) }
     }
 
     /// Converts a string slice to a raw pointer.
@@ -2509,7 +2560,8 @@ impl str {
     #[stable(feature = "str_checked_slicing", since = "1.20.0")]
     #[inline]
     pub unsafe fn get_unchecked<I: SliceIndex<str>>(&self, i: I) -> &I::Output {
-        i.get_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+        unsafe { i.get_unchecked(self) }
     }
 
     /// Returns a mutable, unchecked subslice of `str`.
@@ -2541,7 +2593,8 @@ impl str {
     #[stable(feature = "str_checked_slicing", since = "1.20.0")]
     #[inline]
     pub unsafe fn get_unchecked_mut<I: SliceIndex<str>>(&mut self, i: I) -> &mut I::Output {
-        i.get_unchecked_mut(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+        unsafe { i.get_unchecked_mut(self) }
     }
 
     /// Creates a string slice from another string slice, bypassing safety
@@ -2591,7 +2644,8 @@ impl str {
     #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked(begin..end)` instead")]
     #[inline]
     pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
-        (begin..end).get_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+        unsafe { (begin..end).get_unchecked(self) }
     }
 
     /// Creates a string slice from another string slice, bypassing safety
@@ -2622,7 +2676,8 @@ impl str {
     #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked_mut(begin..end)` instead")]
     #[inline]
     pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
-        (begin..end).get_unchecked_mut(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (begin..end).get_unchecked_mut(self) }
     }
 
     /// Divide one string slice into two at an index.
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 1cd68f2881b..fcae6c86774 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -2335,35 +2335,44 @@ fn strongest_failure_ordering(order: Ordering) -> Ordering {
 
 #[inline]
 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
-    match order {
-        Release => intrinsics::atomic_store_rel(dst, val),
-        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_store(dst, val),
-        Acquire => panic!("there is no such thing as an acquire store"),
-        AcqRel => panic!("there is no such thing as an acquire/release store"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
+    unsafe {
+        match order {
+            Release => intrinsics::atomic_store_rel(dst, val),
+            Relaxed => intrinsics::atomic_store_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_store(dst, val),
+            Acquire => panic!("there is no such thing as an acquire store"),
+            AcqRel => panic!("there is no such thing as an acquire/release store"),
+        }
     }
 }
 
 #[inline]
 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_load_acq(dst),
-        Relaxed => intrinsics::atomic_load_relaxed(dst),
-        SeqCst => intrinsics::atomic_load(dst),
-        Release => panic!("there is no such thing as a release load"),
-        AcqRel => panic!("there is no such thing as an acquire/release load"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_load_acq(dst),
+            Relaxed => intrinsics::atomic_load_relaxed(dst),
+            SeqCst => intrinsics::atomic_load(dst),
+            Release => panic!("there is no such thing as a release load"),
+            AcqRel => panic!("there is no such thing as an acquire/release load"),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xchg_acq(dst, val),
-        Release => intrinsics::atomic_xchg_rel(dst, val),
-        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xchg(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xchg_acq(dst, val),
+            Release => intrinsics::atomic_xchg_rel(dst, val),
+            AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xchg(dst, val),
+        }
     }
 }
 
@@ -2371,12 +2380,15 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xadd_acq(dst, val),
-        Release => intrinsics::atomic_xadd_rel(dst, val),
-        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xadd(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xadd_acq(dst, val),
+            Release => intrinsics::atomic_xadd_rel(dst, val),
+            AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xadd(dst, val),
+        }
     }
 }
 
@@ -2384,12 +2396,15 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xsub_acq(dst, val),
-        Release => intrinsics::atomic_xsub_rel(dst, val),
-        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xsub(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xsub_acq(dst, val),
+            Release => intrinsics::atomic_xsub_rel(dst, val),
+            AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xsub(dst, val),
+        }
     }
 }
 
@@ -2402,19 +2417,22 @@ unsafe fn atomic_compare_exchange<T: Copy>(
     success: Ordering,
     failure: Ordering,
 ) -> Result<T, T> {
-    let (val, ok) = match (success, failure) {
-        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
-        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
-        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
-        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
-        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
-        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
-        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
-        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
-        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
-        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
-        (_, Release) => panic!("there is no such thing as a release failure ordering"),
-        _ => panic!("a failure ordering can't be stronger than a success ordering"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
+    let (val, ok) = unsafe {
+        match (success, failure) {
+            (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
+            (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
+            (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
+            (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
+            (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
+            (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
+            (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
+            (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
+            (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
+            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+            (_, Release) => panic!("there is no such thing as a release failure ordering"),
+            _ => panic!("a failure ordering can't be stronger than a success ordering"),
+        }
     };
     if ok { Ok(val) } else { Err(val) }
 }
@@ -2428,19 +2446,22 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
     success: Ordering,
     failure: Ordering,
 ) -> Result<T, T> {
-    let (val, ok) = match (success, failure) {
-        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
-        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
-        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
-        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
-        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
-        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
-        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
-        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
-        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
-        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
-        (_, Release) => panic!("there is no such thing as a release failure ordering"),
-        _ => panic!("a failure ordering can't be stronger than a success ordering"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
+    let (val, ok) = unsafe {
+        match (success, failure) {
+            (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
+            (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
+            (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
+            (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
+            (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
+            (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
+            (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
+            (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
+            (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
+            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+            (_, Release) => panic!("there is no such thing as a release failure ordering"),
+            _ => panic!("a failure ordering can't be stronger than a success ordering"),
+        }
     };
     if ok { Ok(val) } else { Err(val) }
 }
@@ -2448,48 +2469,60 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_and_acq(dst, val),
-        Release => intrinsics::atomic_and_rel(dst, val),
-        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_and(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_and`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_and_acq(dst, val),
+            Release => intrinsics::atomic_and_rel(dst, val),
+            AcqRel => intrinsics::atomic_and_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_and_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_and(dst, val),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_nand_acq(dst, val),
-        Release => intrinsics::atomic_nand_rel(dst, val),
-        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_nand(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_nand`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_nand_acq(dst, val),
+            Release => intrinsics::atomic_nand_rel(dst, val),
+            AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_nand(dst, val),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_or_acq(dst, val),
-        Release => intrinsics::atomic_or_rel(dst, val),
-        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_or(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_or`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_or_acq(dst, val),
+            Release => intrinsics::atomic_or_rel(dst, val),
+            AcqRel => intrinsics::atomic_or_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_or_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_or(dst, val),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xor_acq(dst, val),
-        Release => intrinsics::atomic_xor_rel(dst, val),
-        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xor(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_xor`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xor_acq(dst, val),
+            Release => intrinsics::atomic_xor_rel(dst, val),
+            AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xor(dst, val),
+        }
     }
 }
 
@@ -2497,12 +2530,15 @@ unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_max_acq(dst, val),
-        Release => intrinsics::atomic_max_rel(dst, val),
-        AcqRel => intrinsics::atomic_max_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_max_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_max(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_max`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_max_acq(dst, val),
+            Release => intrinsics::atomic_max_rel(dst, val),
+            AcqRel => intrinsics::atomic_max_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_max_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_max(dst, val),
+        }
     }
 }
 
@@ -2510,12 +2546,15 @@ unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_min_acq(dst, val),
-        Release => intrinsics::atomic_min_rel(dst, val),
-        AcqRel => intrinsics::atomic_min_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_min_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_min(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_min`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_min_acq(dst, val),
+            Release => intrinsics::atomic_min_rel(dst, val),
+            AcqRel => intrinsics::atomic_min_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_min_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_min(dst, val),
+        }
     }
 }
 
@@ -2523,12 +2562,15 @@ unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_umax_acq(dst, val),
-        Release => intrinsics::atomic_umax_rel(dst, val),
-        AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_umax(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_umax`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_umax_acq(dst, val),
+            Release => intrinsics::atomic_umax_rel(dst, val),
+            AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_umax(dst, val),
+        }
     }
 }
 
@@ -2536,12 +2578,15 @@ unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_umin_acq(dst, val),
-        Release => intrinsics::atomic_umin_rel(dst, val),
-        AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_umin(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_umin`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_umin_acq(dst, val),
+            Release => intrinsics::atomic_umin_rel(dst, val),
+            AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_umin(dst, val),
+        }
     }
 }
 
diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs
index c60ce8ec837..68a5e20a66f 100644
--- a/src/libcore/tests/lib.rs
+++ b/src/libcore/tests/lib.rs
@@ -44,6 +44,8 @@
 #![feature(option_unwrap_none)]
 #![feature(peekable_next_if)]
 #![feature(partition_point)]
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 extern crate test;