author     Maybe Waffle <waffle.lapkin@gmail.com>  2023-04-19 18:00:48 +0000
committer  Maybe Waffle <waffle.lapkin@gmail.com>  2023-04-19 18:00:48 +0000
commit     f79df7d2a4e224aa35697e5096d0f25fdcf26b2f (patch)
tree       d72dfc093802d242feb9b81299ee3c8179b34cb7 /compiler/rustc_data_structures/src
parent     d7f9e81650dcee3e2d5ad1973a71da644a2eff93 (diff)
`deny(unsafe_op_in_unsafe_fn)` in `rustc_data_structures`
Diffstat (limited to 'compiler/rustc_data_structures/src')
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs        |   1
-rw-r--r--  compiler/rustc_data_structures/src/memmap.rs     |   3
-rw-r--r--  compiler/rustc_data_structures/src/sip128.rs     | 196
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr.rs |  10
4 files changed, 109 insertions, 101 deletions
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 405ae99395b..426d2c4034b 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -35,6 +35,7 @@
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 #[macro_use]
 extern crate tracing;
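
A minimal sketch (not from the patch) of what the newly denied lint enforces: with `unsafe_op_in_unsafe_fn`, the body of an `unsafe fn` no longer acts as one big implicit `unsafe` block, so each unsafe operation must be wrapped explicitly and justified on its own.

    #![deny(unsafe_op_in_unsafe_fn)]

    unsafe fn read_byte(p: *const u8) -> u8 {
        // SAFETY: the caller promises `p` is valid for a one-byte read.
        unsafe { *p }
        // A bare `*p` here would now fail to compile.
    }

    fn main() {
        let x = 7u8;
        // SAFETY: `&x` is a live, valid pointer to one byte.
        assert_eq!(unsafe { read_byte(&x) }, 7);
    }

This is exactly the mechanical change the hunks below apply: wrap each `unsafe fn` body, or the unsafe operations within it, in an explicit `unsafe { .. }` block.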
diff --git a/compiler/rustc_data_structures/src/memmap.rs b/compiler/rustc_data_structures/src/memmap.rs
index ef37a606f31..ca908671ae5 100644
--- a/compiler/rustc_data_structures/src/memmap.rs
+++ b/compiler/rustc_data_structures/src/memmap.rs
@@ -13,7 +13,8 @@ pub struct Mmap(Vec<u8>);
 impl Mmap {
     #[inline]
     pub unsafe fn map(file: File) -> io::Result<Self> {
-        memmap2::Mmap::map(&file).map(Mmap)
+        // Safety: this is in fact not safe.
+        unsafe { memmap2::Mmap::map(&file).map(Mmap) }
     }
 }
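
A hypothetical caller sketch (not part of the patch; the import path is assumed) showing the contract the `unsafe` block above encodes: memory-mapping a file is unsafe because other code may mutate or truncate the file while the map is live, which is the hazard the tongue-in-cheek Safety comment gestures at.

    use std::fs::File;
    use rustc_data_structures::memmap::Mmap; // assumed import path

    fn open_mapped(path: &str) -> std::io::Result<Mmap> {
        let file = File::open(path)?;
        // SAFETY: we assume nothing modifies or truncates the file
        // for as long as the mapping is alive.
        unsafe { Mmap::map(file) }
    }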
 
diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs
index d849fe0373f..4a0ed87f77c 100644
--- a/compiler/rustc_data_structures/src/sip128.rs
+++ b/compiler/rustc_data_structures/src/sip128.rs
@@ -96,28 +96,30 @@ macro_rules! compress {
 unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
     debug_assert!(count <= 8);
 
-    if count == 8 {
-        ptr::copy_nonoverlapping(src, dst, 8);
-        return;
-    }
+    unsafe {
+        if count == 8 {
+            ptr::copy_nonoverlapping(src, dst, 8);
+            return;
+        }
 
-    let mut i = 0;
-    if i + 3 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
-    }
+        let mut i = 0;
+        if i + 3 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
+            i += 4;
+        }
 
-    if i + 1 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
-    }
+        if i + 1 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
+            i += 2
+        }
 
-    if i < count {
-        *dst.add(i) = *src.add(i);
-        i += 1;
-    }
+        if i < count {
+            *dst.add(i) = *src.add(i);
+            i += 1;
+        }
 
-    debug_assert_eq!(i, count);
+        debug_assert_eq!(i, count);
+    }
 }
 
 // # Implementation
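
A hypothetical illustration (not in the patch), written as if it were a unit test in the same module, of the contract `copy_nonoverlapping_small` relies on: both pointers valid for `count <= 8` bytes and the two ranges non-overlapping, matching the requirements of `ptr::copy_nonoverlapping`.

    let src = [1u8, 2, 3, 4, 5];
    let mut dst = [0u8; 5];
    // SAFETY: distinct stack arrays, each valid for the 5 bytes copied.
    unsafe { copy_nonoverlapping_small(src.as_ptr(), dst.as_mut_ptr(), 5) };
    assert_eq!(dst, src);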
@@ -232,38 +234,40 @@ impl SipHasher128 {
     // overflow) if it wasn't already.
     #[inline(never)]
     unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
-        let nbuf = self.nbuf;
-        debug_assert!(LEN <= 8);
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + LEN >= BUFFER_SIZE);
-        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+        unsafe {
+            let nbuf = self.nbuf;
+            debug_assert!(LEN <= 8);
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + LEN >= BUFFER_SIZE);
+            debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+
+            // Copy first part of input into end of buffer, possibly into spill
+            // element. The memcpy call is optimized away because the size is known.
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
+
+            // Process buffer.
+            for i in 0..BUFFER_CAPACITY {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }
 
-        // Copy first part of input into end of buffer, possibly into spill
-        // element. The memcpy call is optimized away because the size is known.
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
-
-        // Process buffer.
-        for i in 0..BUFFER_CAPACITY {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
+            // Copy remaining input into start of buffer by copying LEN - 1
+            // elements from spill (at most LEN - 1 bytes could have overflowed
+            // into the spill). The memcpy call is optimized away because the size
+            // is known. And the whole copy is optimized away for LEN == 1.
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
+            ptr::copy_nonoverlapping(src, dst, LEN - 1);
+
+            // This function should only be called when the write fills the buffer.
+            // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
+            // LEN is statically known, so the branch is optimized away.
+            self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
+            self.processed += BUFFER_SIZE;
         }
-
-        // Copy remaining input into start of buffer by copying LEN - 1
-        // elements from spill (at most LEN - 1 bytes could have overflowed
-        // into the spill). The memcpy call is optimized away because the size
-        // is known. And the whole copy is optimized away for LEN == 1.
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
-        ptr::copy_nonoverlapping(src, dst, LEN - 1);
-
-        // This function should only be called when the write fills the buffer.
-        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
-        // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
     }
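
Worked example of the bookkeeping above (assuming `ELEM_SIZE` is 8 bytes, `BUFFER_SIZE` is 64, and a one-element spill, as defined elsewhere in this file): with `nbuf = 60` and `LEN = 8`, the copy lands the 8 input bytes at offsets 60..68, so 4 of them overflow into the spill element; all 8 buffer elements are hashed; the `LEN - 1 = 7`-byte copy from the spill moves the 4 overflow bytes (plus 3 don't-care bytes) to the front of the buffer; and the new `nbuf` is `60 + 8 - 64 = 4`.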
 
     // A write function for byte slices.
@@ -301,57 +305,59 @@ impl SipHasher128 {
     // containing the byte offset `self.nbuf`.
     #[inline(never)]
     unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
-        let length = msg.len();
-        let nbuf = self.nbuf;
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + length >= BUFFER_SIZE);
-
-        // Always copy first part of input into current element of buffer.
-        // This function should only be called when the write fills the buffer,
-        // so we know that there is enough input to fill the current element.
-        let valid_in_elem = nbuf % ELEM_SIZE;
-        let needed_in_elem = ELEM_SIZE - valid_in_elem;
-
-        let src = msg.as_ptr();
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        copy_nonoverlapping_small(src, dst, needed_in_elem);
-
-        // Process buffer.
+        unsafe {
+            let length = msg.len();
+            let nbuf = self.nbuf;
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + length >= BUFFER_SIZE);
+
+            // Always copy first part of input into current element of buffer.
+            // This function should only be called when the write fills the buffer,
+            // so we know that there is enough input to fill the current element.
+            let valid_in_elem = nbuf % ELEM_SIZE;
+            let needed_in_elem = ELEM_SIZE - valid_in_elem;
+
+            let src = msg.as_ptr();
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            copy_nonoverlapping_small(src, dst, needed_in_elem);
+
+            // Process buffer.
+
+            // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+            // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+            // We know that is true, because last step ensured we have a full
+            // element in the buffer.
+            let last = nbuf / ELEM_SIZE + 1;
+
+            for i in 0..last {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }
 
-        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
-        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
-        // We know that is true, because last step ensured we have a full
-        // element in the buffer.
-        let last = nbuf / ELEM_SIZE + 1;
+            // Process the remaining element-sized chunks of input.
+            let mut processed = needed_in_elem;
+            let input_left = length - processed;
+            let elems_left = input_left / ELEM_SIZE;
+            let extra_bytes_left = input_left % ELEM_SIZE;
+
+            for _ in 0..elems_left {
+                let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+                processed += ELEM_SIZE;
+            }
 
-        for i in 0..last {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-        }
+            // Copy remaining input into start of buffer.
+            let src = msg.as_ptr().add(processed);
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
-        // Process the remaining element-sized chunks of input.
-        let mut processed = needed_in_elem;
-        let input_left = length - processed;
-        let elems_left = input_left / ELEM_SIZE;
-        let extra_bytes_left = input_left % ELEM_SIZE;
-
-        for _ in 0..elems_left {
-            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-            processed += ELEM_SIZE;
+            self.nbuf = extra_bytes_left;
+            self.processed += nbuf + processed;
         }
-
-        // Copy remaining input into start of buffer.
-        let src = msg.as_ptr().add(processed);
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        copy_nonoverlapping_small(src, dst, extra_bytes_left);
-
-        self.nbuf = extra_bytes_left;
-        self.processed += nbuf + processed;
     }
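
Worked example for the slice path (same size assumptions as above): with `nbuf = 5` and a 70-byte message, `needed_in_elem = 3` bytes complete buffer element 0; `last = 5 / 8 + 1 = 1` element is hashed out of the buffer; of the remaining 67 input bytes, `elems_left = 8` elements are consumed as unaligned `u64` reads straight from the message and `extra_bytes_left = 3` bytes are copied to the front of the buffer. The function ends with `nbuf = 3` and `self.processed` grown by `5 + 67 = 72`, i.e. exactly nine full elements.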
 
     #[inline]
diff --git a/compiler/rustc_data_structures/src/tagged_ptr.rs b/compiler/rustc_data_structures/src/tagged_ptr.rs
index c26bffac678..8568aac67c0 100644
--- a/compiler/rustc_data_structures/src/tagged_ptr.rs
+++ b/compiler/rustc_data_structures/src/tagged_ptr.rs
@@ -153,7 +153,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Box<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Box::into_raw`
-        Box::from_raw(ptr.as_ptr())
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 }
 
@@ -169,7 +169,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Rc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Rc::into_raw`
-        Rc::from_raw(ptr.as_ptr())
+        unsafe { Rc::from_raw(ptr.as_ptr()) }
     }
 }
 
@@ -185,7 +185,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Arc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Arc::into_raw`
-        Arc::from_raw(ptr.as_ptr())
+        unsafe { Arc::from_raw(ptr.as_ptr()) }
     }
 }
 
@@ -201,7 +201,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a T {
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_ref()
+        unsafe { ptr.as_ref() }
     }
 }
 
@@ -217,7 +217,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a mut T {
     unsafe fn from_ptr(mut ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_mut()
+        unsafe { ptr.as_mut() }
     }
 }
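
A standalone sketch (not from the patch) of the round-trip contract all five `from_ptr` impls lean on: a pointer obtained from the matching `into_ptr` may be reclaimed exactly once. Shown here with `Box`; the `Rc`, `Arc`, and reference impls make the same argument.

    use std::ptr::NonNull;

    fn main() {
        let b: Box<u32> = Box::new(42);
        let ptr: NonNull<u32> = NonNull::new(Box::into_raw(b)).unwrap();
        // SAFETY: `ptr` came from Box::into_raw above and is turned back
        // into a Box exactly once, mirroring the Safety comments in the diff.
        let b = unsafe { Box::from_raw(ptr.as_ptr()) };
        assert_eq!(*b, 42);
    }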