
deny(unsafe_op_in_unsafe_fn) in rustc_data_structures

commit f79df7d2a4 (parent d7f9e81650)
Author: Maybe Waffle
Date:   2023-04-19 18:00:48 +00:00

4 changed files with 106 additions and 98 deletions
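For context: `unsafe_op_in_unsafe_fn` was an allow-by-default lint at the time of this commit. Denying it means the body of an `unsafe fn` is no longer treated as one big implicit `unsafe` block: every unsafe operation inside must be wrapped in an explicit `unsafe {}` block that can carry its own `// Safety:` justification. A minimal standalone illustration (my example, not code from this commit):

    #![deny(unsafe_op_in_unsafe_fn)]

    unsafe fn read_first(ptr: *const u8) -> u8 {
        // A bare `*ptr` would now be rejected by the lint.
        // Safety: the caller guarantees `ptr` is valid for reads.
        unsafe { *ptr }
    }

    fn main() {
        let x = 42u8;
        // Safety: `&x` is a valid, live pointer.
        assert_eq!(unsafe { read_first(&x) }, 42);
    }

That is the pattern applied mechanically to each file below.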

compiler/rustc_data_structures/src/lib.rs

@@ -35,6 +35,7 @@
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 #[macro_use]
 extern crate tracing;

compiler/rustc_data_structures/src/memmap.rs

@@ -13,7 +13,8 @@ pub struct Mmap(Vec<u8>);
 impl Mmap {
     #[inline]
     pub unsafe fn map(file: File) -> io::Result<Self> {
-        memmap2::Mmap::map(&file).map(Mmap)
+        // Safety: this is in fact not safe.
+        unsafe { memmap2::Mmap::map(&file).map(Mmap) }
     }
 }
 
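The tongue-in-cheek safety comment is accurate: `memmap2::Mmap::map` is unsafe because another process can truncate or mutate the file while it is mapped, invalidating the `&[u8]` view that `Mmap` hands out. A hedged usage sketch (plain memmap2 usage, not rustc code; the file name is arbitrary):

    use std::fs::File;

    fn main() -> std::io::Result<()> {
        let file = File::open("Cargo.toml")?;
        // Safety: we must assume nothing truncates or rewrites the file
        // while the map is live -- the API cannot enforce this, hence
        // "this is in fact not safe".
        let map = unsafe { memmap2::Mmap::map(&file)? };
        println!("mapped {} bytes", map.len());
        Ok(())
    }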

compiler/rustc_data_structures/src/sip128.rs

@@ -96,28 +96,30 @@ macro_rules! compress {
 unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
     debug_assert!(count <= 8);
 
+    unsafe {
         if count == 8 {
             ptr::copy_nonoverlapping(src, dst, 8);
             return;
         }
 
         let mut i = 0;
         if i + 3 < count {
             ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
             i += 4;
         }
 
         if i + 1 < count {
             ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
             i += 2
         }
 
         if i < count {
             *dst.add(i) = *src.add(i);
             i += 1;
         }
 
         debug_assert_eq!(i, count);
+    }
 }
 
 // # Implementation
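`copy_nonoverlapping_small` is a hand-rolled small copy: for `count <= 8` it issues at most one 4-byte, one 2-byte, and one 1-byte copy rather than a variable-length `memcpy` call. A safe-slice sketch of the same decomposition (for illustration only; the rustc version uses raw pointers to avoid bounds checks):

    fn copy_small(src: &[u8], dst: &mut [u8], count: usize) {
        debug_assert!(count <= 8);
        if count == 8 {
            dst[..8].copy_from_slice(&src[..8]);
            return;
        }
        let mut i = 0;
        // One 4-byte chunk if at least 4 bytes remain.
        if i + 3 < count {
            dst[i..i + 4].copy_from_slice(&src[i..i + 4]);
            i += 4;
        }
        // One 2-byte chunk if at least 2 bytes remain.
        if i + 1 < count {
            dst[i..i + 2].copy_from_slice(&src[i..i + 2]);
            i += 2;
        }
        // A final byte for odd counts.
        if i < count {
            dst[i] = src[i];
            i += 1;
        }
        debug_assert_eq!(i, count);
    }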
@@ -232,38 +234,40 @@ impl SipHasher128 {
     // overflow) if it wasn't already.
     #[inline(never)]
     unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
+        unsafe {
             let nbuf = self.nbuf;
             debug_assert!(LEN <= 8);
             debug_assert!(nbuf < BUFFER_SIZE);
             debug_assert!(nbuf + LEN >= BUFFER_SIZE);
             debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
 
             // Copy first part of input into end of buffer, possibly into spill
             // element. The memcpy call is optimized away because the size is known.
             let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
             ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
 
             // Process buffer.
             for i in 0..BUFFER_CAPACITY {
                 let elem = self.buf.get_unchecked(i).assume_init().to_le();
                 self.state.v3 ^= elem;
                 Sip13Rounds::c_rounds(&mut self.state);
                 self.state.v0 ^= elem;
             }
 
             // Copy remaining input into start of buffer by copying LEN - 1
             // elements from spill (at most LEN - 1 bytes could have overflowed
             // into the spill). The memcpy call is optimized away because the size
             // is known. And the whole copy is optimized away for LEN == 1.
             let dst = self.buf.as_mut_ptr() as *mut u8;
             let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
             ptr::copy_nonoverlapping(src, dst, LEN - 1);
 
             // This function should only be called when the write fills the buffer.
             // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
             // LEN is statically known, so the branch is optimized away.
             self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
             self.processed += BUFFER_SIZE;
+        }
     }
 
     // A write function for byte slices.
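A worked check of the final `nbuf` arithmetic, assuming BUFFER_SIZE = 64 (eight 8-byte elements; an assumption for illustration): with 60 bytes buffered, an 8-byte write puts 4 bytes in the buffer proper and 4 in the spill element, and those spilled bytes become the new front of the buffer:

    fn main() {
        const BUFFER_SIZE: usize = 64; // assumed: 8 elements x 8 bytes
        const LEN: usize = 8;          // size of the incoming write
        let nbuf = 60;                 // bytes already buffered
        // Mirrors `self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE }`.
        let new_nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
        assert_eq!(new_nbuf, 4); // 4 bytes overflowed into the spill element
    }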
@@ -301,57 +305,59 @@ impl SipHasher128 {
     // containing the byte offset `self.nbuf`.
     #[inline(never)]
     unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
+        unsafe {
             let length = msg.len();
             let nbuf = self.nbuf;
             debug_assert!(nbuf < BUFFER_SIZE);
             debug_assert!(nbuf + length >= BUFFER_SIZE);
 
             // Always copy first part of input into current element of buffer.
             // This function should only be called when the write fills the buffer,
             // so we know that there is enough input to fill the current element.
             let valid_in_elem = nbuf % ELEM_SIZE;
             let needed_in_elem = ELEM_SIZE - valid_in_elem;
 
             let src = msg.as_ptr();
             let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
             copy_nonoverlapping_small(src, dst, needed_in_elem);
 
             // Process buffer.
 
             // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
             // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
             // We know that is true, because last step ensured we have a full
             // element in the buffer.
             let last = nbuf / ELEM_SIZE + 1;
 
             for i in 0..last {
                 let elem = self.buf.get_unchecked(i).assume_init().to_le();
                 self.state.v3 ^= elem;
                 Sip13Rounds::c_rounds(&mut self.state);
                 self.state.v0 ^= elem;
             }
 
             // Process the remaining element-sized chunks of input.
             let mut processed = needed_in_elem;
             let input_left = length - processed;
             let elems_left = input_left / ELEM_SIZE;
             let extra_bytes_left = input_left % ELEM_SIZE;
 
             for _ in 0..elems_left {
                 let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
                 self.state.v3 ^= elem;
                 Sip13Rounds::c_rounds(&mut self.state);
                 self.state.v0 ^= elem;
                 processed += ELEM_SIZE;
             }
 
             // Copy remaining input into start of buffer.
             let src = msg.as_ptr().add(processed);
             let dst = self.buf.as_mut_ptr() as *mut u8;
             copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
             self.nbuf = extra_bytes_left;
             self.processed += nbuf + processed;
+        }
     }
 
     #[inline]
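One detail worth pausing on: `(ptr as *const u64).read_unaligned().to_le()` reads eight bytes with no alignment requirement and normalizes them to a little-endian interpretation, so the hash is identical across targets. A standalone sketch of that pattern (my example, not rustc code):

    fn read_u64_le(bytes: &[u8], offset: usize) -> u64 {
        assert!(offset + 8 <= bytes.len());
        // Safety: bounds were just checked, and `read_unaligned` places no
        // alignment requirement on the source pointer.
        let raw = unsafe { (bytes.as_ptr().add(offset) as *const u64).read_unaligned() };
        // Identity on little-endian targets, a byte swap on big-endian ones.
        raw.to_le()
    }

    fn main() {
        let data = [0x01u8, 0, 0, 0, 0, 0, 0, 0];
        assert_eq!(read_u64_le(&data, 0), 1);
    }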

compiler/rustc_data_structures/src/tagged_ptr.rs

@@ -153,7 +153,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Box<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Box::into_raw`
-        Box::from_raw(ptr.as_ptr())
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 }
@@ -169,7 +169,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Rc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Rc::into_raw`
-        Rc::from_raw(ptr.as_ptr())
+        unsafe { Rc::from_raw(ptr.as_ptr()) }
     }
 }
@@ -185,7 +185,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Arc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Arc::into_raw`
-        Arc::from_raw(ptr.as_ptr())
+        unsafe { Arc::from_raw(ptr.as_ptr()) }
     }
 }
@@ -201,7 +201,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a T {
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_ref()
+        unsafe { ptr.as_ref() }
     }
 }
@@ -217,7 +217,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a mut T {
     unsafe fn from_ptr(mut ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_mut()
+        unsafe { ptr.as_mut() }
     }
 }
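All five `from_ptr` changes in this file share one shape: the `// Safety:` comment ties the new `unsafe` block to an invariant established by the matching `into_ptr`. A standalone sketch of that round-trip contract (simplified signatures of my own; not the actual `Pointer` trait):

    #![deny(unsafe_op_in_unsafe_fn)]

    use std::ptr::NonNull;

    fn into_ptr<T>(boxed: Box<T>) -> NonNull<T> {
        // Safety: `Box::into_raw` never returns null.
        unsafe { NonNull::new_unchecked(Box::into_raw(boxed)) }
    }

    unsafe fn from_ptr<T>(ptr: NonNull<T>) -> Box<T> {
        // Safety: the caller must pass a pointer obtained from `into_ptr`
        // and must not reuse it afterwards (ownership transfers back).
        unsafe { Box::from_raw(ptr.as_ptr()) }
    }

    fn main() {
        let ptr = into_ptr(Box::new(42));
        // Safety: `ptr` came from `into_ptr` above and is used exactly once.
        let boxed = unsafe { from_ptr(ptr) };
        assert_eq!(*boxed, 42);
    }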