
Auto merge of #110585 - JohnTitor:rollup-gfffoiv, r=JohnTitor

Rollup of 7 pull requests

Successful merges:

 - #102341 (Implement `Neg` for signed non-zero integers.)
 - #110424 (Spelling misc)
 - #110448 (cmp doc examples improvements)
 - #110516 (bootstrap: Update linux-raw-sys to 0.3.2)
 - #110548 (Make `impl Debug for Span` not panic on not having session globals.)
 - #110554 (`deny(unsafe_op_in_unsafe_fn)` in `rustc_data_structures`)
 - #110575 (fix lint regression in `non_upper_case_globals`)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2023-04-20 08:33:33 +00:00
commit 13fc33e3f2
14 changed files with 251 additions and 175 deletions
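Of the merges above, #102341 is the one visible in the standard library: the signed `NonZero*` integer types now implement `Neg`, since negating a non-zero value can never produce zero. A minimal sketch of the behavior this enables (illustrative, not taken from the PR's own tests):

```rust
use std::num::NonZeroI32;

fn main() {
    let n = NonZeroI32::new(5).unwrap();
    // `-n` stays a NonZeroI32: -x == 0 is only possible when x == 0.
    let neg: NonZeroI32 = -n;
    assert_eq!(neg.get(), -5);
}
```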


@@ -35,6 +35,7 @@
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
+#![deny(unsafe_op_in_unsafe_fn)]
 #[macro_use]
 extern crate tracing;
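For context: `unsafe_op_in_unsafe_fn` removes the implicit `unsafe` block that an `unsafe fn` body otherwise gets, so every unsafe operation needs its own explicit `unsafe { .. }`. That is what each hunk below adds. A minimal sketch of what the lint enforces (standalone example, not from the diff):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

unsafe fn read(p: *const u8) -> u8 {
    // Writing `*p` bare here is now an error: "dereference of raw pointer
    // is unsafe and requires unsafe block".
    unsafe { *p } // OK: the unsafe operation is explicitly scoped
}

fn main() {
    let x = 7u8;
    // Safety: `x` is a valid, initialized u8.
    assert_eq!(unsafe { read(&x) }, 7);
}
```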


@@ -13,7 +13,8 @@ pub struct Mmap(Vec<u8>);
 impl Mmap {
     #[inline]
     pub unsafe fn map(file: File) -> io::Result<Self> {
-        memmap2::Mmap::map(&file).map(Mmap)
+        // Safety: this is in fact not safe.
+        unsafe { memmap2::Mmap::map(&file).map(Mmap) }
     }
 }


@@ -96,28 +96,30 @@ macro_rules! compress {
 unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
     debug_assert!(count <= 8);

-    if count == 8 {
-        ptr::copy_nonoverlapping(src, dst, 8);
-        return;
-    }
+    unsafe {
+        if count == 8 {
+            ptr::copy_nonoverlapping(src, dst, 8);
+            return;
+        }

-    let mut i = 0;
-    if i + 3 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
-    }
+        let mut i = 0;
+        if i + 3 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
+            i += 4;
+        }

-    if i + 1 < count {
-        ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
-    }
+        if i + 1 < count {
+            ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
+            i += 2
+        }

-    if i < count {
-        *dst.add(i) = *src.add(i);
-        i += 1;
-    }
+        if i < count {
+            *dst.add(i) = *src.add(i);
+            i += 1;
+        }

-    debug_assert_eq!(i, count);
+        debug_assert_eq!(i, count);
+    }
 }

 // # Implementation
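`copy_nonoverlapping_small` copies up to eight bytes in at most three power-of-two steps (4, 2, then 1 byte) rather than a byte-at-a-time loop. A safe, self-contained sketch of the same chunking logic, useful for convincing yourself every byte is copied exactly once (names are illustrative, not from the compiler source):

```rust
fn copy_small(src: &[u8], dst: &mut [u8], count: usize) {
    assert!(count <= 8 && src.len() >= count && dst.len() >= count);
    if count == 8 {
        dst[..8].copy_from_slice(&src[..8]);
        return;
    }
    let mut i = 0;
    if i + 3 < count {
        // at least 4 bytes remain
        dst[i..i + 4].copy_from_slice(&src[i..i + 4]);
        i += 4;
    }
    if i + 1 < count {
        // at least 2 bytes remain
        dst[i..i + 2].copy_from_slice(&src[i..i + 2]);
        i += 2;
    }
    if i < count {
        dst[i] = src[i];
        i += 1;
    }
    debug_assert_eq!(i, count); // every byte was copied exactly once
}

fn main() {
    let src = [1u8, 2, 3, 4, 5, 6, 7];
    let mut dst = [0u8; 8];
    copy_small(&src, &mut dst, 7);
    assert_eq!(&dst[..7], &src[..]);
}
```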
@@ -232,38 +234,40 @@ impl SipHasher128 {
     // overflow) if it wasn't already.
     #[inline(never)]
     unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
-        let nbuf = self.nbuf;
-        debug_assert!(LEN <= 8);
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + LEN >= BUFFER_SIZE);
-        debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+        unsafe {
+            let nbuf = self.nbuf;
+            debug_assert!(LEN <= 8);
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + LEN >= BUFFER_SIZE);
+            debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);

-        // Copy first part of input into end of buffer, possibly into spill
-        // element. The memcpy call is optimized away because the size is known.
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
+            // Copy first part of input into end of buffer, possibly into spill
+            // element. The memcpy call is optimized away because the size is known.
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);

-        // Process buffer.
-        for i in 0..BUFFER_CAPACITY {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-        }
+            // Process buffer.
+            for i in 0..BUFFER_CAPACITY {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }

-        // Copy remaining input into start of buffer by copying LEN - 1
-        // elements from spill (at most LEN - 1 bytes could have overflowed
-        // into the spill). The memcpy call is optimized away because the size
-        // is known. And the whole copy is optimized away for LEN == 1.
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
-        ptr::copy_nonoverlapping(src, dst, LEN - 1);
+            // Copy remaining input into start of buffer by copying LEN - 1
+            // elements from spill (at most LEN - 1 bytes could have overflowed
+            // into the spill). The memcpy call is optimized away because the size
+            // is known. And the whole copy is optimized away for LEN == 1.
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
+            ptr::copy_nonoverlapping(src, dst, LEN - 1);

-        // This function should only be called when the write fills the buffer.
-        // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
-        // LEN is statically known, so the branch is optimized away.
-        self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-        self.processed += BUFFER_SIZE;
+            // This function should only be called when the write fills the buffer.
+            // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
+            // LEN is statically known, so the branch is optimized away.
+            self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
+            self.processed += BUFFER_SIZE;
+        }
     }

     // A write function for byte slices.
@@ -301,57 +305,59 @@ impl SipHasher128 {
     // containing the byte offset `self.nbuf`.
     #[inline(never)]
     unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
-        let length = msg.len();
-        let nbuf = self.nbuf;
-        debug_assert!(nbuf < BUFFER_SIZE);
-        debug_assert!(nbuf + length >= BUFFER_SIZE);
+        unsafe {
+            let length = msg.len();
+            let nbuf = self.nbuf;
+            debug_assert!(nbuf < BUFFER_SIZE);
+            debug_assert!(nbuf + length >= BUFFER_SIZE);

-        // Always copy first part of input into current element of buffer.
-        // This function should only be called when the write fills the buffer,
-        // so we know that there is enough input to fill the current element.
-        let valid_in_elem = nbuf % ELEM_SIZE;
-        let needed_in_elem = ELEM_SIZE - valid_in_elem;
+            // Always copy first part of input into current element of buffer.
+            // This function should only be called when the write fills the buffer,
+            // so we know that there is enough input to fill the current element.
+            let valid_in_elem = nbuf % ELEM_SIZE;
+            let needed_in_elem = ELEM_SIZE - valid_in_elem;

-        let src = msg.as_ptr();
-        let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
-        copy_nonoverlapping_small(src, dst, needed_in_elem);
+            let src = msg.as_ptr();
+            let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+            copy_nonoverlapping_small(src, dst, needed_in_elem);

-        // Process buffer.
+            // Process buffer.

-        // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
-        // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
-        // We know that is true, because last step ensured we have a full
-        // element in the buffer.
-        let last = nbuf / ELEM_SIZE + 1;
+            // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+            // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+            // We know that is true, because last step ensured we have a full
+            // element in the buffer.
+            let last = nbuf / ELEM_SIZE + 1;

-        for i in 0..last {
-            let elem = self.buf.get_unchecked(i).assume_init().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-        }
+            for i in 0..last {
+                let elem = self.buf.get_unchecked(i).assume_init().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+            }

-        // Process the remaining element-sized chunks of input.
-        let mut processed = needed_in_elem;
-        let input_left = length - processed;
-        let elems_left = input_left / ELEM_SIZE;
-        let extra_bytes_left = input_left % ELEM_SIZE;
-        for _ in 0..elems_left {
-            let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
-            self.state.v3 ^= elem;
-            Sip13Rounds::c_rounds(&mut self.state);
-            self.state.v0 ^= elem;
-            processed += ELEM_SIZE;
-        }
+            // Process the remaining element-sized chunks of input.
+            let mut processed = needed_in_elem;
+            let input_left = length - processed;
+            let elems_left = input_left / ELEM_SIZE;
+            let extra_bytes_left = input_left % ELEM_SIZE;
+            for _ in 0..elems_left {
+                let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
+                self.state.v3 ^= elem;
+                Sip13Rounds::c_rounds(&mut self.state);
+                self.state.v0 ^= elem;
+                processed += ELEM_SIZE;
+            }

-        // Copy remaining input into start of buffer.
-        let src = msg.as_ptr().add(processed);
-        let dst = self.buf.as_mut_ptr() as *mut u8;
-        copy_nonoverlapping_small(src, dst, extra_bytes_left);
+            // Copy remaining input into start of buffer.
+            let src = msg.as_ptr().add(processed);
+            let dst = self.buf.as_mut_ptr() as *mut u8;
+            copy_nonoverlapping_small(src, dst, extra_bytes_left);

-        self.nbuf = extra_bytes_left;
-        self.processed += nbuf + processed;
+            self.nbuf = extra_bytes_left;
+            self.processed += nbuf + processed;
+        }
     }

     #[inline]
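Both slow paths above depend on the same buffer-with-spill layout: one spare element past the logical end lets a short write overrun by up to `ELEM_SIZE - 1` bytes, after which the overflow is moved to the front to start the next fill. A simplified safe sketch of that idea (all names and sizes here are made up for illustration; the real code runs SipHash rounds where the comment indicates):

```rust
const ELEMS: usize = 4;       // logical buffer: 4 elements of 8 bytes
const BUF: usize = ELEMS * 8; // logical size in bytes
const SPILL: usize = BUF + 8; // one spare element for overflow

struct Buffered {
    buf: [u8; SPILL],
    nbuf: usize, // bytes currently buffered, always < BUF
}

impl Buffered {
    fn write_short(&mut self, bytes: &[u8]) {
        assert!(bytes.len() <= 8);
        // Copy into the end of the buffer, possibly into the spill element.
        self.buf[self.nbuf..self.nbuf + bytes.len()].copy_from_slice(bytes);
        if self.nbuf + bytes.len() < BUF {
            self.nbuf += bytes.len(); // fast path: buffer not yet full
            return;
        }
        // Slow path: the logical buffer is full.
        // ... process self.buf[..BUF] in 8-byte elements here ...
        // Move the overflowed bytes from the spill element to the front.
        let overflow = self.nbuf + bytes.len() - BUF;
        let spill: [u8; 8] = self.buf[BUF..SPILL].try_into().unwrap();
        self.buf[..overflow].copy_from_slice(&spill[..overflow]);
        self.nbuf = overflow;
    }
}

fn main() {
    let mut b = Buffered { buf: [0; SPILL], nbuf: 0 };
    for _ in 0..10 {
        b.write_short(b"1234567"); // 7-byte writes hit both paths
    }
    assert!(b.nbuf < BUF);
}
```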


@@ -153,7 +153,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Box<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Box::into_raw`
-        Box::from_raw(ptr.as_ptr())
+        unsafe { Box::from_raw(ptr.as_ptr()) }
     }
 }
@@ -169,7 +169,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Rc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Rc::into_raw`
-        Rc::from_raw(ptr.as_ptr())
+        unsafe { Rc::from_raw(ptr.as_ptr()) }
     }
 }
@@ -185,7 +185,7 @@ unsafe impl<T: ?Sized + Aligned> Pointer for Arc<T> {
     #[inline]
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety: `ptr` comes from `into_ptr` which calls `Arc::into_raw`
-        Arc::from_raw(ptr.as_ptr())
+        unsafe { Arc::from_raw(ptr.as_ptr()) }
     }
 }
@@ -201,7 +201,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a T {
     unsafe fn from_ptr(ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_ref()
+        unsafe { ptr.as_ref() }
     }
 }
@@ -217,7 +217,7 @@ unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a mut T {
     unsafe fn from_ptr(mut ptr: NonNull<T>) -> Self {
         // Safety:
         // `ptr` comes from `into_ptr` which gets the pointer from a reference
-        ptr.as_mut()
+        unsafe { ptr.as_mut() }
     }
 }
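All five impls share the same contract: `from_ptr` is only ever called with a pointer returned by the matching `into_ptr`, which is what the `// Safety` comments and the new explicit `unsafe` blocks record. A self-contained sketch of that roundtrip invariant (a simplified stand-in for the real trait, which additionally carries alignment requirements via `Aligned`):

```rust
use std::ptr::NonNull;

/// Simplified stand-in for the tagged-pointer `Pointer` trait.
unsafe trait Pointer: Sized {
    fn into_ptr(self) -> NonNull<u8>;
    /// Safety: `ptr` must come from `into_ptr` of the same impl.
    unsafe fn from_ptr(ptr: NonNull<u8>) -> Self;
}

unsafe impl Pointer for Box<u32> {
    fn into_ptr(self) -> NonNull<u8> {
        // Box::into_raw never returns a null pointer.
        NonNull::new(Box::into_raw(self).cast()).unwrap()
    }
    unsafe fn from_ptr(ptr: NonNull<u8>) -> Self {
        // Safety: `ptr` comes from `into_ptr`, which called `Box::into_raw`.
        unsafe { Box::from_raw(ptr.as_ptr().cast()) }
    }
}

fn main() {
    let boxed = Box::new(42u32);
    let raw = boxed.into_ptr();
    // Safety: `raw` was produced by `into_ptr` just above.
    let back = unsafe { <Box<u32> as Pointer>::from_ptr(raw) };
    assert_eq!(*back, 42);
}
```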


@@ -33,6 +33,11 @@ pub fn method_context(cx: &LateContext<'_>, id: LocalDefId) -> MethodLateContext
     }
 }

+fn assoc_item_in_trait_impl(cx: &LateContext<'_>, ii: &hir::ImplItem<'_>) -> bool {
+    let item = cx.tcx.associated_item(ii.owner_id);
+    item.trait_item_def_id.is_some()
+}
+
 declare_lint! {
     /// The `non_camel_case_types` lint detects types, variants, traits and
     /// type parameters that don't have camel case names.
@@ -177,6 +182,7 @@ impl EarlyLintPass for NonCamelCaseTypes {
             // trait impls where we should have warned for the trait definition already.
             ast::ItemKind::Impl(box ast::Impl { of_trait: None, items, .. }) => {
                 for it in items {
+                    // FIXME: this doesn't respect `#[allow(..)]` on the item itself.
                     if let ast::AssocItemKind::Type(..) = it.kind {
                         self.check_case(cx, "associated type", &it.ident);
                     }
@@ -494,15 +500,6 @@ impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals {
             hir::ItemKind::Const(..) => {
                 NonUpperCaseGlobals::check_upper_case(cx, "constant", &it.ident);
             }
-            // we only want to check inherent associated consts, trait consts
-            // are linted at def-site.
-            hir::ItemKind::Impl(hir::Impl { of_trait: None, items, .. }) => {
-                for it in *items {
-                    if let hir::AssocItemKind::Const = it.kind {
-                        NonUpperCaseGlobals::check_upper_case(cx, "associated constant", &it.ident);
-                    }
-                }
-            }
             _ => {}
         }
     }
@@ -513,6 +510,12 @@ impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals {
         }
     }

+    fn check_impl_item(&mut self, cx: &LateContext<'_>, ii: &hir::ImplItem<'_>) {
+        if let hir::ImplItemKind::Const(..) = ii.kind && !assoc_item_in_trait_impl(cx, ii) {
+            NonUpperCaseGlobals::check_upper_case(cx, "associated constant", &ii.ident);
+        }
+    }
+
     fn check_pat(&mut self, cx: &LateContext<'_>, p: &hir::Pat<'_>) {
         // Lint for constants that look like binding identifiers (#7526)
         if let PatKind::Path(hir::QPath::Resolved(None, ref path)) = p.kind {
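The removed `check_item` arm and the new `check_impl_item` express the rule that the regression fix (#110575) restores: inherent associated consts are linted where they are written, while consts in trait impls are skipped because their names are imposed by, and already linted at, the trait definition. A sketch of the resulting behavior (illustrative user code, not from the diff):

```rust
#![warn(non_upper_case_globals)]

struct S;

impl S {
    // warned here: associated constant should have an upper case name
    const min: i32 = 0;
}

trait Limit {
    // warned here, at the definition site
    const max: i32;
}

impl Limit for S {
    // not warned again: the name comes from the trait
    const max: i32 = 10;
}

fn main() {
    let _ = (S::min, <S as Limit>::max);
}
```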


@@ -1044,17 +1044,26 @@ impl fmt::Debug for Span {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // Use the global `SourceMap` to print the span. If that's not
         // available, fall back to printing the raw values.
-        with_session_globals(|session_globals| {
-            if let Some(source_map) = &*session_globals.source_map.borrow() {
-                write!(f, "{} ({:?})", source_map.span_to_diagnostic_string(*self), self.ctxt())
-            } else {
-                f.debug_struct("Span")
-                    .field("lo", &self.lo())
-                    .field("hi", &self.hi())
-                    .field("ctxt", &self.ctxt())
-                    .finish()
-            }
-        })
+        fn fallback(span: Span, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+            f.debug_struct("Span")
+                .field("lo", &span.lo())
+                .field("hi", &span.hi())
+                .field("ctxt", &span.ctxt())
+                .finish()
+        }
+
+        if SESSION_GLOBALS.is_set() {
+            with_session_globals(|session_globals| {
+                if let Some(source_map) = &*session_globals.source_map.borrow() {
+                    write!(f, "{} ({:?})", source_map.span_to_diagnostic_string(*self), self.ctxt())
+                } else {
+                    fallback(*self, f)
+                }
+            })
+        } else {
+            fallback(*self, f)
+        }
     }
 }
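The practical effect of #110548: `{:?}` on a `Span` no longer requires session globals to be installed. A hedged sketch of the before/after (this uses the rustc-internal `rustc_span` crate, so it only builds inside the compiler workspace):

```rust
// Before this change: panics inside `with_session_globals` because the
// scoped thread local was never set.
// After: falls back to the raw fields and prints something like
// `Span { lo: BytePos(0), hi: BytePos(0), ctxt: #0 }`.
fn main() {
    println!("{:?}", rustc_span::DUMMY_SP);
}
```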