Rollup merge of #138040 - thaliaarchi:use-prelude-size-of.compiler, r=compiler-errors
compiler: Use `size_of` from the prelude instead of imported

Use `std::mem::{size_of, size_of_val, align_of, align_of_val}` from the prelude instead of importing or qualifying them. Apply this change across the compiler. These functions were added to all preludes in Rust 1.80.

r? `@compiler-errors`
Commit c6662879b2

30 changed files with 64 additions and 74 deletions
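As background for the diff below, a minimal sketch (not part of this commit) of what the change looks like at a use site: since Rust 1.80, `size_of`, `size_of_val`, `align_of`, and `align_of_val` are available through the std and core preludes, so neither a `use std::mem::size_of;` import nor a qualified path is needed. `Block` here is a stand-in type, not the compiler's.

// Illustrative only; assumes Rust 1.80+.
type Block = u64;

// Before: fully qualified through `std::mem`.
const OLD_LIMIT: usize = 1024 / std::mem::size_of::<Block>();

// After: `size_of` is resolved via the prelude.
const NEW_LIMIT: usize = 1024 / size_of::<Block>();

fn main() {
    assert_eq!(OLD_LIMIT, NEW_LIMIT);
    println!("blocks per KiB: {NEW_LIMIT}");
}
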
@@ -223,8 +223,8 @@ impl<D: TyDecoder> Decodable<D> for InitMaskMaterialized {
 // large.
 impl hash::Hash for InitMaskMaterialized {
     fn hash<H: hash::Hasher>(&self, state: &mut H) {
-        const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
-        const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+        const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / size_of::<Block>();
+        const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / size_of::<Block>();
 
         // Partially hash the `blocks` buffer when it is large. To limit collisions with common
         // prefixes and suffixes, we hash the length and some slices of the buffer.

@@ -573,7 +573,7 @@ pub fn write_target_uint(
 #[inline]
 pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
     // This u128 holds an "any-size uint" (since smaller uints can fits in it)
-    let mut buf = [0u8; std::mem::size_of::<u128>()];
+    let mut buf = [0u8; size_of::<u128>()];
     // So we do not read exactly 16 bytes into the u128, just the "payload".
     let uint = match endianness {
         Endian::Little => {

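The function in this hunk widens an arbitrary-size integer payload into a `u128` buffer. A standalone sketch of the little-endian case, with a hypothetical helper name; the real function also handles big-endian input and returns an `io::Error`:

// Read up to 16 little-endian bytes as an unsigned integer.
fn read_le_uint(source: &[u8]) -> u128 {
    let mut buf = [0u8; size_of::<u128>()];
    // Only the payload is copied; the high bytes stay zero.
    buf[..source.len()].copy_from_slice(source);
    u128::from_le_bytes(buf)
}

fn main() {
    assert_eq!(read_le_uint(&[0x34, 0x12]), 0x1234);
}
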
@@ -332,13 +332,13 @@ pub struct Body<'tcx> {
     ///
     /// ```rust
     /// fn test<T>() {
-    ///     let _ = [0; std::mem::size_of::<*mut T>()];
+    ///     let _ = [0; size_of::<*mut T>()];
     /// }
     /// ```
     ///
     /// **WARNING**: Do not change this flags after the MIR was originally created, even if an optimization
     /// removed the last mention of all generic params. We do not want to rely on optimizations and
-    /// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
+    /// potentially allow things like `[u8; size_of::<T>() * 0]` due to this.
     pub is_polymorphic: bool,
 
     /// The phase at which this MIR should be "injected" into the compilation process.

@@ -27,7 +27,7 @@ pub type Erase<T: EraseType> = Erased<impl Copy>;
 pub fn erase<T: EraseType>(src: T) -> Erase<T> {
     // Ensure the sizes match
     const {
-        if std::mem::size_of::<T>() != std::mem::size_of::<T::Result>() {
+        if size_of::<T>() != size_of::<T::Result>() {
             panic!("size of T must match erased type T::Result")
         }
     };

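The inline `const` block in `erase` turns a size mismatch into a post-monomorphization compile error rather than a runtime check. A self-contained sketch of the same pattern under stable Rust (the function name is illustrative):

// Fails to compile (during const evaluation) for any instantiation
// where the two types differ in size; compiles cleanly otherwise.
fn check_same_size<A, B>() {
    const {
        if size_of::<A>() != size_of::<B>() {
            panic!("A and B must have the same size");
        }
    }
}

fn main() {
    check_same_size::<u32, i32>(); // ok: both are 4 bytes
    // check_same_size::<u32, u16>(); // uncommenting this breaks the build
}
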
@@ -370,7 +370,7 @@ macro_rules! define_callbacks {
         // Increase this limit if necessary, but do try to keep the size low if possible
         #[cfg(target_pointer_width = "64")]
         const _: () = {
-            if mem::size_of::<Key<'static>>() > 88 {
+            if size_of::<Key<'static>>() > 88 {
                 panic!("{}", concat!(
                     "the query `",
                     stringify!($name),

@@ -386,7 +386,7 @@ macro_rules! define_callbacks {
         #[cfg(target_pointer_width = "64")]
         #[cfg(not(feature = "rustc_randomized_layouts"))]
         const _: () = {
-            if mem::size_of::<Value<'static>>() > 64 {
+            if size_of::<Value<'static>>() > 64 {
                 panic!("{}", concat!(
                     "the query `",
                     stringify!($name),

@@ -408,7 +408,7 @@ macro_rules! from_x_for_scalar_int {
             fn from(u: $ty) -> Self {
                 Self {
                     data: u128::from(u),
-                    size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(),
+                    size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
                 }
             }
         }

@@ -424,7 +424,7 @@ macro_rules! from_scalar_int_for_x {
             fn from(int: ScalarInt) -> Self {
                 // The `unwrap` cannot fail because to_bits (if it succeeds)
                 // is guaranteed to return a value that fits into the size.
-                int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+                int.to_bits(Size::from_bytes(size_of::<$ty>()))
                     .try_into().unwrap()
             }
         }

@@ -2,7 +2,6 @@
 use core::intrinsics;
 use std::marker::PhantomData;
-use std::mem;
 use std::num::NonZero;
 use std::ptr::NonNull;

@@ -176,17 +175,17 @@ impl<'tcx> GenericArgKind<'tcx> {
         let (tag, ptr) = match self {
             GenericArgKind::Lifetime(lt) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*lt.0.0) & TAG_MASK, 0);
                 (REGION_TAG, NonNull::from(lt.0.0).cast())
             }
             GenericArgKind::Type(ty) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
                 (TYPE_TAG, NonNull::from(ty.0.0).cast())
             }
             GenericArgKind::Const(ct) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
                 (CONST_TAG, NonNull::from(ct.0.0).cast())
             }
         };

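These assertions exist because the compiler packs a discriminant into the low bits of each interned pointer: an address aligned to more than `TAG_MASK` always has those bits clear. A self-contained sketch of the technique (names and types are illustrative, not the compiler's):

// A 2-bit tag stored in the low bits of a suitably aligned address.
const TAG_MASK: usize = 0b11;

fn pack(ptr: &'static u32, tag: usize) -> usize {
    let addr = ptr as *const u32 as usize;
    // Same check as in the diff: the pointee's alignment must leave
    // the tag bits free in every possible address.
    assert_eq!(align_of_val(ptr) & TAG_MASK, 0);
    addr | tag
}

fn unpack(tagged: usize) -> (*const u32, usize) {
    ((tagged & !TAG_MASK) as *const u32, tagged & TAG_MASK)
}

fn main() {
    static X: u32 = 7;
    let tagged = pack(&X, 0b10);
    let (ptr, tag) = unpack(tagged);
    assert_eq!(tag, 0b10);
    assert_eq!(unsafe { *ptr }, 7);
}
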
@@ -93,7 +93,7 @@ impl<H, T> RawList<H, T> {
         T: Copy,
     {
         assert!(!mem::needs_drop::<T>());
-        assert!(mem::size_of::<T>() != 0);
+        assert!(size_of::<T>() != 0);
         assert!(!slice.is_empty());
 
         let (layout, _offset) =

@@ -155,7 +155,7 @@ macro_rules! impl_list_empty {
             static EMPTY: ListSkeleton<$header_ty, MaxAlign> =
                 ListSkeleton { header: $header_init, len: 0, data: [] };
 
-            assert!(mem::align_of::<T>() <= mem::align_of::<MaxAlign>());
+            assert!(align_of::<T>() <= align_of::<MaxAlign>());
 
             // SAFETY: `EMPTY` is sufficiently aligned to be an empty list for all
             // types with `align_of(T) <= align_of(MaxAlign)`, which we checked above.

@@ -17,7 +17,7 @@ use std::hash::{Hash, Hasher};
 use std::marker::PhantomData;
 use std::num::NonZero;
 use std::ptr::NonNull;
-use std::{fmt, mem, str};
+use std::{fmt, str};
 
 pub use adt::*;
 pub use assoc::*;

@@ -637,12 +637,12 @@ impl<'tcx> TermKind<'tcx> {
         let (tag, ptr) = match self {
             TermKind::Ty(ty) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
                 (TYPE_TAG, NonNull::from(ty.0.0).cast())
             }
             TermKind::Const(ct) => {
                 // Ensure we can use the tag bits.
-                assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+                assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
                 (CONST_TAG, NonNull::from(ct.0.0).cast())
             }
         };