Rollup merge of #138040 - thaliaarchi:use-prelude-size-of.compiler, r=compiler-errors

compiler: Use `size_of` from the prelude instead of imported

Use `size_of`, `size_of_val`, `align_of`, and `align_of_val` from the prelude instead of importing them from `std::mem` or writing the fully qualified `std::mem::` paths. This change is applied across the compiler.

These functions were added to all preludes in Rust 1.80.

r? ``@compiler-errors``
commit c6662879b2 by Matthias Krüger, 2025-03-09 10:34:49 +01:00 (committed by GitHub)
30 changed files with 64 additions and 74 deletions
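As a quick illustration of the pattern applied throughout the diff below, here is a minimal sketch (the `Header` type is hypothetical and not taken from this PR): on Rust 1.80 and later these functions resolve through the prelude, so neither a `use std::mem::...` import nor a fully qualified path is needed.

```rust
// Before this change, call sites either imported the helpers...
//     use std::mem::{align_of, size_of};
// ...or spelled out the full path:
//     const HEADER_BYTES: usize = std::mem::size_of::<Header>();

// After: with Rust 1.80+ the functions come straight from the prelude.
struct Header {
    tag: u8,
    len: u32,
}

const HEADER_BYTES: usize = size_of::<Header>();
const HEADER_ALIGN: usize = align_of::<Header>();

fn main() {
    let h = Header { tag: 1, len: 42 };
    // `size_of_val` and `align_of_val` are also in the prelude since 1.80.
    assert_eq!(size_of_val(&h), HEADER_BYTES);
    assert_eq!(align_of_val(&h), HEADER_ALIGN);
    println!("tag={} len={}: {HEADER_BYTES} bytes, align {HEADER_ALIGN}", h.tag, h.len);
}
```

The hunks that follow are mostly this mechanical substitution, plus dropping `use std::mem` imports that become unused as a result.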


@@ -93,7 +93,7 @@ impl<T> ArenaChunk<T> {
#[inline]
fn end(&mut self) -> *mut T {
unsafe {
if mem::size_of::<T>() == 0 {
if size_of::<T>() == 0 {
// A pointer as large as possible for zero-sized elements.
ptr::without_provenance_mut(!0)
} else {
@@ -151,7 +151,7 @@ impl<T> TypedArena<T> {
}
unsafe {
if mem::size_of::<T>() == 0 {
if size_of::<T>() == 0 {
self.ptr.set(self.ptr.get().wrapping_byte_add(1));
let ptr = ptr::NonNull::<T>::dangling().as_ptr();
// Don't drop the object. This `write` is equivalent to `forget`.
@@ -173,13 +173,13 @@ impl<T> TypedArena<T> {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
let available_bytes = self.end.get().addr() - self.ptr.get().addr();
let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap();
available_bytes >= additional_bytes
}
#[inline]
fn alloc_raw_slice(&self, len: usize) -> *mut T {
assert!(mem::size_of::<T>() != 0);
assert!(size_of::<T>() != 0);
assert!(len != 0);
// Ensure the current chunk can fit `len` objects.
@@ -213,7 +213,7 @@ impl<T> TypedArena<T> {
// So we collect all the elements beforehand, which takes care of reentrancy and panic
// safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it
// doesn't need to be hyper-optimized.
assert!(mem::size_of::<T>() != 0);
assert!(size_of::<T>() != 0);
let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
if vec.is_empty() {
@@ -236,7 +236,7 @@ impl<T> TypedArena<T> {
unsafe {
// We need the element size to convert chunk sizes (ranging from
// PAGE to HUGE_PAGE bytes) to element counts.
let elem_size = cmp::max(1, mem::size_of::<T>());
let elem_size = cmp::max(1, size_of::<T>());
let mut chunks = self.chunks.borrow_mut();
let mut new_cap;
if let Some(last_chunk) = chunks.last_mut() {
@@ -246,7 +246,7 @@ impl<T> TypedArena<T> {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
last_chunk.entries = used_bytes / mem::size_of::<T>();
last_chunk.entries = used_bytes / size_of::<T>();
}
// If the previous chunk's len is less than HUGE_PAGE
@@ -276,7 +276,7 @@ impl<T> TypedArena<T> {
let end = self.ptr.get().addr();
// We then calculate the number of elements to be dropped in the last chunk,
// which is the filled area's length.
let diff = if mem::size_of::<T>() == 0 {
let diff = if size_of::<T>() == 0 {
// `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
// the number of zero-sized values in the last and only chunk, just out of caution.
// Recall that `end` was incremented for each allocated value.
@@ -284,7 +284,7 @@ impl<T> TypedArena<T> {
} else {
// FIXME: this should *likely* use `offset_from`, but more
// investigation is needed (including running tests in miri).
(end - start) / mem::size_of::<T>()
(end - start) / size_of::<T>()
};
// Pass that to the `destroy` method.
unsafe {
@@ -329,7 +329,7 @@ fn align_up(val: usize, align: usize) -> usize {
// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them
// to optimize away alignment code.
const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
const DROPLESS_ALIGNMENT: usize = align_of::<usize>();
/// An arena that can hold objects of multiple different types that impl `Copy`
/// and/or satisfy `!mem::needs_drop`.
@@ -447,7 +447,7 @@ impl DroplessArena {
#[inline]
pub fn alloc<T>(&self, object: T) -> &mut T {
assert!(!mem::needs_drop::<T>());
assert!(mem::size_of::<T>() != 0);
assert!(size_of::<T>() != 0);
let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;
@@ -471,7 +471,7 @@ impl DroplessArena {
T: Copy,
{
assert!(!mem::needs_drop::<T>());
assert!(mem::size_of::<T>() != 0);
assert!(size_of::<T>() != 0);
assert!(!slice.is_empty());
let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;
@@ -546,7 +546,7 @@ impl DroplessArena {
// Warning: this function is reentrant: `iter` could hold a reference to `&self` and
// allocate additional elements while we're iterating.
let iter = iter.into_iter();
assert!(mem::size_of::<T>() != 0);
assert!(size_of::<T>() != 0);
assert!(!mem::needs_drop::<T>());
let size_hint = iter.size_hint();


@@ -2439,9 +2439,5 @@ fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
#[cfg(not(feature = "master"))]
fn get_maybe_pointer_size(value: RValue<'_>) -> u32 {
let type_ = value.get_type();
if type_.get_pointee().is_some() {
std::mem::size_of::<*const ()>() as _
} else {
type_.get_size()
}
if type_.get_pointee().is_some() { size_of::<*const ()>() as _ } else { type_.get_size() }
}


@@ -1177,7 +1177,7 @@ mod win {
let mut cp: u32 = 0;
// We're using the `LOCALE_RETURN_NUMBER` flag to return a u32.
// But the API requires us to pass the data as though it's a [u16] string.
let len = std::mem::size_of::<u32>() / std::mem::size_of::<u16>();
let len = size_of::<u32>() / size_of::<u16>();
let data = std::slice::from_raw_parts_mut(&mut cp as *mut u32 as *mut u16, len);
let len_written = GetLocaleInfoEx(
LOCALE_NAME_SYSTEM_DEFAULT,


@@ -2,10 +2,8 @@ use std::ptr::Alignment;
/// Returns the ABI-required minimum alignment of a type in bytes.
///
/// This is equivalent to [`mem::align_of`], but also works for some unsized
/// This is equivalent to [`align_of`], but also works for some unsized
/// types (e.g. slices or rustc's `List`s).
///
/// [`mem::align_of`]: std::mem::align_of
pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
T::ALIGN
}
@@ -15,10 +13,10 @@ pub const fn align_of<T: ?Sized + Aligned>() -> Alignment {
/// # Safety
///
/// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it
/// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for
/// is [`align_of::<Self>()`], for unsized types it depends on the type, for
/// example `[T]` has alignment of `T`.
///
/// [`mem::align_of<Self>()`]: std::mem::align_of
/// [`align_of::<Self>()`]: align_of
pub unsafe trait Aligned {
/// Alignment of `Self`.
const ALIGN: Alignment;


@@ -863,15 +863,13 @@ fn get_thread_id() -> u32 {
cfg_match! {
windows => {
pub fn get_resident_set_size() -> Option<usize> {
use std::mem;
use windows::{
Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS},
Win32::System::Threading::GetCurrentProcess,
};
let mut pmc = PROCESS_MEMORY_COUNTERS::default();
let pmc_size = mem::size_of_val(&pmc);
let pmc_size = size_of_val(&pmc);
unsafe {
K32GetProcessMemoryInfo(
GetCurrentProcess(),
@@ -889,7 +887,7 @@ cfg_match! {
pub fn get_resident_set_size() -> Option<usize> {
use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
use std::mem;
const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
const PROC_TASKINFO_SIZE: c_int = size_of::<proc_taskinfo>() as c_int;
unsafe {
let mut info: proc_taskinfo = mem::zeroed();


@@ -1,7 +1,7 @@
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher};
use std::{iter, mem};
use std::iter;
use either::Either;
@@ -221,7 +221,7 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
/// consistently for each `Sharded` instance.
#[inline]
fn get_shard_hash(hash: u64) -> usize {
let hash_len = mem::size_of::<usize>();
let hash_len = size_of::<usize>();
// Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits.
// hashbrown also uses the lowest bits, so we can't use those
(hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize


@@ -490,7 +490,7 @@ pub struct Diag<'a, G: EmissionGuarantee = ErrorGuaranteed> {
// would be bad.
impl<G> !Clone for Diag<'_, G> {}
rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * std::mem::size_of::<usize>());
rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * size_of::<usize>());
impl<G: EmissionGuarantee> Deref for Diag<'_, G> {
type Target = DiagInner;


@@ -435,7 +435,7 @@ pub enum Res<Id = hir::HirId> {
/// mention any generic parameters to allow the following with `min_const_generics`:
/// ```
/// # struct Foo;
/// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
/// impl Foo { fn test() -> [u8; size_of::<Self>()] { todo!() } }
///
/// struct Bar([u8; baz::<Self>()]);
/// const fn baz<T>() -> usize { 10 }
@@ -445,7 +445,7 @@ pub enum Res<Id = hir::HirId> {
/// compat lint:
/// ```
/// fn foo<T>() {
/// let _bar = [1_u8; std::mem::size_of::<*mut T>()];
/// let _bar = [1_u8; size_of::<*mut T>()];
/// }
/// ```
// FIXME(generic_const_exprs): Remove this bodge once that feature is stable.


@@ -123,7 +123,7 @@ pub(crate) fn read_file(
// Check HEADER_FORMAT_VERSION
{
debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2);
debug_assert!(size_of_val(&HEADER_FORMAT_VERSION) == 2);
let mut header_format_version = [0u8; 2];
file.read_exact(&mut header_format_version)?;
let header_format_version =


@@ -1,7 +1,9 @@
use std::marker::PhantomData;
#[cfg(not(feature = "nightly"))]
use std::mem;
use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
use std::rc::Rc;
use std::{fmt, iter, mem, slice};
use std::{fmt, iter, slice};
use Chunk::*;
#[cfg(feature = "nightly")]
@@ -14,7 +16,7 @@ use crate::{Idx, IndexVec};
mod tests;
type Word = u64;
const WORD_BYTES: usize = mem::size_of::<Word>();
const WORD_BYTES: usize = size_of::<Word>();
const WORD_BITS: usize = WORD_BYTES * 8;
// The choice of chunk size has some trade-offs.


@@ -9,8 +9,6 @@ crate::newtype_index! {
#[test]
fn index_size_is_optimized() {
use std::mem::size_of;
assert_eq!(size_of::<MyIdx>(), 4);
// Uses 0xFFFF_FFFB
assert_eq!(size_of::<Option<MyIdx>>(), 4);


@@ -2673,7 +2673,7 @@ declare_lint! {
///
/// ```rust
/// const fn foo<T>() -> usize {
/// if std::mem::size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
/// if size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
/// 4
/// } else {
/// 8


@@ -223,8 +223,8 @@ impl<D: TyDecoder> Decodable<D> for InitMaskMaterialized {
// large.
impl hash::Hash for InitMaskMaterialized {
fn hash<H: hash::Hasher>(&self, state: &mut H) {
const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / size_of::<Block>();
const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / size_of::<Block>();
// Partially hash the `blocks` buffer when it is large. To limit collisions with common
// prefixes and suffixes, we hash the length and some slices of the buffer.


@@ -573,7 +573,7 @@ pub fn write_target_uint(
#[inline]
pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
// This u128 holds an "any-size uint" (since smaller uints can fits in it)
let mut buf = [0u8; std::mem::size_of::<u128>()];
let mut buf = [0u8; size_of::<u128>()];
// So we do not read exactly 16 bytes into the u128, just the "payload".
let uint = match endianness {
Endian::Little => {


@@ -332,13 +332,13 @@ pub struct Body<'tcx> {
///
/// ```rust
/// fn test<T>() {
/// let _ = [0; std::mem::size_of::<*mut T>()];
/// let _ = [0; size_of::<*mut T>()];
/// }
/// ```
///
/// **WARNING**: Do not change this flags after the MIR was originally created, even if an optimization
/// removed the last mention of all generic params. We do not want to rely on optimizations and
/// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
/// potentially allow things like `[u8; size_of::<T>() * 0]` due to this.
pub is_polymorphic: bool,
/// The phase at which this MIR should be "injected" into the compilation process.


@@ -27,7 +27,7 @@ pub type Erase<T: EraseType> = Erased<impl Copy>;
pub fn erase<T: EraseType>(src: T) -> Erase<T> {
// Ensure the sizes match
const {
if std::mem::size_of::<T>() != std::mem::size_of::<T::Result>() {
if size_of::<T>() != size_of::<T::Result>() {
panic!("size of T must match erased type T::Result")
}
};


@@ -370,7 +370,7 @@ macro_rules! define_callbacks {
// Increase this limit if necessary, but do try to keep the size low if possible
#[cfg(target_pointer_width = "64")]
const _: () = {
if mem::size_of::<Key<'static>>() > 88 {
if size_of::<Key<'static>>() > 88 {
panic!("{}", concat!(
"the query `",
stringify!($name),
@@ -386,7 +386,7 @@ macro_rules! define_callbacks {
#[cfg(target_pointer_width = "64")]
#[cfg(not(feature = "rustc_randomized_layouts"))]
const _: () = {
if mem::size_of::<Value<'static>>() > 64 {
if size_of::<Value<'static>>() > 64 {
panic!("{}", concat!(
"the query `",
stringify!($name),


@@ -408,7 +408,7 @@ macro_rules! from_x_for_scalar_int {
fn from(u: $ty) -> Self {
Self {
data: u128::from(u),
size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(),
size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
}
}
}
@@ -424,7 +424,7 @@ macro_rules! from_scalar_int_for_x {
fn from(int: ScalarInt) -> Self {
// The `unwrap` cannot fail because to_bits (if it succeeds)
// is guaranteed to return a value that fits into the size.
int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
int.to_bits(Size::from_bytes(size_of::<$ty>()))
.try_into().unwrap()
}
}


@@ -2,7 +2,6 @@
use core::intrinsics;
use std::marker::PhantomData;
use std::mem;
use std::num::NonZero;
use std::ptr::NonNull;
@@ -176,17 +175,17 @@ impl<'tcx> GenericArgKind<'tcx> {
let (tag, ptr) = match self {
GenericArgKind::Lifetime(lt) => {
// Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
assert_eq!(align_of_val(&*lt.0.0) & TAG_MASK, 0);
(REGION_TAG, NonNull::from(lt.0.0).cast())
}
GenericArgKind::Type(ty) => {
// Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
(TYPE_TAG, NonNull::from(ty.0.0).cast())
}
GenericArgKind::Const(ct) => {
// Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
(CONST_TAG, NonNull::from(ct.0.0).cast())
}
};


@@ -93,7 +93,7 @@ impl<H, T> RawList<H, T> {
T: Copy,
{
assert!(!mem::needs_drop::<T>());
assert!(mem::size_of::<T>() != 0);
assert!(size_of::<T>() != 0);
assert!(!slice.is_empty());
let (layout, _offset) =
@@ -155,7 +155,7 @@ macro_rules! impl_list_empty {
static EMPTY: ListSkeleton<$header_ty, MaxAlign> =
ListSkeleton { header: $header_init, len: 0, data: [] };
assert!(mem::align_of::<T>() <= mem::align_of::<MaxAlign>());
assert!(align_of::<T>() <= align_of::<MaxAlign>());
// SAFETY: `EMPTY` is sufficiently aligned to be an empty list for all
// types with `align_of(T) <= align_of(MaxAlign)`, which we checked above.


@@ -17,7 +17,7 @@ use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::num::NonZero;
use std::ptr::NonNull;
use std::{fmt, mem, str};
use std::{fmt, str};
pub use adt::*;
pub use assoc::*;
@@ -637,12 +637,12 @@ impl<'tcx> TermKind<'tcx> {
let (tag, ptr) = match self {
TermKind::Ty(ty) => {
// Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0);
(TYPE_TAG, NonNull::from(ty.0.0).cast())
}
TermKind::Const(ct) => {
// Ensure we can use the tag bits.
assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0);
(CONST_TAG, NonNull::from(ct.0.0).cast())
}
};


@@ -619,7 +619,7 @@ impl Iterator for TokenTypeSetIter {
type Item = TokenType;
fn next(&mut self) -> Option<TokenType> {
let num_bits: u32 = (std::mem::size_of_val(&self.0.0) * 8) as u32;
let num_bits: u32 = (size_of_val(&self.0.0) * 8) as u32;
assert_eq!(num_bits, 128);
let z = self.0.0.trailing_zeros();
if z == num_bits {


@@ -107,12 +107,12 @@ impl<'k> StatCollector<'k> {
let node = self.nodes.entry(label1).or_insert(Node::new());
node.stats.count += 1;
node.stats.size = std::mem::size_of_val(val);
node.stats.size = size_of_val(val);
if let Some(label2) = label2 {
let subnode = node.subnodes.entry(label2).or_insert(NodeStats::new());
subnode.count += 1;
subnode.size = std::mem::size_of_val(val);
subnode.size = size_of_val(val);
}
}


@@ -39,7 +39,7 @@ impl RWUTable {
/// Size of packed RWU in bits.
const RWU_BITS: usize = 4;
/// Size of a word in bits.
const WORD_BITS: usize = std::mem::size_of::<u8>() * 8;
const WORD_BITS: usize = size_of::<u8>() * 8;
/// Number of packed RWUs that fit into a single word.
const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS;


@@ -63,7 +63,7 @@ rustc_index::newtype_index! {
pub struct SerializedDepNodeIndex {}
}
const DEP_NODE_SIZE: usize = std::mem::size_of::<SerializedDepNodeIndex>();
const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>();
/// Amount of padding we need to add to the edge list data so that we can retrieve every
/// SerializedDepNodeIndex with a fixed-size read then mask.
const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
@@ -175,7 +175,7 @@ impl EdgeHeader {
#[inline]
fn mask(bits: usize) -> usize {
usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits)
usize::MAX >> ((size_of::<usize>() * 8) - bits)
}
impl SerializedDepGraph {
@@ -208,9 +208,8 @@ impl SerializedDepGraph {
// for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at
// least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128
// length is about the same fractional overhead and it amortizes for yet greater lengths.
let mut edge_list_data = Vec::with_capacity(
graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(),
);
let mut edge_list_data =
Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader<D>>());
for _index in 0..node_count {
// Decode the header for this edge; the header packs together as many of the fixed-size
@@ -300,7 +299,7 @@ struct Unpacked {
// M..M+N bytes per index
// M+N..16 kind
impl<D: Deps> SerializedNodeHeader<D> {
const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8;
const TOTAL_BITS: usize = size_of::<DepKind>() * 8;
const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS;
const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS;
const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize;


@@ -2995,7 +2995,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> {
}
// HACK(min_const_generics, generic_const_exprs): We
// want to keep allowing `[0; std::mem::size_of::<*mut T>()]`
// want to keep allowing `[0; size_of::<*mut T>()]`
// with a future compat lint for now. We do this by adding an
// additional special case for repeat expressions.
//


@@ -7,7 +7,7 @@ use crate::serialize::Decoder;
/// Returns the length of the longest LEB128 encoding for `T`, assuming `T` is an integer type
pub const fn max_leb128_len<T>() -> usize {
// The longest LEB128 encoding for an integer uses 7 bits per byte.
(std::mem::size_of::<T>() * 8 + 6) / 7
(size_of::<T>() * 8 + 6) / 7
}
/// Returns the length of the longest LEB128 encoding of all supported integer types.


@@ -92,7 +92,7 @@ fn current_dll_path() -> Result<PathBuf, String> {
if libc::loadquery(
libc::L_GETINFO,
buffer.as_mut_ptr() as *mut u8,
(std::mem::size_of::<libc::ld_info>() * buffer.len()) as u32,
(size_of::<libc::ld_info>() * buffer.len()) as u32,
) >= 0
{
break;


@@ -2,7 +2,7 @@
//!
//! For concrete constants, this is fairly simple as we can just try and evaluate it.
//!
//! When dealing with polymorphic constants, for example `std::mem::size_of::<T>() - 1`,
//! When dealing with polymorphic constants, for example `size_of::<T>() - 1`,
//! this is not as easy.
//!
//! In this case we try to build an abstract representation of this constant using


@@ -58,7 +58,7 @@ impl IndexedVal for AllocId {
/// Utility function used to read an allocation data into a unassigned integer.
pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> {
let mut buf = [0u8; std::mem::size_of::<u128>()];
let mut buf = [0u8; size_of::<u128>()];
match MachineInfo::target_endianness() {
Endian::Little => {
bytes.read_exact(&mut buf[..bytes.len()])?;
@@ -73,7 +73,7 @@ pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> {
/// Utility function used to read an allocation data into an assigned integer.
pub(crate) fn read_target_int(mut bytes: &[u8]) -> Result<i128, Error> {
let mut buf = [0u8; std::mem::size_of::<i128>()];
let mut buf = [0u8; size_of::<i128>()];
match MachineInfo::target_endianness() {
Endian::Little => {
bytes.read_exact(&mut buf[..bytes.len()])?;