2018-05-31 18:23:42 +02:00
|
|
|
|
//! Memory allocation APIs
|
|
|
|
|
|
2018-05-31 18:36:51 +02:00
|
|
|
|
#![stable(feature = "alloc_module", since = "1.28.0")]
|
2015-06-09 11:52:41 -07:00
|
|
|
|
|
2020-10-06 16:37:23 +02:00
|
|
|
|
#[cfg(not(test))]
|
|
|
|
|
use core::intrinsics;
|
|
|
|
|
use core::intrinsics::{min_align_of_val, size_of_val};
|
|
|
|
|
|
|
|
|
|
use core::ptr::Unique;
|
|
|
|
|
#[cfg(not(test))]
|
|
|
|
|
use core::ptr::{self, NonNull};
|
2015-02-07 18:49:54 -05:00
|
|
|
|
|
2018-05-31 18:36:51 +02:00
|
|
|
|
#[stable(feature = "alloc_module", since = "1.28.0")]
|
2018-04-02 10:38:07 +02:00
|
|
|
|
#[doc(inline)]
|
2018-04-03 21:05:10 +02:00
|
|
|
|
pub use core::alloc::*;
|
2018-04-02 10:38:07 +02:00
|
|
|
|
|
2019-08-02 01:40:56 +03:00
|
|
|
|
#[cfg(test)]
|
|
|
|
|
mod tests;
|
|
|
|
|
|
2018-04-03 17:12:57 +02:00
|
|
|
|
// FFI declarations for the global-allocator entry points. They are *not* defined
// in this crate; rustc resolves each symbol at link time (see below).
extern "Rust" {
    // These are the magic symbols to call the global allocator. rustc generates
    // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
    // (the code expanding that attribute macro generates those functions), or to call
    // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
    // otherwise.
    // The rustc fork of LLVM also special-cases these function names to be able to optimize them
    // like `malloc`, `realloc`, and `free`, respectively.
    #[rustc_allocator]
    #[rustc_allocator_nounwind]
    fn __rust_alloc(size: usize, align: usize) -> *mut u8;
    #[rustc_allocator_nounwind]
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
    #[rustc_allocator_nounwind]
    fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
    #[rustc_allocator_nounwind]
    fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
|
|
|
|
|
|
2018-05-31 18:23:42 +02:00
|
|
|
|
/// The global memory allocator.
///
/// This type implements the [`Allocator`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](self#functions).
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, Default, Debug)]
#[cfg(not(test))]
// Zero-sized handle: all actual state lives in the registered global allocator.
pub struct Global;
|
2017-05-23 14:47:41 +02:00
|
|
|
|
|
2020-10-06 16:37:23 +02:00
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub use std::alloc::Global;
|
|
|
|
|
|
2018-05-31 18:23:42 +02:00
|
|
|
|
/// Allocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `alloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc, dealloc, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u16>();
///     let ptr = alloc(layout);
///
///     *(ptr as *mut u16) = 42;
///     assert_eq!(*(ptr as *mut u16), 42);
///
///     dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
    // SAFETY: the caller upholds the `GlobalAlloc::alloc` contract (non-zero-sized
    // `layout`); this is a thin forward to the linker-resolved allocator symbol.
    unsafe { __rust_alloc(layout.size(), layout.align()) }
}
|
2017-05-23 14:47:41 +02:00
|
|
|
|
|
2018-05-31 18:23:42 +02:00
|
|
|
|
/// Deallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `dealloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::dealloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
    // SAFETY: the caller upholds the `GlobalAlloc::dealloc` contract (`ptr` was
    // allocated by this allocator with exactly this `layout`).
    unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
}
|
2017-05-23 14:47:41 +02:00
|
|
|
|
|
2018-05-31 18:23:42 +02:00
|
|
|
|
/// Reallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::realloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `realloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::realloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
    // SAFETY: the caller upholds the `GlobalAlloc::realloc` contract. Note the
    // alignment passed through is the *old* layout's alignment, per that contract.
    unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
}
|
2017-05-23 14:47:41 +02:00
|
|
|
|
|
2018-05-31 18:23:42 +02:00
|
|
|
|
/// Allocate zero-initialized memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate’s default.
///
/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc_zeroed`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc_zeroed, dealloc, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u16>();
///     let ptr = alloc_zeroed(layout);
///
///     assert_eq!(*(ptr as *mut u16), 0);
///
///     dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
    // SAFETY: the caller upholds the `GlobalAlloc::alloc_zeroed` contract
    // (non-zero-sized `layout`).
    unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
}
|
|
|
|
|
|
2020-10-06 16:37:23 +02:00
|
|
|
|
#[cfg(not(test))]
impl Global {
    /// Shared implementation behind `allocate` and `allocate_zeroed`; `zeroed`
    /// selects whether the new memory is zero-initialized.
    #[inline]
    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
        match layout.size() {
            // Zero-sized request: no memory is needed, hand back a dangling but
            // well-aligned pointer with length 0.
            0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
            // SAFETY: `layout` is non-zero in size,
            size => unsafe {
                let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
                // A null return from the raw allocator means allocation failure.
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, size))
            },
        }
    }

    // SAFETY: Same as `Allocator::grow`
    #[inline]
    unsafe fn grow_impl(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
        zeroed: bool,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        match old_layout.size() {
            // Nothing was allocated before: growing is just a fresh allocation.
            0 => self.alloc_impl(new_layout, zeroed),

            // SAFETY: `new_size` is non-zero as it is greater than or equal to `old_size`,
            // which is non-zero in this arm, as required by safety conditions. Other
            // conditions must be upheld by the caller.
            old_size if old_layout.align() == new_layout.align() => unsafe {
                let new_size = new_layout.size();

                // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
                intrinsics::assume(new_size >= old_layout.size());

                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                if zeroed {
                    // `realloc` does not zero the newly added tail; do it here.
                    raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
                }
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // Alignment changed: `realloc` cannot be used, so allocate-copy-free.
            // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
            // both the old and new memory allocation are valid for reads and writes for `old_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            old_size => unsafe {
                let new_ptr = self.alloc_impl(new_layout, zeroed)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
                self.deallocate(ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
|
|
|
|
|
|
2018-05-31 18:36:51 +02:00
|
|
|
|
#[unstable(feature = "allocator_api", issue = "32838")]
#[cfg(not(test))]
unsafe impl Allocator for Global {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, false)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, true)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Zero-sized allocations were never backed by real memory (see
        // `alloc_impl`), so there is nothing to free for them.
        if layout.size() != 0 {
            // SAFETY: `layout` is non-zero in size,
            // other conditions must be upheld by the caller
            unsafe { dealloc(ptr.as_ptr(), layout) }
        }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        match new_layout.size() {
            // Shrinking to zero: free the old block and return a dangling slice.
            // SAFETY: conditions must be upheld by the caller
            0 => unsafe {
                self.deallocate(ptr, old_layout);
                Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
            },

            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
            new_size if old_layout.align() == new_layout.align() => unsafe {
                // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
                intrinsics::assume(new_size <= old_layout.size());

                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // Alignment changed: `realloc` cannot be used, so allocate-copy-free.
            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
            // both the old and new memory allocation are valid for reads and writes for `new_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            new_size => unsafe {
                let new_ptr = self.allocate(new_layout)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
                self.deallocate(ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
|
|
|
|
|
|
2014-05-06 22:03:14 -04:00
|
|
|
|
/// The allocator for unique pointers.
///
/// Lang item used by compiler-generated code (e.g. `Box` creation); aborts via
/// the global OOM handler instead of returning null on failure.
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[lang = "exchange_malloc"]
#[inline]
unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
    // SAFETY: `size`/`align` are presumed to form a valid layout, as this lang
    // item is only invoked by compiler-generated code — TODO(review): confirm.
    let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
    match Global.allocate(layout) {
        Ok(ptr) => ptr.as_mut_ptr(),
        // Allocation failure diverts to the global OOM handler (never returns).
        Err(_) => handle_alloc_error(layout),
    }
}
|
|
|
|
|
|
2018-05-10 12:02:19 -06:00
|
|
|
|
// Frees the memory behind a `Box` without running the pointee's destructor;
// invoked by compiler-generated drop glue via the `box_free` lang item.
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Drop>(
    ptr: Unique<T>,
    alloc: A,
) {
    unsafe {
        // Recover the pointee's layout from the (possibly wide) pointer itself,
        // so unsized pointees (slices, trait objects) are handled too.
        let size = size_of_val(ptr.as_ref());
        let align = min_align_of_val(ptr.as_ref());
        let layout = Layout::from_size_align_unchecked(size, align);
        alloc.deallocate(From::from(ptr.cast()), layout)
    }
}
|
|
|
|
|
|
2020-10-09 11:17:08 +02:00
|
|
|
|
// # Allocation error handler

#[cfg(not(no_global_oom_handling))]
extern "Rust" {
    // This is the magic symbol to call the global alloc error handler. rustc generates
    // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
    // default implementations below (`__rdl_oom`) otherwise.
    #[rustc_allocator_nounwind]
    fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
}
|
|
|
|
|
|
2018-05-31 18:23:42 +02:00
|
|
|
|
/// Abort on memory allocation error or failure.
///
/// Callers of memory allocation APIs wishing to abort computation
/// in response to an allocation error are encouraged to call this function,
/// rather than directly invoking `panic!` or similar.
///
/// The default behavior of this function is to print a message to standard error
/// and abort the process.
/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
///
/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
#[stable(feature = "global_alloc", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[rustc_allocator_nounwind]
#[cold]
pub const fn handle_alloc_error(layout: Layout) -> ! {
    // Compile-time path: no hook machinery exists during const evaluation,
    // so just fail the evaluation with a panic.
    const fn ct_error(_: Layout) -> ! {
        panic!("allocation failed");
    }

    // Runtime path: dispatch to the registered error handler through the
    // linker-resolved `__rust_alloc_error_handler` symbol (diverges).
    fn rt_error(layout: Layout) -> ! {
        unsafe {
            __rust_alloc_error_handler(layout.size(), layout.align());
        }
    }

    // SAFETY: both branches diverge on allocation failure; which one runs only
    // changes the failure mechanism (const-eval panic vs. runtime handler).
    unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
}
|
|
|
|
|
|
|
|
|
|
// For alloc test `std::alloc::handle_alloc_error` can be used directly.
// (The local definition above is compiled out under `cfg(test)`.)
#[cfg(all(not(no_global_oom_handling), test))]
pub use std::alloc::handle_alloc_error;
|
|
|
|
|
|
alloc: Add unstable Cfg feature `no-global_oom_handling`
For certain sorts of systems, programming, it's deemed essential that
all allocation failures be explicitly handled where they occur. For
example, see Linus Torvald's opinion in [1]. Merely not calling global
panic handlers, or always `try_reserving` first (for vectors), is not
deemed good enough, because the mere presence of the global OOM handlers
is burdens static analysis.
One option for these projects to use rust would just be to skip `alloc`,
rolling their own allocation abstractions. But this would, in my
opinion be a real shame. `alloc` has a few `try_*` methods already, and
we could easily have more. Features like custom allocator support also
demonstrate and existing to support diverse use-cases with the same
abstractions.
A natural way to add such a feature flag would a Cargo feature, but
there are currently uncertainties around how std library crate's Cargo
features may or not be stable, so to avoid any risk of stabilizing by
mistake we are going with a more low-level "raw cfg" token, which
cannot be interacted with via Cargo alone.
Note also that since there is no notion of "default cfg tokens" outside
of Cargo features, we have to invert the condition from
`global_oom_handling` to to `not(no_global_oom_handling)`. This breaks
the monotonicity that would be important for a Cargo feature (i.e.
turning on more features should never break compatibility), but it
doesn't matter for raw cfg tokens which are not intended to be
"constraint solved" by Cargo or anything else.
To support this use-case we create a new feature, "global-oom-handling",
on by default, and put the global OOM handler infra and everything else
it that depends on it behind it. By default, nothing is changed, but
users concerned about global handling can make sure it is disabled, and
be confident that all OOM handling is local and explicit.
For this first iteration, non-flat collections are outright disabled.
`Vec` and `String` don't yet have `try_*` allocation methods, but are
kept anyways since they can be oom-safely created "from parts", and we
hope to add those `try_` methods in the future.
[1]: https://lore.kernel.org/lkml/CAHk-=wh_sNLoz84AUUzuqXEsYH35u=8HV3vK-jbRbJ_B-JjGrg@mail.gmail.com/
2021-04-16 20:18:04 -04:00
|
|
|
|
// Default and forwarding implementations of the allocation-error handler symbols.
#[cfg(all(not(no_global_oom_handling), not(any(target_os = "hermit", test))))]
#[doc(hidden)]
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
    use crate::alloc::Layout;

    // called via generated `__rust_alloc_error_handler`
    // if there is no `#[alloc_error_handler]`
    #[rustc_std_internal_symbol]
    pub unsafe extern "C" fn __rdl_oom(size: usize, _align: usize) -> ! {
        panic!("memory allocation of {} bytes failed", size)
    }

    // if there is an `#[alloc_error_handler]`
    #[rustc_std_internal_symbol]
    pub unsafe extern "C" fn __rg_oom(size: usize, align: usize) -> ! {
        // SAFETY: `size`/`align` describe the failed allocation and are presumed
        // to form a valid layout — TODO(review): confirm against the generator.
        let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
        extern "Rust" {
            // The user-registered `#[alloc_error_handler]`, resolved via lang item.
            #[lang = "oom"]
            fn oom_impl(layout: Layout) -> !;
        }
        // SAFETY: calling the extern handler; it is declared diverging.
        unsafe { oom_impl(layout) }
    }
}
|
2021-01-12 12:24:28 -08:00
|
|
|
|
|
|
|
|
|
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
    // Writes a clone of `self` to `target`. Caller must ensure `target` is valid
    // for a write of `Self` (it may point at uninitialized memory).
    unsafe fn write_clone_into_raw(&self, target: *mut Self);
}
|
|
|
|
|
|
|
|
|
|
// General case: clone into the destination. Marked `default` so the `Copy`
// specialization below can override it.
impl<T: Clone> WriteCloneIntoRaw for T {
    #[inline]
    default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
        // Having allocated *first* may allow the optimizer to create
        // the cloned value in-place, skipping the local and move.
        unsafe { target.write(self.clone()) };
    }
}
|
|
|
|
|
|
|
|
|
|
// Specialization for `Copy` types: a raw byte copy is always sufficient.
impl<T: Copy> WriteCloneIntoRaw for T {
    #[inline]
    unsafe fn write_clone_into_raw(&self, target: *mut Self) {
        // We can always copy in-place, without ever involving a local value.
        unsafe { target.copy_from_nonoverlapping(self, 1) };
    }
}
|