
Merge branch 'master' into binary-heap-ta

This commit is contained in:
yanchith 2023-06-09 11:22:08 +02:00
commit cb5c011670
42145 changed files with 1210864 additions and 417676 deletions

View file

@ -14,28 +14,31 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
-use core::marker::Destruct;
#[cfg(test)]
mod tests;
extern "Rust" {
// These are the magic symbols to call the global allocator. rustc generates
// them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
// (the code expanding that attribute macro generates those functions), or to call
-// the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+// the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
// otherwise.
-// The rustc fork of LLVM also special-cases these function names to be able to optimize them
+// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
#[rustc_allocator]
-#[rustc_allocator_nounwind]
+#[rustc_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
-#[rustc_allocator_nounwind]
+#[rustc_deallocator]
+#[rustc_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
-#[rustc_allocator_nounwind]
+#[rustc_reallocator]
+#[rustc_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
-#[rustc_allocator_nounwind]
+#[rustc_allocator_zeroed]
+#[rustc_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+static __rust_no_alloc_shim_is_unstable: u8;
}
/// The global memory allocator.
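For readers coming to this hunk fresh: the extern block above declares the symbols rustc synthesizes when a crate registers a `#[global_allocator]`. A minimal sketch of such an allocator for context; the `Forwarding` type and `main` are illustrative, not part of this diff:

```rust
use std::alloc::{GlobalAlloc, Layout, System};

// Hypothetical forwarding allocator; registering it via `#[global_allocator]`
// is what causes rustc to emit the `__rg_alloc` family of symbols that the
// extern block above declares and calls into.
struct Forwarding;

unsafe impl GlobalAlloc for Forwarding {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout)
    }
}

#[global_allocator]
static GLOBAL: Forwarding = Forwarding;

fn main() {
    let v = vec![1u8, 2, 3]; // allocated through `Forwarding`
    assert_eq!(v.len(), 3);
}
```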
@ -70,11 +73,14 @@ pub use std::alloc::Global;
/// # Examples
///
/// ```
-/// use std::alloc::{alloc, dealloc, Layout};
+/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
///
/// unsafe {
/// let layout = Layout::new::<u16>();
/// let ptr = alloc(layout);
+/// if ptr.is_null() {
+/// handle_alloc_error(layout);
+/// }
///
/// *(ptr as *mut u16) = 42;
/// assert_eq!(*(ptr as *mut u16), 42);
@ -86,7 +92,13 @@ pub use std::alloc::Global;
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
-unsafe { __rust_alloc(layout.size(), layout.align()) }
+unsafe {
+// Make sure we don't accidentally allow omitting the allocator shim in
+// stable code until it is actually stabilized.
+core::ptr::read_volatile(&__rust_no_alloc_shim_is_unstable);
+__rust_alloc(layout.size(), layout.align())
+}
}
/// Deallocate memory with the global allocator.
@ -325,16 +337,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
ptr: Unique<T>,
alloc: A,
) {
pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
@ -347,7 +355,7 @@ pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Dest
#[cfg(not(no_global_oom_handling))]
extern "Rust" {
// This is the magic symbol to call the global alloc error handler. rustc generates
// it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
// default implementations below (`__rdl_oom`) otherwise.
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
@ -392,25 +400,24 @@ pub use std::alloc::handle_alloc_error;
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
-use crate::alloc::Layout;
-// called via generated `__rust_alloc_error_handler`
-// if there is no `#[alloc_error_handler]`
+// called via generated `__rust_alloc_error_handler` if there is no
+// `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
-panic!("memory allocation of {size} bytes failed")
-}
-// if there is an `#[alloc_error_handler]`
-#[rustc_std_internal_symbol]
-pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
-let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
extern "Rust" {
-#[lang = "oom"]
-fn oom_impl(layout: Layout) -> !;
+// This symbol is emitted by rustc next to __rust_alloc_error_handler.
+// Its value depends on the -Zoom={panic,abort} compiler option.
+static __rust_alloc_error_handler_should_panic: u8;
}
+#[allow(unused_unsafe)]
+if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+panic!("memory allocation of {size} bytes failed")
+} else {
+core::panicking::panic_nounwind_fmt(format_args!(
+"memory allocation of {size} bytes failed"
+))
+}
-unsafe { oom_impl(layout) }
}
}
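The rewritten `__rdl_oom` chooses between a panicking and a non-unwinding path via the `__rust_alloc_error_handler_should_panic` flag. From the user's side, the entry point into this machinery is `handle_alloc_error`; a small sketch of typical fallible-allocation glue (the `allocate_or_die` helper is ours, not part of this diff):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

// Hypothetical helper: allocate or divert into the global alloc error
// handler, which ends up in the `__rdl_oom`/`__rg_oom` paths above.
fn allocate_or_die(layout: Layout) -> *mut u8 {
    assert!(layout.size() > 0);
    // SAFETY: layout has non-zero size, checked above.
    let ptr = unsafe { alloc(layout) };
    if ptr.is_null() {
        // Diverges; whether it panics or aborts depends on the flag above.
        handle_alloc_error(layout);
    }
    ptr
}

fn main() {
    let layout = Layout::new::<u64>();
    let p = allocate_or_die(layout);
    unsafe { dealloc(p, layout) };
}
```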

View file

@ -15,14 +15,13 @@ fn allocate_zeroed() {
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
-i = i.offset(1);
+i = i.add(1);
}
Global.deallocate(ptr.as_non_null_ptr(), layout);
}
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn alloc_owned_small(b: &mut Bencher) {
b.iter(|| {
let _: Box<_> = Box::new(10);

View file

@ -21,7 +21,6 @@ use Cow::*;
impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
where
B: ToOwned,
-<B as ToOwned>::Owned: 'a,
{
fn borrow(&self) -> &B {
&**self
@ -116,7 +115,7 @@ where
/// ```
/// use std::borrow::Cow;
///
-/// fn abs_all(input: &mut Cow<[i32]>) {
+/// fn abs_all(input: &mut Cow<'_, [i32]>) {
/// for i in 0..input.len() {
/// let v = input[i];
/// if v < 0 {
@ -146,7 +145,7 @@ where
/// ```
/// use std::borrow::Cow;
///
-/// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
+/// struct Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
/// values: Cow<'a, [X]>,
/// }
///
@ -268,7 +267,7 @@ impl<B: ?Sized + ToOwned> Cow<'_, B> {
///
/// assert_eq!(
/// cow,
/// Cow::Owned(String::from("FOO")) as Cow<str>
/// Cow::Owned(String::from("FOO")) as Cow<'_, str>
/// );
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
@ -312,7 +311,7 @@ impl<B: ?Sized + ToOwned> Cow<'_, B> {
/// use std::borrow::Cow;
///
/// let s = "Hello world!";
-/// let cow: Cow<str> = Cow::Owned(String::from(s));
+/// let cow: Cow<'_, str> = Cow::Owned(String::from(s));
///
/// assert_eq!(
/// cow.into_owned(),
@ -329,10 +328,9 @@ impl<B: ?Sized + ToOwned> Cow<'_, B> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
impl<B: ?Sized + ToOwned> Deref for Cow<'_, B>
where
B::Owned: ~const Borrow<B>,
B::Owned: Borrow<B>,
{
type Target = B;

View file

@ -1,4 +1,4 @@
-//! A pointer type for heap allocation.
+//! The `Box<T>` type for heap allocation.
//!
//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
@ -150,14 +150,13 @@ use core::any::Any;
use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
+use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
-use core::iter::{FusedIterator, Iterator};
-use core::marker::{Destruct, Unpin, Unsize};
+use core::iter::FusedIterator;
+use core::marker::Tuple;
+use core::marker::Unsize;
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
@ -175,6 +174,8 @@ use crate::raw_vec::RawVec;
#[cfg(not(no_global_oom_handling))]
use crate::str::from_boxed_utf8_unchecked;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;
#[unstable(feature = "thin_box", issue = "92791")]
@ -182,7 +183,7 @@ pub use thin::ThinBox;
mod thin;
-/// A pointer type for heap allocation.
+/// A pointer type that uniquely owns a heap allocation of type `T`.
///
/// See the [module-level documentation](../../std/boxed/index.html) for more.
#[lang = "owned_box"]
@ -210,6 +211,7 @@ impl<T> Box<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
#[rustc_diagnostic_item = "box_new"]
pub fn new(x: T) -> Self {
#[rustc_box]
Box::new(x)
@ -279,9 +281,7 @@ impl<T> Box<T> {
#[must_use]
#[inline(always)]
pub fn pin(x: T) -> Pin<Box<T>> {
-(#[rustc_box]
-Box::new(x))
-.into()
+Box::new(x).into()
}
/// Allocates memory on the heap then places `x` into it,
@ -373,12 +373,11 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
pub const fn new_in(x: T, alloc: A) -> Self
pub fn new_in(x: T, alloc: A) -> Self
where
A: ~const Allocator + ~const Destruct,
A: Allocator,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
@ -403,12 +402,10 @@ impl<T, A: Allocator> Box<T, A> {
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
T: ~const Destruct,
A: ~const Allocator + ~const Destruct,
A: Allocator,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
@ -438,13 +435,12 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
A: ~const Allocator + ~const Destruct,
A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@ -479,10 +475,9 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
A: ~const Allocator + ~const Destruct,
A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
@ -510,13 +505,12 @@ impl<T, A: Allocator> Box<T, A> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
A: ~const Allocator + ~const Destruct,
A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@ -551,10 +545,9 @@ impl<T, A: Allocator> Box<T, A> {
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
A: ~const Allocator + ~const Destruct,
A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
@ -570,12 +563,11 @@ impl<T, A: Allocator> Box<T, A> {
/// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
pub fn pin_in(x: T, alloc: A) -> Pin<Self>
where
A: 'static + ~const Allocator + ~const Destruct,
A: 'static + Allocator,
{
Self::into_pin(Self::new_in(x, alloc))
}
@ -584,8 +576,7 @@ impl<T, A: Allocator> Box<T, A> {
///
/// This conversion does not allocate on the heap and happens in place.
#[unstable(feature = "box_into_boxed_slice", issue = "71582")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
pub const fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
let (raw, alloc) = Box::into_raw_with_allocator(boxed);
unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
}
@ -602,12 +593,8 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(Box::into_inner(c), 5);
/// ```
#[unstable(feature = "box_into_inner", issue = "80437")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
pub const fn into_inner(boxed: Self) -> T
where
Self: ~const Destruct,
{
pub fn into_inner(boxed: Self) -> T {
*boxed
}
}
@ -821,9 +808,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
pub const unsafe fn assume_init(self) -> Box<T, A> {
pub unsafe fn assume_init(self) -> Box<T, A> {
let (raw, alloc) = Box::into_raw_with_allocator(self);
unsafe { Box::from_raw_in(raw as *mut T, alloc) }
}
@ -856,9 +842,8 @@ impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
/// }
/// ```
#[unstable(feature = "new_uninit", issue = "63291")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
pub const fn write(mut boxed: Self, value: T) -> Box<T, A> {
pub fn write(mut boxed: Self, value: T) -> Box<T, A> {
unsafe {
(*boxed).write(value);
boxed.assume_init()
@ -949,6 +934,7 @@ impl<T: ?Sized> Box<T> {
/// [`Layout`]: crate::Layout
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
#[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"]
pub unsafe fn from_raw(raw: *mut T) -> Self {
unsafe { Self::from_raw_in(raw, Global) }
}
@ -1101,9 +1087,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
///
/// [memory layout]: self#memory-layout
#[unstable(feature = "allocator_api", issue = "32838")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
pub const fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
let (leaked, alloc) = Box::into_unique(b);
(leaked.as_ptr(), alloc)
}
@ -1113,10 +1098,9 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
issue = "none",
reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
)]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
#[doc(hidden)]
pub const fn into_unique(b: Self) -> (Unique<T>, A) {
pub fn into_unique(b: Self) -> (Unique<T>, A) {
// Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
// raw pointer for the type system. Turning it directly into a raw pointer would not be
// recognized as "releasing" the unique pointer to permit aliased raw accesses,
@ -1174,9 +1158,8 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// assert_eq!(*static_ref, [4, 2, 3]);
/// ```
#[stable(feature = "box_leak", since = "1.26.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
pub const fn leak<'a>(b: Self) -> &'a mut T
pub fn leak<'a>(b: Self) -> &'a mut T
where
A: 'a,
{
@ -1237,16 +1220,16 @@ unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Default> Default for Box<T> {
/// Creates a `Box<T>`, with the `Default` value for T.
+#[inline]
fn default() -> Self {
-#[rustc_box]
Box::new(T::default())
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for Box<[T]> {
impl<T> Default for Box<[T]> {
#[inline]
fn default() -> Self {
let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
Box(ptr, Global)
@ -1255,8 +1238,8 @@ impl<T> const Default for Box<[T]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "default_box_extra", since = "1.17.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl const Default for Box<str> {
impl Default for Box<str> {
#[inline]
fn default() -> Self {
// SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
let ptr: Unique<str> = unsafe {
@ -1452,8 +1435,7 @@ impl<T> From<T> for Box<T> {
}
#[stable(feature = "pin", since = "1.33.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
where
A: 'static,
{
@ -1473,23 +1455,24 @@ where
}
}
+/// Specialization trait used for `From<&[T]>`.
#[cfg(not(no_global_oom_handling))]
-#[stable(feature = "box_from_slice", since = "1.17.0")]
-impl<T: Copy> From<&[T]> for Box<[T]> {
-/// Converts a `&[T]` into a `Box<[T]>`
-///
-/// This conversion allocates on the heap
-/// and performs a copy of `slice`.
-///
-/// # Examples
-/// ```rust
-/// // create a &[u8] which will be used to create a Box<[u8]>
-/// let slice: &[u8] = &[104, 101, 108, 108, 111];
-/// let boxed_slice: Box<[u8]> = Box::from(slice);
-///
-/// println!("{boxed_slice:?}");
-/// ```
-fn from(slice: &[T]) -> Box<[T]> {
+trait BoxFromSlice<T> {
+fn from_slice(slice: &[T]) -> Self;
+}
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> BoxFromSlice<T> for Box<[T]> {
+#[inline]
+default fn from_slice(slice: &[T]) -> Self {
+slice.to_vec().into_boxed_slice()
+}
+}
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> BoxFromSlice<T> for Box<[T]> {
+#[inline]
+fn from_slice(slice: &[T]) -> Self {
let len = slice.len();
let buf = RawVec::with_capacity(len);
unsafe {
@ -1499,9 +1482,31 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_slice", since = "1.17.0")]
impl<T: Clone> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
/// and performs a copy of `slice` and its contents.
///
/// # Examples
/// ```rust
/// // create a &[u8] which will be used to create a Box<[u8]>
/// let slice: &[u8] = &[104, 101, 108, 108, 111];
/// let boxed_slice: Box<[u8]> = Box::from(slice);
///
/// println!("{boxed_slice:?}");
/// ```
#[inline]
fn from(slice: &[T]) -> Box<[T]> {
<Self as BoxFromSlice<T>>::from_slice(slice)
}
}
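The `BoxFromSlice` helper is a specialization trait: every `T: Clone` takes the `to_vec` path through the `default fn`, while `T: Copy` overrides it with the `RawVec` memcpy path. Call sites don't change; both cases go through the same stable `From` impl. A usage sketch under the relaxed `T: Clone` bound introduced here:

```rust
fn main() {
    // T: Copy — expected to take the specialized copy_nonoverlapping path.
    let bytes: Box<[u8]> = Box::from(&b"hello"[..]);
    assert_eq!(&*bytes, &b"hello"[..]);

    // T: Clone but not Copy — falls back to the `slice.to_vec()` default.
    let words: Box<[String]> = Box::from(&["a".to_string(), "b".to_string()][..]);
    assert_eq!(words.len(), 2);
}
```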
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_from_cow", since = "1.45.0")]
-impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+impl<T: Clone> From<Cow<'_, [T]>> for Box<[T]> {
/// Converts a `Cow<'_, [T]>` into a `Box<[T]>`
///
/// When `cow` is the `Cow::Borrowed` variant, this
@ -1611,11 +1616,26 @@ impl<T, const N: usize> From<[T; N]> for Box<[T]> {
/// println!("{boxed:?}");
/// ```
fn from(array: [T; N]) -> Box<[T]> {
-#[rustc_box]
Box::new(array)
}
}
/// Casts a boxed slice to a boxed array.
///
/// # Safety
///
/// `boxed_slice.len()` must be exactly `N`.
unsafe fn boxed_slice_as_array_unchecked<T, A: Allocator, const N: usize>(
boxed_slice: Box<[T], A>,
) -> Box<[T; N], A> {
debug_assert_eq!(boxed_slice.len(), N);
let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice);
// SAFETY: Pointer and allocator came from an existing box,
// and our safety condition requires that the length is exactly `N`
unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) }
}
#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
type Error = Box<[T]>;
@ -1631,13 +1651,46 @@ impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
/// `boxed_slice.len()` does not equal `N`.
fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
if boxed_slice.len() == N {
-Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
} else {
Err(boxed_slice)
}
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "boxed_array_try_from_vec", since = "1.66.0")]
impl<T, const N: usize> TryFrom<Vec<T>> for Box<[T; N]> {
type Error = Vec<T>;
/// Attempts to convert a `Vec<T>` into a `Box<[T; N]>`.
///
/// Like [`Vec::into_boxed_slice`], this is in-place if `vec.capacity() == N`,
/// but will require a reallocation otherwise.
///
/// # Errors
///
/// Returns the original `Vec<T>` in the `Err` variant if
/// `boxed_slice.len()` does not equal `N`.
///
/// # Examples
///
/// This can be used with [`vec!`] to create an array on the heap:
///
/// ```
/// let state: Box<[f32; 100]> = vec![1.0; 100].try_into().unwrap();
/// assert_eq!(state.len(), 100);
/// ```
fn try_from(vec: Vec<T>) -> Result<Self, Self::Error> {
if vec.len() == N {
let boxed_slice = vec.into_boxed_slice();
Ok(unsafe { boxed_slice_as_array_unchecked(boxed_slice) })
} else {
Err(vec)
}
}
}
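Both `TryFrom` impls now funnel through `boxed_slice_as_array_unchecked` after a length check, so the only failure mode is a length mismatch, reported by handing the input back. A usage sketch:

```rust
fn main() {
    // Box<[T]> -> Box<[T; N]>: succeeds only when the length is exactly N.
    let boxed_slice: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
    let arr: Box<[i32; 3]> = boxed_slice.try_into().unwrap();
    assert_eq!(*arr, [1, 2, 3]);

    // Vec<T> -> Box<[T; N]>: on mismatch the original Vec comes back in Err.
    let result: Result<Box<[i32; 4]>, Vec<i32>> = vec![1, 2, 3].try_into();
    assert_eq!(result.unwrap_err(), vec![1, 2, 3]);
}
```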
impl<A: Allocator> Box<dyn Any, A> {
/// Attempt to downcast the box to a concrete type.
///
@ -1841,8 +1894,7 @@ impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
type Target = T;
fn deref(&self) -> &T {
@ -1851,8 +1903,7 @@ impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
impl<T: ?Sized, A: Allocator> const DerefMut for Box<T, A> {
impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
fn deref_mut(&mut self) -> &mut T {
&mut **self
}
@ -1927,7 +1978,7 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
type Output = <F as FnOnce<Args>>::Output;
extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
@ -1936,20 +1987,20 @@ impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
<F as FnMut<Args>>::call_mut(self, args)
}
}
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
<F as Fn<Args>>::call(self, args)
}
}
#[unstable(feature = "coerce_unsized", issue = "27732")]
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@ -2031,8 +2082,7 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
* could have a method to project a Pin<T> from it.
*/
#[stable(feature = "pin", since = "1.33.0")]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {}
impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
#[unstable(feature = "generator_trait", issue = "43122")]
impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
@ -2084,3 +2134,292 @@ impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
(**self).size_hint()
}
}
impl dyn Error {
#[inline]
#[stable(feature = "error_downcast", since = "1.3.0")]
#[rustc_allow_incoherent_impl]
/// Attempts to downcast the box to a concrete type.
pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
if self.is::<T>() {
unsafe {
let raw: *mut dyn Error = Box::into_raw(self);
Ok(Box::from_raw(raw as *mut T))
}
} else {
Err(self)
}
}
}
impl dyn Error + Send {
#[inline]
#[stable(feature = "error_downcast", since = "1.3.0")]
#[rustc_allow_incoherent_impl]
/// Attempts to downcast the box to a concrete type.
pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
let err: Box<dyn Error> = self;
<dyn Error>::downcast(err).map_err(|s| unsafe {
// Reapply the `Send` marker.
mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
})
}
}
impl dyn Error + Send + Sync {
#[inline]
#[stable(feature = "error_downcast", since = "1.3.0")]
#[rustc_allow_incoherent_impl]
/// Attempts to downcast the box to a concrete type.
pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
let err: Box<dyn Error> = self;
<dyn Error>::downcast(err).map_err(|s| unsafe {
// Reapply the `Send + Sync` marker.
mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
})
}
}
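These inherent `downcast` methods are the usual way to recover a concrete error type from a boxed trait object; the `Send`/`Send + Sync` variants delegate to the plain `dyn Error` version and re-apply the markers via `transmute`. A short, self-contained usage sketch (the `Oops` type is illustrative):

```rust
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct Oops;

impl fmt::Display for Oops {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "oops")
    }
}

impl Error for Oops {}

fn main() {
    let boxed: Box<dyn Error> = Box::new(Oops);
    // Downcasting consumes the box and hands it back on failure.
    match boxed.downcast::<Oops>() {
        Ok(concrete) => println!("got back a Box<Oops>: {concrete}"),
        Err(other) => println!("still a Box<dyn Error>: {other}"),
    }
}
```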
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
/// Converts a type of [`Error`] into a box of dyn [`Error`].
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::fmt;
/// use std::mem;
///
/// #[derive(Debug)]
/// struct AnError;
///
/// impl fmt::Display for AnError {
/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// write!(f, "An error")
/// }
/// }
///
/// impl Error for AnError {}
///
/// let an_error = AnError;
/// assert!(0 == mem::size_of_val(&an_error));
/// let a_boxed_error = Box::<dyn Error>::from(an_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// ```
fn from(err: E) -> Box<dyn Error + 'a> {
Box::new(err)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
/// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
/// dyn [`Error`] + [`Send`] + [`Sync`].
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::fmt;
/// use std::mem;
///
/// #[derive(Debug)]
/// struct AnError;
///
/// impl fmt::Display for AnError {
/// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// write!(f, "An error")
/// }
/// }
///
/// impl Error for AnError {}
///
/// unsafe impl Send for AnError {}
///
/// unsafe impl Sync for AnError {}
///
/// let an_error = AnError;
/// assert!(0 == mem::size_of_val(&an_error));
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// ```
fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
Box::new(err)
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl From<String> for Box<dyn Error + Send + Sync> {
/// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_string_error = "a string error".to_string();
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// ```
#[inline]
fn from(err: String) -> Box<dyn Error + Send + Sync> {
struct StringError(String);
impl Error for StringError {
#[allow(deprecated)]
fn description(&self) -> &str {
&self.0
}
}
impl fmt::Display for StringError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
}
// Purposefully skip printing "StringError(..)"
impl fmt::Debug for StringError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
Box::new(StringError(err))
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "string_box_error", since = "1.6.0")]
impl From<String> for Box<dyn Error> {
/// Converts a [`String`] into a box of dyn [`Error`].
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_string_error = "a string error".to_string();
/// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// ```
fn from(str_err: String) -> Box<dyn Error> {
let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
let err2: Box<dyn Error> = err1;
err2
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
/// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
///
/// [`str`]: prim@str
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_str_error = "a str error";
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// ```
#[inline]
fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
From::from(String::from(err))
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "string_box_error", since = "1.6.0")]
impl From<&str> for Box<dyn Error> {
/// Converts a [`str`] into a box of dyn [`Error`].
///
/// [`str`]: prim@str
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::mem;
///
/// let a_str_error = "a str error";
/// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// ```
fn from(err: &str) -> Box<dyn Error> {
From::from(String::from(err))
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_box_error", since = "1.22.0")]
impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
/// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::mem;
/// use std::borrow::Cow;
///
/// let a_cow_str_error = Cow::from("a str error");
/// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
/// assert!(
/// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
/// ```
fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
From::from(String::from(err))
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_box_error", since = "1.22.0")]
impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
/// Converts a [`Cow`] into a box of dyn [`Error`].
///
/// # Examples
///
/// ```
/// use std::error::Error;
/// use std::mem;
/// use std::borrow::Cow;
///
/// let a_cow_str_error = Cow::from("a str error");
/// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
/// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
/// ```
fn from(err: Cow<'a, str>) -> Box<dyn Error> {
From::from(String::from(err))
}
}
#[stable(feature = "box_error", since = "1.8.0")]
impl<T: core::error::Error> core::error::Error for Box<T> {
#[allow(deprecated, deprecated_in_future)]
fn description(&self) -> &str {
core::error::Error::description(&**self)
}
#[allow(deprecated)]
fn cause(&self) -> Option<&dyn core::error::Error> {
core::error::Error::cause(&**self)
}
fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
core::error::Error::source(&**self)
}
}

View file

@ -2,11 +2,12 @@
// https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs
// by matthieu-m
use crate::alloc::{self, Layout, LayoutError};
use core::error::Error;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
#[cfg(not(no_global_oom_handling))]
use core::marker::Unsize;
-use core::mem;
+use core::mem::{self, SizedTypeProperties};
use core::ops::{Deref, DerefMut};
use core::ptr::Pointee;
use core::ptr::{self, NonNull};
@ -47,7 +48,7 @@ unsafe impl<T: ?Sized + Sync> Sync for ThinBox<T> {}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T> ThinBox<T> {
-/// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on
+/// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on
/// the stack.
///
/// # Examples
@ -58,6 +59,8 @@ impl<T> ThinBox<T> {
///
/// let five = ThinBox::new(5);
/// ```
///
/// [`Metadata`]: core::ptr::Pointee::Metadata
#[cfg(not(no_global_oom_handling))]
pub fn new(value: T) -> Self {
let meta = ptr::metadata(&value);
@ -68,7 +71,7 @@ impl<T> ThinBox<T> {
#[unstable(feature = "thin_box", issue = "92791")]
impl<Dyn: ?Sized> ThinBox<Dyn> {
-/// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on
+/// Moves a type to the heap with its [`Metadata`] stored in the heap allocation instead of on
/// the stack.
///
/// # Examples
@ -79,6 +82,8 @@ impl<Dyn: ?Sized> ThinBox<Dyn> {
///
/// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
/// ```
///
/// [`Metadata`]: core::ptr::Pointee::Metadata
#[cfg(not(no_global_oom_handling))]
pub fn new_unsize<T>(value: T) -> Self
where
@ -197,9 +202,7 @@ impl<H> WithHeader<H> {
let ptr = if layout.size() == 0 {
// Some paranoia checking, mostly so that the ThinBox tests are
// more able to catch issues.
-debug_assert!(
-value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
-);
+debug_assert!(value_offset == 0 && T::IS_ZST && H::IS_ZST);
layout.dangling()
} else {
let ptr = alloc::alloc(layout);
@ -225,24 +228,43 @@ impl<H> WithHeader<H> {
// - Assumes that either `value` can be dereferenced, or is the
// `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
struct DropGuard<H> {
ptr: NonNull<u8>,
value_layout: Layout,
_marker: PhantomData<H>,
}
impl<H> Drop for DropGuard<H> {
fn drop(&mut self) {
unsafe {
// SAFETY: Layout must have been computable if we're in drop
let (layout, value_offset) =
WithHeader::<H>::alloc_layout(self.value_layout).unwrap_unchecked();
// Note: Don't deallocate if the layout size is zero, because the pointer
// didn't come from the allocator.
if layout.size() != 0 {
alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
} else {
debug_assert!(
value_offset == 0 && H::IS_ZST && self.value_layout.size() == 0
);
}
}
}
}
unsafe {
-let value_layout = Layout::for_value_raw(value);
-// SAFETY: Layout must have been computable if we're in drop
-let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked();
+// `_guard` will deallocate the memory when dropped, even if `drop_in_place` unwinds.
+let _guard = DropGuard {
+ptr: self.0,
+value_layout: Layout::for_value_raw(value),
+_marker: PhantomData::<H>,
+};
// We only drop the value because the Pointee trait requires that the metadata is copy
// aka trivially droppable.
ptr::drop_in_place::<T>(value);
-// Note: Don't deallocate if the layout size is zero, because the pointer
-// didn't come from the allocator.
-if layout.size() != 0 {
-alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
-} else {
-debug_assert!(
-value_offset == 0 && mem::size_of::<H>() == 0 && value_layout.size() == 0
-);
-}
}
}
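The new `DropGuard` moves the deallocation into a destructor so the backing memory is released even if `drop_in_place` panics partway through. The same idiom in miniature, with hypothetical names (`Cleanup`, `risky`):

```rust
// Hypothetical sketch of the drop-guard idiom: cleanup lives in a Drop impl,
// so it runs on both the normal path and the unwinding path.
struct Cleanup<'a> {
    log: &'a mut Vec<&'static str>,
}

impl Drop for Cleanup<'_> {
    fn drop(&mut self) {
        self.log.push("cleaned up"); // stand-in for `alloc::dealloc`
    }
}

fn risky(log: &mut Vec<&'static str>, fail: bool) {
    let _guard = Cleanup { log };
    if fail {
        panic!("mid-operation failure"); // the guard still runs
    }
}

fn main() {
    let mut log = Vec::new();
    let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| risky(&mut log, true)));
    assert_eq!(log, ["cleaned up"]);
}
```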
@ -271,3 +293,10 @@ impl<H> WithHeader<H> {
Layout::new::<H>().extend(value_layout)
}
}
#[unstable(feature = "thin_box", issue = "92791")]
impl<T: ?Sized + Error> Error for ThinBox<T> {
fn source(&self) -> Option<&(dyn Error + 'static)> {
self.deref().source()
}
}
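For context on what this file implements: `ThinBox` keeps the pointer metadata in the allocation's header, so the handle stays pointer-sized even for unsized payloads. A nightly-only usage sketch (requires the unstable `thin_box` feature):

```rust
#![feature(thin_box)]
use std::boxed::ThinBox;
use std::mem;

fn main() {
    // The slice length lives inside the allocation, so the handle is a
    // single thin pointer rather than a (pointer, length) pair.
    let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
    assert_eq!(mem::size_of_val(&thin_slice), mem::size_of::<usize>());
    assert_eq!(thin_slice[3], 4);
}
```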

View file

@ -145,8 +145,9 @@
use core::alloc::Allocator;
use core::fmt;
-use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
use core::ptr;
@ -155,8 +156,6 @@ use crate::collections::TryReserveError;
use crate::slice;
use crate::vec::{self, AsVecIntoIter, Vec};
-use super::SpecExtend;
#[cfg(test)]
mod tests;
@ -167,12 +166,20 @@ mod tests;
/// It is a logic error for an item to be modified in such a way that the
/// item's ordering relative to any other item, as determined by the [`Ord`]
/// trait, changes while it is in the heap. This is normally only possible
-/// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
+/// through interior mutability, global state, I/O, or unsafe code. The
/// behavior resulting from such a logic error is not specified, but will
/// be encapsulated to the `BinaryHeap` that observed the logic error and not
/// result in undefined behavior. This could include panics, incorrect results,
/// aborts, memory leaks, and non-termination.
///
/// As long as no elements change their relative order while being in the heap
/// as described above, the API of `BinaryHeap` guarantees that the heap
/// invariant remains intact, i.e., its methods all behave as documented. For
/// example if a method is documented as iterating in sorted order, that's
/// guaranteed to work as long as elements in the heap have not changed order,
/// even in the presence of closures getting unwound out of, iterators getting
/// leaked, and similar foolishness.
///
/// # Examples
///
/// ```
@ -258,7 +265,6 @@ mod tests;
/// more detailed analysis.
///
/// [`core::cmp::Reverse`]: core::cmp::Reverse
-/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
/// [push]: BinaryHeap::push
@ -274,6 +280,7 @@ pub struct BinaryHeap<
data: Vec<T, A>,
}
// XXX: PeekMut<A>
/// Structure wrapping a mutable reference to the greatest item on a
/// `BinaryHeap`.
///
@ -282,13 +289,11 @@ pub struct BinaryHeap<
///
/// [`peek_mut`]: BinaryHeap::peek_mut
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-pub struct PeekMut<
-'a,
-T: 'a + Ord,
-#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
-> {
-heap: &'a mut BinaryHeap<T, A>,
-sift: bool,
+pub struct PeekMut<'a, T: 'a + Ord> {
+heap: &'a mut BinaryHeap<T>,
+// If a set_len + sift_down are required, this is Some. If a &mut T has not
+// yet been exposed to peek_mut()'s caller, it's None.
+original_len: Option<NonZeroUsize>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
@ -301,7 +306,14 @@ impl<'a, T: Ord + fmt::Debug, A: Allocator + 'a> fmt::Debug for PeekMut<'a, T, A
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
impl<'a, T: Ord, A: Allocator + 'a> Drop for PeekMut<'a, T, A> {
fn drop(&mut self) {
-if self.sift {
+if let Some(original_len) = self.original_len {
+// SAFETY: That's how many elements were in the Vec at the time of
+// the PeekMut::deref_mut call, and therefore also at the time of
+// the BinaryHeap::peek_mut call. Since the PeekMut did not end up
+// getting leaked, we are now undoing the leak amplification that
+// the DerefMut prepared for.
+unsafe { self.heap.data.set_len(original_len.get()) };
+// SAFETY: PeekMut is only instantiated for non-empty heaps.
unsafe { self.heap.sift_down(0) };
}
@ -322,19 +334,46 @@ impl<'a, T: Ord, A: Allocator + 'a> Deref for PeekMut<'a, T, A> {
impl<'a, T: Ord, A: Allocator + 'a> DerefMut for PeekMut<'a, T, A> {
fn deref_mut(&mut self) -> &mut T {
debug_assert!(!self.heap.is_empty());
-self.sift = true;
+let len = self.heap.len();
+if len > 1 {
+// Here we preemptively leak all the rest of the underlying vector
+// after the currently max element. If the caller mutates the &mut T
+// we're about to give them, and then leaks the PeekMut, all these
+// elements will remain leaked. If they don't leak the PeekMut, then
+// either Drop or PeekMut::pop will un-leak the vector elements.
+//
+// This technique is described throughout several other places in
+// the standard library as "leak amplification".
+unsafe {
+// SAFETY: len > 1 so len != 0.
+self.original_len = Some(NonZeroUsize::new_unchecked(len));
+// SAFETY: len > 1 so all this does for now is leak elements,
+// which is safe.
+self.heap.data.set_len(1);
+}
+}
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.data.get_unchecked_mut(0) }
}
}
// XXX: PeekMut<A>
impl<'a, T: Ord, A: Allocator + 'a> PeekMut<'a, T, A> {
/// Removes the peeked value from the heap and returns it.
#[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
-pub fn pop(mut this: PeekMut<'a, T, A>) -> T {
-let value = this.heap.pop().unwrap();
-this.sift = false;
-value
+pub fn pop(mut this: PeekMut<'a, T>) -> T {
+if let Some(original_len) = this.original_len.take() {
+// SAFETY: This is how many elements were in the Vec at the time of
+// the BinaryHeap::peek_mut call.
+unsafe { this.heap.data.set_len(original_len.get()) };
+// Unlike in Drop, here we don't also need to do a sift_down even if
+// the caller could've mutated the element. It is removed from the
+// heap on the next line and pop() is not sensitive to its value.
+}
+this.heap.pop().unwrap()
}
}
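Taken together, `deref_mut` and `Drop`/`pop` implement the leak amplification described in the comments above: while the `&mut T` is live, the vector's length is truncated to 1, so leaking the `PeekMut` can only leak elements, never expose a broken heap. The happy path, for contrast:

```rust
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from(vec![4, 2, 7]);
    {
        let mut max = heap.peek_mut().unwrap();
        // While this &mut is live, the rest of the heap is "leaked"...
        *max = -1;
    } // ...and PeekMut's Drop un-leaks it and sifts -1 back into place here.
    assert_eq!(heap.pop(), Some(4));
}
```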
@ -365,7 +404,19 @@ impl<T: fmt::Debug, A: Allocator> fmt::Debug for BinaryHeap<T, A> {
}
}
-impl<T: Ord> BinaryHeap<T, Global> {
+struct RebuildOnDrop<'a, T: Ord> {
+heap: &'a mut BinaryHeap<T>,
+rebuild_from: usize,
+}
+impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> {
+fn drop(&mut self) {
+self.heap.rebuild_tail(self.rebuild_from);
+}
+}
+// XXX: BinaryHeap<T, A>
+impl<T: Ord> BinaryHeap<T> {
/// Creates an empty `BinaryHeap` as a max-heap.
///
/// # Examples
@ -450,11 +501,13 @@ impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) }
}
// XXX: peek_mut
/// Returns a mutable reference to the greatest item in the binary heap, or
/// `None` if it is empty.
///
-/// Note: If the `PeekMut` value is leaked, the heap may be in an
-/// inconsistent state.
+/// Note: If the `PeekMut` value is leaked, some heap elements might get
+/// leaked along with it, but the remaining elements will remain a valid
+/// heap.
///
/// # Examples
///
@ -480,8 +533,12 @@ impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
/// If the item is modified then the worst case time complexity is *O*(log(*n*)),
/// otherwise it's *O*(1).
#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, A>> {
-if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
+pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
+if self.is_empty() {
+None
+} else {
+Some(PeekMut { heap: self, original_len: None })
+}
}
/// Removes the greatest item from the binary heap and returns it, or `None` if it
@ -847,7 +904,6 @@ impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
/// Basic usage:
///
/// ```
-/// #![feature(binary_heap_retain)]
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
@ -856,23 +912,24 @@ impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
///
/// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
/// ```
#[unstable(feature = "binary_heap_retain", issue = "71503")]
#[stable(feature = "binary_heap_retain", since = "1.70.0")]
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
-let mut first_removed = self.len();
+// rebuild_start will be updated to the first touched element below, and the rebuild will
+// only be done for the tail.
+let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
let mut i = 0;
-self.data.retain(|e| {
+guard.heap.data.retain(|e| {
let keep = f(e);
-if !keep && i < first_removed {
-first_removed = i;
+if !keep && i < guard.rebuild_from {
+guard.rebuild_from = i;
}
i += 1;
keep
});
-// data[0..first_removed] is untouched, so we only need to rebuild the tail:
-self.rebuild_tail(first_removed);
}
}
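`RebuildOnDrop` ties the invariant repair to a guard's destructor, so even a panicking predicate cannot leave the heap in an invalid state (see `test_retain_catch_unwind` further down). The same idiom in miniature, with hypothetical names (`SortOnDrop`, `retain_sorted`):

```rust
// Hypothetical sketch of the RebuildOnDrop idea: the invariant repair runs in
// Drop, so a panicking caller-supplied closure cannot skip it.
struct SortOnDrop<'a> {
    data: &'a mut Vec<i32>,
}

impl Drop for SortOnDrop<'_> {
    fn drop(&mut self) {
        self.data.sort(); // stand-in for `rebuild_tail`
    }
}

fn retain_sorted(data: &mut Vec<i32>, f: impl Fn(&i32) -> bool) {
    let guard = SortOnDrop { data };
    guard.data.retain(|e| f(e)); // `f` may panic; the sort still runs
}

fn main() {
    let mut v = vec![3, 1, 2];
    retain_sorted(&mut v, |&e| e != 2);
    assert_eq!(v, [1, 3]);
}
```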
@ -1065,7 +1122,8 @@ impl<T, A: Allocator> BinaryHeap<T, A> {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
-/// `Ok(())`. Does nothing if capacity is already sufficient.
+/// `Ok(())`. Does nothing if capacity is already sufficient. This method
+/// preserves the contents even if an error occurs.
///
/// # Errors
///
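The strengthened wording matters for fallible callers: on error the heap is untouched, so it remains usable. A small sketch of the intended calling pattern (the `push_all` helper is ours):

```rust
use std::collections::BinaryHeap;
use std::collections::TryReserveError;

// Hypothetical helper: reserve up front so a failed allocation surfaces as an
// Err instead of an abort, leaving the heap contents intact.
fn push_all(heap: &mut BinaryHeap<u32>, items: &[u32]) -> Result<(), TryReserveError> {
    heap.try_reserve(items.len())?;
    heap.extend(items.iter().copied());
    Ok(())
}

fn main() {
    let mut heap = BinaryHeap::new();
    push_all(&mut heap, &[3, 1, 2]).unwrap();
    assert_eq!(heap.peek(), Some(&3));
}
```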
@ -1418,7 +1476,6 @@ impl<T> FusedIterator for Iter<'_, T> {}
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<
@ -1468,6 +1525,20 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
/// Creates an empty `binary_heap::IntoIter`.
///
/// ```
/// # use std::collections::binary_heap;
/// let iter: binary_heap::IntoIter<u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// ```
fn default() -> Self {
IntoIter { iter: Default::default() }
}
}
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
@ -1726,7 +1797,8 @@ impl<'a, T, A: Allocator + 'a> IntoIterator for &'a BinaryHeap<T, A> {
impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> {
#[inline]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
-<Self as SpecExtend<I>>::spec_extend(self, iter);
+let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
+guard.heap.data.extend(iter);
}
#[inline]
@ -1740,37 +1812,6 @@ impl<T: Ord, A: Allocator> Extend<T> for BinaryHeap<T, A> {
}
}
-impl<T: Ord, A: Allocator, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T, A> {
-default fn spec_extend(&mut self, iter: I) {
-self.extend_desugared(iter.into_iter());
-}
-}
-impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
-fn spec_extend(&mut self, ref mut other: Vec<T>) {
-let start = self.data.len();
-self.data.append(other);
-self.rebuild_tail(start);
-}
-}
-impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
-fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
-self.append(other);
-}
-}
-impl<T: Ord, A: Allocator> BinaryHeap<T, A> {
-fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
-let iterator = iter.into_iter();
-let (lower, _) = iterator.size_hint();
-self.reserve(lower);
-iterator.for_each(move |elem| self.push(elem));
-}
-}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {

View file

@ -1,8 +1,9 @@
use super::*;
use crate::boxed::Box;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use core::mem;
use std::iter::TrustedLen;
use std::panic::{catch_unwind, AssertUnwindSafe};
-use std::sync::atomic::{AtomicU32, Ordering};
#[test]
fn test_iterator() {
@ -146,6 +147,24 @@ fn test_peek_mut() {
assert_eq!(heap.peek(), Some(&9));
}
#[test]
fn test_peek_mut_leek() {
let data = vec![4, 2, 7];
let mut heap = BinaryHeap::from(data);
let mut max = heap.peek_mut().unwrap();
*max = -1;
// The PeekMut object's Drop impl would have been responsible for moving the
// -1 out of the max position of the BinaryHeap, but we don't run it.
mem::forget(max);
// Absent some mitigation like leak amplification, the -1 would incorrectly
// end up in the last position of the returned Vec, with the rest of the
// heap's original contents in front of it in sorted order.
let sorted_vec = heap.into_sorted_vec();
assert!(sorted_vec.is_sorted(), "{:?}", sorted_vec);
}
#[test]
fn test_peek_mut_pop() {
let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
@ -291,33 +310,83 @@ fn test_drain_sorted() {
#[test]
fn test_drain_sorted_leak() {
-static DROPS: AtomicU32 = AtomicU32::new(0);
-#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
-struct D(u32, bool);
-impl Drop for D {
-fn drop(&mut self) {
-DROPS.fetch_add(1, Ordering::SeqCst);
-if self.1 {
-panic!("panic in `drop`");
-}
-}
-}
+let d0 = CrashTestDummy::new(0);
+let d1 = CrashTestDummy::new(1);
+let d2 = CrashTestDummy::new(2);
+let d3 = CrashTestDummy::new(3);
+let d4 = CrashTestDummy::new(4);
+let d5 = CrashTestDummy::new(5);
let mut q = BinaryHeap::from(vec![
-D(0, false),
-D(1, false),
-D(2, false),
-D(3, true),
-D(4, false),
-D(5, false),
+d0.spawn(Panic::Never),
+d1.spawn(Panic::Never),
+d2.spawn(Panic::Never),
+d3.spawn(Panic::InDrop),
+d4.spawn(Panic::Never),
+d5.spawn(Panic::Never),
]);
-catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).ok();
+catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).unwrap_err();
-assert_eq!(DROPS.load(Ordering::SeqCst), 6);
+assert_eq!(d0.dropped(), 1);
+assert_eq!(d1.dropped(), 1);
+assert_eq!(d2.dropped(), 1);
+assert_eq!(d3.dropped(), 1);
+assert_eq!(d4.dropped(), 1);
+assert_eq!(d5.dropped(), 1);
+assert!(q.is_empty());
}
#[test]
fn test_drain_forget() {
let a = CrashTestDummy::new(0);
let b = CrashTestDummy::new(1);
let c = CrashTestDummy::new(2);
let mut q =
BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
catch_unwind(AssertUnwindSafe(|| {
let mut it = q.drain();
it.next();
mem::forget(it);
}))
.unwrap();
// Behaviour after leaking is explicitly unspecified and order is arbitrary,
// so it's fine if these start failing, but probably worth knowing.
assert!(q.is_empty());
assert_eq!(a.dropped() + b.dropped() + c.dropped(), 1);
assert_eq!(a.dropped(), 0);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 1);
drop(q);
assert_eq!(a.dropped(), 0);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 1);
}
#[test]
fn test_drain_sorted_forget() {
let a = CrashTestDummy::new(0);
let b = CrashTestDummy::new(1);
let c = CrashTestDummy::new(2);
let mut q =
BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
catch_unwind(AssertUnwindSafe(|| {
let mut it = q.drain_sorted();
it.next();
mem::forget(it);
}))
.unwrap();
// Behaviour after leaking is explicitly unspecified,
// so it's fine if these start failing, but probably worth knowing.
assert_eq!(q.len(), 2);
assert_eq!(a.dropped(), 0);
assert_eq!(b.dropped(), 0);
assert_eq!(c.dropped(), 1);
drop(q);
assert_eq!(a.dropped(), 1);
assert_eq!(b.dropped(), 1);
assert_eq!(c.dropped(), 1);
}
#[test]
@ -405,6 +474,25 @@ fn test_retain() {
assert!(a.is_empty());
}
#[test]
fn test_retain_catch_unwind() {
let mut heap = BinaryHeap::from(vec![3, 1, 2]);
// Removes the 3, then unwinds out of retain.
let _ = catch_unwind(AssertUnwindSafe(|| {
heap.retain(|e| {
if *e == 1 {
panic!();
}
false
});
}));
// Naively this would be [1, 2] (an invalid heap) if BinaryHeap delegates to
// Vec's retain impl and then does not rebuild the heap after that unwinds.
assert_eq!(heap.into_vec(), [2, 1]);
}
// old binaryheap failed this test
//
// Integrity means that all elements are present after a comparison panics,
@ -415,7 +503,7 @@ fn test_retain() {
#[test]
#[cfg(not(target_os = "emscripten"))]
fn panic_safe() {
-use rand::{seq::SliceRandom, thread_rng};
+use rand::seq::SliceRandom;
use std::cmp;
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
@ -440,7 +528,7 @@ fn panic_safe() {
self.0.partial_cmp(&other.0)
}
}
-let mut rng = thread_rng();
+let mut rng = crate::test_helpers::test_rng();
const DATASZ: usize = 32;
// Miri is too slow
let ntest = if cfg!(miri) { 1 } else { 10 };

View file

@ -41,6 +41,28 @@ impl<'a, T> DormantMutRef<'a, T> {
// SAFETY: our own safety conditions imply this reference is again unique.
unsafe { &mut *self.ptr.as_ptr() }
}
/// Borrows a new mutable reference from the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new` and
/// all pointers and references derived from it, must not be used anymore.
pub unsafe fn reborrow(&mut self) -> &'a mut T {
// SAFETY: our own safety conditions imply this reference is again unique.
unsafe { &mut *self.ptr.as_ptr() }
}
/// Borrows a new shared reference from the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new` and
/// all pointers and references derived from it, must not be used anymore.
pub unsafe fn reborrow_shared(&self) -> &'a T {
// SAFETY: our own safety conditions imply this reference is again unique.
unsafe { &*self.ptr.as_ptr() }
}
}
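`reborrow` and `reborrow_shared` extend the dormant-reference pattern this type is built on: a unique borrow is parked as a raw pointer while a shorter-lived borrow is in use, then revived once that borrow is dead. A condensed sketch of the core idea, simplified from this file (the `Dormant` type is our reduction, not the real API):

```rust
use core::marker::PhantomData;
use core::ptr::NonNull;

// Simplified sketch of DormantMutRef: the safety contract (no overlap in time
// between the handed-out borrow and the revived one) is on the caller.
struct Dormant<'a, T> {
    ptr: NonNull<T>,
    _marker: PhantomData<&'a mut T>,
}

impl<'a, T> Dormant<'a, T> {
    fn new(t: &'a mut T) -> (&'a mut T, Self) {
        let ptr = NonNull::from(t);
        // SAFETY: the caller promises not to use `borrow` after `awaken`.
        let borrow = unsafe { &mut *ptr.as_ptr() };
        (borrow, Dormant { ptr, _marker: PhantomData })
    }

    // SAFETY precondition: all references derived from `new` are dead.
    unsafe fn awaken(self) -> &'a mut T {
        unsafe { &mut *self.ptr.as_ptr() }
    }
}

fn main() {
    let mut value = 1;
    let (borrow, dormant) = Dormant::new(&mut value);
    *borrow += 1; // last use of the first borrow
    let revived = unsafe { dormant.awaken() };
    *revived += 1;
    assert_eq!(value, 3);
}
```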
#[cfg(test)]

View file

@ -3,7 +3,9 @@ use core::iter::Peekable;
/// An iterator for deduping the key of a sorted iterator.
/// When encountering the duplicated key, only the last key-value pair is yielded.
///
-/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
+///
+/// [1]: crate::collections::BTreeMap::bulk_build_from_sorted_iter
pub struct DedupSortedIter<K, V, I>
where
I: Iterator<Item = (K, V)>,

File diff suppressed because it is too large.

View file

@ -133,6 +133,16 @@ impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
}
}
#[unstable(feature = "map_try_insert", issue = "82766")]
impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error
for crate::collections::btree_map::OccupiedError<'a, K, V>
{
#[allow(deprecated)]
fn description(&self) -> &str {
"key already exists"
}
}
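With this impl, `OccupiedError` can flow through `?` and `Box<dyn Error>` like any other error. A nightly-only usage sketch (requires the unstable `map_try_insert` feature):

```rust
#![feature(map_try_insert)]
use std::collections::BTreeMap;

fn main() {
    let mut map = BTreeMap::new();
    map.insert("poneyland", 37);

    // try_insert reports the clash as an OccupiedError instead of
    // overwriting; it now also implements core::error::Error.
    let err = map.try_insert("poneyland", 42).unwrap_err();
    assert_eq!(err.entry.key(), &"poneyland");
    assert_eq!(err.value, 42);
}
```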
impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
@ -337,7 +347,7 @@ impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> {
/// assert_eq!(map["poneyland"], 37);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
-pub fn insert(self, value: V) -> &'a mut V {
+pub fn insert(mut self, value: V) -> &'a mut V {
let out_ptr = match self.handle {
None => {
// SAFETY: There is no tree yet so no reference to it exists.
@ -348,25 +358,27 @@ impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> {
map.length = 1;
val_ptr
}
-Some(handle) => match handle.insert_recursing(self.key, value, self.alloc.clone()) {
-(None, val_ptr) => {
-// SAFETY: We have consumed self.handle.
-let map = unsafe { self.dormant_map.awaken() };
-map.length += 1;
-val_ptr
-}
-(Some(ins), val_ptr) => {
-drop(ins.left);
-// SAFETY: We have consumed self.handle and dropped the
-// remaining reference to the tree, ins.left.
-let map = unsafe { self.dormant_map.awaken() };
-let root = map.root.as_mut().unwrap(); // same as ins.left
-root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right);
-map.length += 1;
-val_ptr
-}
-},
+Some(handle) => {
+let new_handle =
+handle.insert_recursing(self.key, value, self.alloc.clone(), |ins| {
+drop(ins.left);
+// SAFETY: Pushing a new root node doesn't invalidate
+// handles to existing nodes.
+let map = unsafe { self.dormant_map.reborrow() };
+let root = map.root.as_mut().unwrap(); // same as ins.left
+root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right)
+});
+// Get the pointer to the value
+let val_ptr = new_handle.into_val_mut();
+// SAFETY: We have consumed self.handle.
+let map = unsafe { self.dormant_map.awaken() };
+map.length += 1;
+val_ptr
+}
};
// Now that we have finished growing the tree using borrowed references,
// dereference the pointer to a part of it, that we picked up along the way.
unsafe { &mut *out_ptr }

View file

@ -1,16 +1,16 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::ord_chaos::{Cyclic3, Governed, Governor};
-use super::super::testing::rng::DeterministicRng;
use super::Entry::{Occupied, Vacant};
use super::*;
use crate::boxed::Box;
use crate::fmt::Debug;
use crate::rc::Rc;
use crate::string::{String, ToString};
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
+use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use core::assert_matches::assert_matches;
use std::cmp::Ordering;
-use std::convert::TryFrom;
-use std::iter::{self, FromIterator};
+use std::iter;
use std::mem;
use std::ops::Bound::{self, Excluded, Included, Unbounded};
use std::ops::RangeBounds;
@ -2336,3 +2336,134 @@ fn from_array() {
let unordered_duplicates = BTreeMap::from([(3, 4), (1, 2), (1, 2)]);
assert_eq!(map, unordered_duplicates);
}
#[test]
fn test_cursor() {
let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.lower_bound(Bound::Unbounded);
assert_eq!(cur.key(), Some(&1));
cur.move_next();
assert_eq!(cur.key(), Some(&2));
assert_eq!(cur.peek_next(), Some((&3, &'c')));
cur.move_prev();
assert_eq!(cur.key(), Some(&1));
assert_eq!(cur.peek_prev(), None);
let mut cur = map.upper_bound(Bound::Excluded(&1));
assert_eq!(cur.key(), None);
cur.move_next();
assert_eq!(cur.key(), Some(&1));
cur.move_prev();
assert_eq!(cur.key(), None);
assert_eq!(cur.peek_prev(), Some((&3, &'c')));
}
#[test]
fn test_cursor_mut() {
let mut map = BTreeMap::from([(1, 'a'), (3, 'c'), (5, 'e')]);
let mut cur = map.lower_bound_mut(Bound::Excluded(&3));
assert_eq!(cur.key(), Some(&5));
cur.insert_before(4, 'd');
assert_eq!(cur.key(), Some(&5));
assert_eq!(cur.peek_prev(), Some((&4, &mut 'd')));
cur.move_next();
assert_eq!(cur.key(), None);
cur.insert_before(6, 'f');
assert_eq!(cur.key(), None);
assert_eq!(cur.remove_current(), None);
assert_eq!(cur.key(), None);
cur.insert_after(0, '?');
assert_eq!(cur.key(), None);
assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd'), (5, 'e'), (6, 'f')]));
let mut cur = map.upper_bound_mut(Bound::Included(&5));
assert_eq!(cur.key(), Some(&5));
assert_eq!(cur.remove_current(), Some((5, 'e')));
assert_eq!(cur.key(), Some(&6));
assert_eq!(cur.remove_current_and_move_back(), Some((6, 'f')));
assert_eq!(cur.key(), Some(&4));
assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd')]));
}
#[should_panic(expected = "key must be ordered above the previous element")]
#[test]
fn test_cursor_mut_insert_before_1() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(0, 'd');
}
#[should_panic(expected = "key must be ordered above the previous element")]
#[test]
fn test_cursor_mut_insert_before_2() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(1, 'd');
}
#[should_panic(expected = "key must be ordered below the current element")]
#[test]
fn test_cursor_mut_insert_before_3() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(2, 'd');
}
#[should_panic(expected = "key must be ordered below the current element")]
#[test]
fn test_cursor_mut_insert_before_4() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_before(3, 'd');
}
#[should_panic(expected = "key must be ordered above the current element")]
#[test]
fn test_cursor_mut_insert_after_1() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(1, 'd');
}
#[should_panic(expected = "key must be ordered above the current element")]
#[test]
fn test_cursor_mut_insert_after_2() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(2, 'd');
}
#[should_panic(expected = "key must be ordered below the next element")]
#[test]
fn test_cursor_mut_insert_after_3() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(3, 'd');
}
#[should_panic(expected = "key must be ordered below the next element")]
#[test]
fn test_cursor_mut_insert_after_4() {
let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
let mut cur = map.upper_bound_mut(Bound::Included(&2));
cur.insert_after(4, 'd');
}
#[test]
fn cursor_peek_prev_agrees_with_cursor_mut() {
let mut map = BTreeMap::from([(1, 1), (2, 2), (3, 3)]);
let cursor = map.lower_bound(Bound::Excluded(&3));
assert!(cursor.key().is_none());
let prev = cursor.peek_prev();
assert_matches!(prev, Some((&3, _)));
// Shadow names so the two parts of this test match.
let mut cursor = map.lower_bound_mut(Bound::Excluded(&3));
assert!(cursor.key().is_none());
let prev = cursor.peek_prev();
assert_matches!(prev, Some((&3, _)));
}

View file

@ -13,7 +13,6 @@ pub mod set;
mod set_val;
mod split;
#[doc(hidden)]
trait Recover<Q: ?Sized> {
type Key;
@ -21,6 +20,3 @@ trait Recover<Q: ?Sized> {
fn take(&mut self, key: &Q) -> Option<Self::Key>;
fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
}
#[cfg(test)]
mod testing;
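The `Recover` trait above is internal plumbing for handing owned keys back out of a set (a `BTreeSet<T>` stores `T` as a map key, so plain lookups only yield `&T`). The stable surface it backs looks like this (ordinary user code, shown for orientation):

```rust
use std::collections::BTreeSet;

fn main() {
    let mut set = BTreeSet::from([String::from("alpha")]);

    // `replace` swaps in the new, equal key and returns the old owned one.
    let old = set.replace(String::from("alpha"));
    assert_eq!(old.as_deref(), Some("alpha"));

    // `take` removes the entry and returns the owned key.
    let owned: Option<String> = set.take("alpha");
    assert_eq!(owned.as_deref(), Some("alpha"));
}
```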

View file

@ -4,6 +4,7 @@ use core::ops::RangeBounds;
use core::ptr;
use super::node::{marker, ForceResult::*, Handle, NodeRef};
use super::search::SearchBound;
use crate::alloc::Allocator;
// `front` and `back` are always both `None` or both `Some`.
@ -18,6 +19,12 @@ impl<'a, K: 'a, V: 'a> Clone for LeafRange<marker::Immut<'a>, K, V> {
}
}
impl<B, K, V> Default for LeafRange<B, K, V> {
fn default() -> Self {
LeafRange { front: None, back: None }
}
}
impl<BorrowType, K, V> LeafRange<BorrowType, K, V> {
pub fn none() -> Self {
LeafRange { front: None, back: None }
@ -123,6 +130,12 @@ pub struct LazyLeafRange<BorrowType, K, V> {
back: Option<LazyLeafHandle<BorrowType, K, V>>,
}
impl<B, K, V> Default for LazyLeafRange<B, K, V> {
fn default() -> Self {
LazyLeafRange { front: None, back: None }
}
}
impl<'a, K: 'a, V: 'a> Clone for LazyLeafRange<marker::Immut<'a>, K, V> {
fn clone(&self) -> Self {
LazyLeafRange { front: self.front.clone(), back: self.back.clone() }
@ -386,7 +399,7 @@ impl<BorrowType: marker::BorrowType, K, V>
/// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
/// on the left side, which is either in the same leaf node or in an ancestor node.
/// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node.
fn next_back_kv(
pub fn next_back_kv(
self,
) -> Result<
Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
@ -707,7 +720,9 @@ impl<BorrowType: marker::BorrowType, K, V>
}
/// Returns the leaf edge closest to a KV for backward navigation.
fn next_back_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
pub fn next_back_leaf_edge(
self,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
match self.force() {
Leaf(leaf_kv) => leaf_kv.left_edge(),
Internal(internal_kv) => {
@ -717,3 +732,51 @@ impl<BorrowType: marker::BorrowType, K, V>
}
}
}
impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
/// Returns the leaf edge corresponding to the first point at which the
/// given bound is true.
pub fn lower_bound<Q: ?Sized>(
self,
mut bound: SearchBound<&Q>,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
where
Q: Ord,
K: Borrow<Q>,
{
let mut node = self;
loop {
let (edge, new_bound) = node.find_lower_bound_edge(bound);
match edge.force() {
Leaf(edge) => return edge,
Internal(edge) => {
node = edge.descend();
bound = new_bound;
}
}
}
}
/// Returns the leaf edge corresponding to the last point at which the
/// given bound is true.
pub fn upper_bound<Q: ?Sized>(
self,
mut bound: SearchBound<&Q>,
) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
where
Q: Ord,
K: Borrow<Q>,
{
let mut node = self;
loop {
let (edge, new_bound) = node.find_upper_bound_edge(bound);
match edge.force() {
Leaf(edge) => return edge,
Internal(edge) => {
node = edge.descend();
bound = new_bound;
}
}
}
}
}
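These two helpers back the map-level `lower_bound`/`upper_bound` cursors exercised in the tests earlier. A small usage sketch against that API (unstable `btree_cursors` feature at the time of this change; method shapes as in the tests above):

```rust
#![feature(btree_cursors)]
use std::collections::BTreeMap;
use std::ops::Bound;

fn main() {
    let map = BTreeMap::from([(10, 'a'), (20, 'b'), (30, 'c')]);

    // First entry whose key satisfies the bound (key >= 15):
    let cur = map.lower_bound(Bound::Included(&15));
    assert_eq!(cur.key(), Some(&20));

    // Last entry whose key satisfies the bound (key <= 25):
    let cur = map.upper_bound(Bound::Included(&25));
    assert_eq!(cur.key(), Some(&20));
}
```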

View file

@ -206,9 +206,9 @@ impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {}
unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {}
unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {}
unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::ValMut<'a>, K, V, Type> {}
unsafe impl<K: Sync, V: Sync, Type> Send for NodeRef<marker::Immut<'_>, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Mut<'_>, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::ValMut<'_>, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {}
@ -318,7 +318,10 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
assert!(BorrowType::PERMITS_TRAVERSAL);
const {
assert!(BorrowType::TRAVERSAL_PERMIT);
}
// We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
// there might be outstanding mutable references to values that we must not invalidate.
let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@ -439,6 +442,24 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
// SAFETY: we have exclusive access to the entire node.
unsafe { &mut *ptr }
}
/// Returns a dormant copy of this node with its lifetime erased which can
/// be reawakened later.
pub fn dormant(&self) -> NodeRef<marker::DormantMut, K, V, Type> {
NodeRef { height: self.height, node: self.node, _marker: PhantomData }
}
}
impl<K, V, Type> NodeRef<marker::DormantMut, K, V, Type> {
/// Revert to the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new`,
/// and all pointers and references derived from it, must not be used anymore.
pub unsafe fn awaken<'a>(self) -> NodeRef<marker::Mut<'a>, K, V, Type> {
NodeRef { height: self.height, node: self.node, _marker: PhantomData }
}
}
impl<K, V, Type> NodeRef<marker::Dying, K, V, Type> {
@ -795,6 +816,25 @@ impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeT
// We can't use Handle::new_kv or Handle::new_edge because we don't know our type
Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData }
}
/// Returns a dormant copy of this handle which can be reawakened later.
///
/// See `DormantMutRef` for more details.
pub fn dormant(&self) -> Handle<NodeRef<marker::DormantMut, K, V, NodeType>, HandleType> {
Handle { node: self.node.dormant(), idx: self.idx, _marker: PhantomData }
}
}
impl<K, V, NodeType, HandleType> Handle<NodeRef<marker::DormantMut, K, V, NodeType>, HandleType> {
/// Revert to the unique borrow initially captured.
///
/// # Safety
///
/// The reborrow must have ended, i.e., the reference returned by `new`,
/// and all pointers and references derived from it, must not be used anymore.
pub unsafe fn awaken<'a>(self) -> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> {
Handle { node: unsafe { self.node.awaken() }, idx: self.idx, _marker: PhantomData }
}
}
impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
@ -848,9 +888,11 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark
/// Inserts a new key-value pair between the key-value pairs to the right and left of
/// this edge. This method assumes that there is enough space in the node for the new
/// pair to fit.
///
/// The returned pointer points to the inserted value.
fn insert_fit(&mut self, key: K, val: V) -> *mut V {
unsafe fn insert_fit(
mut self,
key: K,
val: V,
) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
debug_assert!(self.node.len() < CAPACITY);
let new_len = self.node.len() + 1;
@ -859,7 +901,7 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark
slice_insert(self.node.val_area_mut(..new_len), self.idx, val);
*self.node.len_mut() = new_len as u16;
self.node.val_area_mut(self.idx).assume_init_mut()
Handle::new_kv(self.node, self.idx)
}
}
}
@ -868,21 +910,26 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark
/// Inserts a new key-value pair between the key-value pairs to the right and left of
/// this edge. This method splits the node if there isn't enough room.
///
/// The returned pointer points to the inserted value.
/// Returns a dormant handle to the inserted node which can be reawakened
/// once splitting is complete.
fn insert<A: Allocator + Clone>(
mut self,
self,
key: K,
val: V,
alloc: A,
) -> (Option<SplitResult<'a, K, V, marker::Leaf>>, *mut V) {
) -> (
Option<SplitResult<'a, K, V, marker::Leaf>>,
Handle<NodeRef<marker::DormantMut, K, V, marker::Leaf>, marker::KV>,
) {
if self.node.len() < CAPACITY {
let val_ptr = self.insert_fit(key, val);
(None, val_ptr)
// SAFETY: There is enough space in the node for insertion.
let handle = unsafe { self.insert_fit(key, val) };
(None, handle.dormant())
} else {
let (middle_kv_idx, insertion) = splitpoint(self.idx);
let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
let mut result = middle.split(alloc);
let mut insertion_edge = match insertion {
let insertion_edge = match insertion {
LeftOrRight::Left(insert_idx) => unsafe {
Handle::new_edge(result.left.reborrow_mut(), insert_idx)
},
@ -890,8 +937,10 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark
Handle::new_edge(result.right.borrow_mut(), insert_idx)
},
};
let val_ptr = insertion_edge.insert_fit(key, val);
(Some(result), val_ptr)
// SAFETY: We just split the node, so there is enough space for
// insertion.
let handle = unsafe { insertion_edge.insert_fit(key, val).dormant() };
(Some(result), handle)
}
}
}
@ -973,21 +1022,31 @@ impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, mark
key: K,
value: V,
alloc: A,
) -> (Option<SplitResult<'a, K, V, marker::LeafOrInternal>>, *mut V) {
let (mut split, val_ptr) = match self.insert(key, value, alloc.clone()) {
(None, val_ptr) => return (None, val_ptr),
(Some(split), val_ptr) => (split.forget_node_type(), val_ptr),
split_root: impl FnOnce(SplitResult<'a, K, V, marker::LeafOrInternal>),
) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
let (mut split, handle) = match self.insert(key, value, alloc.clone()) {
// SAFETY: we have finished splitting and can now re-awaken the
// handle to the inserted element.
(None, handle) => return unsafe { handle.awaken() },
(Some(split), handle) => (split.forget_node_type(), handle),
};
loop {
split = match split.left.ascend() {
Ok(parent) => {
match parent.insert(split.kv.0, split.kv.1, split.right, alloc.clone()) {
None => return (None, val_ptr),
// SAFETY: we have finished splitting and can now re-awaken the
// handle to the inserted element.
None => return unsafe { handle.awaken() },
Some(split) => split.forget_node_type(),
}
}
Err(root) => return (Some(SplitResult { left: root, ..split }), val_ptr),
Err(root) => {
split_root(SplitResult { left: root, ..split });
// SAFETY: we have finished splitting and can now re-awaken the
// handle to the inserted element.
return unsafe { handle.awaken() };
}
};
}
}
@ -1003,7 +1062,10 @@ impl<BorrowType: marker::BorrowType, K, V>
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
assert!(BorrowType::PERMITS_TRAVERSAL);
const {
assert!(BorrowType::TRAVERSAL_PERMIT);
}
// We need to use raw pointers to nodes because, if BorrowType is
// marker::ValMut, there might be outstanding mutable references to
// values that we must not invalidate. There's no worry accessing the
@ -1037,6 +1099,14 @@ impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>
let leaf = self.node.into_leaf_mut();
unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }
}
pub fn into_kv_valmut(self) -> (&'a K, &'a mut V) {
debug_assert!(self.idx < self.node.len());
let leaf = self.node.into_leaf_mut();
let k = unsafe { leaf.keys.get_unchecked(self.idx).assume_init_ref() };
let v = unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() };
(k, v)
}
}
impl<'a, K, V, NodeType> Handle<NodeRef<marker::ValMut<'a>, K, V, NodeType>, marker::KV> {
@ -1661,25 +1731,29 @@ pub mod marker {
pub enum Owned {}
pub enum Dying {}
pub enum DormantMut {}
pub struct Immut<'a>(PhantomData<&'a ()>);
pub struct Mut<'a>(PhantomData<&'a mut ()>);
pub struct ValMut<'a>(PhantomData<&'a mut ()>);
pub trait BorrowType {
// Whether node references of this borrow type allow traversing
// to other nodes in the tree.
const PERMITS_TRAVERSAL: bool = true;
/// If node references of this borrow type allow traversing to other
/// nodes in the tree, this constant is set to `true`. It can be used
/// for a compile-time assertion.
const TRAVERSAL_PERMIT: bool = true;
}
impl BorrowType for Owned {
// Traversal isn't needed, it happens using the result of `borrow_mut`.
// By disabling traversal, and only creating new references to roots,
// we know that every reference of the `Owned` type is to a root node.
const PERMITS_TRAVERSAL: bool = false;
/// Reject traversal, because it isn't needed. Instead traversal
/// happens using the result of `borrow_mut`.
/// By disabling traversal, and only creating new references to roots,
/// we know that every reference of the `Owned` type is to a root node.
const TRAVERSAL_PERMIT: bool = false;
}
impl BorrowType for Dying {}
impl<'a> BorrowType for Immut<'a> {}
impl<'a> BorrowType for Mut<'a> {}
impl<'a> BorrowType for ValMut<'a> {}
impl BorrowType for DormantMut {}
pub enum KV {}
pub enum Edge {}
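The `const { assert!(..) }` blocks swapped in above turn the old runtime check into a post-monomorphization compile error. The shape of the pattern in isolation (a sketch mirroring the trait above; requires a toolchain with inline `const` blocks, stable since Rust 1.79):

```rust
trait BorrowType {
    const TRAVERSAL_PERMIT: bool;
}

struct Traversable;
impl BorrowType for Traversable {
    const TRAVERSAL_PERMIT: bool = true;
}

fn ascend<B: BorrowType>() {
    // Evaluated per instantiation at compile time: a `B` with
    // `TRAVERSAL_PERMIT = false` fails the build instead of panicking.
    const {
        assert!(B::TRAVERSAL_PERMIT);
    }
    // ... the traversal itself would go here ...
}

fn main() {
    ascend::<Traversable>();
}
```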

View file

@ -94,6 +94,7 @@ fn test_partial_eq() {
#[test]
#[cfg(target_arch = "x86_64")]
#[cfg_attr(miri, ignore)] // We'd like to run Miri with layout randomization
fn test_sizes() {
assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);

View file

@ -1,13 +1,10 @@
// This is pretty much entirely stolen from TreeSet, since BTreeMap has an identical interface
// to TreeMap
use crate::vec::Vec;
use core::borrow::Borrow;
use core::cmp::Ordering::{self, Equal, Greater, Less};
use core::cmp::{max, min};
use core::fmt::{self, Debug};
use core::hash::{Hash, Hasher};
use core::iter::{FromIterator, FusedIterator, Peekable};
use core::iter::{FusedIterator, Peekable};
use core::mem::ManuallyDrop;
use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
@ -18,8 +15,6 @@ use super::Recover;
use crate::alloc::{Allocator, Global};
// FIXME(conventions): implement bounded iterators
/// An ordered set based on a B-Tree.
///
/// See [`BTreeMap`]'s documentation for a detailed discussion of this collection's performance
@ -35,7 +30,6 @@ use crate::alloc::{Allocator, Global};
/// Iterators returned by [`BTreeSet::iter`] produce their items in order, and take worst-case
/// logarithmic and amortized constant time per item returned.
///
/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
///
@ -152,7 +146,6 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BTreeSet#method.into_iter
/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IntoIter<
@ -343,7 +336,7 @@ impl<T> BTreeSet<T> {
/// let mut set: BTreeSet<i32> = BTreeSet::new();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
#[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")]
#[must_use]
pub const fn new() -> BTreeSet<T> {
BTreeSet { map: BTreeMap::new() }
@ -786,7 +779,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// Basic usage:
///
/// ```
/// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@ -797,7 +789,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(set.first(), Some(&1));
/// ```
#[must_use]
#[unstable(feature = "map_first_last", issue = "62924")]
#[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first(&self) -> Option<&T>
where
T: Ord,
@ -813,7 +805,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// Basic usage:
///
/// ```
/// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@ -824,7 +815,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(set.last(), Some(&2));
/// ```
#[must_use]
#[unstable(feature = "map_first_last", issue = "62924")]
#[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last(&self) -> Option<&T>
where
T: Ord,
@ -838,7 +829,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// # Examples
///
/// ```
/// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@ -849,7 +839,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// }
/// assert!(set.is_empty());
/// ```
#[unstable(feature = "map_first_last", issue = "62924")]
#[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_first(&mut self) -> Option<T>
where
T: Ord,
@ -863,7 +853,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// # Examples
///
/// ```
/// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@ -874,7 +863,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// }
/// assert!(set.is_empty());
/// ```
#[unstable(feature = "map_first_last", issue = "62924")]
#[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_last(&mut self) -> Option<T>
where
T: Ord,
@ -1174,7 +1163,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
#[rustc_const_unstable(
feature = "const_btree_len",
issue = "71835",
implied_by = "const_btree_new"
)]
pub const fn len(&self) -> usize {
self.map.len()
}
@ -1193,7 +1186,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
#[rustc_const_unstable(
feature = "const_btree_len",
issue = "71835",
implied_by = "const_btree_new"
)]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
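With `BTreeSet::new` const-stable and `len`/`is_empty` split off under the renamed `const_btree_len` gate, stable code can now build empty sets in const contexts. For instance (plain user code, assuming Rust 1.66+):

```rust
use std::collections::BTreeSet;

// Allowed once `new` is a stable const fn: statics never run destructors,
// and the empty set allocates nothing.
static EMPTY: BTreeSet<i32> = BTreeSet::new();

fn main() {
    assert!(EMPTY.is_empty()); // `is_empty` is called at runtime here
    assert_eq!(EMPTY.len(), 0);
}
```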
@ -1504,11 +1501,17 @@ impl<'a, T> Iterator for Iter<'a, T> {
self.next_back()
}
fn min(mut self) -> Option<&'a T> {
fn min(mut self) -> Option<&'a T>
where
&'a T: Ord,
{
self.next()
}
fn max(mut self) -> Option<&'a T> {
fn max(mut self) -> Option<&'a T>
where
&'a T: Ord,
{
self.next_back()
}
}
@ -1540,6 +1543,21 @@ impl<T, A: Allocator + Clone> Iterator for IntoIter<T, A> {
self.iter.size_hint()
}
}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for Iter<'_, T> {
/// Creates an empty `btree_set::Iter`.
///
/// ```
/// # use std::collections::btree_set;
/// let iter: btree_set::Iter<'_, u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// ```
fn default() -> Self {
Iter { iter: Default::default() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator + Clone> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
@ -1556,6 +1574,23 @@ impl<T, A: Allocator + Clone> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator + Clone> FusedIterator for IntoIter<T, A> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T, A> Default for IntoIter<T, A>
where
A: Allocator + Default + Clone,
{
/// Creates an empty `btree_set::IntoIter`.
///
/// ```
/// # use std::collections::btree_set;
/// let iter: btree_set::IntoIter<u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// ```
fn default() -> Self {
IntoIter { iter: Default::default() }
}
}
#[stable(feature = "btree_range", since = "1.17.0")]
impl<T> Clone for Range<'_, T> {
fn clone(&self) -> Self {
@ -1575,11 +1610,17 @@ impl<'a, T> Iterator for Range<'a, T> {
self.next_back()
}
fn min(mut self) -> Option<&'a T> {
fn min(mut self) -> Option<&'a T>
where
&'a T: Ord,
{
self.next()
}
fn max(mut self) -> Option<&'a T> {
fn max(mut self) -> Option<&'a T>
where
&'a T: Ord,
{
self.next_back()
}
}
@ -1594,6 +1635,20 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Range<'_, T> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for Range<'_, T> {
/// Creates an empty `btree_set::Range`.
///
/// ```
/// # use std::collections::btree_set;
/// let iter: btree_set::Range<'_, u8> = Default::default();
/// assert_eq!(iter.count(), 0);
/// ```
fn default() -> Self {
Range { iter: Default::default() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator + Clone> Clone for Difference<'_, T, A> {
fn clone(&self) -> Self {

View file

@ -1,10 +1,9 @@
use super::super::testing::crash_test::{CrashTestDummy, Panic};
use super::super::testing::rng::DeterministicRng;
use super::*;
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::iter::FromIterator;
use std::ops::Bound::{Excluded, Included};
use std::panic::{catch_unwind, AssertUnwindSafe};

View file

@ -15,12 +15,13 @@
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::iter::{FromIterator, FusedIterator};
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use core::ptr::{NonNull, Unique};
use super::SpecExtend;
use crate::alloc::{Allocator, Global};
use crate::boxed::Box;
#[cfg(test)]
@ -47,11 +48,15 @@ mod tests;
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "LinkedList")]
#[rustc_insignificant_dtor]
pub struct LinkedList<T> {
pub struct LinkedList<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
head: Option<NonNull<Node<T>>>,
tail: Option<NonNull<Node<T>>>,
len: usize,
marker: PhantomData<Box<Node<T>>>,
alloc: A,
marker: PhantomData<Box<Node<T>, A>>,
}
struct Node<T> {
@ -81,6 +86,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
head: self.head,
tail: self.tail,
len: self.len,
alloc: Global,
marker: PhantomData,
}))
.field(&self.len)
@ -117,6 +123,7 @@ impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
head: self.head,
tail: self.tail,
len: self.len,
alloc: Global,
marker: PhantomData,
}))
.field(&self.len)
@ -130,15 +137,17 @@ impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: LinkedList::into_iter
/// [`IntoIterator`]: core::iter::IntoIterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
list: LinkedList<T>,
pub struct IntoIter<
T,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
list: LinkedList<T, A>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IntoIter").field(&self.list).finish()
}
@ -149,22 +158,25 @@ impl<T> Node<T> {
Node { next: None, prev: None, element }
}
fn into_element(self: Box<Self>) -> T {
fn into_element<A: Allocator>(self: Box<Self, A>) -> T {
self.element
}
}
// private methods
impl<T> LinkedList<T> {
impl<T, A: Allocator> LinkedList<T, A> {
/// Adds the given node to the front of the list.
///
/// # Safety
/// `node` must point to a valid node that was boxed using the list's allocator.
#[inline]
fn push_front_node(&mut self, mut node: Box<Node<T>>) {
unsafe fn push_front_node(&mut self, node: Unique<Node<T>>) {
// This method takes care not to create mutable references to whole nodes,
// to maintain validity of aliasing pointers into `element`.
unsafe {
node.next = self.head;
node.prev = None;
let node = Some(Box::leak(node).into());
(*node.as_ptr()).next = self.head;
(*node.as_ptr()).prev = None;
let node = Some(NonNull::from(node));
match self.head {
None => self.tail = node,
@ -179,11 +191,11 @@ impl<T> LinkedList<T> {
/// Removes and returns the node at the front of the list.
#[inline]
fn pop_front_node(&mut self) -> Option<Box<Node<T>>> {
fn pop_front_node(&mut self) -> Option<Box<Node<T>, &A>> {
// This method takes care not to create mutable references to whole nodes,
// to maintain validity of aliasing pointers into `element`.
self.head.map(|node| unsafe {
let node = Box::from_raw(node.as_ptr());
let node = Box::from_raw_in(node.as_ptr(), &self.alloc);
self.head = node.next;
match self.head {
@ -198,14 +210,17 @@ impl<T> LinkedList<T> {
}
/// Adds the given node to the back of the list.
///
/// # Safety
/// `node` must point to a valid node that was boxed using the list's allocator.
#[inline]
fn push_back_node(&mut self, mut node: Box<Node<T>>) {
unsafe fn push_back_node(&mut self, node: Unique<Node<T>>) {
// This method takes care not to create mutable references to whole nodes,
// to maintain validity of aliasing pointers into `element`.
unsafe {
node.next = None;
node.prev = self.tail;
let node = Some(Box::leak(node).into());
(*node.as_ptr()).next = None;
(*node.as_ptr()).prev = self.tail;
let node = Some(NonNull::from(node));
match self.tail {
None => self.head = node,
@ -220,11 +235,11 @@ impl<T> LinkedList<T> {
/// Removes and returns the node at the back of the list.
#[inline]
fn pop_back_node(&mut self) -> Option<Box<Node<T>>> {
fn pop_back_node(&mut self) -> Option<Box<Node<T>, &A>> {
// This method takes care not to create mutable references to whole nodes,
// to maintain validity of aliasing pointers into `element`.
self.tail.map(|node| unsafe {
let node = Box::from_raw(node.as_ptr());
let node = Box::from_raw_in(node.as_ptr(), &self.alloc);
self.tail = node.prev;
match self.tail {
@ -322,7 +337,10 @@ impl<T> LinkedList<T> {
&mut self,
split_node: Option<NonNull<Node<T>>>,
at: usize,
) -> Self {
) -> Self
where
A: Clone,
{
// The split node is the new head node of the second part
if let Some(mut split_node) = split_node {
let first_part_head;
@ -343,6 +361,7 @@ impl<T> LinkedList<T> {
head: first_part_head,
tail: first_part_tail,
len: at,
alloc: self.alloc.clone(),
marker: PhantomData,
};
@ -352,7 +371,7 @@ impl<T> LinkedList<T> {
first_part
} else {
mem::replace(self, LinkedList::new())
mem::replace(self, LinkedList::new_in(self.alloc.clone()))
}
}
@ -361,7 +380,10 @@ impl<T> LinkedList<T> {
&mut self,
split_node: Option<NonNull<Node<T>>>,
at: usize,
) -> Self {
) -> Self
where
A: Clone,
{
// The split node is the new tail node of the first part and owns
// the head of the second part.
if let Some(mut split_node) = split_node {
@ -383,6 +405,7 @@ impl<T> LinkedList<T> {
head: second_part_head,
tail: second_part_tail,
len: self.len - at,
alloc: self.alloc.clone(),
marker: PhantomData,
};
@ -392,7 +415,7 @@ impl<T> LinkedList<T> {
second_part
} else {
mem::replace(self, LinkedList::new())
mem::replace(self, LinkedList::new_in(self.alloc.clone()))
}
}
}
@ -421,7 +444,7 @@ impl<T> LinkedList<T> {
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
pub const fn new() -> Self {
LinkedList { head: None, tail: None, len: 0, marker: PhantomData }
LinkedList { head: None, tail: None, len: 0, alloc: Global, marker: PhantomData }
}
/// Moves all elements from `other` to the end of the list.
@ -472,7 +495,26 @@ impl<T> LinkedList<T> {
}
}
}
}
impl<T, A: Allocator> LinkedList<T, A> {
/// Constructs an empty `LinkedList<T, A>`.
///
/// # Examples
///
/// ```
/// #![feature(allocator_api)]
///
/// use std::alloc::System;
/// use std::collections::LinkedList;
///
/// let list: LinkedList<u32, _> = LinkedList::new_in(System);
/// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub const fn new_in(alloc: A) -> Self {
LinkedList { head: None, tail: None, len: 0, alloc, marker: PhantomData }
}
/// Provides a forward iterator.
///
/// # Examples
@ -533,7 +575,7 @@ impl<T> LinkedList<T> {
#[inline]
#[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_front(&self) -> Cursor<'_, T> {
pub fn cursor_front(&self) -> Cursor<'_, T, A> {
Cursor { index: 0, current: self.head, list: self }
}
@ -543,7 +585,7 @@ impl<T> LinkedList<T> {
#[inline]
#[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_front_mut(&mut self) -> CursorMut<'_, T> {
pub fn cursor_front_mut(&mut self) -> CursorMut<'_, T, A> {
CursorMut { index: 0, current: self.head, list: self }
}
@ -553,7 +595,7 @@ impl<T> LinkedList<T> {
#[inline]
#[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_back(&self) -> Cursor<'_, T> {
pub fn cursor_back(&self) -> Cursor<'_, T, A> {
Cursor { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
}
@ -563,7 +605,7 @@ impl<T> LinkedList<T> {
#[inline]
#[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn cursor_back_mut(&mut self) -> CursorMut<'_, T> {
pub fn cursor_back_mut(&mut self) -> CursorMut<'_, T, A> {
CursorMut { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
}
@ -639,7 +681,15 @@ impl<T> LinkedList<T> {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
*self = Self::new();
// We need to drop the nodes while keeping self.alloc
// We can do this by moving (head, tail, len) into a new list that borrows self.alloc
drop(LinkedList {
head: self.head.take(),
tail: self.tail.take(),
len: mem::take(&mut self.len),
alloc: &self.alloc,
marker: PhantomData,
});
}
/// Returns `true` if the `LinkedList` contains an element equal to the
@ -791,7 +841,12 @@ impl<T> LinkedList<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, elt: T) {
self.push_front_node(Box::new(Node::new(elt)));
let node = Box::new_in(Node::new(elt), &self.alloc);
let node_ptr = Unique::from(Box::leak(node));
// SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc
unsafe {
self.push_front_node(node_ptr);
}
}
/// Removes the first element and returns it, or `None` if the list is
@ -834,7 +889,12 @@ impl<T> LinkedList<T> {
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, elt: T) {
self.push_back_node(Box::new(Node::new(elt)));
let node = Box::new_in(Node::new(elt), &self.alloc);
let node_ptr = Unique::from(Box::leak(node));
// SAFETY: node_ptr is a unique pointer to a node we boxed with self.alloc
unsafe {
self.push_back_node(node_ptr);
}
}
/// Removes the last element from a list and returns it, or `None` if
@ -884,13 +944,16 @@ impl<T> LinkedList<T> {
/// assert_eq!(split.pop_front(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn split_off(&mut self, at: usize) -> LinkedList<T> {
pub fn split_off(&mut self, at: usize) -> LinkedList<T, A>
where
A: Clone,
{
let len = self.len();
assert!(at <= len, "Cannot split off at a nonexistent index");
if at == 0 {
return mem::take(self);
return mem::replace(self, Self::new_in(self.alloc.clone()));
} else if at == len {
return Self::new();
return Self::new_in(self.alloc.clone());
}
// Below, we iterate towards the `i-1`th node, either from the start or the end,
@ -988,7 +1051,7 @@ impl<T> LinkedList<T> {
/// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 9, 11, 13, 15]);
/// ```
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
@ -1001,11 +1064,11 @@ impl<T> LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T> Drop for LinkedList<T> {
unsafe impl<#[may_dangle] T, A: Allocator> Drop for LinkedList<T, A> {
fn drop(&mut self) {
struct DropGuard<'a, T>(&'a mut LinkedList<T>);
struct DropGuard<'a, T, A: Allocator>(&'a mut LinkedList<T, A>);
impl<'a, T> Drop for DropGuard<'a, T> {
impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
fn drop(&mut self) {
// Continue the same loop we do below. This only runs when a destructor has
// panicked. If another one panics this will abort.
@ -1013,11 +1076,10 @@ unsafe impl<#[may_dangle] T> Drop for LinkedList<T> {
}
}
while let Some(node) = self.pop_front_node() {
let guard = DropGuard(self);
drop(node);
mem::forget(guard);
}
// Wrap self so that if a destructor panics, we can try to keep looping
let guard = DropGuard(self);
while guard.0.pop_front_node().is_some() {}
mem::forget(guard);
}
}
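The reworked `Drop` above hoists the guard outside the loop. The underlying idiom, reduced to a container-agnostic sketch (illustrative, not the list code itself): if one element's destructor panics, the guard's own `drop` resumes clearing the remaining elements during unwinding; on success the guard is forgotten so cleanup runs exactly once.

```rust
use core::mem;

struct Guard<'a, T>(&'a mut Vec<T>);

impl<T> Drop for Guard<'_, T> {
    fn drop(&mut self) {
        // Runs only if the loop below unwinds: keep popping. A second
        // panic here would abort, which is the accepted trade-off.
        while self.0.pop().is_some() {}
    }
}

fn clear_all<T>(items: &mut Vec<T>) {
    let guard = Guard(items);
    while guard.0.pop().is_some() {}
    mem::forget(guard); // success: don't run the guard's cleanup again
}
```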
@ -1075,6 +1137,20 @@ impl<T> ExactSizeIterator for Iter<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for Iter<'_, T> {
/// Creates an empty `linked_list::Iter`.
///
/// ```
/// # use std::collections::linked_list;
/// let iter: linked_list::Iter<'_, u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// ```
fn default() -> Self {
Iter { head: None, tail: None, len: 0, marker: Default::default() }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
@ -1129,6 +1205,13 @@ impl<T> ExactSizeIterator for IterMut<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IterMut<'_, T> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IterMut<'_, T> {
fn default() -> Self {
IterMut { head: None, tail: None, len: 0, marker: Default::default() }
}
}
/// A cursor over a `LinkedList`.
///
/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth.
@ -1139,14 +1222,18 @@ impl<T> FusedIterator for IterMut<'_, T> {}
///
/// When created, cursors start at the front of the list, or the "ghost" non-element if the list is empty.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub struct Cursor<'a, T: 'a> {
pub struct Cursor<
'a,
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
index: usize,
current: Option<NonNull<Node<T>>>,
list: &'a LinkedList<T>,
list: &'a LinkedList<T, A>,
}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
impl<T> Clone for Cursor<'_, T> {
impl<T, A: Allocator> Clone for Cursor<'_, T, A> {
fn clone(&self) -> Self {
let Cursor { index, current, list } = *self;
Cursor { index, current, list }
@ -1154,7 +1241,7 @@ impl<T> Clone for Cursor<'_, T> {
}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
impl<T: fmt::Debug> fmt::Debug for Cursor<'_, T> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Cursor<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Cursor").field(&self.list).field(&self.index()).finish()
}
@ -1171,20 +1258,24 @@ impl<T: fmt::Debug> fmt::Debug for Cursor<'_, T> {
/// To accommodate this, there is a "ghost" non-element that yields `None` between the head and
/// tail of the list.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub struct CursorMut<'a, T: 'a> {
pub struct CursorMut<
'a,
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
index: usize,
current: Option<NonNull<Node<T>>>,
list: &'a mut LinkedList<T>,
list: &'a mut LinkedList<T, A>,
}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
impl<T: fmt::Debug> fmt::Debug for CursorMut<'_, T> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for CursorMut<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CursorMut").field(&self.list).field(&self.index()).finish()
}
}
impl<'a, T> Cursor<'a, T> {
impl<'a, T, A: Allocator> Cursor<'a, T, A> {
/// Returns the cursor position index within the `LinkedList`.
///
/// This returns `None` if the cursor is currently pointing to the
@ -1301,7 +1392,7 @@ impl<'a, T> Cursor<'a, T> {
}
}
impl<'a, T> CursorMut<'a, T> {
impl<'a, T, A: Allocator> CursorMut<'a, T, A> {
/// Returns the cursor position index within the `LinkedList`.
///
/// This returns `None` if the cursor is currently pointing to the
@ -1406,7 +1497,7 @@ impl<'a, T> CursorMut<'a, T> {
/// `CursorMut` is frozen for the lifetime of the `Cursor`.
#[must_use]
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn as_cursor(&self) -> Cursor<'_, T> {
pub fn as_cursor(&self) -> Cursor<'_, T, A> {
Cursor { list: self.list, current: self.current, index: self.index }
}
}
@ -1414,86 +1505,6 @@ impl<'a, T> CursorMut<'a, T> {
// Now the list editing operations
impl<'a, T> CursorMut<'a, T> {
/// Inserts a new element into the `LinkedList` after the current one.
///
/// If the cursor is pointing at the "ghost" non-element then the new element is
/// inserted at the front of the `LinkedList`.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn insert_after(&mut self, item: T) {
unsafe {
let spliced_node = Box::leak(Box::new(Node::new(item))).into();
let node_next = match self.current {
None => self.list.head,
Some(node) => node.as_ref().next,
};
self.list.splice_nodes(self.current, node_next, spliced_node, spliced_node, 1);
if self.current.is_none() {
// The "ghost" non-element's index has changed.
self.index = self.list.len;
}
}
}
/// Inserts a new element into the `LinkedList` before the current one.
///
/// If the cursor is pointing at the "ghost" non-element then the new element is
/// inserted at the end of the `LinkedList`.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn insert_before(&mut self, item: T) {
unsafe {
let spliced_node = Box::leak(Box::new(Node::new(item))).into();
let node_prev = match self.current {
None => self.list.tail,
Some(node) => node.as_ref().prev,
};
self.list.splice_nodes(node_prev, self.current, spliced_node, spliced_node, 1);
self.index += 1;
}
}
/// Removes the current element from the `LinkedList`.
///
/// The element that was removed is returned, and the cursor is
/// moved to point to the next element in the `LinkedList`.
///
/// If the cursor is currently pointing to the "ghost" non-element then no element
/// is removed and `None` is returned.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn remove_current(&mut self) -> Option<T> {
let unlinked_node = self.current?;
unsafe {
self.current = unlinked_node.as_ref().next;
self.list.unlink_node(unlinked_node);
let unlinked_node = Box::from_raw(unlinked_node.as_ptr());
Some(unlinked_node.element)
}
}
/// Removes the current element from the `LinkedList` without deallocating the list node.
///
/// The node that was removed is returned as a new `LinkedList` containing only this node.
/// The cursor is moved to point to the next element in the current `LinkedList`.
///
/// If the cursor is currently pointing to the "ghost" non-element then no element
/// is removed and `None` is returned.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn remove_current_as_list(&mut self) -> Option<LinkedList<T>> {
let mut unlinked_node = self.current?;
unsafe {
self.current = unlinked_node.as_ref().next;
self.list.unlink_node(unlinked_node);
unlinked_node.as_mut().prev = None;
unlinked_node.as_mut().next = None;
Some(LinkedList {
head: Some(unlinked_node),
tail: Some(unlinked_node),
len: 1,
marker: PhantomData,
})
}
}
/// Inserts the elements from the given `LinkedList` after the current one.
///
/// If the cursor is pointing at the "ghost" non-element then the new elements are
@ -1536,6 +1547,92 @@ impl<'a, T> CursorMut<'a, T> {
self.index += splice_len;
}
}
}
impl<'a, T, A: Allocator> CursorMut<'a, T, A> {
/// Inserts a new element into the `LinkedList` after the current one.
///
/// If the cursor is pointing at the "ghost" non-element then the new element is
/// inserted at the front of the `LinkedList`.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn insert_after(&mut self, item: T) {
unsafe {
let spliced_node = Box::leak(Box::new_in(Node::new(item), &self.list.alloc)).into();
let node_next = match self.current {
None => self.list.head,
Some(node) => node.as_ref().next,
};
self.list.splice_nodes(self.current, node_next, spliced_node, spliced_node, 1);
if self.current.is_none() {
// The "ghost" non-element's index has changed.
self.index = self.list.len;
}
}
}
/// Inserts a new element into the `LinkedList` before the current one.
///
/// If the cursor is pointing at the "ghost" non-element then the new element is
/// inserted at the end of the `LinkedList`.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn insert_before(&mut self, item: T) {
unsafe {
let spliced_node = Box::leak(Box::new_in(Node::new(item), &self.list.alloc)).into();
let node_prev = match self.current {
None => self.list.tail,
Some(node) => node.as_ref().prev,
};
self.list.splice_nodes(node_prev, self.current, spliced_node, spliced_node, 1);
self.index += 1;
}
}
/// Removes the current element from the `LinkedList`.
///
/// The element that was removed is returned, and the cursor is
/// moved to point to the next element in the `LinkedList`.
///
/// If the cursor is currently pointing to the "ghost" non-element then no element
/// is removed and `None` is returned.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn remove_current(&mut self) -> Option<T> {
let unlinked_node = self.current?;
unsafe {
self.current = unlinked_node.as_ref().next;
self.list.unlink_node(unlinked_node);
let unlinked_node = Box::from_raw(unlinked_node.as_ptr());
Some(unlinked_node.element)
}
}
/// Removes the current element from the `LinkedList` without deallocating the list node.
///
/// The node that was removed is returned as a new `LinkedList` containing only this node.
/// The cursor is moved to point to the next element in the current `LinkedList`.
///
/// If the cursor is currently pointing to the "ghost" non-element then no element
/// is removed and `None` is returned.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn remove_current_as_list(&mut self) -> Option<LinkedList<T, A>>
where
A: Clone,
{
let mut unlinked_node = self.current?;
unsafe {
self.current = unlinked_node.as_ref().next;
self.list.unlink_node(unlinked_node);
unlinked_node.as_mut().prev = None;
unlinked_node.as_mut().next = None;
Some(LinkedList {
head: Some(unlinked_node),
tail: Some(unlinked_node),
len: 1,
alloc: self.list.alloc.clone(),
marker: PhantomData,
})
}
}
/// Splits the list into two after the current element. This will return a
/// new list consisting of everything after the cursor, with the original
@ -1544,7 +1641,10 @@ impl<'a, T> CursorMut<'a, T> {
/// If the cursor is pointing at the "ghost" non-element then the entire contents
/// of the `LinkedList` are moved.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn split_after(&mut self) -> LinkedList<T> {
pub fn split_after(&mut self) -> LinkedList<T, A>
where
A: Clone,
{
let split_off_idx = if self.index == self.list.len { 0 } else { self.index + 1 };
if self.index == self.list.len {
// The "ghost" non-element's index has changed to 0.
@ -1560,7 +1660,10 @@ impl<'a, T> CursorMut<'a, T> {
/// If the cursor is pointing at the "ghost" non-element then the entire contents
/// of the `LinkedList` are moved.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn split_before(&mut self) -> LinkedList<T> {
pub fn split_before(&mut self) -> LinkedList<T, A>
where
A: Clone,
{
let split_off_idx = self.index;
self.index = 0;
unsafe { self.list.split_off_before_node(self.current, split_off_idx) }
@ -1570,7 +1673,7 @@ impl<'a, T> CursorMut<'a, T> {
/// that the cursor points to is unchanged, even if it is the "ghost" node.
///
/// This operation should compute in *O*(1) time.
// `push_front` continues to point to "ghost" when it addes a node to mimic
// `push_front` continues to point to "ghost" when it adds a node to mimic
// the behavior of `insert_before` on an empty list.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn push_front(&mut self, elt: T) {
@ -1613,7 +1716,7 @@ impl<'a, T> CursorMut<'a, T> {
None
} else {
// We can't point to the node that we pop. Copying the behavior of
// `remove_current`, we move on the the next node in the sequence.
// `remove_current`, we move on to the next node in the sequence.
// If the list is of length 1 then we end up pointing to the "ghost"
// node at index 0, which is expected.
if self.list.head == self.current {
@ -1702,11 +1805,15 @@ impl<'a, T> CursorMut<'a, T> {
/// An iterator produced by calling `drain_filter` on LinkedList.
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
pub struct DrainFilter<'a, T: 'a, F: 'a>
where
pub struct DrainFilter<
'a,
T: 'a,
F: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> where
F: FnMut(&mut T) -> bool,
{
list: &'a mut LinkedList<T>,
list: &'a mut LinkedList<T, A>,
it: Option<NonNull<Node<T>>>,
pred: F,
idx: usize,
@ -1714,7 +1821,7 @@ where
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
impl<T, F> Iterator for DrainFilter<'_, T, F>
impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
@ -1743,16 +1850,16 @@ where
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
impl<T, F> Drop for DrainFilter<'_, T, F>
impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
fn drop(&mut self) {
struct DropGuard<'r, 'a, T, F>(&'r mut DrainFilter<'a, T, F>)
struct DropGuard<'r, 'a, T, F, A: Allocator>(&'r mut DrainFilter<'a, T, F, A>)
where
F: FnMut(&mut T) -> bool;
impl<'r, 'a, T, F> Drop for DropGuard<'r, 'a, T, F>
impl<'r, 'a, T, F, A: Allocator> Drop for DropGuard<'r, 'a, T, F, A>
where
F: FnMut(&mut T) -> bool,
{
@ -1780,7 +1887,7 @@ where
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
type Item = T;
#[inline]
@ -1795,7 +1902,7 @@ impl<T> Iterator for IntoIter<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.list.pop_back()
@ -1803,10 +1910,24 @@ impl<T> DoubleEndedIterator for IntoIter<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {}
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
/// Creates an empty `linked_list::IntoIter`.
///
/// ```
/// # use std::collections::linked_list;
/// let iter: linked_list::IntoIter<u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// ```
fn default() -> Self {
LinkedList::new().into_iter()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for LinkedList<T> {
@ -1818,19 +1939,19 @@ impl<T> FromIterator<T> for LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for LinkedList<T> {
impl<T, A: Allocator> IntoIterator for LinkedList<T, A> {
type Item = T;
type IntoIter = IntoIter<T>;
type IntoIter = IntoIter<T, A>;
/// Consumes the list into an iterator yielding elements by value.
#[inline]
fn into_iter(self) -> IntoIter<T> {
fn into_iter(self) -> IntoIter<T, A> {
IntoIter { list: self }
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a LinkedList<T> {
impl<'a, T, A: Allocator> IntoIterator for &'a LinkedList<T, A> {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
@ -1840,7 +1961,7 @@ impl<'a, T> IntoIterator for &'a LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut LinkedList<T> {
impl<'a, T, A: Allocator> IntoIterator for &'a mut LinkedList<T, A> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T>;
@ -1850,7 +1971,7 @@ impl<'a, T> IntoIterator for &'a mut LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Extend<T> for LinkedList<T> {
impl<T, A: Allocator> Extend<T> for LinkedList<T, A> {
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
<Self as SpecExtend<I>>::spec_extend(self, iter);
}
@ -1861,7 +1982,7 @@ impl<T> Extend<T> for LinkedList<T> {
}
}
impl<I: IntoIterator> SpecExtend<I> for LinkedList<I::Item> {
impl<I: IntoIterator, A: Allocator> SpecExtend<I> for LinkedList<I::Item, A> {
default fn spec_extend(&mut self, iter: I) {
iter.into_iter().for_each(move |elt| self.push_back(elt));
}
@ -1874,7 +1995,7 @@ impl<T> SpecExtend<LinkedList<T>> for LinkedList<T> {
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList<T> {
impl<'a, T: 'a + Copy, A: Allocator> Extend<&'a T> for LinkedList<T, A> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
}
@ -1886,7 +2007,7 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialEq> PartialEq for LinkedList<T> {
impl<T: PartialEq, A: Allocator> PartialEq for LinkedList<T, A> {
fn eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.iter().eq(other)
}
@ -1897,17 +2018,17 @@ impl<T: PartialEq> PartialEq for LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq> Eq for LinkedList<T> {}
impl<T: Eq, A: Allocator> Eq for LinkedList<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd> PartialOrd for LinkedList<T> {
impl<T: PartialOrd, A: Allocator> PartialOrd for LinkedList<T, A> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord> Ord for LinkedList<T> {
impl<T: Ord, A: Allocator> Ord for LinkedList<T, A> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other)
@ -1915,9 +2036,11 @@ impl<T: Ord> Ord for LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for LinkedList<T> {
impl<T: Clone, A: Allocator + Clone> Clone for LinkedList<T, A> {
fn clone(&self) -> Self {
self.iter().cloned().collect()
let mut list = Self::new_in(self.alloc.clone());
list.extend(self.iter().cloned());
list
}
fn clone_from(&mut self, other: &Self) {
@ -1935,14 +2058,14 @@ impl<T: Clone> Clone for LinkedList<T> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for LinkedList<T> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for LinkedList<T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self).finish()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Hash> Hash for LinkedList<T> {
impl<T: Hash, A: Allocator> Hash for LinkedList<T, A> {
fn hash<H: Hasher>(&self, state: &mut H) {
state.write_length_prefix(self.len());
for elt in self {
@ -1982,10 +2105,10 @@ fn assert_covariance() {
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for LinkedList<T> {}
unsafe impl<T: Send, A: Allocator + Send> Send for LinkedList<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for LinkedList<T> {}
unsafe impl<T: Sync, A: Allocator + Sync> Sync for LinkedList<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Send for Iter<'_, T> {}
@ -2000,13 +2123,13 @@ unsafe impl<T: Send> Send for IterMut<'_, T> {}
unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
unsafe impl<T: Sync> Send for Cursor<'_, T> {}
unsafe impl<T: Sync, A: Allocator + Sync> Send for Cursor<'_, T, A> {}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
unsafe impl<T: Sync> Sync for Cursor<'_, T> {}
unsafe impl<T: Sync, A: Allocator + Sync> Sync for Cursor<'_, T, A> {}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
unsafe impl<T: Send> Send for CursorMut<'_, T> {}
unsafe impl<T: Send, A: Allocator + Send> Send for CursorMut<'_, T, A> {}
#[unstable(feature = "linked_list_cursors", issue = "58533")]
unsafe impl<T: Sync> Sync for CursorMut<'_, T> {}
unsafe impl<T: Sync, A: Allocator + Sync> Sync for CursorMut<'_, T, A> {}
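A quick feel for the "ghost" non-element the cursor docs above keep referring to (unstable `linked_list_cursors` feature):

```rust
#![feature(linked_list_cursors)]
use std::collections::LinkedList;

fn main() {
    let mut list = LinkedList::from([1, 2, 3]);
    let mut cur = list.cursor_front_mut();
    cur.move_prev(); // step off the front: now on the ghost non-element
    assert_eq!(cur.current(), None);

    cur.insert_after(0);  // on the ghost, this prepends to the list
    cur.insert_before(4); // ...and this appends to it
    assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![0, 1, 2, 3, 4]);
}
```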

View file

@ -1,10 +1,11 @@
use super::*;
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::vec::Vec;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::thread;
use rand::{thread_rng, RngCore};
use rand::RngCore;
#[test]
fn test_basic() {
@ -480,12 +481,12 @@ fn test_split_off_2() {
}
}
fn fuzz_test(sz: i32) {
fn fuzz_test(sz: i32, rng: &mut impl RngCore) {
let mut m: LinkedList<_> = LinkedList::new();
let mut v = vec![];
for i in 0..sz {
check_links(&m);
let r: u8 = thread_rng().next_u32() as u8;
let r: u8 = rng.next_u32() as u8;
match r % 6 {
0 => {
m.pop_back();
@ -520,11 +521,12 @@ fn fuzz_test(sz: i32) {
#[test]
fn test_fuzz() {
let mut rng = crate::test_helpers::test_rng();
for _ in 0..25 {
fuzz_test(3);
fuzz_test(16);
fuzz_test(3, &mut rng);
fuzz_test(16, &mut rng);
#[cfg(not(miri))] // Miri is too slow
fuzz_test(189);
fuzz_test(189, &mut rng);
}
}
@ -984,35 +986,34 @@ fn drain_filter_complex() {
#[test]
fn drain_filter_drop_panic_leak() {
static mut DROPS: i32 = 0;
struct D(bool);
impl Drop for D {
fn drop(&mut self) {
unsafe {
DROPS += 1;
}
if self.0 {
panic!("panic in `drop`");
}
}
}
let d0 = CrashTestDummy::new(0);
let d1 = CrashTestDummy::new(1);
let d2 = CrashTestDummy::new(2);
let d3 = CrashTestDummy::new(3);
let d4 = CrashTestDummy::new(4);
let d5 = CrashTestDummy::new(5);
let d6 = CrashTestDummy::new(6);
let d7 = CrashTestDummy::new(7);
let mut q = LinkedList::new();
q.push_back(D(false));
q.push_back(D(false));
q.push_back(D(false));
q.push_back(D(false));
q.push_back(D(false));
q.push_front(D(false));
q.push_front(D(true));
q.push_front(D(false));
q.push_back(d3.spawn(Panic::Never));
q.push_back(d4.spawn(Panic::Never));
q.push_back(d5.spawn(Panic::Never));
q.push_back(d6.spawn(Panic::Never));
q.push_back(d7.spawn(Panic::Never));
q.push_front(d2.spawn(Panic::Never));
q.push_front(d1.spawn(Panic::InDrop));
q.push_front(d0.spawn(Panic::Never));
catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).ok();
catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).unwrap_err();
assert_eq!(unsafe { DROPS }, 8);
assert_eq!(d0.dropped(), 1);
assert_eq!(d1.dropped(), 1);
assert_eq!(d2.dropped(), 1);
assert_eq!(d3.dropped(), 1);
assert_eq!(d4.dropped(), 1);
assert_eq!(d5.dropped(), 1);
assert_eq!(d6.dropped(), 1);
assert_eq!(d7.dropped(), 1);
assert!(q.is_empty());
}
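The behaviour the rewritten test pins down can be reproduced without the `CrashTestDummy` helper; a hedged sketch (nightly `drain_filter` feature, and `Loud` is a made-up type):

```rust
#![feature(drain_filter)]

use std::collections::LinkedList;
use std::panic::{catch_unwind, AssertUnwindSafe};

struct Loud(&'static str);
impl Drop for Loud {
    fn drop(&mut self) {
        if self.0 == "boom" {
            panic!("panic in `drop`");
        }
    }
}

fn main() {
    let mut q = LinkedList::new();
    q.push_back(Loud("ok"));
    q.push_back(Loud("boom"));
    q.push_back(Loud("ok"));
    // One destructor panics mid-drain, but the drain must still unlink
    // and drop every element, leaving the list empty.
    catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).unwrap_err();
    assert!(q.is_empty());
}
```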

View file

@ -139,7 +139,7 @@ impl Display for TryReserveError {
" because the computed capacity exceeded the collection's maximum"
}
TryReserveErrorKind::AllocError { .. } => {
" because the memory allocator returned a error"
" because the memory allocator returned an error"
}
};
fmt.write_str(reason)
@ -152,3 +152,6 @@ trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
#[stable(feature = "try_reserve", since = "1.57.0")]
impl core::error::Error for TryReserveError {}
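What the new impl buys in practice, sketched with stable APIs only:

```rust
use std::collections::TryReserveError;
use std::error::Error;

fn grow(v: &mut Vec<u8>, extra: usize) -> Result<(), TryReserveError> {
    v.try_reserve(extra)
}

fn main() {
    let mut v: Vec<u8> = Vec::new();
    // With `Error` implemented, the failure can be boxed and reported
    // uniformly instead of being handled as a special case.
    if let Err(e) = grow(&mut v, usize::MAX) {
        let boxed: Box<dyn Error> = Box::new(e);
        println!("allocation failed: {boxed}");
    }
}
```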

View file

@ -1,10 +1,12 @@
use core::iter::FusedIterator;
use core::ptr::{self, NonNull};
use core::{fmt, mem};
use core::marker::PhantomData;
use core::mem::{self, SizedTypeProperties};
use core::ptr::NonNull;
use core::{fmt, ptr};
use crate::alloc::{Allocator, Global};
use super::{count, Iter, VecDeque};
use super::VecDeque;
/// A draining iterator over the elements of a `VecDeque`.
///
@ -18,20 +20,56 @@ pub struct Drain<
T: 'a,
#[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
after_tail: usize,
after_head: usize,
iter: Iter<'a, T>,
// We can't just use a &mut VecDeque<T, A>, as that would make Drain invariant over T
// and we want it to be covariant instead
deque: NonNull<VecDeque<T, A>>,
// drain_start is stored in deque.len
drain_len: usize,
// index into the logical array, not the physical one (always lies in [0..deque.len))
idx: usize,
// number of elements after the drain range
tail_len: usize,
remaining: usize,
// Needed to make Drain covariant over T
_marker: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
after_tail: usize,
after_head: usize,
iter: Iter<'a, T>,
deque: NonNull<VecDeque<T, A>>,
deque: &'a mut VecDeque<T, A>,
drain_start: usize,
drain_len: usize,
) -> Self {
Drain { after_tail, after_head, iter, deque }
let orig_len = mem::replace(&mut deque.len, drain_start);
let tail_len = orig_len - drain_start - drain_len;
Drain {
deque: NonNull::from(deque),
drain_len,
idx: drain_start,
tail_len,
remaining: drain_len,
_marker: PhantomData,
}
}
// Only returns pointers to the slices, as that's all we need
// to drop them. May only be called if `self.remaining != 0`.
unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
unsafe {
let deque = self.deque.as_ref();
// We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
let logical_remaining_range = self.idx..self.idx + self.remaining;
// SAFETY: `logical_remaining_range` represents the
// range into the logical buffer of elements that
// haven't been drained yet, so they're all initialized,
// and `slice::range(start..end, end) == start..end`,
// so the preconditions for `slice_ranges` are met.
let (a_range, b_range) =
deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
(deque.buffer_range(a_range), deque.buffer_range(b_range))
}
}
}
@ -39,9 +77,10 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Drain")
.field(&self.after_tail)
.field(&self.after_head)
.field(&self.iter)
.field(&self.drain_len)
.field(&self.idx)
.field(&self.tail_len)
.field(&self.remaining)
.finish()
}
}
@ -58,57 +97,81 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
fn drop(&mut self) {
self.0.for_each(drop);
if self.0.remaining != 0 {
unsafe {
// SAFETY: We just checked that `self.remaining != 0`.
let (front, back) = self.0.as_slices();
ptr::drop_in_place(front);
ptr::drop_in_place(back);
}
}
let source_deque = unsafe { self.0.deque.as_mut() };
// T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
//
// T t h H
// [. . . o o x x o o . . .]
//
let orig_tail = source_deque.tail;
let drain_tail = source_deque.head;
let drain_head = self.0.after_tail;
let orig_head = self.0.after_head;
let drain_start = source_deque.len();
let drain_len = self.0.drain_len;
let drain_end = drain_start + drain_len;
let tail_len = count(orig_tail, drain_tail, source_deque.cap());
let head_len = count(drain_head, orig_head, source_deque.cap());
let orig_len = self.0.tail_len + drain_end;
// Restore the original head value
source_deque.head = orig_head;
if T::IS_ZST {
// no need to copy around any memory if T is a ZST
source_deque.len = orig_len - drain_len;
return;
}
match (tail_len, head_len) {
let head_len = drain_start;
let tail_len = self.0.tail_len;
match (head_len, tail_len) {
(0, 0) => {
source_deque.head = 0;
source_deque.tail = 0;
source_deque.len = 0;
}
(0, _) => {
source_deque.tail = drain_head;
source_deque.head = source_deque.to_physical_idx(drain_len);
source_deque.len = orig_len - drain_len;
}
(_, 0) => {
source_deque.head = drain_tail;
source_deque.len = orig_len - drain_len;
}
_ => unsafe {
if tail_len <= head_len {
source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
if head_len <= tail_len {
source_deque.wrap_copy(
source_deque.head,
source_deque.to_physical_idx(drain_len),
head_len,
);
source_deque.head = source_deque.to_physical_idx(drain_len);
source_deque.len = orig_len - drain_len;
} else {
source_deque.head = source_deque.wrap_add(drain_tail, head_len);
source_deque.wrap_copy(drain_tail, drain_head, head_len);
source_deque.wrap_copy(
source_deque.to_physical_idx(head_len + drain_len),
source_deque.to_physical_idx(head_len),
tail_len,
);
source_deque.len = orig_len - drain_len;
}
},
}
}
}
while let Some(item) = self.next() {
let guard = DropGuard(self);
drop(item);
mem::forget(guard);
let guard = DropGuard(self);
if guard.0.remaining != 0 {
unsafe {
// SAFETY: We just checked that `self.remaining != 0`.
let (front, back) = guard.0.as_slices();
// since idx is a logical index, we don't need to worry about wrapping.
guard.0.idx += front.len();
guard.0.remaining -= front.len();
ptr::drop_in_place(front);
guard.0.remaining = 0;
ptr::drop_in_place(back);
}
}
DropGuard(self);
// Dropping `guard` handles moving the remaining elements into place.
}
}
@ -118,12 +181,19 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
self.iter.next().map(|elt| unsafe { ptr::read(elt) })
if self.remaining == 0 {
return None;
}
let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
self.idx += 1;
self.remaining -= 1;
Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
let len = self.remaining;
(len, Some(len))
}
}
@ -131,7 +201,12 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
if self.remaining == 0 {
return None;
}
self.remaining -= 1;
let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
}
}
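The observable contract all of this bookkeeping preserves, checked through the public API:

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = (0..8).collect();
    {
        let mut d = dq.drain(2..6);
        // Consuming from either end just adjusts `idx`/`remaining`.
        assert_eq!(d.next(), Some(2));
        assert_eq!(d.next_back(), Some(5));
        // Dropping the half-consumed drain drops 3 and 4, then moves the
        // shorter of the head/tail runs across the gap.
    }
    assert_eq!(dq, [0, 1, 6, 7]);
}
```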

View file

@ -1,5 +1,6 @@
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
use core::num::NonZeroUsize;
use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr};
use crate::alloc::{Allocator, Global};
@ -11,7 +12,6 @@ use super::VecDeque;
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: VecDeque::into_iter
/// [`IntoIterator`]: core::iter::IntoIterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<
@ -25,6 +25,10 @@ impl<T, A: Allocator> IntoIter<T, A> {
pub(super) fn new(inner: VecDeque<T, A>) -> Self {
IntoIter { inner }
}
pub(super) fn into_vecdeque(self) -> VecDeque<T, A> {
self.inner
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
@ -48,6 +52,127 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
let len = self.inner.len();
(len, Some(len))
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let len = self.inner.len;
let rem = if len < n {
self.inner.clear();
n - len
} else {
self.inner.drain(..n);
0
};
NonZeroUsize::new(rem).map_or(Ok(()), Err)
}
#[inline]
fn count(self) -> usize {
self.inner.len
}
fn try_fold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
struct Guard<'a, T, A: Allocator> {
deque: &'a mut VecDeque<T, A>,
// `consumed <= deque.len` always holds.
consumed: usize,
}
impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> {
fn drop(&mut self) {
self.deque.len -= self.consumed;
self.deque.head = self.deque.to_physical_idx(self.consumed);
}
}
let mut guard = Guard { deque: &mut self.inner, consumed: 0 };
let (head, tail) = guard.deque.as_slices();
init = head
.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: Because we incremented `guard.consumed`, the
// deque effectively forgot the element, so we can take
// ownership
unsafe { ptr::read(elem) }
})
.try_fold(init, &mut f)?;
tail.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: Same as above.
unsafe { ptr::read(elem) }
})
.try_fold(init, &mut f)
}
#[inline]
fn fold<B, F>(mut self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
match self.try_fold(init, |b, item| Ok::<B, !>(f(b, item))) {
Ok(b) => b,
Err(e) => match e {},
}
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.inner.pop_back()
}
fn next_chunk<const N: usize>(
&mut self,
) -> Result<[Self::Item; N], array::IntoIter<Self::Item, N>> {
let mut raw_arr = MaybeUninit::uninit_array();
let raw_arr_ptr = raw_arr.as_mut_ptr().cast();
let (head, tail) = self.inner.as_slices();
if head.len() >= N {
// SAFETY: By manually adjusting the head and length of the deque, we effectively
// make it forget the first `N` elements, so taking ownership of them is safe.
unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, N) };
self.inner.head = self.inner.to_physical_idx(N);
self.inner.len -= N;
// SAFETY: We initialized the entire array with items from `head`
return Ok(unsafe { raw_arr.transpose().assume_init() });
}
// SAFETY: Same argument as above.
unsafe { ptr::copy_nonoverlapping(head.as_ptr(), raw_arr_ptr, head.len()) };
let remaining = N - head.len();
if tail.len() >= remaining {
// SAFETY: Same argument as above.
unsafe {
ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), remaining)
};
self.inner.head = self.inner.to_physical_idx(N);
self.inner.len -= N;
// SAFETY: We initialized the entire array with items from `head` and `tail`
Ok(unsafe { raw_arr.transpose().assume_init() })
} else {
// SAFETY: Same argument as above.
unsafe {
ptr::copy_nonoverlapping(tail.as_ptr(), raw_arr_ptr.add(head.len()), tail.len())
};
let init = head.len() + tail.len();
// We completely drained all the deque's elements.
self.inner.head = 0;
self.inner.len = 0;
// SAFETY: We copied all elements from both slices to the beginning of the array, so
// the given range is initialized.
Err(unsafe { array::IntoIter::new_unchecked(raw_arr, 0..init) })
}
}
}
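A sketch of the fast paths this adds, using only stable iterator methods (`advance_by` and `next_chunk` are nightly-only):

```rust
use std::collections::VecDeque;

fn main() {
    let dq: VecDeque<i32> = (1..=5).collect();
    // `fold` runs over the two contiguous slices of the ring buffer via
    // `try_fold`, rather than bumping head/len once per element.
    assert_eq!(dq.into_iter().fold(0, |acc, x| acc + x), 15);

    let dq: VecDeque<i32> = (1..=5).collect();
    assert_eq!(dq.into_iter().count(), 5); // O(1): just reads `len`

    let dq: VecDeque<i32> = (1..=5).collect();
    assert_eq!(dq.into_iter().last(), Some(5)); // just `pop_back`
}
```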
#[stable(feature = "rust1", since = "1.0.0")]
@ -56,10 +181,74 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
self.inner.pop_back()
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let len = self.inner.len;
let rem = if len < n {
self.inner.clear();
n - len
} else {
self.inner.truncate(len - n);
0
};
NonZeroUsize::new(rem).map_or(Ok(()), Err)
}
fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
struct Guard<'a, T, A: Allocator> {
deque: &'a mut VecDeque<T, A>,
// `consumed <= deque.len` always holds.
consumed: usize,
}
impl<'a, T, A: Allocator> Drop for Guard<'a, T, A> {
fn drop(&mut self) {
self.deque.len -= self.consumed;
}
}
let mut guard = Guard { deque: &mut self.inner, consumed: 0 };
let (head, tail) = guard.deque.as_slices();
init = tail
.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: See `try_fold`'s safety comment.
unsafe { ptr::read(elem) }
})
.try_rfold(init, &mut f)?;
head.iter()
.map(|elem| {
guard.consumed += 1;
// SAFETY: Same as above.
unsafe { ptr::read(elem) }
})
.try_rfold(init, &mut f)
}
#[inline]
fn rfold<B, F>(mut self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
match self.try_rfold(init, |b, item| Ok::<B, !>(f(b, item))) {
Ok(b) => b,
Err(e) => match e {},
}
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[inline]
fn is_empty(&self) -> bool {
self.inner.is_empty()
}

View file

@ -1,9 +1,7 @@
use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::mem::MaybeUninit;
use core::num::NonZeroUsize;
use core::ops::Try;
use super::{count, wrap_index, RingSlices};
use core::{fmt, mem, slice};
/// An iterator over the elements of a `VecDeque`.
///
@ -13,30 +11,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter`]: super::VecDeque::iter
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
ring: &'a [MaybeUninit<T>],
tail: usize,
head: usize,
i1: slice::Iter<'a, T>,
i2: slice::Iter<'a, T>,
}
impl<'a, T> Iter<'a, T> {
pub(super) fn new(ring: &'a [MaybeUninit<T>], tail: usize, head: usize) -> Self {
Iter { ring, tail, head }
pub(super) fn new(i1: slice::Iter<'a, T>, i2: slice::Iter<'a, T>) -> Self {
Self { i1, i2 }
}
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
// Safety:
// - `self.head` and `self.tail` in a ring buffer are always valid indices.
// - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
f.debug_tuple("Iter")
.field(&MaybeUninit::slice_assume_init_ref(front))
.field(&MaybeUninit::slice_assume_init_ref(back))
.finish()
}
f.debug_tuple("Iter").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
@ -44,7 +32,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Clone for Iter<'_, T> {
fn clone(&self) -> Self {
Iter { ring: self.ring, tail: self.tail, head: self.head }
Iter { i1: self.i1.clone(), i2: self.i2.clone() }
}
}
@ -54,72 +42,52 @@ impl<'a, T> Iterator for Iter<'a, T> {
#[inline]
fn next(&mut self) -> Option<&'a T> {
if self.tail == self.head {
return None;
match self.i1.next() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the first one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i1 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i1.next()
}
}
}
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let remaining = self.i1.advance_by(n);
match remaining {
Ok(()) => return Ok(()),
Err(n) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i1.advance_by(n.get())
}
}
let tail = self.tail;
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
// Safety:
// - `self.tail` in a ring buffer is always a valid index.
// - `self.head` and `self.tail` equality is checked above.
unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) }
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len());
let len = self.len();
(len, Some(len))
}
fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
// Safety:
// - `self.head` and `self.tail` in a ring buffer are always valid indices.
// - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
}
let accum = self.i1.fold(accum, &mut f);
self.i2.fold(accum, &mut f)
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let (mut iter, final_res);
if self.tail <= self.head {
// Safety: single slice self.ring[self.tail..self.head] is initialized.
iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
.iter();
final_res = iter.try_fold(init, &mut f);
} else {
// Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
let (front, back) = self.ring.split_at(self.tail);
let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
let res = back_iter.try_fold(init, &mut f);
let len = self.ring.len();
self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
final_res = iter.try_fold(res?, &mut f);
}
self.tail = self.head - iter.len();
final_res
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
if n >= count(self.tail, self.head, self.ring.len()) {
self.tail = self.head;
None
} else {
self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
self.next()
}
let acc = self.i1.try_fold(init, &mut f)?;
self.i2.try_fold(acc, &mut f)
}
#[inline]
@ -132,8 +100,12 @@ impl<'a, T> Iterator for Iter<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
self.ring.get_unchecked(idx).assume_init_ref()
let i1_len = self.i1.len();
if idx < i1_len {
self.i1.__iterator_get_unchecked(idx)
} else {
self.i2.__iterator_get_unchecked(idx - i1_len)
}
}
}
}
@ -142,63 +114,56 @@ impl<'a, T> Iterator for Iter<'a, T> {
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a T> {
if self.tail == self.head {
return None;
match self.i2.next_back() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the second one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i2 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i2.next_back()
}
}
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
// Safety:
// - `self.head` in a ring buffer is always a valid index.
// - `self.head` and `self.tail` equality is checked above.
unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
}
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
match self.i2.advance_back_by(n) {
Ok(()) => return Ok(()),
Err(n) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i2.advance_back_by(n.get())
}
}
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
// Safety:
// - `self.head` and `self.tail` in a ring buffer are always valid indices.
// - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
unsafe {
accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
}
let accum = self.i2.rfold(accum, &mut f);
self.i1.rfold(accum, &mut f)
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let (mut iter, final_res);
if self.tail <= self.head {
// Safety: single slice self.ring[self.tail..self.head] is initialized.
iter = unsafe {
MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
};
final_res = iter.try_rfold(init, &mut f);
} else {
// Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
let (front, back) = self.ring.split_at(self.tail);
let mut front_iter =
unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
let res = front_iter.try_rfold(init, &mut f);
self.head = front_iter.len();
iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
final_res = iter.try_rfold(res?, &mut f);
}
self.head = self.tail + iter.len();
final_res
let acc = self.i2.try_rfold(init, &mut f)?;
self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for Iter<'_, T> {
fn len(&self) -> usize {
self.i1.len() + self.i2.len()
}
fn is_empty(&self) -> bool {
self.head == self.tail
self.i1.is_empty() && self.i2.is_empty()
}
}
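The representation change is invisible through the public API; a quick check of the two-slice behaviour:

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq = VecDeque::with_capacity(4);
    dq.push_back(2);
    dq.push_back(3);
    dq.push_front(1); // wraps: `1` lands at the end of the buffer
    // `Iter` is now literally the pair of slice iterators visible here:
    assert_eq!(dq.as_slices(), (&[1][..], &[2, 3][..]));
    assert_eq!(dq.iter().copied().collect::<Vec<_>>(), [1, 2, 3]);
    assert_eq!(dq.iter().len(), 3); // i1.len() + i2.len()
}
```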

View file

@ -1,8 +1,7 @@
use core::fmt;
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
use core::marker::PhantomData;
use super::{count, wrap_index, RingSlices};
use core::num::NonZeroUsize;
use core::ops::Try;
use core::{fmt, mem, slice};
/// A mutable iterator over the elements of a `VecDeque`.
///
@ -12,39 +11,20 @@ use super::{count, wrap_index, RingSlices};
/// [`iter_mut`]: super::VecDeque::iter_mut
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
// Internal safety invariant: the entire slice is dereferenceable.
ring: *mut [T],
tail: usize,
head: usize,
phantom: PhantomData<&'a mut [T]>,
i1: slice::IterMut<'a, T>,
i2: slice::IterMut<'a, T>,
}
impl<'a, T> IterMut<'a, T> {
pub(super) unsafe fn new(
ring: *mut [T],
tail: usize,
head: usize,
phantom: PhantomData<&'a mut [T]>,
) -> Self {
IterMut { ring, tail, head, phantom }
pub(super) fn new(i1: slice::IterMut<'a, T>, i2: slice::IterMut<'a, T>) -> Self {
Self { i1, i2 }
}
}
// SAFETY: we do nothing thread-local and there is no interior mutability,
// so the usual structural `Send`/`Sync` apply.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send> Send for IterMut<'_, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
// SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
// The `IterMut` invariant also ensures everything is dereferenceable.
let (front, back) = unsafe { (&*front, &*back) };
f.debug_tuple("IterMut").field(&front).field(&back).finish()
f.debug_tuple("IterMut").field(&self.i1.as_slice()).field(&self.i2.as_slice()).finish()
}
}
@ -54,44 +34,51 @@ impl<'a, T> Iterator for IterMut<'a, T> {
#[inline]
fn next(&mut self) -> Option<&'a mut T> {
if self.tail == self.head {
return None;
match self.i1.next() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the first one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i1 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i1.next()
}
}
let tail = self.tail;
self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
}
unsafe {
let elem = self.ring.get_unchecked_mut(tail);
Some(&mut *elem)
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
match self.i1.advance_by(n) {
Ok(()) => return Ok(()),
Err(remaining) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i1.advance_by(remaining.get())
}
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = count(self.tail, self.head, self.ring.len());
let len = self.len();
(len, Some(len))
}
fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
fn fold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
// SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
// The `IterMut` invariant also ensures everything is dereferenceable.
let (front, back) = unsafe { (&mut *front, &mut *back) };
accum = front.iter_mut().fold(accum, &mut f);
back.iter_mut().fold(accum, &mut f)
let accum = self.i1.fold(accum, &mut f);
self.i2.fold(accum, &mut f)
}
fn nth(&mut self, n: usize) -> Option<Self::Item> {
if n >= count(self.tail, self.head, self.ring.len()) {
self.tail = self.head;
None
} else {
self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
self.next()
}
fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i1.try_fold(init, &mut f)?;
self.i2.try_fold(acc, &mut f)
}
#[inline]
@ -104,8 +91,12 @@ impl<'a, T> Iterator for IterMut<'a, T> {
// Safety: The TrustedRandomAccess contract requires that callers only pass an index
// that is in bounds.
unsafe {
let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
&mut *self.ring.get_unchecked_mut(idx)
let i1_len = self.i1.len();
if idx < i1_len {
self.i1.__iterator_get_unchecked(idx)
} else {
self.i2.__iterator_get_unchecked(idx - i1_len)
}
}
}
}
@ -114,34 +105,56 @@ impl<'a, T> Iterator for IterMut<'a, T> {
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
#[inline]
fn next_back(&mut self) -> Option<&'a mut T> {
if self.tail == self.head {
return None;
}
self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
unsafe {
let elem = self.ring.get_unchecked_mut(self.head);
Some(&mut *elem)
match self.i2.next_back() {
Some(val) => Some(val),
None => {
// most of the time, the iterator will either always
// call next(), or always call next_back(). By swapping
// the iterators once the second one is empty, we ensure
// that the first branch is taken as often as possible,
// without sacrificing correctness, as i2 is empty anyways
mem::swap(&mut self.i1, &mut self.i2);
self.i2.next_back()
}
}
}
fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
match self.i2.advance_back_by(n) {
Ok(()) => return Ok(()),
Err(remaining) => {
mem::swap(&mut self.i1, &mut self.i2);
self.i2.advance_back_by(remaining.get())
}
}
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
// SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
// The `IterMut` invariant also ensures everything is dereferenceable.
let (front, back) = unsafe { (&mut *front, &mut *back) };
accum = back.iter_mut().rfold(accum, &mut f);
front.iter_mut().rfold(accum, &mut f)
let accum = self.i2.rfold(accum, &mut f);
self.i1.rfold(accum, &mut f)
}
fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let acc = self.i2.try_rfold(init, &mut f)?;
self.i1.try_rfold(acc, &mut f)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IterMut<'_, T> {
fn len(&self) -> usize {
self.i1.len() + self.i2.len()
}
fn is_empty(&self) -> bool {
self.head == self.tail
self.i1.is_empty() && self.i2.is_empty()
}
}
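Same two-iterator scheme in its mutable flavour; a minimal check that both ends hand out usable `&mut` references:

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = (1..=4).collect();
    let mut it = dq.iter_mut();
    *it.next().unwrap() = 10;      // served by `i1`
    *it.next_back().unwrap() = 40; // served by `i2` (or after the swap)
    drop(it);
    assert_eq!(dq, [10, 2, 3, 40]);
}
```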

File diff suppressed because it is too large

View file

@ -1,67 +0,0 @@
use core::cmp::{self};
use core::mem::replace;
use crate::alloc::Allocator;
use super::VecDeque;
/// PairSlices pairs up equal length slice parts of two deques
///
/// For example, given deques "A" and "B" with the following division into slices:
///
/// A: [0 1 2] [3 4 5]
/// B: [a b] [c d e]
///
/// It produces the following sequence of matching slices:
///
/// ([0 1], [a b])
/// (\[2\], \[c\])
/// ([3 4], [d e])
///
/// and the uneven remainder of either A or B is skipped.
pub struct PairSlices<'a, 'b, T> {
a0: &'a mut [T],
a1: &'a mut [T],
b0: &'b [T],
b1: &'b [T],
}
impl<'a, 'b, T> PairSlices<'a, 'b, T> {
pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
let (a0, a1) = to.as_mut_slices();
let (b0, b1) = from.as_slices();
PairSlices { a0, a1, b0, b1 }
}
pub fn has_remainder(&self) -> bool {
!self.b0.is_empty()
}
pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
IntoIterator::into_iter([self.b0, self.b1])
}
}
impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
type Item = (&'a mut [T], &'b [T]);
fn next(&mut self) -> Option<Self::Item> {
// Get next part length
let part = cmp::min(self.a0.len(), self.b0.len());
if part == 0 {
return None;
}
let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
let (q0, q1) = self.b0.split_at(part);
// Move a1 into a0, if it's empty (and b1, b0 the same way).
self.a0 = p1;
self.b0 = q1;
if self.a0.is_empty() {
self.a0 = replace(&mut self.a1, &mut []);
}
if self.b0.is_empty() {
self.b0 = replace(&mut self.b1, &[]);
}
Some((p0, q0))
}
}

View file

@ -1,56 +0,0 @@
use core::ptr::{self};
/// Returns the two slices that cover the `VecDeque`'s valid range
pub trait RingSlices: Sized {
fn slice(self, from: usize, to: usize) -> Self;
fn split_at(self, i: usize) -> (Self, Self);
fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
let contiguous = tail <= head;
if contiguous {
let (empty, buf) = buf.split_at(0);
(buf.slice(tail, head), empty)
} else {
let (mid, right) = buf.split_at(tail);
let (left, _) = mid.split_at(head);
(right, left)
}
}
}
impl<T> RingSlices for &[T] {
fn slice(self, from: usize, to: usize) -> Self {
&self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at(i)
}
}
impl<T> RingSlices for &mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
&mut self[from..to]
}
fn split_at(self, i: usize) -> (Self, Self) {
(*self).split_at_mut(i)
}
}
impl<T> RingSlices for *mut [T] {
fn slice(self, from: usize, to: usize) -> Self {
assert!(from <= to && to < self.len());
// Not using `get_unchecked_mut` to keep this a safe operation.
let len = to - from;
ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
}
fn split_at(self, mid: usize) -> (Self, Self) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert!(mid <= len);
(
ptr::slice_from_raw_parts_mut(ptr, mid),
ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
)
}
}

View file

@ -1,6 +1,6 @@
use crate::alloc::Allocator;
use crate::vec;
use core::iter::{ByRefSized, TrustedLen};
use core::iter::TrustedLen;
use core::slice;
use super::VecDeque;
@ -17,19 +17,33 @@ where
default fn spec_extend(&mut self, mut iter: I) {
// This function should be the moral equivalent of:
//
// for item in iter {
// self.push_back(item);
// }
while let Some(element) = iter.next() {
if self.len() == self.capacity() {
let (lower, _) = iter.size_hint();
self.reserve(lower.saturating_add(1));
}
// for item in iter {
// self.push_back(item);
// }
let head = self.head;
self.head = self.wrap_add(self.head, 1);
unsafe {
self.buffer_write(head, element);
// May only be called if `deque.len() < deque.capacity()`
unsafe fn push_unchecked<T, A: Allocator>(deque: &mut VecDeque<T, A>, element: T) {
// SAFETY: Because of the precondition, it's guaranteed that there is space
// in the logical array after the last element.
unsafe { deque.buffer_write(deque.to_physical_idx(deque.len), element) };
// This can't overflow because `deque.len() < deque.capacity() <= usize::MAX`.
deque.len += 1;
}
while let Some(element) = iter.next() {
let (lower, _) = iter.size_hint();
self.reserve(lower.saturating_add(1));
// SAFETY: We just reserved space for at least one element.
unsafe { push_unchecked(self, element) };
// Inner loop to avoid repeatedly calling `reserve`.
while self.len < self.capacity() {
let Some(element) = iter.next() else {
return;
};
// SAFETY: The loop condition guarantees that `self.len() < self.capacity()`.
unsafe { push_unchecked(self, element) };
}
}
}
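From the caller's side this is still plain `Extend`, just with one capacity check per `reserve` instead of one per element; for instance:

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = VecDeque::new();
    // One `reserve` per size hint, then unchecked writes into the spare
    // capacity via the inner loop above.
    dq.extend(0..1_000);
    assert_eq!(dq.len(), 1_000);
    assert!(dq.capacity() >= 1_000);
}
```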
@ -39,7 +53,7 @@ impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
where
I: TrustedLen<Item = T>,
{
default fn spec_extend(&mut self, mut iter: I) {
default fn spec_extend(&mut self, iter: I) {
// This is the case for a TrustedLen iterator.
let (low, high) = iter.size_hint();
if let Some(additional) = high {
@ -51,35 +65,12 @@ where
);
self.reserve(additional);
struct WrapAddOnDrop<'a, T, A: Allocator> {
vec_deque: &'a mut VecDeque<T, A>,
written: usize,
}
impl<'a, T, A: Allocator> Drop for WrapAddOnDrop<'a, T, A> {
fn drop(&mut self) {
self.vec_deque.head =
self.vec_deque.wrap_add(self.vec_deque.head, self.written);
}
}
let mut wrapper = WrapAddOnDrop { vec_deque: self, written: 0 };
let head_room = wrapper.vec_deque.cap() - wrapper.vec_deque.head;
unsafe {
wrapper.vec_deque.write_iter(
wrapper.vec_deque.head,
ByRefSized(&mut iter).take(head_room),
&mut wrapper.written,
);
if additional > head_room {
wrapper.vec_deque.write_iter(0, iter, &mut wrapper.written);
}
}
let written = unsafe {
self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
};
debug_assert_eq!(
additional, wrapper.written,
additional, written,
"The number of items written to VecDeque doesn't match the TrustedLen size hint"
);
} else {
@ -99,8 +90,8 @@ impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
self.reserve(slice.len());
unsafe {
self.copy_slice(self.head, slice);
self.head = self.wrap_add(self.head, slice.len());
self.copy_slice(self.to_physical_idx(self.len), slice);
self.len += slice.len();
}
iterator.forget_remaining_elements();
}
@ -125,8 +116,8 @@ where
self.reserve(slice.len());
unsafe {
self.copy_slice(self.head, slice);
self.head = self.wrap_add(self.head, slice.len());
self.copy_slice(self.to_physical_idx(self.len), slice);
self.len += slice.len();
}
}
}

View file

@ -0,0 +1,33 @@
use super::{IntoIter, VecDeque};
/// Specialization trait used for `VecDeque::from_iter`
pub(super) trait SpecFromIter<T, I> {
fn spec_from_iter(iter: I) -> Self;
}
impl<T, I> SpecFromIter<T, I> for VecDeque<T>
where
I: Iterator<Item = T>,
{
default fn spec_from_iter(iterator: I) -> Self {
// Since converting is O(1) now, just re-use the `Vec` logic for
// anything where we can't do something extra-special for `VecDeque`,
// especially as that could save us some monomorphization work
// if one uses the same iterators (like slice ones) with both.
crate::vec::Vec::from_iter(iterator).into()
}
}
impl<T> SpecFromIter<T, crate::vec::IntoIter<T>> for VecDeque<T> {
#[inline]
fn spec_from_iter(iterator: crate::vec::IntoIter<T>) -> Self {
iterator.into_vecdeque()
}
}
impl<T> SpecFromIter<T, IntoIter<T>> for VecDeque<T> {
#[inline]
fn spec_from_iter(iterator: IntoIter<T>) -> Self {
iterator.into_vecdeque()
}
}
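In user code the specialization looks like an ordinary `collect`; for a `vec::IntoIter` it degenerates to handing over the `Vec`'s buffer:

```rust
use std::collections::VecDeque;

fn main() {
    let v = vec![1, 2, 3, 4];
    // `Vec` -> `VecDeque` reuses the allocation (head starts at 0), so
    // this `collect` is O(1) and never copies the elements.
    let dq: VecDeque<i32> = v.into_iter().collect();
    assert_eq!(dq, [1, 2, 3, 4]);
}
```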

View file

@ -3,7 +3,6 @@ use core::iter::TrustedLen;
use super::*;
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@ -11,12 +10,11 @@ fn bench_push_back_100(b: &mut test::Bencher) {
deq.push_back(i);
}
deq.head = 0;
deq.tail = 0;
deq.len = 0;
})
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
@ -24,18 +22,21 @@ fn bench_push_front_100(b: &mut test::Bencher) {
deq.push_front(i);
}
deq.head = 0;
deq.tail = 0;
deq.len = 0;
})
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
let size = 100;
let mut deq = VecDeque::<i32>::with_capacity(size + 1);
// We'll mess with private state to pretend like `deq` is filled.
// Make sure the buffer is initialized so that we don't read uninit memory.
unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| {
deq.head = 100;
deq.tail = 0;
deq.head = 0;
deq.len = 100;
while !deq.is_empty() {
test::black_box(deq.pop_back());
}
@ -43,9 +44,9 @@ fn bench_pop_back_100(b: &mut test::Bencher) {
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_whole_10000(b: &mut test::Bencher) {
let v = (1..100000).collect::<VecDeque<u32>>();
let size = if cfg!(miri) { 1000 } else { 100000 };
let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
@ -54,9 +55,9 @@ fn bench_retain_whole_10000(b: &mut test::Bencher) {
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_odd_10000(b: &mut test::Bencher) {
let v = (1..100000).collect::<VecDeque<u32>>();
let size = if cfg!(miri) { 1000 } else { 100000 };
let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
@ -65,24 +66,27 @@ fn bench_retain_odd_10000(b: &mut test::Bencher) {
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_retain_half_10000(b: &mut test::Bencher) {
let v = (1..100000).collect::<VecDeque<u32>>();
let size = if cfg!(miri) { 1000 } else { 100000 };
let v = (1..size).collect::<VecDeque<u32>>();
b.iter(|| {
let mut v = v.clone();
v.retain(|x| *x > 50000)
v.retain(|x| *x > size / 2)
})
}
#[bench]
#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
fn bench_pop_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
let size = 100;
let mut deq = VecDeque::<i32>::with_capacity(size + 1);
// We'll mess with private state to pretend like `deq` is filled.
// Make sure the buffer is initialized so that we don't read uninit memory.
unsafe { deq.ptr().write_bytes(0u8, size + 1) };
b.iter(|| {
deq.head = 100;
deq.tail = 0;
deq.head = 0;
deq.len = 100;
while !deq.is_empty() {
test::black_box(deq.pop_front());
}
@ -101,9 +105,9 @@ fn test_swap_front_back_remove() {
for len in 0..final_len {
let expected: VecDeque<_> =
if back { (0..len).collect() } else { (0..len).rev().collect() };
for tail_pos in 0..usable_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
for head_pos in 0..usable_cap {
tester.head = head_pos;
tester.len = 0;
if back {
for i in 0..len * 2 {
tester.push_front(i);
@ -120,8 +124,8 @@ fn test_swap_front_back_remove() {
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
}
}
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(tester.head <= tester.capacity());
assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@ -146,18 +150,18 @@ fn test_insert() {
for len in minlen..cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for head_pos in 0..cap {
for to_insert in 0..len {
tester.tail = tail_pos;
tester.head = tail_pos;
tester.head = head_pos;
tester.len = 0;
for i in 0..len {
if i != to_insert {
tester.push_back(i);
}
}
tester.insert(to_insert, to_insert);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(tester.head <= tester.capacity());
assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@ -253,13 +257,14 @@ fn test_swap_panic() {
#[test]
fn test_reserve_exact() {
let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
assert!(tester.capacity() == 1);
assert_eq!(tester.capacity(), 1);
tester.reserve_exact(50);
assert!(tester.capacity() >= 51);
assert_eq!(tester.capacity(), 50);
tester.reserve_exact(40);
assert!(tester.capacity() >= 51);
// reserving won't shrink the buffer
assert_eq!(tester.capacity(), 50);
tester.reserve_exact(200);
assert!(tester.capacity() >= 200);
assert_eq!(tester.capacity(), 200);
}
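Outside the test suite that exactness is observable too; a sketch that holds at the time of this change (allocators may still round up in general):

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = VecDeque::with_capacity(1);
    // No power-of-two-minus-one layout any more, so the request can be
    // honoured exactly (the test above pins this behaviour down).
    dq.reserve_exact(50);
    assert_eq!(dq.capacity(), 50);
}
```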
#[test]
@ -319,6 +324,7 @@ fn test_contains() {
#[test]
fn test_rotate_left_right() {
let mut tester: VecDeque<_> = (1..=10).collect();
tester.reserve(1);
assert_eq!(tester.len(), 10);
@ -459,7 +465,7 @@ fn test_binary_search_key() {
}
#[test]
fn make_contiguous_big_tail() {
fn make_contiguous_big_head() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..3 {
@ -474,14 +480,14 @@ fn make_contiguous_big_tail() {
assert_eq!(tester.capacity(), 15);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
let expected_start = tester.head;
let expected_start = tester.as_slices().1.len();
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
fn make_contiguous_big_head() {
fn make_contiguous_big_tail() {
let mut tester = VecDeque::with_capacity(15);
for i in 0..8 {
@ -495,44 +501,46 @@ fn make_contiguous_big_head() {
// 01234567......98
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!(tester.head, expected_start);
assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
}
#[test]
fn make_contiguous_small_free() {
let mut tester = VecDeque::with_capacity(15);
let mut tester = VecDeque::with_capacity(16);
for i in 'A' as u8..'I' as u8 {
for i in b'A'..b'I' {
tester.push_back(i as char);
}
for i in 'I' as u8..'N' as u8 {
for i in b'I'..b'N' {
tester.push_front(i as char);
}
assert_eq!(tester, ['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']);
// ABCDEFGH...MLKJI
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!(tester.head, expected_start);
assert_eq!(
(&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
tester.as_slices()
);
tester.clear();
for i in 'I' as u8..'N' as u8 {
for i in b'I'..b'N' {
tester.push_back(i as char);
}
for i in 'A' as u8..'I' as u8 {
for i in b'A'..b'I' {
tester.push_front(i as char);
}
// IJKLM...HGFEDCBA
let expected_start = 0;
let expected_start = 3;
tester.make_contiguous();
assert_eq!(tester.tail, expected_start);
assert_eq!(tester.head, expected_start);
assert_eq!(
(&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
tester.as_slices()
@ -541,16 +549,55 @@ fn make_contiguous_small_free() {
#[test]
fn make_contiguous_head_to_end() {
let mut dq = VecDeque::with_capacity(3);
dq.push_front('B');
dq.push_front('A');
dq.push_back('C');
dq.make_contiguous();
let expected_tail = 0;
let expected_head = 3;
assert_eq!(expected_tail, dq.tail);
assert_eq!(expected_head, dq.head);
assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
let mut tester = VecDeque::with_capacity(16);
for i in b'A'..b'L' {
tester.push_back(i as char);
}
for i in b'L'..b'Q' {
tester.push_front(i as char);
}
assert_eq!(
tester,
['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
);
// ABCDEFGHIJKPONML
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.head, expected_start);
assert_eq!(
(
&['P', 'O', 'N', 'M', 'L', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
as &[_],
&[] as &[_]
),
tester.as_slices()
);
tester.clear();
for i in b'L'..b'Q' {
tester.push_back(i as char);
}
for i in b'A'..b'L' {
tester.push_front(i as char);
}
// LMNOPKJIHGFEDCBA
let expected_start = 0;
tester.make_contiguous();
assert_eq!(tester.head, expected_start);
assert_eq!(
(
&['K', 'J', 'I', 'H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'L', 'M', 'N', 'O', 'P']
as &[_],
&[] as &[_]
),
tester.as_slices()
);
}
#[test]
@ -584,10 +631,10 @@ fn test_remove() {
for len in minlen..cap - 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for head_pos in 0..cap {
for to_remove in 0..=len {
tester.tail = tail_pos;
tester.head = tail_pos;
tester.head = head_pos;
tester.len = 0;
for i in 0..len {
if i == to_remove {
tester.push_back(1234);
@ -598,8 +645,8 @@ fn test_remove() {
tester.push_back(1234);
}
tester.remove(to_remove);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(tester.head <= tester.capacity());
assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@ -613,11 +660,11 @@ fn test_range() {
let cap = tester.capacity();
let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
for len in minlen..=cap {
for tail in 0..=cap {
for head in 0..=cap {
for start in 0..=len {
for end in start..=len {
tester.tail = tail;
tester.head = tail;
tester.head = head;
tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
@ -638,17 +685,17 @@ fn test_range_mut() {
let cap = tester.capacity();
for len in 0..=cap {
for tail in 0..=cap {
for head in 0..=cap {
for start in 0..=len {
for end in start..=len {
tester.tail = tail;
tester.head = tail;
tester.head = head;
tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
let head_was = tester.head;
let tail_was = tester.tail;
let len_was = tester.len;
// Check that we iterate over the correct values
let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
@ -658,8 +705,8 @@ fn test_range_mut() {
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
assert_eq!(tester.tail, tail_was);
assert_eq!(tester.head, head_was);
assert_eq!(tester.len, len_was);
}
}
}
@ -672,11 +719,11 @@ fn test_drain() {
let cap = tester.capacity();
for len in 0..=cap {
for tail in 0..=cap {
for head in 0..cap {
for drain_start in 0..=len {
for drain_end in drain_start..=len {
tester.tail = tail;
tester.head = tail;
tester.head = head;
tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
@ -689,8 +736,8 @@ fn test_drain() {
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(tester.head <= tester.capacity());
assert!(tester.len <= tester.capacity());
// We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
@ -701,6 +748,48 @@ fn test_drain() {
}
}
#[test]
fn issue_108453() {
let mut deque = VecDeque::with_capacity(10);
deque.push_back(1u8);
deque.push_back(2);
deque.push_back(3);
deque.push_front(10);
deque.push_front(9);
deque.shrink_to(9);
assert_eq!(deque.into_iter().collect::<Vec<_>>(), vec![9, 10, 1, 2, 3]);
}
#[test]
fn test_shrink_to() {
// test deques with capacity 16 with all possible head positions, lengths and target capacities.
let cap = 16;
for len in 0..cap {
for head in 0..cap {
let expected = (1..=len).collect::<VecDeque<_>>();
for target_cap in len..cap {
let mut deque = VecDeque::with_capacity(cap);
// currently, `with_capacity` always allocates the exact capacity if it's greater than 8.
assert_eq!(deque.capacity(), cap);
// we can let the head point anywhere in the buffer since the deque is empty.
deque.head = head;
deque.extend(1..=len);
deque.shrink_to(target_cap);
assert_eq!(deque, expected);
}
}
}
}
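The public-API shape of what `issue_108453` and `test_shrink_to` exercise, without touching private fields:

```rust
use std::collections::VecDeque;

fn main() {
    let mut dq = VecDeque::with_capacity(16);
    dq.push_back(1u8);
    dq.push_back(2);
    dq.push_front(0); // head now sits at the back of the buffer
    // Shrinking must relocate the wrapped run into the smaller buffer.
    dq.shrink_to(4);
    assert!(dq.capacity() >= 4);
    assert_eq!(dq, [0, 1, 2]);
}
```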
#[test]
fn test_shrink_to_fit() {
// This test checks that every single combination of head and tail position,
@ -717,17 +806,18 @@ fn test_shrink_to_fit() {
for len in 0..=cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..=max_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
for head_pos in 0..=max_cap {
tester.reserve(head_pos);
tester.head = head_pos;
tester.len = 0;
tester.reserve(63);
for i in 0..len {
tester.push_back(i);
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(tester.head <= tester.capacity());
assert!(tester.len <= tester.capacity());
assert_eq!(tester, expected);
}
}
@ -754,17 +844,17 @@ fn test_split_off() {
// at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
tester.tail = tail_pos;
tester.head = tail_pos;
for head_pos in 0..cap {
tester.head = head_pos;
tester.len = 0;
for i in 0..len {
tester.push_back(i);
}
let result = tester.split_off(at);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(result.tail < result.cap());
assert!(result.head < result.cap());
assert!(tester.head <= tester.capacity());
assert!(tester.len <= tester.capacity());
assert!(result.head <= result.capacity());
assert!(result.len <= result.capacity());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
@ -781,16 +871,10 @@ fn test_from_vec() {
vec.extend(0..len);
let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec));
}
}
let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY - 1]);
let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
}
#[test]
@ -842,10 +926,6 @@ fn test_extend_impl(trusted_len: bool) {
}
assert_eq!(self.test, self.expected);
let (a1, b1) = self.test.as_slices();
let (a2, b2) = self.expected.as_slices();
assert_eq!(a1, a2);
assert_eq!(b1, b2);
}
fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
@ -868,7 +948,7 @@ fn test_extend_impl(trusted_len: bool) {
let mut tester = VecDequeTester::new(trusted_len);
// Initial capacity
tester.test_extend(0..tester.remaining_capacity() - 1);
tester.test_extend(0..tester.remaining_capacity());
// Grow
tester.test_extend(1024..2048);
@ -876,7 +956,7 @@ fn test_extend_impl(trusted_len: bool) {
// Wrap around
tester.drain(..128);
tester.test_extend(0..tester.remaining_capacity() - 1);
tester.test_extend(0..tester.remaining_capacity());
// Continue
tester.drain(256..);
@ -888,16 +968,6 @@ fn test_extend_impl(trusted_len: bool) {
tester.test_extend(0..32);
}
#[test]
#[should_panic = "capacity overflow"]
fn test_from_vec_zst_overflow() {
use crate::vec::Vec;
let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY]);
let vd = VecDeque::from(vec.clone()); // no room for +1
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
}
#[test]
fn test_from_array() {
fn test<const N: usize>() {
@ -913,7 +983,6 @@ fn test_from_array() {
assert_eq!(deq[i], i);
}
assert!(deq.cap().is_power_of_two());
assert_eq!(deq.len(), N);
}
test::<0>();
@ -921,11 +990,6 @@ fn test_from_array() {
test::<2>();
test::<32>();
test::<35>();
let array = [(); MAXIMUM_ZST_CAPACITY - 1];
let deq = VecDeque::from(array);
assert!(deq.cap().is_power_of_two());
assert_eq!(deq.len(), MAXIMUM_ZST_CAPACITY - 1);
}
#[test]

View file

@ -108,7 +108,7 @@ use crate::sync::Arc;
/// and other memory errors.
#[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Clone)]
#[cfg_attr(not(test), rustc_diagnostic_item = "cstring_type")]
#[unstable(feature = "alloc_c_string", issue = "94079")]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub struct CString {
// Invariant 1: the slice ends with a zero byte and has a length of at least one.
// Invariant 2: the slice contains only one zero byte.
@ -132,7 +132,7 @@ pub struct CString {
/// let _: NulError = CString::new(b"f\0oo".to_vec()).unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(feature = "alloc_c_string", issue = "94079")]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub struct NulError(usize, Vec<u8>);
#[derive(Clone, PartialEq, Eq, Debug)]
@ -157,7 +157,7 @@ enum FromBytesWithNulErrorKind {
/// let _: FromVecWithNulError = CString::from_vec_with_nul(b"f\0oo".to_vec()).unwrap_err();
/// ```
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(feature = "alloc_c_string", issue = "94079")]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub struct FromVecWithNulError {
error_kind: FromBytesWithNulErrorKind,
bytes: Vec<u8>,
@ -223,7 +223,7 @@ impl FromVecWithNulError {
/// This `struct` is created by [`CString::into_string()`]. See
/// its documentation for more.
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(feature = "alloc_c_string", issue = "94079")]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub struct IntoStringError {
inner: CString,
error: Utf8Error,
@ -436,9 +436,9 @@ impl CString {
///
/// unsafe {
/// assert_eq!(b'f', *ptr as u8);
/// assert_eq!(b'o', *ptr.offset(1) as u8);
/// assert_eq!(b'o', *ptr.offset(2) as u8);
/// assert_eq!(b'\0', *ptr.offset(3) as u8);
/// assert_eq!(b'o', *ptr.add(1) as u8);
/// assert_eq!(b'o', *ptr.add(2) as u8);
/// assert_eq!(b'\0', *ptr.add(3) as u8);
///
/// // retake pointer to free memory
/// let _ = CString::from_raw(ptr);
@ -991,12 +991,6 @@ impl IntoStringError {
pub fn utf8_error(&self) -> Utf8Error {
self.error
}
#[doc(hidden)]
#[unstable(feature = "cstr_internals", issue = "none")]
pub fn __source(&self) -> &Utf8Error {
&self.error
}
}
impl IntoStringError {
@ -1121,3 +1115,26 @@ impl CStr {
CString::from(self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl core::error::Error for NulError {
#[allow(deprecated)]
fn description(&self) -> &str {
"nul byte found in data"
}
}
#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
impl core::error::Error for FromVecWithNulError {}
#[stable(feature = "cstring_into", since = "1.7.0")]
impl core::error::Error for IntoStringError {
#[allow(deprecated)]
fn description(&self) -> &str {
"C string contained non-utf8 bytes"
}
fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
Some(&self.error)
}
}
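A sketch of the user-facing behaviour these impls provide (they previously lived in `std`; moving them here also serves `alloc`-only users):

```rust
use std::error::Error;
use std::ffi::CString;

fn main() {
    // `NulError` implements `Error`, so it boxes and prints uniformly.
    let nul: Box<dyn Error> = Box::new(CString::new(b"f\0oo".to_vec()).unwrap_err());
    println!("{nul}");

    // `IntoStringError::source` exposes the underlying `Utf8Error`.
    let not_utf8 = CString::new(vec![0xFFu8]).unwrap();
    let err = not_utf8.into_string().unwrap_err();
    assert!(err.source().is_some());
}
```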

View file

@ -78,11 +78,11 @@
//! [`String`]: crate::string::String
//! [`CStr`]: core::ffi::CStr
#![unstable(feature = "alloc_ffi", issue = "94079")]
#![stable(feature = "alloc_ffi", since = "1.64.0")]
#[unstable(feature = "alloc_c_string", issue = "94079")]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub use self::c_str::FromVecWithNulError;
#[unstable(feature = "alloc_c_string", issue = "94079")]
#[stable(feature = "alloc_c_string", since = "1.64.0")]
pub use self::c_str::{CString, IntoStringError, NulError};
mod c_str;

View file

@ -327,7 +327,7 @@
//! - `text` must not contain any `'{'` or `'}'` characters,
//! - `ws` is any character for which [`char::is_whitespace`] returns `true`, has no semantic
//! meaning and is completely optional,
//! - `integer` is a decimal integer that may contain leading zeroes and
//! - `integer` is a decimal integer that may contain leading zeroes and must fit into a `usize` and
//! - `identifier` is an `IDENTIFIER_OR_KEYWORD` (not an `IDENTIFIER`) as defined by the [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html).
//!
//! # Formatting traits
@ -363,7 +363,7 @@
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
@ -399,7 +399,7 @@
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
@ -410,7 +410,7 @@
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
@ -419,7 +419,7 @@
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! let string = format!("{magnitude:.decimals$}");
//! f.pad_integral(true, "", &string)
//! }
//! }
@ -517,8 +517,8 @@
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! fn my_fmt_fn(args: fmt::Arguments<'_>) {
//! write!(&mut io::stdout(), "{args}");
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
@ -551,14 +551,12 @@
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
pub use core::fmt::{write, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]

View file

@ -3,7 +3,7 @@
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
//! This library, like libcore, normally doesnt need to be used directly
//! This library, like core, normally doesnt need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so theyd use this crate instead.
@ -56,10 +56,6 @@
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
@ -73,109 +69,134 @@
any(not(feature = "miri-test-libstd"), test, doctest),
no_global_oom_handling,
not(no_global_oom_handling),
not(no_rc),
not(no_sync),
target_has_atomic = "ptr"
))]
#![no_std]
#![needs_allocator]
// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
#![deny(fuzzy_provenance_casts)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
#![warn(multiple_supertrait_upcastable)]
//
// Library features:
#![cfg_attr(not(no_global_oom_handling), feature(alloc_c_string))]
// tidy-alphabetical-start
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
#![cfg_attr(test, feature(is_sorted))]
#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(array_into_iter_constructors)]
#![feature(array_methods)]
#![feature(array_windows)]
#![feature(ascii_char)]
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![feature(const_box)]
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
#![feature(const_cow_is_borrowed)]
#![feature(const_convert)]
#![feature(const_size_of_val)]
#![feature(const_align_of_val)]
#![feature(const_ptr_read)]
#![feature(const_maybe_uninit_write)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_refs_to_cell)]
#![feature(core_c_str)]
#![feature(core_intrinsics)]
#![feature(const_box)]
#![feature(const_cow_is_borrowed)]
#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
#![feature(const_maybe_uninit_zeroed)]
#![feature(const_pin)]
#![feature(cstr_from_bytes_until_nul)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(core_panic)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
#![cfg_attr(test, feature(new_uninit))]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
#![feature(provide_any)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
#![feature(saturating_int_impl)]
#![feature(set_ptr_value)]
#![feature(sized_type_properties)]
#![feature(slice_from_ptr_range)]
#![feature(slice_group_by)]
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
#![feature(tuple_trait)]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(std_internals)]
#![feature(utf8_chunks)]
// tidy-alphabetical-end
//
// Language features:
// tidy-alphabetical-start
#![cfg_attr(not(test), feature(generator_trait))]
#![cfg_attr(test, feature(panic_update_hook))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
#![feature(c_unwind)]
#![feature(cfg_sanitize)]
#![feature(const_deref)]
#![feature(const_mut_refs)]
#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(let_else)]
#![feature(min_specialization)]
#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
#![feature(pointer_is_aligned)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
#![feature(with_negative_coherence)]
// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
@ -192,6 +213,8 @@
extern crate std;
#[cfg(test)]
extern crate test;
#[cfg(test)]
mod testing;
// Module with internal macros used by other modules (needs to be included before other modules).
#[macro_use]
@ -216,16 +239,17 @@ mod boxed {
}
pub mod borrow;
pub mod collections;
#[cfg(not(no_global_oom_handling))]
#[cfg(all(not(no_rc), not(no_sync), not(no_global_oom_handling)))]
pub mod ffi;
pub mod fmt;
#[cfg(not(no_rc))]
pub mod rc;
pub mod slice;
pub mod str;
pub mod string;
#[cfg(target_has_atomic = "ptr")]
#[cfg(all(not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod sync;
#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
#[cfg(all(not(no_global_oom_handling), not(no_rc), not(no_sync), target_has_atomic = "ptr"))]
pub mod task;
#[cfg(test)]
mod tests;
@ -236,3 +260,20 @@ pub mod vec;
pub mod __export {
pub use core::format_args;
}
#[cfg(test)]
#[allow(dead_code)] // Not used in all configurations
pub(crate) mod test_helpers {
/// Copied from `std::test_helpers::test_rng`, since these tests also rely on
/// the seed not being the same for every RNG invocation.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use std::hash::{BuildHasher, Hash, Hasher};
let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
std::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec =
hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
rand::SeedableRng::from_seed(seed)
}
}
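// Usage sketch for the helper above (hypothetical test; assumes the same `rand`
// dev-dependencies the existing tests use):
#[cfg(test)]
mod test_rng_sketch {
    #[test]
    fn yields_a_usable_rng() {
        use rand::RngCore;
        // Each call reseeds from a fresh `RandomState`, so runs are independent.
        let mut rng = crate::test_helpers::test_rng();
        let _roll = rng.next_u32();
    }
}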

View file

@ -48,6 +48,8 @@ macro_rules! vec {
);
($($x:expr),+ $(,)?) => (
$crate::__rust_force_expr!(<[_]>::into_vec(
// This rustc_box is not required, but it produces a dramatic improvement in compile
// time when constructing arrays with many elements.
#[rustc_box]
$crate::boxed::Box::new([$($x),+])
))
@ -107,6 +109,8 @@ macro_rules! vec {
/// format!("test");
/// format!("hello {}", "world!");
/// format!("x = {}, y = {y}", 10, y = 30);
/// let (x, y) = (1, 2);
/// format!("{x} + {y} = 3");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]

View file

@ -3,8 +3,7 @@
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::slice;
@ -168,7 +167,7 @@ impl<T, A: Allocator> RawVec<T, A> {
#[cfg(not(no_global_oom_handling))]
fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if mem::size_of::<T>() == 0 || capacity == 0 {
if T::IS_ZST || capacity == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
@ -229,7 +228,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
if T::IS_ZST { usize::MAX } else { self.cap }
}
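// Observable through `Vec` (comment-only sketch, matching the doc comment
// above): a zero-sized element type reports unbounded capacity without
// allocating, e.g.
//     assert_eq!(Vec::<()>::new().capacity(), usize::MAX);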
/// Returns a shared reference to the allocator backing this `RawVec`.
@ -238,13 +237,18 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
if mem::size_of::<T>() == 0 || self.cap == 0 {
if T::IS_ZST || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
// We could use Layout::array here, which ensures the absence of isize and usize overflows
// and could hypothetically handle differences between stride and size, but this memory
// has already been allocated, so we know it can't overflow, and Rust does not currently
// support such types. So we can do better by skipping some checks and avoiding an unwrap.
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
let align = mem::align_of::<T>();
let size = mem::size_of::<T>().unchecked_mul(self.cap);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
}
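// Equivalence sketch for the reconstruction above (comment-only; uses stable
// `Layout` methods): with stride == size, the manual computation agrees with
// `Layout::array`, e.g. for `u32` and `cap == 7`:
//     let manual = Layout::from_size_align(size_of::<u32>() * 7, align_of::<u32>()).unwrap();
//     assert_eq!(manual, Layout::array::<u32>(7).unwrap());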
@ -380,7 +384,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);
if mem::size_of::<T>() == 0 {
if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
@ -406,7 +410,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if mem::size_of::<T>() == 0 {
if T::IS_ZST {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow.into());
@ -426,11 +430,13 @@ impl<T, A: Allocator> RawVec<T, A> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
// See current_memory() why this assert is here
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
// overflowed earlier when capacity was larger.
let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
let new_size = mem::size_of::<T>().unchecked_mul(cap);
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
self.alloc
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?

View file

@ -15,7 +15,7 @@
//!
//! [`Rc`] uses non-atomic reference counting. This means that overhead is very
//! low, but an [`Rc`] cannot be sent between threads, and consequently [`Rc`]
//! does not implement [`Send`][send]. As a result, the Rust compiler
//! does not implement [`Send`]. As a result, the Rust compiler
//! will check *at compile time* that you are not sending [`Rc`]s between
//! threads. If you need multi-threaded, atomic reference counting, use
//! [`sync::Arc`][arc].
@ -232,7 +232,6 @@
//! [clone]: Clone::clone
//! [`Cell`]: core::cell::Cell
//! [`RefCell`]: core::cell::RefCell
//! [send]: core::marker::Send
//! [arc]: crate::sync::Arc
//! [`Deref`]: core::ops::Deref
//! [downgrade]: Rc::downgrade
@ -251,13 +250,12 @@ use core::any::Any;
use core::borrow;
use core::cell::Cell;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{self, PhantomData, Unpin, Unsize};
use core::marker::{PhantomData, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw, forget};
@ -293,6 +291,15 @@ struct RcBox<T: ?Sized> {
value: T,
}
/// Calculate layout for `RcBox<T>` using the inner value's layout
fn rcbox_layout_for_value_layout(layout: Layout) -> Layout {
// Calculate layout using the given value layout.
// Previously, layout was calculated on the expression
// `&*(ptr as *const RcBox<T>)`, but this created a misaligned
// reference (see #54908).
Layout::new::<RcBox<()>>().extend(layout).unwrap().0.pad_to_align()
}
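// Sanity sketch for the helper above (hypothetical unit test; assumes the two
// `Cell<usize>` reference counts precede the value, as in the struct definition):
#[cfg(test)]
mod rcbox_layout_sketch {
    use super::*;

    #[test]
    fn matches_concrete_rcbox_layout() {
        assert_eq!(
            rcbox_layout_for_value_layout(Layout::new::<u64>()),
            Layout::new::<RcBox<u64>>()
        );
    }
}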
/// A single-threaded reference-counting pointer. 'Rc' stands for 'Reference
/// Counted'.
///
@ -312,7 +319,7 @@ pub struct Rc<T: ?Sized> {
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !marker::Send for Rc<T> {}
impl<T: ?Sized> !Send for Rc<T> {}
// Note that this negative impl isn't strictly necessary for correctness,
// as `Rc` transitively contains a `Cell`, which is itself `!Sync`.
@ -320,14 +327,14 @@ impl<T: ?Sized> !marker::Send for Rc<T> {}
// having an explicit negative impl is nice for documentation purposes
// and results in nicer error messages.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> !marker::Sync for Rc<T> {}
impl<T: ?Sized> !Sync for Rc<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
#[stable(feature = "rc_ref_unwind_safe", since = "1.58.0")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for Rc<T> {}
#[unstable(feature = "coerce_unsized", issue = "27732")]
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@ -672,6 +679,24 @@ impl<T> Rc<T> {
Err(this)
}
}
/// Returns the inner value, if the `Rc` has exactly one strong reference.
///
/// Otherwise, [`None`] is returned and the `Rc` is dropped.
///
/// This will succeed even if there are outstanding weak references.
///
/// If `Rc::into_inner` is called on every clone of this `Rc`,
/// it is guaranteed that exactly one of the calls returns the inner value.
/// This means in particular that the inner value is not dropped.
///
/// This is equivalent to `Rc::try_unwrap(this).ok()`. (Note that these are not equivalent for
/// [`Arc`](crate::sync::Arc), due to race conditions that do not apply to `Rc`.)
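///
/// # Examples
///
/// A minimal sketch of that guarantee, using only the API shown here:
///
/// ```
/// use std::rc::Rc;
///
/// let x = Rc::new(3);
/// let y = Rc::clone(&x);
///
/// assert_eq!(Rc::into_inner(y), None); // `x` still holds a strong reference
/// assert_eq!(Rc::into_inner(x), Some(3)); // the last strong reference wins
/// ```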
#[inline]
#[stable(feature = "rc_into_inner", since = "1.70.0")]
pub fn into_inner(this: Self) -> Option<T> {
Rc::try_unwrap(this).ok()
}
}
impl<T> Rc<[T]> {
@ -1033,7 +1058,7 @@ impl<T: ?Sized> Rc<T> {
#[inline]
#[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
pub unsafe fn decrement_strong_count(ptr: *const T) {
unsafe { mem::drop(Rc::from_raw(ptr)) };
unsafe { drop(Rc::from_raw(ptr)) };
}
/// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
@ -1082,10 +1107,11 @@ impl<T: ?Sized> Rc<T> {
///
/// # Safety
///
/// Any other `Rc` or [`Weak`] pointers to the same allocation must not be dereferenced
/// for the duration of the returned borrow.
/// This is trivially the case if no such pointers exist,
/// for example immediately after `Rc::new`.
/// If any other `Rc` or [`Weak`] pointers to the same allocation exist, then
/// they must not be dereferenced or have active borrows for the duration
/// of the returned borrow, and their inner type must be exactly the same as the
/// inner type of this Rc (including lifetimes). This is trivially the case if no
/// such pointers exist, for example immediately after `Rc::new`.
///
/// # Examples
///
@ -1100,6 +1126,38 @@ impl<T: ?Sized> Rc<T> {
/// }
/// assert_eq!(*x, "foo");
/// ```
/// Other `Rc` pointers to the same allocation must be to the same type.
/// ```no_run
/// #![feature(get_mut_unchecked)]
///
/// use std::rc::Rc;
///
/// let x: Rc<str> = Rc::from("Hello, world!");
/// let mut y: Rc<[u8]> = x.clone().into();
/// unsafe {
/// // this is Undefined Behavior, because x's inner type is str, not [u8]
/// Rc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
/// }
/// println!("{}", &*x); // Invalid UTF-8 in a str
/// ```
/// Other `Rc` pointers to the same allocation must be to the exact same type, including lifetimes.
/// ```no_run
/// #![feature(get_mut_unchecked)]
///
/// use std::rc::Rc;
///
/// let x: Rc<&str> = Rc::new("Hello, world!");
/// {
/// let s = String::from("Oh, no!");
/// let mut y: Rc<&str> = x.clone().into();
/// unsafe {
/// // this is Undefined Behavior, because x's inner type
/// // is &'long str, not &'short str
/// *Rc::get_mut_unchecked(&mut y) = &s;
/// }
/// }
/// println!("{}", &*x); // Use-after-free
/// ```
#[inline]
#[unstable(feature = "get_mut_unchecked", issue = "63292")]
pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
@ -1110,8 +1168,8 @@ impl<T: ?Sized> Rc<T> {
#[inline]
#[stable(feature = "ptr_eq", since = "1.17.0")]
/// Returns `true` if the two `Rc`s point to the same allocation
/// (in a vein similar to [`ptr::eq`]).
/// Returns `true` if the two `Rc`s point to the same allocation in a vein similar to
/// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Examples
///
@ -1142,7 +1200,7 @@ impl<T: Clone> Rc<T> {
/// be cloned.
///
/// See also [`get_mut`], which will fail rather than cloning the inner value
/// or diassociating [`Weak`] pointers.
/// or disassociating [`Weak`] pointers.
///
/// [`clone`]: Clone::clone
/// [`get_mut`]: Rc::get_mut
@ -1334,11 +1392,7 @@ impl<T: ?Sized> Rc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> *mut RcBox<T> {
// Calculate layout using the given value layout.
// Previously, layout was calculated on the expression
// `&*(ptr as *const RcBox<T>)`, but this created a misaligned
// reference (see #54908).
let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
let layout = rcbox_layout_for_value_layout(value_layout);
unsafe {
Rc::try_allocate_for_layout(value_layout, allocate, mem_to_rcbox)
.unwrap_or_else(|_| handle_alloc_error(layout))
@ -1357,11 +1411,7 @@ impl<T: ?Sized> Rc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
) -> Result<*mut RcBox<T>, AllocError> {
// Calculate layout using the given value layout.
// Previously, layout was calculated on the expression
// `&*(ptr as *const RcBox<T>)`, but this created a misaligned
// reference (see #54908).
let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
let layout = rcbox_layout_for_value_layout(value_layout);
// Allocate for the layout.
let ptr = allocate(layout)?;
@ -1386,7 +1436,7 @@ impl<T: ?Sized> Rc<T> {
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
|mem| mem.with_metadata_of(ptr as *mut RcBox<T>),
|mem| mem.with_metadata_of(ptr as *const RcBox<T>),
)
}
}
@ -1428,7 +1478,7 @@ impl<T> Rc<[T]> {
}
}
/// Copy elements from slice into newly allocated Rc<\[T\]>
/// Copy elements from slice into newly allocated `Rc<[T]>`
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`
#[cfg(not(no_global_oom_handling))]
@ -1444,7 +1494,7 @@ impl<T> Rc<[T]> {
///
/// Behavior is undefined should the size be wrong.
#[cfg(not(no_global_oom_handling))]
unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Rc<[T]> {
// Panic guard while cloning T elements.
// In the event of a panic, elements that have been written
// into the new RcBox will be dropped, then the memory freed.
@ -1686,11 +1736,11 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// Inequality for two `Rc`s.
///
/// Two `Rc`s are unequal if their inner values are unequal.
/// Two `Rc`s are not equal if their inner values are not equal.
///
/// If `T` also implements `Eq` (implying reflexivity of equality),
/// two `Rc`s that point to the same allocation are
/// never unequal.
/// always equal.
///
/// # Examples
///
@ -1968,10 +2018,8 @@ impl<T> From<Vec<T>> for Rc<[T]> {
fn from(mut v: Vec<T>) -> Rc<[T]> {
unsafe {
let rc = Rc::copy_from_slice(&v);
// Allow the Vec to free its memory, but not destroy its contents
v.set_len(0);
rc
}
}
@ -1991,7 +2039,7 @@ where
/// ```rust
/// # use std::rc::Rc;
/// # use std::borrow::Cow;
/// let cow: Cow<str> = Cow::Borrowed("eggplant");
/// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
/// let shared: Rc<str> = Rc::from(cow);
/// assert_eq!("eggplant", &shared[..]);
/// ```
@ -2038,7 +2086,7 @@ impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> iter::FromIterator<T> for Rc<[T]> {
impl<T> FromIterator<T> for Rc<[T]> {
/// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
///
/// # Performance characteristics
@ -2077,7 +2125,7 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
/// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
ToRcSlice::to_rc_slice(iter.into_iter())
}
}
@ -2113,7 +2161,7 @@ impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
Rc::from_iter_exact(self, low)
}
} else {
// TrustedLen contract guarantees that `upper_bound == `None` implies an iterator
// TrustedLen contract guarantees that `upper_bound == None` implies an iterator
// length exceeding `usize::MAX`.
// The default implementation would collect into a vec which would panic.
// Thus we panic here immediately without invoking `Vec` code.
@ -2147,18 +2195,18 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesnt need
// to allocate space on the heap. That's not a value a real pointer
// to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<RcBox<T>>,
}
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized> !marker::Send for Weak<T> {}
impl<T: ?Sized> !Send for Weak<T> {}
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized> !marker::Sync for Weak<T> {}
impl<T: ?Sized> !Sync for Weak<T> {}
#[unstable(feature = "coerce_unsized", issue = "27732")]
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@ -2419,9 +2467,9 @@ impl<T: ?Sized> Weak<T> {
}
}
/// Returns `true` if the two `Weak`s point to the same allocation (similar to
/// [`ptr::eq`]), or if both don't point to any allocation
/// (because they were created with `Weak::new()`).
/// Returns `true` if the two `Weak`s point to the same allocation, similar to [`ptr::eq`], or if
/// both don't point to any allocation (because they were created with `Weak::new()`). See [that
/// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Notes
///
@ -2529,7 +2577,7 @@ impl<T: ?Sized> Clone for Weak<T> {
}
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}

View file

@ -151,6 +151,21 @@ fn try_unwrap() {
assert_eq!(Rc::try_unwrap(x), Ok(5));
}
#[test]
fn into_inner() {
let x = Rc::new(3);
assert_eq!(Rc::into_inner(x), Some(3));
let x = Rc::new(4);
let y = Rc::clone(&x);
assert_eq!(Rc::into_inner(x), None);
assert_eq!(Rc::into_inner(y), Some(4));
let x = Rc::new(5);
let _w = Rc::downgrade(&x);
assert_eq!(Rc::into_inner(x), Some(5));
}
#[test]
fn into_from_raw() {
let x = Rc::new(Box::new("hello"));

View file

@ -1,82 +1,12 @@
//! A dynamically-sized view into a contiguous sequence, `[T]`.
//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
//! Slices are a view into a block of memory represented as a pointer and a
//! length.
//! Most of the structs in this module are iterator types which can only be created
//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
//! ```
//! // slicing a Vec
//! let vec = vec![1, 2, 3];
//! let int_slice = &vec[..];
//! // coercing an array to a slice
//! let str_slice: &[&str] = &["one", "two", "three"];
//! ```
//!
//! Slices are either mutable or shared. The shared slice type is `&[T]`,
//! while the mutable slice type is `&mut [T]`, where `T` represents the element
//! type. For example, you can mutate the block of memory that a mutable slice
//! points to:
//!
//! ```
//! let x = &mut [1, 2, 3];
//! x[1] = 7;
//! assert_eq!(x, &[1, 7, 3]);
//! ```
//!
//! Here are some of the things this module contains:
//!
//! ## Structs
//!
//! There are several structs that are useful for slices, such as [`Iter`], which
//! represents iteration over a slice.
//!
//! ## Trait Implementations
//!
//! There are several implementations of common traits for slices. Some examples
//! include:
//!
//! * [`Clone`]
//! * [`Eq`], [`Ord`] - for slices whose element type is [`Eq`] or [`Ord`].
//! * [`Hash`] - for slices whose element type is [`Hash`].
//!
//! ## Iteration
//!
//! The slices implement `IntoIterator`. The iterator yields references to the
//! slice elements.
//!
//! ```
//! let numbers = &[0, 1, 2];
//! for n in numbers {
//! println!("{n} is a number!");
//! }
//! ```
//!
//! The mutable slice yields mutable references to the elements:
//!
//! ```
//! let mut scores = [7, 8, 9];
//! for score in &mut scores[..] {
//! *score += 1;
//! }
//! ```
//!
//! This iterator yields mutable references to the slice's elements, so while
//! the element type of the slice is `i32`, the element type of the iterator is
//! `&mut i32`.
//!
//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
//! iterators.
//! * Further methods that return iterators are [`.split`], [`.splitn`],
//! [`.chunks`], [`.windows`] and more.
//!
//! [`Hash`]: core::hash::Hash
//! [`.iter`]: slice::iter
//! [`.iter_mut`]: slice::iter_mut
//! [`.split`]: slice::split
//! [`.splitn`]: slice::splitn
//! [`.chunks`]: slice::chunks
//! [`.windows`]: slice::windows
//! A few functions are provided to create a slice from a value reference
//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the imports in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
@ -86,20 +16,23 @@ use core::borrow::{Borrow, BorrowMut};
#[cfg(not(no_global_oom_handling))]
use core::cmp::Ordering::{self, Less};
#[cfg(not(no_global_oom_handling))]
use core::mem;
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of;
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
#[cfg(not(no_global_oom_handling))]
use core::slice::sort;
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::Global;
use crate::alloc::{self, Global};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
#[cfg(test)]
mod tests;
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
@ -275,7 +208,7 @@ impl<T> [T] {
where
T: Ord,
{
merge_sort(self, |a, b| a.lt(b));
stable_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
@ -331,7 +264,7 @@ impl<T> [T] {
where
F: FnMut(&T, &T) -> Ordering,
{
merge_sort(self, |a, b| compare(a, b) == Less);
stable_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
@ -374,7 +307,7 @@ impl<T> [T] {
F: FnMut(&T) -> K,
K: Ord,
{
merge_sort(self, |a, b| f(a).lt(&f(b)));
stable_sort(self, |a, b| f(a).lt(&f(b)));
}
/// Sorts the slice with a key extraction function.
@ -530,7 +463,7 @@ impl<T> [T] {
hack::into_vec(self)
}
/// Creates a vector by repeating a slice `n` times.
/// Creates a vector by copying a slice `n` times.
///
/// # Panics
///
@ -725,7 +658,7 @@ impl [u8] {
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
/// --> src/liballoc/slice.rs:608:6
/// --> library/alloc/src/slice.rs:608:6
/// |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
/// | ^ unconstrained type parameter
@ -836,19 +769,51 @@ impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
////////////////////////////////////////////////////////////////////////////////
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Borrow<[T]> for Vec<T> {
impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
fn borrow(&self) -> &[T] {
&self[..]
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> BorrowMut<[T]> for Vec<T> {
impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
fn borrow_mut(&mut self) -> &mut [T] {
&mut self[..]
}
}
// Specializable trait for implementing ToOwned::clone_into. This is
// public in the crate and has the Allocator parameter so that
// vec::clone_from can use it too.
#[cfg(not(no_global_oom_handling))]
pub(crate) trait SpecCloneIntoVec<T, A: Allocator> {
fn clone_into(&self, target: &mut Vec<T, A>);
}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
default fn clone_into(&self, target: &mut Vec<T, A>) {
// drop anything in target that will not be overwritten
target.truncate(self.len());
// target.len <= self.len due to the truncate above, so the
// slices here are always in-bounds.
let (init, tail) = self.split_at(target.len());
// reuse the contained values' allocations/resources.
target.clone_from_slice(init);
target.extend_from_slice(tail);
}
}
#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> SpecCloneIntoVec<T, A> for [T] {
fn clone_into(&self, target: &mut Vec<T, A>) {
target.clear();
target.extend_from_slice(self);
}
}
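// Behavior sketch for the specialization above (hypothetical unit test):
// `clone_into` reuses the target's buffer instead of reallocating.
#[cfg(test)]
mod spec_clone_into_sketch {
    use crate::borrow::ToOwned;
    use crate::vec::Vec;

    #[test]
    fn clone_into_reuses_allocation() {
        let src: &[u8] = b"abc";
        let mut dst: Vec<u8> = Vec::with_capacity(16);
        src.clone_into(&mut dst);
        assert_eq!(dst, src);
        assert!(dst.capacity() >= 16); // the existing buffer was reused
    }
}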
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> ToOwned for [T] {
@ -864,16 +829,7 @@ impl<T: Clone> ToOwned for [T] {
}
fn clone_into(&self, target: &mut Vec<T>) {
// drop anything in target that will not be overwritten
target.truncate(self.len());
// target.len <= self.len due to the truncate above, so the
// slices here are always in-bounds.
let (init, tail) = self.split_at(target.len());
// reuse the contained values' allocations/resources.
target.clone_from_slice(init);
target.extend_from_slice(tail);
SpecCloneIntoVec::clone_into(self, target);
}
}
@ -881,324 +837,52 @@ impl<T: Clone> ToOwned for [T] {
// Sorting
////////////////////////////////////////////////////////////////////////////////
/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
///
/// This is the integral subroutine of insertion sort.
#[inline]
#[cfg(not(no_global_oom_handling))]
fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
if v.len() >= 2 && is_less(&v[1], &v[0]) {
unsafe {
// There are three ways to implement insertion here:
//
// 1. Swap adjacent elements until the first one gets to its final destination.
// However, this way we copy data around more than is necessary. If elements are big
// structures (costly to copy), this method will be slow.
//
// 2. Iterate until the right place for the first element is found. Then shift the
// elements succeeding it to make room for it and finally place it into the
// remaining hole. This is a good method.
//
// 3. Copy the first element into a temporary variable. Iterate until the right place
// for it is found. As we go along, copy every traversed element into the slot
// preceding it. Finally, copy data from the temporary variable into the remaining
// hole. This method is very good. Benchmarks demonstrated slightly better
// performance than with the 2nd method.
//
// All methods were benchmarked, and the 3rd showed best results. So we chose that one.
let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
// Intermediate state of the insertion process is always tracked by `hole`, which
// serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` in the end.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and
// fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
// initially held exactly once.
let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
for i in 2..v.len() {
if !is_less(&v[i], &*tmp) {
break;
}
ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
hole.dest = &mut v[i];
}
// `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
}
}
// When dropped, copies from `src` into `dest`.
struct InsertionHole<T> {
src: *const T,
dest: *mut T,
}
impl<T> Drop for InsertionHole<T> {
fn drop(&mut self) {
unsafe {
ptr::copy_nonoverlapping(self.src, self.dest, 1);
}
}
}
}
/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
/// stores the result into `v[..]`.
///
/// # Safety
///
/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
#[cfg(not(no_global_oom_handling))]
unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
where
F: FnMut(&T, &T) -> bool,
{
let len = v.len();
let v = v.as_mut_ptr();
let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
// The merge process first copies the shorter run into `buf`. Then it traces the newly copied
// run and the longer run forwards (or backwards), comparing their next unconsumed elements and
// copying the lesser (or greater) one into `v`.
//
// As soon as the shorter run is fully consumed, the process is done. If the longer run gets
// consumed first, then we must copy whatever is left of the shorter run into the remaining
// hole in `v`.
//
// Intermediate state of the process is always tracked by `hole`, which serves two purposes:
// 1. Protects integrity of `v` from panics in `is_less`.
// 2. Fills the remaining hole in `v` if the longer run gets consumed first.
//
// Panic safety:
//
// If `is_less` panics at any point during the process, `hole` will get dropped and fill the
// hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
// object it initially held exactly once.
let mut hole;
if mid <= len - mid {
// The left run is shorter.
unsafe {
ptr::copy_nonoverlapping(v, buf, mid);
hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
}
// Initially, these pointers point to the beginnings of their arrays.
let left = &mut hole.start;
let mut right = v_mid;
let out = &mut hole.dest;
while *left < hole.end && right < v_end {
// Consume the lesser side.
// If equal, prefer the left run to maintain stability.
unsafe {
let to_copy = if is_less(&*right, &**left) {
get_and_increment(&mut right)
} else {
get_and_increment(left)
};
ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
}
}
} else {
// The right run is shorter.
unsafe {
ptr::copy_nonoverlapping(v_mid, buf, len - mid);
hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
}
// Initially, these pointers point past the ends of their arrays.
let left = &mut hole.dest;
let right = &mut hole.end;
let mut out = v_end;
while v < *left && buf < *right {
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
unsafe {
let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
};
ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
}
}
}
// Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
// it will now be copied into the hole in `v`.
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
*ptr = unsafe { ptr.offset(1) };
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
*ptr = unsafe { ptr.offset(-1) };
*ptr
}
// When dropped, copies the range `start..end` into `dest..`.
struct MergeHole<T> {
start: *mut T,
end: *mut T,
dest: *mut T,
}
impl<T> Drop for MergeHole<T> {
fn drop(&mut self) {
// `T` is not a zero-sized type, and these are pointers into a slice's elements.
unsafe {
let len = self.end.sub_ptr(self.start);
ptr::copy_nonoverlapping(self.start, self.dest, len);
}
}
}
}
/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
///
/// The algorithm identifies strictly descending and non-descending subsequences, which are called
/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
/// satisfied:
///
/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
///
/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
#[cfg(not(no_global_oom_handling))]
fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
// Slices of up to this length get sorted using insertion sort.
const MAX_INSERTION: usize = 20;
// Very short runs are extended using insertion sort to span at least this many elements.
const MIN_RUN: usize = 10;
// Sorting has no meaningful behavior on zero-sized types.
if size_of::<T>() == 0 {
if T::IS_ZST {
// Sorting has no meaningful behavior on zero-sized types. Do nothing.
return;
}
let len = v.len();
let elem_alloc_fn = |len: usize| -> *mut T {
// SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
// v.len(). The allocation will in general only be used as a 'shadow region' to store
// temporary swap elements.
unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
};
// Short arrays get sorted in-place via insertion sort to avoid allocations.
if len <= MAX_INSERTION {
if len >= 2 {
for i in (0..len - 1).rev() {
insert_head(&mut v[i..], &mut is_less);
}
let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
// SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
// v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
// len.
unsafe {
alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
}
return;
}
};
// Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
// shallow copies of the contents of `v` without risking the dtors running on copies if
// `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
// which will always have length at most `len / 2`.
let mut buf = Vec::with_capacity(len / 2);
// In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
// strange decision, but consider the fact that merges more often go in the opposite direction
// (forwards). According to benchmarks, merging forwards is slightly faster than merging
// backwards. To conclude, identifying runs by traversing backwards improves performance.
let mut runs = vec![];
let mut end = len;
while end > 0 {
// Find the next natural run, and reverse it if it's strictly descending.
let mut start = end - 1;
if start > 0 {
start -= 1;
unsafe {
if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
start -= 1;
}
v[start..end].reverse();
} else {
while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
{
start -= 1;
}
}
}
let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
// SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
// obscene length or 0.
unsafe {
alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
as *mut sort::TimSortRun
}
};
// Insert some more elements into the run if it's too short. Insertion sort is faster than
// merge sort on short sequences, so this significantly improves performance.
while start > 0 && end - start < MIN_RUN {
start -= 1;
insert_head(&mut v[start..end], &mut is_less);
let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
// SAFETY: The caller must ensure that buf_ptr was created by run_alloc_fn with the same
// len.
unsafe {
alloc::dealloc(
buf_ptr as *mut u8,
alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
);
}
};
// Push this run onto the stack.
runs.push(Run { start, len: end - start });
end = start;
// Merge some pairs of adjacent runs to satisfy the invariants.
while let Some(r) = collapse(&runs) {
let left = runs[r + 1];
let right = runs[r];
unsafe {
merge(
&mut v[left.start..right.start + right.len],
left.len,
buf.as_mut_ptr(),
&mut is_less,
);
}
runs[r] = Run { start: left.start, len: left.len + right.len };
runs.remove(r + 1);
}
}
// Finally, exactly one run must remain in the stack.
debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
// Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
// if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
// algorithm should continue building a new run instead, `None` is returned.
//
// TimSort is infamous for its buggy implementations, as described here:
// http://envisage-project.eu/timsort-specification-and-verification/
//
// The gist of the story is: we must enforce the invariants on the top four runs on the stack.
// Enforcing them on just top three is not sufficient to ensure that the invariants will still
// hold for *all* runs in the stack.
//
// This function correctly checks invariants for the top four runs. Additionally, if the top
// run starts at index 0, it will always demand a merge operation until the stack is fully
// collapsed, in order to complete the sort.
#[inline]
fn collapse(runs: &[Run]) -> Option<usize> {
let n = runs.len();
if n >= 2
&& (runs[n - 1].start == 0
|| runs[n - 2].len <= runs[n - 1].len
|| (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
|| (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
{
if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
} else {
None
}
}
#[derive(Clone, Copy)]
struct Run {
start: usize,
len: usize,
}
sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
}
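// Behavior sketch for the driver above (hypothetical unit test): a stable sort
// keeps equal elements in their original relative order.
#[cfg(test)]
mod stable_sort_sketch {
    #[test]
    fn equal_keys_keep_order() {
        let mut v = [(2, 'a'), (1, 'b'), (2, 'c')];
        v.sort_by(|x, y| x.0.cmp(&y.0));
        assert_eq!(v, [(1, 'b'), (2, 'a'), (2, 'c')]);
    }
}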

View file

@ -0,0 +1,359 @@
use crate::borrow::ToOwned;
use crate::rc::Rc;
use crate::string::ToString;
use crate::test_helpers::test_rng;
use crate::vec::Vec;
use core::cell::Cell;
use core::cmp::Ordering::{self, Equal, Greater, Less};
use core::convert::identity;
use core::fmt;
use core::mem;
use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use rand::{distributions::Standard, prelude::*, Rng, RngCore};
use std::panic;
macro_rules! do_test {
($input:ident, $func:ident) => {
let len = $input.len();
// Work out the total number of comparisons required to sort
// this array...
let mut count = 0usize;
$input.to_owned().$func(|a, b| {
count += 1;
a.cmp(b)
});
// ... and then panic on each and every single one.
for panic_countdown in 0..count {
// Refresh the counters.
VERSIONS.store(0, Relaxed);
for i in 0..len {
DROP_COUNTS[i].store(0, Relaxed);
}
let v = $input.to_owned();
let _ = std::panic::catch_unwind(move || {
let mut v = v;
let mut panic_countdown = panic_countdown;
v.$func(|a, b| {
if panic_countdown == 0 {
SILENCE_PANIC.with(|s| s.set(true));
panic!();
}
panic_countdown -= 1;
a.cmp(b)
})
});
// Check that the number of things dropped is exactly
// what we expect (i.e., the contents of `v`).
for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
let count = c.load(Relaxed);
assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
}
// Check that the most recent versions of values were dropped.
assert_eq!(VERSIONS.load(Relaxed), 0);
}
};
}
const MAX_LEN: usize = 80;
static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
// FIXME(RFC 1109): AtomicUsize is not Copy.
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
AtomicUsize::new(0),
];
static VERSIONS: AtomicUsize = AtomicUsize::new(0);
#[derive(Clone, Eq)]
struct DropCounter {
x: u32,
id: usize,
version: Cell<usize>,
}
impl PartialEq for DropCounter {
fn eq(&self, other: &Self) -> bool {
self.partial_cmp(other) == Some(Ordering::Equal)
}
}
impl PartialOrd for DropCounter {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.version.set(self.version.get() + 1);
other.version.set(other.version.get() + 1);
VERSIONS.fetch_add(2, Relaxed);
self.x.partial_cmp(&other.x)
}
}
impl Ord for DropCounter {
fn cmp(&self, other: &Self) -> Ordering {
self.partial_cmp(other).unwrap()
}
}
impl Drop for DropCounter {
fn drop(&mut self) {
DROP_COUNTS[self.id].fetch_add(1, Relaxed);
VERSIONS.fetch_sub(self.version.get(), Relaxed);
}
}
std::thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
#[test]
#[cfg_attr(target_os = "emscripten", ignore)] // no threads
fn panic_safe() {
panic::update_hook(move |prev, info| {
if !SILENCE_PANIC.with(|s| s.get()) {
prev(info);
}
});
let mut rng = test_rng();
// Miri is too slow (but we still need to `chain` to make the types match)
let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
for len in lens {
for &modulus in moduli {
for &has_runs in &[false, true] {
let mut input = (0..len)
.map(|id| DropCounter {
x: rng.next_u32() % modulus,
id: id,
version: Cell::new(0),
})
.collect::<Vec<_>>();
if has_runs {
for c in &mut input {
c.x = c.id as u32;
}
for _ in 0..5 {
let a = rng.gen::<usize>() % len;
let b = rng.gen::<usize>() % len;
if a < b {
input[a..b].reverse();
} else {
input.swap(a, b);
}
}
}
do_test!(input, sort_by);
do_test!(input, sort_unstable_by);
}
}
}
// Set default panic hook again.
drop(panic::take_hook());
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_sort() {
let mut rng = test_rng();
for len in (2..25).chain(500..510) {
for &modulus in &[5, 10, 100, 1000] {
for _ in 0..10 {
let orig: Vec<_> = (&mut rng)
.sample_iter::<i32, _>(&Standard)
.map(|x| x % modulus)
.take(len)
.collect();
// Sort in default order.
let mut v = orig.clone();
v.sort();
assert!(v.windows(2).all(|w| w[0] <= w[1]));
// Sort in ascending order.
let mut v = orig.clone();
v.sort_by(|a, b| a.cmp(b));
assert!(v.windows(2).all(|w| w[0] <= w[1]));
// Sort in descending order.
let mut v = orig.clone();
v.sort_by(|a, b| b.cmp(a));
assert!(v.windows(2).all(|w| w[0] >= w[1]));
// Sort in lexicographic order.
let mut v1 = orig.clone();
let mut v2 = orig.clone();
v1.sort_by_key(|x| x.to_string());
v2.sort_by_cached_key(|x| x.to_string());
assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
assert!(v1 == v2);
// Sort with many pre-sorted runs.
let mut v = orig.clone();
v.sort();
v.reverse();
for _ in 0..5 {
let a = rng.gen::<usize>() % len;
let b = rng.gen::<usize>() % len;
if a < b {
v[a..b].reverse();
} else {
v.swap(a, b);
}
}
v.sort();
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}
// Sort using a completely random comparison function.
// This will reorder the elements *somehow*, but won't panic.
let mut v = [0; 500];
for i in 0..v.len() {
v[i] = i as i32;
}
v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
v.sort();
for i in 0..v.len() {
assert_eq!(v[i], i as i32);
}
// Should not panic.
[0i32; 0].sort();
[(); 10].sort();
[(); 100].sort();
let mut v = [0xDEADBEEFu64];
v.sort();
assert!(v == [0xDEADBEEF]);
}
#[test]
fn test_sort_stability() {
// Miri is too slow
let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
let rounds = if cfg!(miri) { 1 } else { 10 };
let mut rng = test_rng();
for len in (2..25).chain(large_range) {
for _ in 0..rounds {
let mut counts = [0; 10];
// create a vector like [(6, 1), (5, 1), (6, 2), ...],
// where the first item of each tuple is random, but
// the second item represents which occurrence of that
// number this element is, i.e., the second elements
// will occur in sorted order.
let orig: Vec<_> = (0..len)
.map(|_| {
let n = rng.gen::<usize>() % 10;
counts[n] += 1;
(n, counts[n])
})
.collect();
let mut v = orig.clone();
// Only sort on the first element, so an unstable sort
// may mix up the counts.
v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
// This comparison includes the count (the second item
// of the tuple), so elements with equal first items
// will need to be ordered with increasing
// counts... i.e., exactly asserting that this sort is
// stable.
assert!(v.windows(2).all(|w| w[0] <= w[1]));
let mut v = orig.clone();
v.sort_by_cached_key(|&(x, _)| x);
assert!(v.windows(2).all(|w| w[0] <= w[1]));
}
}
}

View file

@ -1,26 +1,6 @@
//! Unicode string slices.
//! Utilities for the `str` primitive type.
//!
//! *[See also the `str` primitive type](str).*
//!
//! The `&str` type is one of the two main string types, the other being `String`.
//! Unlike its `String` counterpart, its contents are borrowed.
//!
//! # Basic Usage
//!
//! A basic string declaration of `&str` type:
//!
//! ```
//! let hello_world = "Hello, World!";
//! ```
//!
//! Here we have declared a string literal, also known as a string slice.
//! String literals have a static lifetime, which means the string `hello_world`
//! is guaranteed to be valid for the duration of the entire program.
//! We can explicitly specify `hello_world`'s lifetime as well:
//!
//! ```
//! let hello_world: &'static str = "Hello, world!";
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the imports in this module are only used in the test configuration.
@ -71,6 +51,8 @@ pub use core::str::{RSplit, Split};
pub use core::str::{RSplitN, SplitN};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitTerminator, SplitTerminator};
#[unstable(feature = "utf8_chunks", issue = "99543")]
pub use core::str::{Utf8Chunk, Utf8Chunks};
/// Note: `str` in `Concat<str>` is not meaningful here.
/// This type parameter of the trait only exists to enable another impl.
@ -274,7 +256,7 @@ impl str {
/// assert_eq!("than an old", s.replace("is", "an"));
/// ```
///
/// When the pattern doesn't match:
/// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
@ -315,7 +297,7 @@ impl str {
/// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
/// ```
///
/// When the pattern doesn't match:
/// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
@ -422,12 +404,12 @@ impl str {
// See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
// for the definition of `Final_Sigma`.
debug_assert!('Σ'.len_utf8() == 2);
let is_word_final = case_ignoreable_then_cased(from[..i].chars().rev())
&& !case_ignoreable_then_cased(from[i + 2..].chars());
let is_word_final = case_ignorable_then_cased(from[..i].chars().rev())
&& !case_ignorable_then_cased(from[i + 2..].chars());
to.push_str(if is_word_final { "ς" } else { "σ" });
}
fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
fn case_ignorable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
use core::unicode::{Case_Ignorable, Cased};
match iter.skip_while(|&c| Case_Ignorable(c)).next() {
Some(c) => Cased(c),
@ -577,10 +559,9 @@ impl str {
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> String {
let mut bytes = self.as_bytes().to_vec();
bytes.make_ascii_uppercase();
// make_ascii_uppercase() preserves the UTF-8 invariant.
unsafe { String::from_utf8_unchecked(bytes) }
let mut s = self.to_owned();
s.make_ascii_uppercase();
s
}
/// Returns a copy of this string where each character is mapped to its
@ -610,10 +591,9 @@ impl str {
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> String {
let mut bytes = self.as_bytes().to_vec();
bytes.make_ascii_lowercase();
// make_ascii_lowercase() preserves the UTF-8 invariant.
unsafe { String::from_utf8_unchecked(bytes) }
let mut s = self.to_owned();
s.make_ascii_lowercase();
s
}
}

View file

@ -42,13 +42,12 @@
#![stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(no_global_oom_handling))]
use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
use core::error::Error;
use core::fmt;
use core::hash;
use core::iter::FusedIterator;
#[cfg(not(no_global_oom_handling))]
use core::iter::{from_fn, FromIterator};
use core::iter::from_fn;
use core::iter::FusedIterator;
#[cfg(not(no_global_oom_handling))]
use core::ops::Add;
#[cfg(not(no_global_oom_handling))]
@ -58,15 +57,15 @@ use core::ops::Bound::{Excluded, Included, Unbounded};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr;
use core::slice;
#[cfg(not(no_global_oom_handling))]
use core::str::lossy;
use core::str::pattern::Pattern;
#[cfg(not(no_global_oom_handling))]
use core::str::Utf8Chunks;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::str::{self, Chars, Utf8Error};
use crate::str::{self, from_utf8_unchecked_mut, Chars, Utf8Error};
#[cfg(not(no_global_oom_handling))]
use crate::str::{from_boxed_utf8_unchecked, FromStr};
use crate::vec::Vec;
@ -317,11 +316,11 @@ use crate::vec::Vec;
///
/// ```text
/// 0
/// 5
/// 10
/// 20
/// 20
/// 40
/// 8
/// 16
/// 16
/// 32
/// 32
/// ```
///
/// At first, we have no memory allocated at all, but as we append to the
@ -360,9 +359,9 @@ use crate::vec::Vec;
/// [Deref]: core::ops::Deref "ops::Deref"
/// [`Deref`]: core::ops::Deref "ops::Deref"
/// [`as_str()`]: String::as_str
#[derive(PartialOrd, Eq, Ord)]
#[cfg_attr(not(test), rustc_diagnostic_item = "String")]
#[derive(PartialEq, PartialOrd, Eq, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), lang = "String")]
pub struct String {
vec: Vec<u8>,
}
@ -628,11 +627,11 @@ impl String {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
let mut iter = Utf8Chunks::new(v);
let first_valid = if let Some(chunk) = iter.next() {
let lossy::Utf8LossyChunk { valid, broken } = chunk;
if broken.is_empty() {
let valid = chunk.valid();
if chunk.invalid().is_empty() {
debug_assert_eq!(valid.len(), v.len());
return Cow::Borrowed(valid);
}
@ -647,9 +646,9 @@ impl String {
res.push_str(first_valid);
res.push_str(REPLACEMENT);
for lossy::Utf8LossyChunk { valid, broken } in iter {
res.push_str(valid);
if !broken.is_empty() {
for chunk in iter {
res.push_str(chunk.valid());
if !chunk.invalid().is_empty() {
res.push_str(REPLACEMENT);
}
}
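The rewrite above swaps the old `Utf8Lossy` chunks for the new `Utf8Chunks` API without changing the observable contract; a small sketch of the borrowed-versus-owned behavior it preserves:

```rust
use std::borrow::Cow;

fn main() {
    // Fully valid input: the first chunk is valid with an empty invalid
    // tail, so no allocation happens and a borrowed Cow comes back.
    let ok = String::from_utf8_lossy(b"hello");
    assert!(matches!(ok, Cow::Borrowed("hello")));

    // An invalid byte splits the input into chunks; each invalid run is
    // replaced by U+FFFD in an owned result.
    let bad = String::from_utf8_lossy(b"he\xFFllo");
    assert_eq!(bad, "he\u{FFFD}llo");
    assert!(matches!(bad, Cow::Owned(_)));
}
```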
@ -682,7 +681,7 @@ impl String {
// This isn't done via collect::<Result<_, _>>() for performance reasons.
// FIXME: the function can be simplified again when #48994 is closed.
let mut ret = String::with_capacity(v.len());
for c in decode_utf16(v.iter().cloned()) {
for c in char::decode_utf16(v.iter().cloned()) {
if let Ok(c) = c {
ret.push(c);
} else {
@ -721,7 +720,9 @@ impl String {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf16_lossy(v: &[u16]) -> String {
decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
char::decode_utf16(v.iter().cloned())
.map(|r| r.unwrap_or(char::REPLACEMENT_CHARACTER))
.collect()
}
/// Decomposes a `String` into its raw components.
@ -927,12 +928,12 @@ impl String {
/// Copies elements from `src` range to the end of the string.
///
/// ## Panics
/// # Panics
///
/// Panics if the starting point or end point do not lie on a [`char`]
/// boundary, or if they're out of bounds.
///
/// ## Examples
/// # Examples
///
/// ```
/// #![feature(string_extend_from_within)]
@ -948,7 +949,7 @@ impl String {
/// assert_eq!(string, "abcdecdeabecde");
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "string_extend_from_within", issue = "none")]
#[unstable(feature = "string_extend_from_within", issue = "103806")]
pub fn extend_from_within<R>(&mut self, src: R)
where
R: RangeBounds<usize>,
@ -1080,7 +1081,8 @@ impl String {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
/// `Ok(())`. Does nothing if capacity is already sufficient.
/// `Ok(())`. Does nothing if capacity is already sufficient. This method
/// preserves the contents even if an error occurs.
///
/// # Errors
///
@ -1847,6 +1849,35 @@ impl String {
let slice = self.vec.into_boxed_slice();
unsafe { from_boxed_utf8_unchecked(slice) }
}
/// Consumes and leaks the `String`, returning a mutable reference to the contents,
/// `&'a mut str`.
///
/// This is mainly useful for data that lives for the remainder of
/// the program's life. Dropping the returned reference will cause a memory
/// leak.
///
/// It does not reallocate or shrink the `String`,
/// so the leaked allocation may include unused capacity that is not part
/// of the returned slice.
///
/// # Examples
///
/// Simple usage:
///
/// ```
/// #![feature(string_leak)]
///
/// let x = String::from("bucket");
/// let static_ref: &'static mut str = x.leak();
/// assert_eq!(static_ref, "bucket");
/// ```
#[unstable(feature = "string_leak", issue = "102929")]
#[inline]
pub fn leak<'a>(self) -> &'a mut str {
let slice = self.vec.leak();
unsafe { from_utf8_unchecked_mut(slice) }
}
}
impl FromUtf8Error {
@ -1938,6 +1969,22 @@ impl fmt::Display for FromUtf16Error {
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Error for FromUtf8Error {
#[allow(deprecated)]
fn description(&self) -> &str {
"invalid utf-8"
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Error for FromUtf16Error {
#[allow(deprecated)]
fn description(&self) -> &str {
"invalid utf-16"
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
@ -2160,18 +2207,6 @@ impl<'a, 'b> Pattern<'a> for &'b String {
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl PartialEq for String {
#[inline]
fn eq(&self, other: &String) -> bool {
PartialEq::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String) -> bool {
PartialEq::ne(&self[..], &other[..])
}
}
macro_rules! impl_eq {
($lhs:ty, $rhs: ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
@ -2212,8 +2247,7 @@ impl_eq! { Cow<'a, str>, &'b str }
impl_eq! { Cow<'a, str>, String }
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl const Default for String {
impl Default for String {
/// Creates an empty `String`.
#[inline]
fn default() -> String {
@ -2492,6 +2526,15 @@ impl<T: fmt::Display + ?Sized> ToString for T {
}
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "ascii_char", issue = "110998")]
impl ToString for core::ascii::Char {
#[inline]
fn to_string(&self) -> String {
self.as_str().to_owned()
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "char_to_string_specialization", since = "1.46.0")]
impl ToString for char {
@ -2501,6 +2544,15 @@ impl ToString for char {
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "bool_to_string_specialization", since = "1.68.0")]
impl ToString for bool {
#[inline]
fn to_string(&self) -> String {
String::from(if *self { "true" } else { "false" })
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
impl ToString for u8 {
@ -2571,6 +2623,15 @@ impl ToString for String {
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "fmt_arguments_to_string_specialization", since = "1.71.0")]
impl ToString for fmt::Arguments<'_> {
#[inline]
fn to_string(&self) -> String {
crate::fmt::format(*self)
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl AsRef<str> for String {
#[inline]
@ -2631,7 +2692,7 @@ impl From<&String> for String {
}
}
// note: test pulls in libstd, which causes errors here
// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "string_from_box", since = "1.18.0")]
impl From<Box<str>> for String {
@ -2689,7 +2750,7 @@ impl<'a> From<Cow<'a, str>> for String {
/// ```
/// # use std::borrow::Cow;
/// // If the string is not owned...
/// let cow: Cow<str> = Cow::Borrowed("eggplant");
/// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
/// // It will allocate on the heap and copy the string.
/// let owned: String = String::from(cow);
/// assert_eq!(&owned[..], "eggplant");

View file

@ -3,18 +3,21 @@
//! Thread-safe reference-counting pointers.
//!
//! See the [`Arc<T>`][Arc] documentation for more details.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::hint;
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::{PhantomData, Unpin, Unsize};
use core::marker::{PhantomData, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw};
@ -47,8 +50,16 @@ mod tests;
///
/// Going above this limit will abort your program (although not
/// necessarily) at _exactly_ `MAX_REFCOUNT + 1` references.
/// Trying to go above it might instead cause a `panic`, without the count
/// ever actually going above the limit.
///
/// This is a global invariant, and also applies when using a compare-exchange loop.
///
/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
/// The error in case either counter reaches above `MAX_REFCOUNT`, and we can `panic` safely.
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
($x:expr) => {
@ -82,6 +93,11 @@ macro_rules! acquire {
/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
/// types.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers, which includes all platforms that support
/// the `std` crate but not all those which only support [`alloc`](crate).
/// This may be detected at compile time using `#[cfg(target_has_atomic = "ptr")]`.
///
/// ## Thread Safety
///
/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
@ -171,8 +187,6 @@ macro_rules! acquire {
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
/// [`Send`]: core::marker::Send
/// [`Sync`]: core::marker::Sync
/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
@ -245,7 +259,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
#[unstable(feature = "coerce_unsized", issue = "27732")]
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@ -286,7 +300,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn't need
// to allocate space on the heap. That's not a value a real pointer
// to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<ArcInner<T>>,
@ -297,13 +311,13 @@ unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
#[unstable(feature = "coerce_unsized", issue = "27732")]
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}
@ -324,6 +338,15 @@ struct ArcInner<T: ?Sized> {
data: T,
}
/// Calculate layout for `ArcInner<T>` using the inner value's layout
fn arcinner_layout_for_value_layout(layout: Layout) -> Layout {
// Calculate layout using the given value layout.
// Previously, layout was calculated on the expression
// `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
// reference (see #54908).
Layout::new::<ArcInner<()>>().extend(layout).unwrap().0.pad_to_align()
}
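A sketch of the same header-plus-value layout arithmetic the new helper performs, using a stand-in `Header` struct since the real `ArcInner` is private; `Header` and the concrete sizes in the comments are illustrative assumptions (64-bit target):

```rust
use std::alloc::Layout;
use std::mem;

// Two words, like the strong/weak counters in ArcInner.
#[repr(C)]
struct Header {
    strong: usize,
    weak: usize,
}

fn main() {
    let value_layout = Layout::new::<u8>();
    // `extend` appends the value after the header and returns the combined
    // layout plus the value's field offset; `pad_to_align` rounds the size
    // up to a multiple of the alignment so the layout is allocator-valid.
    let (combined, offset) = Layout::new::<Header>().extend(value_layout).unwrap();
    let combined = combined.pad_to_align();

    // The value lands right after the header (u8 needs no extra padding).
    assert_eq!(offset, mem::size_of::<Header>());
    // On a 64-bit target: 16-byte header + 1-byte value = 17 bytes,
    // padded up to 24 (the next multiple of the 8-byte alignment).
    assert_eq!(combined.size() % combined.align(), 0);
}
```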
unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
@ -479,6 +502,7 @@ impl<T> Arc<T> {
/// assert_eq!(*five, 5)
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
@ -512,6 +536,7 @@ impl<T> Arc<T> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
@ -636,6 +661,17 @@ impl<T> Arc<T> {
///
/// This will succeed even if there are outstanding weak references.
///
/// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
/// want to keep the `Arc` in the [`Err`] case.
/// Immediately dropping the [`Err`] payload, like in the expression
/// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
/// drop to zero and the inner value of the `Arc` to be dropped:
/// For instance if two threads each execute this expression in parallel, then
/// there is a race condition. The threads could first both check whether they
/// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
/// both drop their `Arc` in the call to [`ok`][`Result::ok`],
/// taking the strong count from two down to zero.
///
/// # Examples
///
/// ```
@ -667,6 +703,123 @@ impl<T> Arc<T> {
Ok(elem)
}
}
/// Returns the inner value, if the `Arc` has exactly one strong reference.
///
/// Otherwise, [`None`] is returned and the `Arc` is dropped.
///
/// This will succeed even if there are outstanding weak references.
///
/// If `Arc::into_inner` is called on every clone of this `Arc`,
/// it is guaranteed that exactly one of the calls returns the inner value.
/// This means in particular that the inner value is not dropped.
///
/// The similar expression `Arc::try_unwrap(this).ok()` does not
/// offer such a guarantee. See the last example below
/// and the documentation of [`Arc::try_unwrap`].
///
/// # Examples
///
/// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
/// ```
/// use std::sync::Arc;
///
/// let x = Arc::new(3);
/// let y = Arc::clone(&x);
///
/// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
/// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
/// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
///
/// let x_inner_value = x_thread.join().unwrap();
/// let y_inner_value = y_thread.join().unwrap();
///
/// // One of the threads is guaranteed to receive the inner value:
/// assert!(matches!(
/// (x_inner_value, y_inner_value),
/// (None, Some(3)) | (Some(3), None)
/// ));
/// // The result could also be `(None, None)` if the threads called
/// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
/// ```
///
/// A more practical example demonstrating the need for `Arc::into_inner`:
/// ```
/// use std::sync::Arc;
///
/// // Definition of a simple singly linked list using `Arc`:
/// #[derive(Clone)]
/// struct LinkedList<T>(Option<Arc<Node<T>>>);
/// struct Node<T>(T, Option<Arc<Node<T>>>);
///
/// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
/// // can cause a stack overflow. To prevent this, we can provide a
/// // manual `Drop` implementation that does the destruction in a loop:
/// impl<T> Drop for LinkedList<T> {
/// fn drop(&mut self) {
/// let mut link = self.0.take();
/// while let Some(arc_node) = link.take() {
/// if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
/// link = next;
/// }
/// }
/// }
/// }
///
/// // Implementation of `new` and `push` omitted
/// impl<T> LinkedList<T> {
/// /* ... */
/// # fn new() -> Self {
/// # LinkedList(None)
/// # }
/// # fn push(&mut self, x: T) {
/// # self.0 = Some(Arc::new(Node(x, self.0.take())));
/// # }
/// }
///
/// // The following code could have still caused a stack overflow
/// // despite the manual `Drop` impl if that `Drop` impl had used
/// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
///
/// // Create a long list and clone it
/// let mut x = LinkedList::new();
/// for i in 0..100000 {
/// x.push(i); // Adds i to the front of x
/// }
/// let y = x.clone();
///
/// // Drop the clones in parallel
/// let x_thread = std::thread::spawn(|| drop(x));
/// let y_thread = std::thread::spawn(|| drop(y));
/// x_thread.join().unwrap();
/// y_thread.join().unwrap();
/// ```
#[inline]
#[stable(feature = "arc_into_inner", since = "1.70.0")]
pub fn into_inner(this: Self) -> Option<T> {
// Make sure that the ordinary `Drop` implementation isn't called as well
let mut this = mem::ManuallyDrop::new(this);
// Following the implementation of `drop` and `drop_slow`
if this.inner().strong.fetch_sub(1, Release) != 1 {
return None;
}
acquire!(this.inner().strong);
// SAFETY: This mirrors the line
//
// unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
//
// in `drop_slow`. Instead of dropping the value behind the pointer,
// it is read and eventually returned; `ptr::read` has the same
// safety conditions as `ptr::drop_in_place`.
let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
drop(Weak { ptr: this.ptr });
Some(inner)
}
}
impl<T> Arc<[T]> {
@ -693,6 +846,7 @@ impl<T> Arc<[T]> {
/// assert_eq!(*values, [1, 2, 3])
/// ```
#[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
@ -720,6 +874,7 @@ impl<T> Arc<[T]> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
@ -941,6 +1096,9 @@ impl<T: ?Sized> Arc<T> {
continue;
}
// We can't allow the refcount to increase much past `MAX_REFCOUNT`.
assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
// NOTE: this code currently ignores the possibility of overflow
// into usize::MAX; in general both Rc and Arc need to be adjusted
// to deal with overflow.
@ -1084,7 +1242,7 @@ impl<T: ?Sized> Arc<T> {
#[inline]
#[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
pub unsafe fn decrement_strong_count(ptr: *const T) {
unsafe { mem::drop(Arc::from_raw(ptr)) };
unsafe { drop(Arc::from_raw(ptr)) };
}
#[inline]
@ -1108,8 +1266,8 @@ impl<T: ?Sized> Arc<T> {
drop(Weak { ptr: self.ptr });
}
/// Returns `true` if the two `Arc`s point to the same allocation
/// (in a vein similar to [`ptr::eq`]).
/// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
/// [`ptr::eq`]. See [that function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Examples
///
@ -1145,15 +1303,11 @@ impl<T: ?Sized> Arc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
// Calculate layout using the given value layout.
// Previously, layout was calculated on the expression
// `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
// reference (see #54908).
let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
unsafe {
Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
.unwrap_or_else(|_| handle_alloc_error(layout))
}
let layout = arcinner_layout_for_value_layout(value_layout);
let ptr = allocate(layout).unwrap_or_else(|_| handle_alloc_error(layout));
unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) }
}
/// Allocates an `ArcInner<T>` with sufficient space for
@ -1167,15 +1321,20 @@ impl<T: ?Sized> Arc<T> {
allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> Result<*mut ArcInner<T>, AllocError> {
// Calculate layout using the given value layout.
// Previously, layout was calculated on the expression
// `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
// reference (see #54908).
let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
let layout = arcinner_layout_for_value_layout(value_layout);
let ptr = allocate(layout)?;
// Initialize the ArcInner
let inner = unsafe { Self::initialize_arcinner(ptr, layout, mem_to_arcinner) };
Ok(inner)
}
unsafe fn initialize_arcinner(
ptr: NonNull<[u8]>,
layout: Layout,
mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
@ -1184,7 +1343,7 @@ impl<T: ?Sized> Arc<T> {
ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
}
Ok(inner)
inner
}
/// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
@ -1195,7 +1354,7 @@ impl<T: ?Sized> Arc<T> {
Self::allocate_for_layout(
Layout::for_value(&*ptr),
|layout| Global.allocate(layout),
|mem| mem.with_metadata_of(ptr as *mut ArcInner<T>),
|mem| mem.with_metadata_of(ptr as *const ArcInner<T>),
)
}
}
@ -1237,7 +1396,7 @@ impl<T> Arc<[T]> {
}
}
/// Copy elements from slice into newly allocated Arc<\[T\]>
/// Copy elements from slice into newly allocated `Arc<[T]>`
///
/// Unsafe because the caller must either take ownership or bind `T: Copy`.
#[cfg(not(no_global_oom_handling))]
@ -1255,7 +1414,7 @@ impl<T> Arc<[T]> {
///
/// Behavior is undefined should the size be wrong.
#[cfg(not(no_global_oom_handling))]
unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
// Panic guard while cloning T elements.
// In the event of a panic, elements that have been written
// into the new ArcInner will be dropped, then the memory freed.
@ -1364,6 +1523,11 @@ impl<T: ?Sized> Clone for Arc<T> {
// the worst already happened and we actually do overflow the `usize` counter. However, that
// requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
// above and the `abort` below, which seems exceedingly unlikely.
//
// This is a global invariant, and also applies when using a compare-exchange loop to increment
// counters in other methods.
// Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
// and then overflow using a few `fetch_add`s.
if old_size > MAX_REFCOUNT {
abort();
}
@ -1577,10 +1741,11 @@ impl<T: ?Sized> Arc<T> {
///
/// # Safety
///
/// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
/// for the duration of the returned borrow.
/// This is trivially the case if no such pointers exist,
/// for example immediately after `Arc::new`.
/// If any other `Arc` or [`Weak`] pointers to the same allocation exist, then
/// they must not be dereferenced or have active borrows for the duration
/// of the returned borrow, and their inner type must be exactly the same as the
/// inner type of this Arc (including lifetimes). This is trivially the case if no
/// such pointers exist, for example immediately after `Arc::new`.
///
/// # Examples
///
@ -1595,6 +1760,38 @@ impl<T: ?Sized> Arc<T> {
/// }
/// assert_eq!(*x, "foo");
/// ```
/// Other `Arc` pointers to the same allocation must be to the same type.
/// ```no_run
/// #![feature(get_mut_unchecked)]
///
/// use std::sync::Arc;
///
/// let x: Arc<str> = Arc::from("Hello, world!");
/// let mut y: Arc<[u8]> = x.clone().into();
/// unsafe {
/// // this is Undefined Behavior, because x's inner type is str, not [u8]
/// Arc::get_mut_unchecked(&mut y).fill(0xff); // 0xff is invalid in UTF-8
/// }
/// println!("{}", &*x); // Invalid UTF-8 in a str
/// ```
/// Other `Arc` pointers to the same allocation must be to the exact same type, including lifetimes.
/// ```no_run
/// #![feature(get_mut_unchecked)]
///
/// use std::sync::Arc;
///
/// let x: Arc<&str> = Arc::new("Hello, world!");
/// {
/// let s = String::from("Oh, no!");
/// let mut y: Arc<&str> = x.clone().into();
/// unsafe {
/// // this is Undefined Behavior, because x's inner type
/// // is &'long str, not &'short str
/// *Arc::get_mut_unchecked(&mut y) = &s;
/// }
/// }
/// println!("{}", &*x); // Use-after-free
/// ```
#[inline]
#[unstable(feature = "get_mut_unchecked", issue = "63292")]
pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
@ -1613,7 +1810,7 @@ impl<T: ?Sized> Arc<T> {
//
// The acquire label here ensures a happens-before relationship with any
// writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
// of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
// of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
// weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
// This needs to be an `Acquire` to synchronize with the decrement of the `strong`
@ -1669,7 +1866,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
}
// This fence is needed to prevent reordering of use of the data and
// deletion of the data. Because it is marked `Release`, the decreasing
// deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
@ -1980,33 +2177,24 @@ impl<T: ?Sized> Weak<T> {
// We use a CAS loop to increment the strong count instead of a
// fetch_add as this function should never take the reference count
// from zero to one.
let inner = self.inner()?;
// Relaxed load because any write of 0 that we can observe
// leaves the field in a permanently zero state (so a
// "stale" read of 0 is fine), and any other value is
// confirmed via the CAS below.
let mut n = inner.strong.load(Relaxed);
loop {
if n == 0 {
return None;
}
// See comments in `Arc::clone` for why we do this (for `mem::forget`).
if n > MAX_REFCOUNT {
abort();
}
self.inner()?
.strong
// Relaxed is fine for the failure case because we don't have any expectations about the new state.
// Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
// value can be initialized after `Weak` references have already been created. In that case, we
// expect to observe the fully initialized value.
match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
Ok(_) => return Some(unsafe { Arc::from_inner(self.ptr) }), // null checked above
Err(old) => n = old,
}
}
.fetch_update(Acquire, Relaxed, |n| {
// Any write of 0 we can observe leaves the field in permanently zero state.
if n == 0 {
return None;
}
// See comments in `Arc::clone` for why we do this (for `mem::forget`).
assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
Some(n + 1)
})
.ok()
// null checked above
.map(|_| unsafe { Arc::from_inner(self.ptr) })
}
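A sketch of the `fetch_update` pattern the rewrite adopts, on a bare `AtomicUsize`: the closure sees the current value and either vetoes the update by returning `None` or proposes a new value, and `fetch_update` retries the compare-exchange internally. The counter values here are illustrative:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Mirrors the upgrade logic: refuse to resurrect a dead (zero) count,
// otherwise bump it by one.
fn try_increment(count: &AtomicUsize) -> Option<usize> {
    count
        .fetch_update(Ordering::Acquire, Ordering::Relaxed, |n| {
            if n == 0 { None } else { Some(n + 1) }
        })
        .ok() // Ok(previous value) on success, Err(current) if vetoed
}

fn main() {
    let strong = AtomicUsize::new(1);
    assert_eq!(try_increment(&strong), Some(1));
    assert_eq!(strong.load(Ordering::Relaxed), 2);

    // A zero count stays zero: the closure returned None.
    let dead = AtomicUsize::new(0);
    assert_eq!(try_increment(&dead), None);
}
```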
/// Gets the number of strong (`Arc`) pointers pointing to this allocation.
@ -2067,9 +2255,9 @@ impl<T: ?Sized> Weak<T> {
}
}
/// Returns `true` if the two `Weak`s point to the same allocation (similar to
/// [`ptr::eq`]), or if both don't point to any allocation
/// (because they were created with `Weak::new()`).
/// Returns `true` if the two `Weak`s point to the same allocation similar to [`ptr::eq`], or if
/// both don't point to any allocation (because they were created with `Weak::new()`). See [that
/// function][`ptr::eq`] for caveats when comparing `dyn Trait` pointers.
///
/// # Notes
///
@ -2136,7 +2324,7 @@ impl<T: ?Sized> Clone for Weak<T> {
} else {
return Weak { ptr: self.ptr };
};
// See comments in Arc::clone() for why this is relaxed. This can use a
// See comments in Arc::clone() for why this is relaxed. This can use a
// fetch_add (ignoring the lock) because the weak count is only locked
// where are *no other* weak pointers in existence. (So we can't be
// running this code in that case).
@ -2280,10 +2468,10 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
/// Inequality for two `Arc`s.
///
/// Two `Arc`s are unequal if their inner values are unequal.
/// Two `Arc`s are not equal if their inner values are not equal.
///
/// If `T` also implements `Eq` (implying reflexivity of equality),
/// two `Arc`s that point to the same value are never unequal.
/// two `Arc`s that point to the same value are always equal.
///
/// # Examples
///
@ -2571,12 +2759,10 @@ impl<T> From<Vec<T>> for Arc<[T]> {
#[inline]
fn from(mut v: Vec<T>) -> Arc<[T]> {
unsafe {
let arc = Arc::copy_from_slice(&v);
let rc = Arc::copy_from_slice(&v);
// Allow the Vec to free its memory, but not destroy its contents
v.set_len(0);
arc
rc
}
}
}
@ -2595,7 +2781,7 @@ where
/// ```rust
/// # use std::sync::Arc;
/// # use std::borrow::Cow;
/// let cow: Cow<str> = Cow::Borrowed("eggplant");
/// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
/// let shared: Arc<str> = Arc::from(cow);
/// assert_eq!("eggplant", &shared[..]);
/// ```
@ -2642,7 +2828,7 @@ impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
impl<T> iter::FromIterator<T> for Arc<[T]> {
impl<T> FromIterator<T> for Arc<[T]> {
/// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
///
/// # Performance characteristics
@ -2681,7 +2867,7 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
/// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
ToArcSlice::to_arc_slice(iter.into_iter())
}
}
@ -2716,7 +2902,7 @@ impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
Arc::from_iter_exact(self, low)
}
} else {
// TrustedLen contract guarantees that `upper_bound == `None` implies an iterator
// TrustedLen contract guarantees that `upper_bound == None` implies an iterator
// length exceeding `usize::MAX`.
// The default implementation would collect into a vec which would panic.
// Thus we panic here immediately without invoking `Vec` code.
@ -2763,3 +2949,24 @@ fn data_offset_align(align: usize) -> usize {
let layout = Layout::new::<ArcInner<()>>();
layout.size() + layout.padding_needed_for(align)
}
#[stable(feature = "arc_error", since = "1.52.0")]
impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
#[allow(deprecated, deprecated_in_future)]
fn description(&self) -> &str {
core::error::Error::description(&**self)
}
#[allow(deprecated)]
fn cause(&self) -> Option<&dyn core::error::Error> {
core::error::Error::cause(&**self)
}
fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
core::error::Error::source(&**self)
}
fn provide<'a>(&'a self, req: &mut core::any::Demand<'a>) {
core::error::Error::provide(&**self, req);
}
}

View file

@ -101,6 +101,38 @@ fn try_unwrap() {
assert_eq!(Arc::try_unwrap(x), Ok(5));
}
#[test]
fn into_inner() {
for _ in 0..100
// ^ Increase chances of hitting potential race conditions
{
let x = Arc::new(3);
let y = Arc::clone(&x);
let r_thread = std::thread::spawn(|| Arc::into_inner(x));
let s_thread = std::thread::spawn(|| Arc::into_inner(y));
let r = r_thread.join().expect("r_thread panicked");
let s = s_thread.join().expect("s_thread panicked");
assert!(
matches!((r, s), (None, Some(3)) | (Some(3), None)),
"assertion failed: unexpected result `{:?}`\
\n expected `(None, Some(3))` or `(Some(3), None)`",
(r, s),
);
}
let x = Arc::new(3);
assert_eq!(Arc::into_inner(x), Some(3));
let x = Arc::new(4);
let y = Arc::clone(&x);
assert_eq!(Arc::into_inner(x), None);
assert_eq!(Arc::into_inner(y), Some(4));
let x = Arc::new(5);
let _w = Arc::downgrade(&x);
assert_eq!(Arc::into_inner(x), Some(5));
}
#[test]
fn into_from_raw() {
let x = Arc::new(Box::new("hello"));
@ -618,3 +650,22 @@ fn test_arc_cyclic_two_refs() {
assert_eq!(Arc::strong_count(&two_refs), 3);
assert_eq!(Arc::weak_count(&two_refs), 2);
}
/// Test for Arc::drop bug (https://github.com/rust-lang/rust/issues/55005)
#[test]
#[cfg(miri)] // relies on Stacked Borrows in Miri
fn arc_drop_dereferenceable_race() {
// The bug seems to take up to 700 iterations to reproduce with most seeds (tested 0-9).
for _ in 0..750 {
let arc_1 = Arc::new(());
let arc_2 = arc_1.clone();
let thread = thread::spawn(|| drop(arc_2));
// Spin a bit; makes the race more likely to appear
let mut i = 0;
while i < 256 {
i += 1;
}
drop(arc_1);
thread.join().unwrap();
}
}

View file

@ -1,5 +1,11 @@
#![stable(feature = "wake_trait", since = "1.51.0")]
//! Types and Traits for working with asynchronous tasks.
//!
//! **Note**: This module is only available on platforms that support atomic
//! loads and stores of pointers. This may be detected at compile time using
//! `#[cfg(target_has_atomic = "ptr")]`.
use core::mem::ManuallyDrop;
use core::task::{RawWaker, RawWakerVTable, Waker};
@ -33,6 +39,7 @@ use crate::sync::Arc;
/// use std::sync::Arc;
/// use std::task::{Context, Poll, Wake};
/// use std::thread::{self, Thread};
/// use core::pin::pin;
///
/// /// A waker that wakes up the current thread when called.
/// struct ThreadWaker(Thread);
@ -46,7 +53,7 @@ use crate::sync::Arc;
/// /// Run a future to completion on the current thread.
/// fn block_on<T>(fut: impl Future<Output = T>) -> T {
/// // Pin the future so it can be polled.
/// let mut fut = Box::pin(fut);
/// let mut fut = pin!(fut);
///
/// // Create a new context to be passed to the future.
/// let t = thread::current();

View file

@ -4,7 +4,6 @@ use core::any::Any;
use core::clone::Clone;
use core::convert::TryInto;
use core::ops::Deref;
use core::result::Result::{Err, Ok};
use std::boxed::Box;
@ -15,7 +14,7 @@ fn test_owned_clone() {
assert!(a == b);
}
#[derive(PartialEq, Eq)]
#[derive(Debug, PartialEq, Eq)]
struct Test;
#[test]
@ -23,24 +22,17 @@ fn any_move() {
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
match a.downcast::<i32>() {
Ok(a) => {
assert!(a == Box::new(8));
}
Err(..) => panic!(),
}
match b.downcast::<Test>() {
Ok(a) => {
assert!(a == Box::new(Test));
}
Err(..) => panic!(),
}
let a: Box<i32> = a.downcast::<i32>().unwrap();
assert_eq!(*a, 8);
let b: Box<Test> = b.downcast::<Test>().unwrap();
assert_eq!(*b, Test);
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
assert!(a.downcast::<Box<Test>>().is_err());
assert!(b.downcast::<Box<i32>>().is_err());
assert!(a.downcast::<Box<i32>>().is_err());
assert!(b.downcast::<Box<Test>>().is_err());
}
#[test]

View file

@ -1,5 +1,4 @@
use crate::borrow::Cow;
use core::iter::FromIterator;
use super::Vec;

View file

@ -1,7 +1,7 @@
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
use core::mem;
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self, NonNull};
use core::slice::{self};
@ -16,7 +16,7 @@ use super::Vec;
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::Drain<_> = v.drain(..);
/// let iter: std::vec::Drain<'_, _> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
@ -65,6 +65,75 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
/// Keep unyielded elements in the source `Vec`.
///
/// # Examples
///
/// ```
/// #![feature(drain_keep_rest)]
///
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain(..);
///
/// assert_eq!(drain.next().unwrap(), 'a');
///
/// // This call keeps 'b' and 'c' in the vec.
/// drain.keep_rest();
///
/// // If we wouldn't call `keep_rest()`,
/// // `vec` would be empty.
/// assert_eq!(vec, ['b', 'c']);
/// ```
#[unstable(feature = "drain_keep_rest", issue = "101122")]
pub fn keep_rest(self) {
// At this moment layout looks like this:
//
// [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
//        ^-- start         \_________/-- unyielded_len        \____/-- self.tail_len
//                          ^-- unyielded_ptr                  ^-- tail
//
// Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
// Here we want to
// 1. Move [unyielded] to `start`
// 2. Move [tail] to a new start at `start + len(unyielded)`
// 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
// a. In case of ZST, this is the only thing we want to do
// 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
let mut this = ManuallyDrop::new(self);
unsafe {
let source_vec = this.vec.as_mut();
let start = source_vec.len();
let tail = this.tail_start;
let unyielded_len = this.iter.len();
let unyielded_ptr = this.iter.as_slice().as_ptr();
// ZSTs have no identity, so we don't need to move them around.
if !T::IS_ZST {
let start_ptr = source_vec.as_mut_ptr().add(start);
// memmove back unyielded elements
if unyielded_ptr != start_ptr {
let src = unyielded_ptr;
let dst = start_ptr;
ptr::copy(src, dst, unyielded_len);
}
// memmove back untouched tail
if tail != (start + unyielded_len) {
let src = source_vec.as_ptr().add(tail);
let dst = start_ptr.add(unyielded_len);
ptr::copy(src, dst, this.tail_len);
}
}
source_vec.set_len(start + unyielded_len + this.tail_len);
}
}
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
@ -126,12 +195,12 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
}
let iter = mem::replace(&mut self.iter, (&mut []).iter());
let iter = mem::take(&mut self.iter);
let drop_len = iter.len();
let mut vec = self.vec;
if mem::size_of::<T>() == 0 {
if T::IS_ZST {
// ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
// this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
unsafe {
@ -152,9 +221,9 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
// as_slice() must only be called when iter.len() is > 0 because
// vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
// the iterator's internal pointers. Creating a reference to deallocated memory
// is invalid even when it is zero-length
// it also gets touched by vec::Splice which may turn it into a dangling pointer
// which would make it and the vec pointer point to different allocations which would
// lead to invalid pointer arithmetic below.
let drop_ptr = iter.as_slice().as_ptr();
unsafe {

View file

@ -1,6 +1,7 @@
use crate::alloc::{Allocator, Global};
use core::ptr::{self};
use core::slice::{self};
use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ptr;
use core::slice;
use super::Vec;
@ -15,7 +16,7 @@ use super::Vec;
/// #![feature(drain_filter)]
///
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
/// let iter: std::vec::DrainFilter<'_, _, _> = v.drain_filter(|x| *x % 2 == 0);
/// ```
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
#[derive(Debug)]
@ -54,6 +55,59 @@ where
pub fn allocator(&self) -> &A {
self.vec.allocator()
}
/// Keep unyielded elements in the source `Vec`.
///
/// # Examples
///
/// ```
/// #![feature(drain_filter)]
/// #![feature(drain_keep_rest)]
///
/// let mut vec = vec!['a', 'b', 'c'];
/// let mut drain = vec.drain_filter(|_| true);
///
/// assert_eq!(drain.next().unwrap(), 'a');
///
/// // This call keeps 'b' and 'c' in the vec.
/// drain.keep_rest();
///
/// // If we wouldn't call `keep_rest()`,
/// // `vec` would be empty.
/// assert_eq!(vec, ['b', 'c']);
/// ```
#[unstable(feature = "drain_keep_rest", issue = "101122")]
pub fn keep_rest(self) {
// At this moment layout looks like this:
//
//  _____________________/-- old_len
// /                     \
// [kept] [yielded] [tail]
//        \_______/ ^-- idx
//               \-- del
//
// Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
//
// 1. Move [tail] after [kept]
// 2. Update length of the original vec to `old_len - del`
// a. In case of ZST, this is the only thing we want to do
// 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
let mut this = ManuallyDrop::new(self);
unsafe {
// ZSTs have no identity, so we don't need to move them around.
if !T::IS_ZST && this.idx < this.old_len && this.del > 0 {
let ptr = this.vec.as_mut_ptr();
let src = ptr.add(this.idx);
let dst = src.sub(this.del);
let tail_len = this.old_len - this.idx;
src.copy_to(dst, tail_len);
}
let new_len = this.old_len - this.del;
this.vec.set_len(new_len);
}
}
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]

View file

@ -55,6 +55,9 @@
//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
//!
//! If dropping any remaining source item (`T`) panics then [`InPlaceDstBufDrop`] will handle dropping
//! the already collected sink items (`U`) and freeing the allocation.
//!
//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
//!
//! # O(1) collect
@ -135,10 +138,10 @@
//! vec.truncate(write_idx);
//! ```
use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
use core::mem::{self, ManuallyDrop};
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
use core::ptr::{self};
use super::{InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
use super::{InPlaceDrop, InPlaceDstBufDrop, SpecFromIter, SpecFromIterNested, Vec};
/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
/// source allocation, i.e. executing the pipeline in place.
@ -154,7 +157,7 @@ where
default fn from_iter(mut iterator: I) -> Self {
// See "Layout constraints" section in the module documentation. We rely on const
// optimization here since these conditions currently cannot be expressed as trait bounds
if mem::size_of::<T>() == 0
if T::IS_ZST
|| mem::size_of::<T>()
!= mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
|| mem::align_of::<T>()
@ -175,7 +178,8 @@ where
)
};
let len = SpecInPlaceCollect::collect_in_place(&mut iterator, dst_buf, dst_end);
// SAFETY: `dst_buf` and `dst_end` are the start and end of the buffer.
let len = unsafe { SpecInPlaceCollect::collect_in_place(&mut iterator, dst_buf, dst_end) };
let src = unsafe { iterator.as_inner().as_into_iter() };
// check if SourceIter contract was upheld
@ -191,14 +195,17 @@ where
);
}
// Drop any remaining values at the tail of the source but prevent drop of the allocation
// itself once IntoIter goes out of scope.
// If the drop panics then we also leak any elements collected into dst_buf.
// The ownership of the allocation and the new `T` values is temporarily moved into `dst_guard`.
// This is safe because `forget_allocation_drop_remaining` immediately forgets the allocation
// before any panic can occur in order to avoid any double free, and then proceeds to drop
// any remaining values at the tail of the source.
//
// Note: This access to the source wouldn't be allowed by the TrustedRandomIteratorNoCoerce
// contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
// module documenttation why this is ok anyway.
// module documentation why this is ok anyway.
let dst_guard = InPlaceDstBufDrop { ptr: dst_buf, len, cap };
src.forget_allocation_drop_remaining();
mem::forget(dst_guard);
let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
@ -233,7 +240,7 @@ trait SpecInPlaceCollect<T, I>: Iterator<Item = T> {
/// `Iterator::__iterator_get_unchecked` calls with a `TrustedRandomAccessNoCoerce` bound
/// on `I` which means the caller of this method must take the safety conditions
/// of that trait into consideration.
fn collect_in_place(&mut self, dst: *mut T, end: *const T) -> usize;
unsafe fn collect_in_place(&mut self, dst: *mut T, end: *const T) -> usize;
}
impl<T, I> SpecInPlaceCollect<T, I> for I
@ -241,7 +248,7 @@ where
I: Iterator<Item = T>,
{
#[inline]
default fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
default unsafe fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
// use try-fold since
// - it vectorizes better for some iterator adapters
// - unlike most internal iteration methods, it only takes a &mut self
@ -259,7 +266,7 @@ where
I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
{
#[inline]
fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
unsafe fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
let len = self.size();
let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
for i in 0..len {
@ -267,7 +274,7 @@ where
// one slot in the underlying storage will have been freed up and we can immediately
// write back the result.
unsafe {
let dst = dst_buf.offset(i as isize);
let dst = dst_buf.add(i);
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
ptr::write(dst, self.__iterator_get_unchecked(i));
// Since this executes user code which can panic we have to bump the pointer

View file

@ -22,3 +22,18 @@ impl<T> Drop for InPlaceDrop<T> {
}
}
}
// A helper struct for in-place collection that drops the destination allocation and elements,
// to avoid leaking them if some other destructor panics.
pub(super) struct InPlaceDstBufDrop<T> {
pub(super) ptr: *mut T,
pub(super) len: usize,
pub(super) cap: usize,
}
impl<T> Drop for InPlaceDstBufDrop<T> {
#[inline]
fn drop(&mut self) {
unsafe { super::Vec::from_raw_parts(self.ptr, self.len, self.cap) };
}
}
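A sketch of the disarm-on-success guard idiom that `InPlaceDstBufDrop` implements; the names here are illustrative stand-ins, not the real (private) types:

```rust
// A guard owning the partially built output is created up front, so a panic
// anywhere in the fallible region drops it and runs the cleanup; on success
// the payload is moved out and the guard is disarmed with `mem::forget`,
// mirroring the `mem::forget(dst_guard)` call in the in-place collect path.
struct CleanupGuard {
    items: Vec<u32>, // stands in for the raw ptr/len/cap triple
}

impl Drop for CleanupGuard {
    fn drop(&mut self) {
        // Only reached if the fallible region below panicked.
        eprintln!("dropping {} partially collected items", self.items.len());
    }
}

fn build() -> Vec<u32> {
    let guard = CleanupGuard { items: vec![1, 2, 3] };
    // ... fallible work that might panic would run here ...
    // Success: move the payload out bitwise, then forget the guard so its
    // `Drop` (and a double free of `items`) never runs.
    let items = unsafe { std::ptr::read(&guard.items) };
    std::mem::forget(guard);
    items
}

fn main() {
    assert_eq!(build(), vec![1, 2, 3]);
}
```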

View file

@ -1,14 +1,17 @@
#[cfg(not(no_global_oom_handling))]
use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
#[cfg(not(no_global_oom_handling))]
use crate::collections::VecDeque;
use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
use core::intrinsics::arith_offset;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop};
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::num::NonZeroUsize;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@ -38,7 +41,9 @@ pub struct IntoIter<
// to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
pub(super) end: *const T,
pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
// ptr == end is a quick test for the Iterator being empty, that works
// for both ZST and non-ZST.
}
#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
@ -95,13 +100,16 @@ impl<T, A: Allocator> IntoIter<T, A> {
}
/// Drops remaining elements and relinquishes the backing allocation.
/// This method guarantees it won't panic before relinquishing
/// the backing allocation.
///
/// This is roughly equivalent to the following, but more efficient
///
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
/// (&mut into_iter).for_each(core::mem::drop);
/// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
/// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
/// (&mut into_iter).for_each(drop);
/// std::mem::forget(into_iter);
/// ```
///
/// This method is used by in-place iteration, refer to the vec::in_place_collect
@ -118,15 +126,45 @@ impl<T, A: Allocator> IntoIter<T, A> {
self.ptr = self.buf.as_ptr();
self.end = self.buf.as_ptr();
// Dropping the remaining elements can panic, so this needs to be
// done only after updating the other fields.
unsafe {
ptr::drop_in_place(remaining);
}
}
/// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
#[cfg(not(no_global_oom_handling))]
pub(crate) fn forget_remaining_elements(&mut self) {
self.ptr = self.end;
// For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
// `ptr` must stay aligned, while `end` may be unaligned.
self.end = self.ptr;
}
#[cfg(not(no_global_oom_handling))]
#[inline]
pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
// Keep our `Drop` impl from dropping the elements and the allocator
let mut this = ManuallyDrop::new(self);
// SAFETY: This allocation originally came from a `Vec`, so it passes
// all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
// so the `sub_ptr`s below cannot wrap, and will produce a well-formed
// range. `end` ≤ `buf + cap`, so the range will be in-bounds.
// Taking `alloc` is ok because nothing else is going to look at it,
// since our `Drop` impl isn't going to run so there's no more code.
unsafe {
let buf = this.buf.as_ptr();
let initialized = if T::IS_ZST {
// All the pointers are the same for ZSTs, so it's fine to
// say that they're all at the beginning of the "allocation".
0..this.len()
} else {
this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf)
};
let cap = this.cap;
let alloc = ManuallyDrop::take(&mut this.alloc);
VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
}
}
}
@ -148,19 +186,18 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
if self.ptr as *const _ == self.end {
if self.ptr == self.end {
None
} else if mem::size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
} else if T::IS_ZST {
// `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
// reducing the `end`.
self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
self.ptr = unsafe { self.ptr.offset(1) };
self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
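The effect of this encoding is visible from safe code; a small sketch:

```rust
fn main() {
    // With a zero-sized element type the data pointer can never advance, so
    // the length is folded into `end` (end = ptr + len in byte terms) and
    // each `next` shrinks `end` by one byte until `ptr == end` reports empty.
    let v = vec![(), (), ()];
    let mut it = v.into_iter();
    assert_eq!(it.len(), 3);
    assert_eq!(it.next(), Some(()));
    assert_eq!(it.len(), 2);
    assert_eq!(it.by_ref().count(), 2);
    assert_eq!(it.next(), None);
}
```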
@ -168,7 +205,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let exact = if mem::size_of::<T>() == 0 {
let exact = if T::IS_ZST {
self.end.addr().wrapping_sub(self.ptr.addr())
} else {
unsafe { self.end.sub_ptr(self.ptr) }
@ -177,14 +214,12 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
fn advance_by(&mut self, n: usize) -> Result<(), usize> {
fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if mem::size_of::<T>() == 0 {
// SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
// effectively results in unsigned pointers representing positions 0..usize::MAX,
// which is valid for ZSTs.
self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
if T::IS_ZST {
// See `next` for why we sub `end` here.
self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@ -193,10 +228,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
if step_size < n {
return Err(step_size);
}
Ok(())
NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
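A sketch of the new `advance_by` contract, assuming a nightly toolchain with the unstable `iter_advance_by` feature: on a short iterator the error now carries the shortfall as a `NonZeroUsize` rather than the old "elements advanced" `usize`:

```rust
#![feature(iter_advance_by)]

use std::num::NonZeroUsize;

fn main() {
    // Enough elements: the advance succeeds and drops the skipped items.
    let mut it = vec![1, 2, 3].into_iter();
    assert_eq!(it.advance_by(2), Ok(()));
    assert_eq!(it.next(), Some(3));

    // Not enough elements: asked for 5, only 3 available, shortfall of 2.
    let mut it = vec![1, 2, 3].into_iter();
    assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(2).unwrap()));
}
```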
#[inline]
@ -204,6 +236,43 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
self.len()
}
#[inline]
fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> {
let mut raw_ary = MaybeUninit::uninit_array();
let len = self.len();
if T::IS_ZST {
if len < N {
self.forget_remaining_elements();
// Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
}
self.end = self.end.wrapping_byte_sub(N);
// Safety: ditto
return Ok(unsafe { raw_ary.transpose().assume_init() });
}
if len < N {
// Safety: `len` indicates that this many elements are available and we just checked that
// it fits into the array.
unsafe {
ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, len);
self.forget_remaining_elements();
return Err(array::IntoIter::new_unchecked(raw_ary, 0..len));
}
}
// Safety: `len` is larger than the array size. Copy a fixed amount here to fully initialize
// the array.
return unsafe {
ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N);
self.ptr = self.ptr.add(N);
Ok(raw_ary.transpose().assume_init())
};
}
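A usage sketch of `next_chunk`, assuming a nightly toolchain with the unstable `iter_next_chunk` feature:

```rust
#![feature(iter_next_chunk)]

fn main() {
    let mut it = vec![1u8, 2, 3, 4, 5].into_iter();

    // Full chunks come back as fixed-size arrays, copied straight out of
    // the buffer by the specialized implementation above.
    assert_eq!(it.next_chunk::<2>().unwrap(), [1, 2]);

    // A short tail comes back as the Err variant: an array::IntoIter over
    // the remaining (here: three) elements.
    let rest = it.next_chunk::<4>().unwrap_err();
    assert_eq!(rest.as_slice(), &[3, 4, 5]);
}
```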
unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
where
Self: TrustedRandomAccessNoCoerce,
@ -217,7 +286,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
unsafe {
if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
}
}
}
@ -228,40 +297,35 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
} else if mem::size_of::<T>() == 0 {
} else if T::IS_ZST {
// See above for why 'ptr.offset' isn't used
self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
self.end = unsafe { self.end.offset(-1) };
self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
}
#[inline]
fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
if mem::size_of::<T>() == 0 {
if T::IS_ZST {
// SAFETY: same as for advance_by()
self.end = unsafe {
arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
}
self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: same as for advance_by()
self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
unsafe {
ptr::drop_in_place(to_drop);
}
if step_size < n {
return Err(step_size);
}
Ok(())
NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
}
@ -278,6 +342,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T, A> Default for IntoIter<T, A>
where
A: Allocator + Default,
{
/// Creates an empty `vec::IntoIter`.
///
/// ```
/// # use std::vec;
/// let iter: vec::IntoIter<u8> = Default::default();
/// assert_eq!(iter.len(), 0);
/// assert_eq!(iter.as_slice(), &[]);
/// ```
fn default() -> Self {
super::Vec::new_in(Default::default()).into_iter()
}
}
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]

View file

@ -1,8 +1,11 @@
use core::num::{Saturating, Wrapping};
use crate::boxed::Box;
#[rustc_specialization_trait]
pub(super) unsafe trait IsZero {
/// Whether this value's representation is all zeros
/// Whether this value's representation is all zeros,
/// or can be represented with all zeroes.
fn is_zero(&self) -> bool;
}
@ -17,12 +20,14 @@ macro_rules! impl_is_zero {
};
}
impl_is_zero!(i8, |x| x == 0); // Needed to impl for arrays and tuples of i8.
impl_is_zero!(i16, |x| x == 0);
impl_is_zero!(i32, |x| x == 0);
impl_is_zero!(i64, |x| x == 0);
impl_is_zero!(i128, |x| x == 0);
impl_is_zero!(isize, |x| x == 0);
impl_is_zero!(u8, |x| x == 0); // Needed to impl for arrays and tuples of u8.
impl_is_zero!(u16, |x| x == 0);
impl_is_zero!(u32, |x| x == 0);
impl_is_zero!(u64, |x| x == 0);
@ -53,16 +58,42 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
#[inline]
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
// it's worth doing if the array is really long. The threshold here
// is largely arbitrary, but was picked because as of 2022-05-01 LLVM
// can const-fold the check in `vec![[0; 32]; n]` but not in
// `vec![[0; 64]; n]`: https://godbolt.org/z/WTzjzfs5b
// it's worth doing if the array is really long. The threshold here
// is largely arbitrary, but was picked because as of 2022-07-01 LLVM
// fails to const-fold the check in `vec![[1; 32]; n]`
// See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
// Feel free to tweak if you have better evidence.
N <= 32 && self.iter().all(IsZero::is_zero)
N <= 16 && self.iter().all(IsZero::is_zero)
}
}
// This is a recursive macro.
macro_rules! impl_for_tuples {
// Stopper
() => {
        // There is no use in implementing for the empty tuple because it is a ZST.
};
($first_arg:ident $(,$rest:ident)*) => {
unsafe impl <$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*){
#[inline]
fn is_zero(&self) -> bool{
                // Destructure the tuple into N references.
                // Rust allows shadowing generic params with local variable names.
#[allow(non_snake_case)]
let ($first_arg, $($rest,)*) = self;
$first_arg.is_zero()
$( && $rest.is_zero() )*
}
}
impl_for_tuples!($($rest),*);
}
}
impl_for_tuples!(A, B, C, D, E, F, G, H);
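// Not part of the diff: for illustration, the two-element expansion of the
// macro above is roughly the impl below (kept commented out, since the macro
// already generates it for tuples of up to eight elements):
//
// unsafe impl<A: IsZero, B: IsZero> IsZero for (A, B) {
//     #[inline]
//     fn is_zero(&self) -> bool {
//         let (a, b) = self;
//         a.is_zero() && b.is_zero()
//     }
// }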
// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
// For fat pointers, the bytes that would be the pointer metadata in the `Some`
// variant are padding in the `None` variant, so ignoring them and
@ -116,3 +147,56 @@ impl_is_zero_option_of_nonzero!(
NonZeroUsize,
NonZeroIsize,
);
macro_rules! impl_is_zero_option_of_num {
($($t:ty,)+) => {$(
unsafe impl IsZero for Option<$t> {
#[inline]
fn is_zero(&self) -> bool {
const {
let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
assert!(none.is_none());
}
self.is_none()
}
}
)+};
}
impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,);
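// Not part of the diff: a hedged sketch of the layout fact the `const` block
// above verifies at compile time (the function name is made up for
// illustration). `Option<u32>` has no niche, so it is strictly larger than
// `u32`, and the all-zero bit pattern decoding as `None` is a property of the
// current discriminant layout that has to be checked rather than assumed.
fn _option_num_layout_demo() {
    assert!(core::mem::size_of::<Option<u32>>() > core::mem::size_of::<u32>());
}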
unsafe impl<T: IsZero> IsZero for Wrapping<T> {
#[inline]
fn is_zero(&self) -> bool {
self.0.is_zero()
}
}
unsafe impl<T: IsZero> IsZero for Saturating<T> {
#[inline]
fn is_zero(&self) -> bool {
self.0.is_zero()
}
}
macro_rules! impl_for_optional_bool {
($($t:ty,)+) => {$(
unsafe impl IsZero for $t {
#[inline]
fn is_zero(&self) -> bool {
// SAFETY: This is *not* a stable layout guarantee, but
// inside `core` we're allowed to rely on the current rustc
// behaviour that options of bools will be one byte with
// no padding, so long as they're nested less than 254 deep.
let raw: u8 = unsafe { core::mem::transmute(*self) };
raw == 0
}
}
)+};
}
impl_for_optional_bool! {
Option<bool>,
Option<Option<bool>>,
Option<Option<Option<bool>>>,
// Could go further, but not worth the metadata overhead
}
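// Not part of the diff: sanity-checking the one-byte assumption documented
// above (function name made up for illustration). On current rustc, `bool`'s
// niche lets every nesting level reuse the same byte, so none of these types
// have any padding.
fn _optional_bool_layout_demo() {
    assert_eq!(core::mem::size_of::<Option<bool>>(), 1);
    assert_eq!(core::mem::size_of::<Option<Option<bool>>>(), 1);
    assert_eq!(core::mem::size_of::<Option<Option<Option<bool>>>>(), 1);
}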

View file

@ -56,15 +56,11 @@
#[cfg(not(no_global_oom_handling))]
use core::cmp;
use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::{arith_offset, assume};
use core::iter;
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
@ -125,7 +121,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
use self::in_place_drop::InPlaceDrop;
use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@ -166,7 +162,7 @@ mod spec_extend;
/// vec[0] = 7;
/// assert_eq!(vec[0], 7);
///
/// vec.extend([1, 2, 3].iter().copied());
/// vec.extend([1, 2, 3]);
///
/// for x in &vec {
/// println!("{x}");
@ -378,8 +374,8 @@ mod spec_extend;
/// Currently, `Vec` does not guarantee the order in which elements are dropped.
/// The order has changed in the past and may change again.
///
/// [`get`]: ../../std/vec/struct.Vec.html#method.get
/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
/// [`get`]: slice::get
/// [`get_mut`]: slice::get_mut
/// [`String`]: crate::string::String
/// [`&str`]: type@str
/// [`shrink_to_fit`]: Vec::shrink_to_fit
@ -436,7 +432,7 @@ impl<T> Vec<T> {
/// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
/// If it is imporant to know the exact allocated capacity of a `Vec`,
/// If it is important to know the exact allocated capacity of a `Vec`,
/// always use the [`capacity`] method after construction.
///
/// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
@ -483,15 +479,15 @@ impl<T> Vec<T> {
Self::with_capacity_in(capacity, Global)
}
/// Creates a `Vec<T>` directly from the raw components of another vector.
/// Creates a `Vec<T>` directly from a pointer, a capacity, and a length.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
/// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
/// (at least, it's highly likely to be incorrect if it wasn't).
/// * `ptr` must have been allocated using the global allocator, such as via
/// the [`alloc::alloc`] function.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@ -500,6 +496,14 @@ impl<T> Vec<T> {
/// to be the same size as the pointer was allocated with. (Because similar to
/// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
/// * The first `length` values must be properly initialized values of type `T`.
/// * `capacity` needs to be the capacity that the pointer was allocated with.
/// * The allocated size in bytes must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// These requirements are always upheld by any `ptr` that has been allocated
/// via `Vec<T>`. Other allocation sources are allowed if the invariants are
/// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is normally **not** safe
@ -520,6 +524,7 @@ impl<T> Vec<T> {
/// function.
///
/// [`String`]: crate::string::String
/// [`alloc::alloc`]: crate::alloc::alloc
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
///
/// # Examples
@ -542,8 +547,8 @@ impl<T> Vec<T> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
/// for i in 0..len as isize {
/// ptr::write(p.offset(i), 4 + i);
/// for i in 0..len {
/// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@ -551,6 +556,32 @@ impl<T> Vec<T> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
///
/// Using memory that was allocated elsewhere:
///
/// ```rust
/// #![feature(allocator_api)]
///
/// use std::alloc::{AllocError, Allocator, Global, Layout};
///
/// fn main() {
/// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
///
/// let vec = unsafe {
/// let mem = match Global.allocate(layout) {
/// Ok(mem) => mem.cast::<u32>().as_ptr(),
/// Err(AllocError) => return,
/// };
///
/// mem.write(1_000_000);
///
/// Vec::from_raw_parts_in(mem, 1, 16, Global)
/// };
///
/// assert_eq!(vec, &[1_000_000]);
/// assert_eq!(vec.capacity(), 16);
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
@ -591,7 +622,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
/// If it is imporant to know the exact allocated capacity of a `Vec`,
/// If it is important to know the exact allocated capacity of a `Vec`,
/// always use the [`capacity`] method after construction.
///
/// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
@ -615,14 +646,14 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// // The vector contains no items, even though it has capacity for more
/// assert_eq!(vec.len(), 0);
/// assert_eq!(vec.capacity(), 10);
/// assert!(vec.capacity() >= 10);
///
/// // These are all done without reallocating...
/// for i in 0..10 {
/// vec.push(i);
/// }
/// assert_eq!(vec.len(), 10);
/// assert_eq!(vec.capacity(), 10);
/// assert!(vec.capacity() >= 10);
///
/// // ...but this may make the vector reallocate
/// vec.push(11);
@ -641,21 +672,31 @@ impl<T, A: Allocator> Vec<T, A> {
Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
}
/// Creates a `Vec<T, A>` directly from the raw components of another vector.
/// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
/// and an allocator.
///
/// # Safety
///
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
/// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
/// (at least, it's highly likely to be incorrect if it wasn't).
/// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
/// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
/// allocated and deallocated with the same layout.)
/// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
/// to be the same size as the pointer was allocated with. (Because similar to
/// alignment, [`dealloc`] must be called with the same layout `size`.)
/// * `length` needs to be less than or equal to `capacity`.
/// * `capacity` needs to be the capacity that the pointer was allocated with.
/// * The first `length` values must be properly initialized values of type `T`.
/// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with.
/// * The allocated size in bytes must be no larger than `isize::MAX`.
/// See the safety documentation of [`pointer::offset`].
///
/// These requirements are always upheld by any `ptr` that has been allocated
/// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are
/// upheld.
///
/// Violating these may cause problems like corrupting the allocator's
/// internal data structures. For example it is **not** safe
@ -673,6 +714,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// [`String`]: crate::string::String
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
/// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
/// [*fit*]: crate::alloc::Allocator#memory-fitting
///
/// # Examples
///
@ -702,8 +745,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
/// for i in 0..len as isize {
/// ptr::write(p.offset(i), 4 + i);
/// for i in 0..len {
/// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@ -711,6 +754,29 @@ impl<T, A: Allocator> Vec<T, A> {
/// assert_eq!(rebuilt, [4, 5, 6]);
/// }
/// ```
///
/// Using memory that was allocated elsewhere:
///
/// ```rust
/// use std::alloc::{alloc, Layout};
///
/// fn main() {
/// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
/// let vec = unsafe {
/// let mem = alloc(layout).cast::<u32>();
/// if mem.is_null() {
/// return;
/// }
///
/// mem.write(1_000_000);
///
/// Vec::from_raw_parts(mem, 1, 16)
/// };
///
/// assert_eq!(vec, &[1_000_000]);
/// assert_eq!(vec.capacity(), 16);
/// }
/// ```
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
@ -803,14 +869,15 @@ impl<T, A: Allocator> Vec<T, A> {
(ptr, len, capacity, alloc)
}
/// Returns the number of elements the vector can hold without
/// Returns the total number of elements the vector can hold without
/// reallocating.
///
/// # Examples
///
/// ```
/// let vec: Vec<i32> = Vec::with_capacity(10);
/// assert_eq!(vec.capacity(), 10);
/// let mut vec: Vec<i32> = Vec::with_capacity(10);
/// vec.push(42);
/// assert!(vec.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
@ -875,7 +942,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
/// `Ok(())`. Does nothing if capacity is already sufficient.
/// `Ok(())`. Does nothing if capacity is already sufficient. This method
/// preserves the contents even if an error occurs.
///
/// # Errors
///
@ -960,7 +1028,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
/// assert_eq!(vec.capacity(), 10);
/// assert!(vec.capacity() >= 10);
/// vec.shrink_to_fit();
/// assert!(vec.capacity() >= 3);
/// ```
@ -987,7 +1055,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
/// assert_eq!(vec.capacity(), 10);
/// assert!(vec.capacity() >= 10);
/// vec.shrink_to(4);
/// assert!(vec.capacity() >= 4);
/// vec.shrink_to(0);
@ -1003,7 +1071,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// Converts the vector into [`Box<[T]>`][owned slice].
///
/// Note that this will drop any excess capacity.
/// If the vector has excess capacity, its items will be moved into a
/// newly-allocated buffer with exactly the right capacity.
///
/// [owned slice]: Box
///
@ -1021,7 +1090,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
///
/// assert_eq!(vec.capacity(), 10);
/// assert!(vec.capacity() >= 10);
/// let slice = vec.into_boxed_slice();
/// assert_eq!(slice.into_vec().capacity(), 3);
/// ```
@ -1167,11 +1236,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
// `deref`, which creates an intermediate reference.
let ptr = self.buf.ptr();
unsafe {
assume(!ptr.is_null());
}
ptr
self.buf.ptr()
}
/// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
@ -1204,11 +1269,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
// `deref_mut`, which creates an intermediate reference.
let ptr = self.buf.ptr();
unsafe {
assume(!ptr.is_null());
}
ptr
self.buf.ptr()
}
/// Returns a reference to the underlying allocator.
@ -1393,7 +1454,7 @@ impl<T, A: Allocator> Vec<T, A> {
if index < len {
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
ptr::copy(p, p.offset(1), len - index);
ptr::copy(p, p.add(1), len - index);
} else if index == len {
// No elements need shifting.
} else {
@ -1455,7 +1516,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
ptr::copy(ptr.offset(1), ptr, len - index - 1);
ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@ -1773,6 +1834,51 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
/// Appends an element if there is sufficient spare capacity, otherwise an error is returned
/// with the element.
///
/// Unlike [`push`] this method will not reallocate when there's insufficient capacity.
/// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity.
///
/// [`push`]: Vec::push
/// [`reserve`]: Vec::reserve
/// [`try_reserve`]: Vec::try_reserve
///
/// # Examples
///
/// A manual, panic-free alternative to [`FromIterator`]:
///
/// ```
/// #![feature(vec_push_within_capacity)]
///
/// use std::collections::TryReserveError;
/// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> {
/// let mut vec = Vec::new();
/// for value in iter {
/// if let Err(value) = vec.push_within_capacity(value) {
/// vec.try_reserve(1)?;
    ///             // This cannot fail: the previous line either returned or added at least 1 free slot.
/// let _ = vec.push_within_capacity(value);
/// }
/// }
/// Ok(vec)
/// }
/// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100)));
/// ```
#[inline]
#[unstable(feature = "vec_push_within_capacity", issue = "100486")]
pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> {
if self.len == self.buf.capacity() {
return Err(value);
}
unsafe {
let end = self.as_mut_ptr().add(self.len);
ptr::write(end, value);
self.len += 1;
}
Ok(())
}
/// Removes the last element from a vector and returns it, or [`None`] if it
/// is empty.
///
@ -1888,9 +1994,7 @@ impl<T, A: Allocator> Vec<T, A> {
unsafe {
            // Set self.vec's length to start, to be safe in case Drain is leaked.
self.set_len(start);
// Use the borrow in the IterMut to indicate borrowing behavior of the
// whole Drain iterator (like &mut T).
let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
Drain {
tail_start: end,
tail_len: len - end,
@ -2053,7 +2157,7 @@ impl<T, A: Allocator> Vec<T, A> {
{
let len = self.len();
if new_len > len {
self.extend_with(new_len - len, ExtendFunc(f));
self.extend_trusted(iter::repeat_with(f).take(new_len - len));
} else {
self.truncate(new_len);
}
@ -2082,7 +2186,6 @@ impl<T, A: Allocator> Vec<T, A> {
/// static_ref[0] += 1;
/// assert_eq!(static_ref, &[2, 2, 3]);
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_leak", since = "1.47.0")]
#[inline]
pub fn leak<'a>(self) -> &'a mut [T]
@ -2252,7 +2355,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
let len = self.len();
if new_len > len {
self.extend_with(new_len - len, ExtendElement(value))
self.extend_with(new_len - len, value)
} else {
self.truncate(new_len);
}
@ -2314,7 +2417,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.reserve(range.len());
// SAFETY:
// - `slice::range` guarantees that the given range is valid for indexing self
// - `slice::range` guarantees that the given range is valid for indexing self
unsafe {
self.spec_extend_from_within(range);
}
@ -2346,7 +2449,7 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
#[unstable(feature = "slice_flatten", issue = "95629")]
pub fn into_flattened(self) -> Vec<T, A> {
let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
let (new_len, new_cap) = if T::IS_ZST {
(len.checked_mul(N).expect("vec len overflow"), usize::MAX)
} else {
// SAFETY:
@ -2366,36 +2469,10 @@ impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
}
}
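// Not part of the diff: a short usage sketch of `into_flattened` (gated on
// the unstable `slice_flatten` feature; the function name below is made up
// for illustration). `Vec::from` builds the input without the `vec!` macro.
fn _into_flattened_demo() {
    let v: Vec<[u8; 2]> = Vec::from([[1, 2], [3, 4]]);
    assert_eq!(v.into_flattened(), Vec::from([1u8, 2, 3, 4]));
}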
// This code generalizes `extend_with_{element,default}`.
trait ExtendWith<T> {
fn next(&mut self) -> T;
fn last(self) -> T;
}
struct ExtendElement<T>(T);
impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
fn next(&mut self) -> T {
self.0.clone()
}
fn last(self) -> T {
self.0
}
}
struct ExtendFunc<F>(F);
impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
fn next(&mut self) -> T {
(self.0)()
}
fn last(mut self) -> T {
(self.0)()
}
}
impl<T, A: Allocator> Vec<T, A> {
impl<T: Clone, A: Allocator> Vec<T, A> {
#[cfg(not(no_global_oom_handling))]
/// Extend the vector by `n` values, using the given generator.
fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
/// Extend the vector by `n` clones of value.
fn extend_with(&mut self, n: usize, value: T) {
self.reserve(n);
unsafe {
@ -2407,15 +2484,15 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
ptr = ptr.offset(1);
// Increment the length in every step in case next() panics
ptr::write(ptr, value.clone());
ptr = ptr.add(1);
// Increment the length in every step in case clone() panics
local_len.increment_len(1);
}
if n > 0 {
// We can write the last element directly without cloning needlessly
ptr::write(ptr, value.last());
ptr::write(ptr, value);
local_len.increment_len(1);
}
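            // Not part of the diff, just an illustration of the panic-safety
            // argument: suppose `n == 4` and the third `clone()` panics.
            //
            // // reserve(4) succeeded; `ptr` points at the first spare slot
            // // write clone #1, local_len = 1
            // // write clone #2, local_len = 2
            // // clone() for #3 panics -> unwinding runs SetLenOnDrop::drop,
            // //                          which flushes `len = 2` back to the vector
            //
            // Exactly the two fully-written clones are then dropped with the
            // vector: no leak, and no drop of uninitialized memory.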
@ -2479,7 +2556,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
// SAFETY:
// - caller guaratees that src is a valid index
// - caller guarantees that src is a valid index
let to_clone = unsafe { this.get_unchecked(src) };
iter::zip(to_clone, spare)
@ -2498,7 +2575,7 @@ impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
let (init, spare) = self.split_at_spare_mut();
// SAFETY:
// - caller guaratees that `src` is a valid index
// - caller guarantees that `src` is a valid index
let source = unsafe { init.get_unchecked(src) };
// SAFETY:
@ -2541,35 +2618,6 @@ impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
}
}
#[cfg(not(no_global_oom_handling))]
trait SpecCloneFrom {
fn clone_from(this: &mut Self, other: &Self);
}
#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
default fn clone_from(this: &mut Self, other: &Self) {
// drop anything that will not be overwritten
this.truncate(other.len());
// self.len <= other.len due to the truncate above, so the
// slices here are always in-bounds.
let (init, tail) = other.split_at(this.len());
// reuse the contained values' allocations/resources.
this.clone_from_slice(init);
this.extend_from_slice(tail);
}
}
#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
fn clone_from(this: &mut Self, other: &Self) {
this.clear();
this.extend_from_slice(other);
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
@ -2581,7 +2629,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Instead use the
// `slice::to_vec` function which is only available with cfg(test)
// `slice::to_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
fn clone(&self) -> Self {
@ -2590,7 +2638,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
}
fn clone_from(&mut self, other: &Self) {
SpecCloneFrom::clone_from(self, other)
crate::slice::SpecCloneIntoVec::clone_into(other.as_slice(), self);
}
}
@ -2598,7 +2646,6 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// as required by the `core::borrow::Borrow` implementation.
///
/// ```
/// #![feature(build_hasher_simple_hash_one)]
/// use std::hash::BuildHasher;
///
/// let b = std::collections::hash_map::RandomState::new();
@ -2671,13 +2718,13 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
/// assert_eq!(v_iter.next(), None);
/// ```
#[inline]
fn into_iter(self) -> IntoIter<T, A> {
fn into_iter(self) -> Self::IntoIter {
unsafe {
let mut me = ManuallyDrop::new(self);
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
let begin = me.as_mut_ptr();
let end = if mem::size_of::<T>() == 0 {
arith_offset(begin as *const i8, me.len() as isize) as *const T
let end = if T::IS_ZST {
begin.wrapping_byte_add(me.len())
} else {
begin.add(me.len()) as *const T
};
@ -2699,7 +2746,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
type Item = &'a T;
type IntoIter = slice::Iter<'a, T>;
fn into_iter(self) -> slice::Iter<'a, T> {
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
@ -2709,7 +2756,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
type Item = &'a mut T;
type IntoIter = slice::IterMut<'a, T>;
fn into_iter(self) -> slice::IterMut<'a, T> {
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
@ -2761,6 +2808,40 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
    // Specific extend for `TrustedLen` iterators, called both by the specializations
    // and by internal places where resolving specialization makes compilation slower.
#[cfg(not(no_global_oom_handling))]
fn extend_trusted(&mut self, iterator: impl iter::TrustedLen<Item = T>) {
let (low, high) = iterator.size_hint();
if let Some(additional) = high {
debug_assert_eq!(
low,
additional,
"TrustedLen iterator's size hint is not exact: {:?}",
(low, high)
);
self.reserve(additional);
unsafe {
let ptr = self.as_mut_ptr();
let mut local_len = SetLenOnDrop::new(&mut self.len);
iterator.for_each(move |element| {
ptr::write(ptr.add(local_len.current_len()), element);
// Since the loop executes user code which can panic we have to update
// the length every step to correctly drop what we've written.
// NB can't overflow since we would have had to alloc the address space
local_len.increment_len(1);
});
}
} else {
// Per TrustedLen contract a `None` upper bound means that the iterator length
// truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
// Since the other branch already panics eagerly (via `reserve()`) we do the same here.
// This avoids additional codegen for a fallback code path which would eventually
// panic anyway.
panic!("capacity overflow");
}
}
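    // Not part of the diff: a tiny illustration of the `TrustedLen` guarantee
    // that the `debug_assert_eq!` above relies on (the function name is made
    // up). `Range<i32>` and its `map` adapter are `TrustedLen`, so the upper
    // bound of `size_hint` is exact.
    fn _trusted_len_hint_demo() {
        let it = (0..5).map(|x| x * 2);
        assert_eq!(it.size_hint(), (5, Some(5)));
    }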
/// Creates a splicing iterator that replaces the specified range in the vector
/// with the given `replace_with` iterator and yields the removed items.
/// `replace_with` does not need to be the same length as `range`.
@ -2889,7 +2970,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
}
}
/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[inline]
@ -2901,7 +2982,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
#[inline]
@ -2924,9 +3005,10 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for Vec<T> {
impl<T> Default for Vec<T> {
/// Creates an empty `Vec<T>`.
///
/// The vector will not allocate until elements are pushed onto it.
fn default() -> Vec<T> {
Vec::new()
}
@ -3019,10 +3101,7 @@ impl<T, const N: usize> From<[T; N]> for Vec<T> {
/// ```
#[cfg(not(test))]
fn from(s: [T; N]) -> Vec<T> {
<[T]>::into_vec(
#[rustc_box]
Box::new(s),
)
<[T]>::into_vec(Box::new(s))
}
#[cfg(test)]
@ -3046,8 +3125,8 @@ where
///
/// ```
/// # use std::borrow::Cow;
/// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
/// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
/// let o: Cow<'_, [i32]> = Cow::Owned(vec![1, 2, 3]);
/// let b: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
/// assert_eq!(Vec::from(o), Vec::from(b));
/// ```
fn from(s: Cow<'a, [T]>) -> Vec<T> {
@ -3055,7 +3134,7 @@ where
}
}
// note: test pulls in libstd, which causes errors here
// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "vec_from_box", since = "1.18.0")]
impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
@ -3073,7 +3152,7 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
}
}
// note: test pulls in libstd, which causes errors here
// note: test pulls in std, which causes errors here
#[cfg(not(no_global_oom_handling))]
#[cfg(not(test))]
#[stable(feature = "box_from_vec", since = "1.20.0")]
@ -3088,6 +3167,14 @@ impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
/// ```
/// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
/// ```
///
/// Any excess capacity is removed:
/// ```
/// let mut vec = Vec::with_capacity(10);
/// vec.extend([1, 2, 3]);
///
/// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
/// ```
fn from(v: Vec<T, A>) -> Self {
v.into_boxed_slice()
}

View file

@ -18,6 +18,11 @@ impl<'a> SetLenOnDrop<'a> {
pub(super) fn increment_len(&mut self, increment: usize) {
self.local_len += increment;
}
#[inline]
pub(super) fn current_len(&self) -> usize {
self.local_len
}
}
impl Drop for SetLenOnDrop<'_> {

View file

@ -1,9 +1,8 @@
use crate::alloc::Allocator;
use core::iter::TrustedLen;
use core::ptr::{self};
use core::slice::{self};
use super::{IntoIter, SetLenOnDrop, Vec};
use super::{IntoIter, Vec};
// Specialization trait used for Vec::extend
pub(super) trait SpecExtend<T, I> {
@ -24,36 +23,7 @@ where
I: TrustedLen<Item = T>,
{
default fn spec_extend(&mut self, iterator: I) {
// This is the case for a TrustedLen iterator.
let (low, high) = iterator.size_hint();
if let Some(additional) = high {
debug_assert_eq!(
low,
additional,
"TrustedLen iterator's size hint is not exact: {:?}",
(low, high)
);
self.reserve(additional);
unsafe {
let mut ptr = self.as_mut_ptr().add(self.len());
let mut local_len = SetLenOnDrop::new(&mut self.len);
iterator.for_each(move |element| {
ptr::write(ptr, element);
ptr = ptr.offset(1);
// Since the loop executes user code which can panic we have to bump the pointer
// after each step.
// NB can't overflow since we would have had to alloc the address space
local_len.increment_len(1);
});
}
} else {
// Per TrustedLen contract a `None` upper bound means that the iterator length
// truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
// Since the other branch already panics eagerly (via `reserve()`) we do the same here.
// This avoids additional codegen for a fallback code path which would eventually
// panic anyway.
panic!("capacity overflow");
}
self.extend_trusted(iterator)
}
}

View file

@ -1,8 +1,9 @@
use core::ptr;
use crate::alloc::Allocator;
use crate::raw_vec::RawVec;
use core::ptr::{self};
use super::{ExtendElement, IsZero, Vec};
use super::{IsZero, Vec};
// Specialization trait used for Vec::from_elem
pub(super) trait SpecFromElem: Sized {
@ -12,7 +13,19 @@ pub(super) trait SpecFromElem: Sized {
impl<T: Clone> SpecFromElem for T {
default fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A> {
let mut v = Vec::with_capacity_in(n, alloc);
v.extend_with(n, ExtendElement(elem));
v.extend_with(n, elem);
v
}
}
impl<T: Clone + IsZero> SpecFromElem for T {
#[inline]
default fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
if elem.is_zero() {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
}
let mut v = Vec::with_capacity_in(n, alloc);
v.extend_with(n, elem);
v
}
}
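// Not part of the diff: the observable effect of the `IsZero` specialization
// above (function name made up for illustration). `from_elem` powers
// `vec![elem; n]`, and a zero element lets it take the zeroed-allocation
// shortcut; the resulting vector is identical either way.
fn _from_elem_zero_demo() {
    let zeros = vec![0u32; 4]; // hits the `is_zero()` fast path
    let ones = vec![1u32; 4]; // falls back to `extend_with`
    assert_eq!(zeros, [0, 0, 0, 0]);
    assert_eq!(ones, [1, 1, 1, 1]);
}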
@ -46,15 +59,3 @@ impl SpecFromElem for u8 {
}
}
}
impl<T: Clone + IsZero> SpecFromElem for T {
#[inline]
fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
if elem.is_zero() {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
}
let mut v = Vec::with_capacity_in(n, alloc);
v.extend_with(n, ExtendElement(elem));
v
}
}

View file

@ -14,7 +14,7 @@ use super::{Drain, Vec};
/// ```
/// let mut v = vec![0, 1, 2];
/// let new = [7, 8];
/// let iter: std::vec::Splice<_> = v.splice(1.., new);
/// let iter: std::vec::Splice<'_, _> = v.splice(1.., new);
/// ```
#[derive(Debug)]
#[stable(feature = "vec_splice", since = "1.21.0")]
@ -54,6 +54,12 @@ impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
fn drop(&mut self) {
self.drain.by_ref().for_each(drop);
        // At this point draining is done and the only remaining tasks are splicing
        // and moving things into the final place.
        // This means we can replace the slice::Iter with pointers that won't point to
        // deallocated memory, so that Drain::drop is still allowed to call iter.len();
        // otherwise it would break the ptr.sub_ptr contract.
self.drain.iter = (&[]).iter();
unsafe {
if self.drain.tail_len == 0 {