
Changes according to code review

Markus Everling 2022-11-25 03:36:11 +01:00
parent a1bf25e2bd
commit ecca8c5328
6 changed files with 180 additions and 143 deletions

library/alloc/src/collections/vec_deque/drain.rs

@@ -53,27 +53,36 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
     }
 
     // Only returns pointers to the slices, as that's
-    // all we need to drop them
-    fn as_slices(&self) -> (*mut [T], *mut [T]) {
+    // all we need to drop them. May only be called if `self.remaining != 0`.
+    unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
         unsafe {
             let deque = self.deque.as_ref();
-            let wrapped_start = deque.wrap_idx(self.idx);
 
-            if self.remaining <= deque.capacity() - wrapped_start {
-                // there's only one contiguous slice
-                (
-                    ptr::slice_from_raw_parts_mut(deque.ptr().add(wrapped_start), self.remaining),
-                    &mut [] as *mut [T],
-                )
-            } else {
-                let head_len = deque.capacity() - wrapped_start;
-                // this will never overflow due to the if condition
-                let tail_len = self.remaining - head_len;
-                (
-                    ptr::slice_from_raw_parts_mut(deque.ptr().add(wrapped_start), head_len),
-                    ptr::slice_from_raw_parts_mut(deque.ptr(), tail_len),
-                )
-            }
+            // FIXME: This is doing almost exactly the same thing as the else branch in
+            // `VecDeque::slice_ranges`. Unfortunately, we can't just call `slice_ranges`
+            // here, as the deque's `len` is currently just `drain_start`, so the range
+            // check would (almost) always panic. Between temporarily adjusting the
+            // deque's `len` to call `slice_ranges`, and just copy-pasting the
+            // `slice_ranges` implementation, this seemed like the less hacky solution,
+            // though it might be good to find a better one in the future.
+
+            // Because `self.remaining != 0`, we know that `self.idx < deque.original_len`,
+            // so it's a valid logical index.
+            let wrapped_start = deque.to_physical_idx(self.idx);
+
+            let head_len = deque.capacity() - wrapped_start;
+
+            let (a_range, b_range) = if head_len >= self.remaining {
+                (wrapped_start..wrapped_start + self.remaining, 0..0)
+            } else {
+                let tail_len = self.remaining - head_len;
+                (wrapped_start..deque.capacity(), 0..tail_len)
+            };
+
+            // SAFETY: the range `self.idx..self.idx + self.remaining` lies strictly inside
+            // the range `0..deque.original_len`. Because of this, and because we acquire
+            // `a_range` and `b_range` exactly like `slice_ranges` would, it's guaranteed
+            // that `a_range` and `b_range` represent valid ranges into the deque's buffer.
+            (deque.buffer_range(a_range), deque.buffer_range(b_range))
         }
     }
@@ -103,8 +112,9 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
         impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
             fn drop(&mut self) {
                 if self.0.remaining != 0 {
-                    let (front, back) = self.0.as_slices();
                     unsafe {
+                        // SAFETY: We just checked that `self.remaining != 0`.
+                        let (front, back) = self.0.as_slices();
                         ptr::drop_in_place(front);
                         ptr::drop_in_place(back);
                     }
@@ -133,7 +143,7 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
                 source_deque.len = 0;
             }
             (0, _) => {
-                source_deque.head = source_deque.wrap_idx(drain_len);
+                source_deque.head = source_deque.to_physical_idx(drain_len);
                 source_deque.len = orig_len - drain_len;
             }
             (_, 0) => {
@@ -143,15 +153,15 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
                 if head_len <= tail_len {
                     source_deque.wrap_copy(
                         source_deque.head,
-                        source_deque.wrap_idx(drain_len),
+                        source_deque.to_physical_idx(drain_len),
                         head_len,
                     );
-                    source_deque.head = source_deque.wrap_idx(drain_len);
+                    source_deque.head = source_deque.to_physical_idx(drain_len);
                     source_deque.len = orig_len - drain_len;
                 } else {
                     source_deque.wrap_copy(
-                        source_deque.wrap_idx(head_len + drain_len),
-                        source_deque.wrap_idx(head_len),
+                        source_deque.to_physical_idx(head_len + drain_len),
+                        source_deque.to_physical_idx(head_len),
                         tail_len,
                     );
                     source_deque.len = orig_len - drain_len;
@@ -162,14 +172,17 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
         }
 
         let guard = DropGuard(self);
-        let (front, back) = guard.0.as_slices();
-        unsafe {
-            // since idx is a logical index, we don't need to worry about wrapping.
-            guard.0.idx += front.len();
-            guard.0.remaining -= front.len();
-            ptr::drop_in_place(front);
-            guard.0.remaining = 0;
-            ptr::drop_in_place(back);
+        if guard.0.remaining != 0 {
+            unsafe {
+                // SAFETY: We just checked that `self.remaining != 0`.
+                let (front, back) = guard.0.as_slices();
+                // since idx is a logical index, we don't need to worry about wrapping.
+                guard.0.idx += front.len();
+                guard.0.remaining -= front.len();
+                ptr::drop_in_place(front);
+                guard.0.remaining = 0;
+                ptr::drop_in_place(back);
+            }
         }
 
         // Dropping `guard` handles moving the remaining elements into place.
@@ -185,7 +198,7 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
         if self.remaining == 0 {
             return None;
         }
-        let wrapped_idx = unsafe { self.deque.as_ref().wrap_idx(self.idx) };
+        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx) };
         self.idx += 1;
         self.remaining -= 1;
         Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
@@ -206,7 +219,7 @@ impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
             return None;
         }
         self.remaining -= 1;
-        let wrapped_idx = unsafe { self.deque.as_ref().wrap_idx(self.idx + self.remaining) };
+        let wrapped_idx = unsafe { self.deque.as_ref().to_physical_idx(self.idx + self.remaining) };
         Some(unsafe { self.deque.as_mut().buffer_read(wrapped_idx) })
     }
 }
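For context on the `DropGuard` hunks above: the guard handles a `Drain` that is dropped while `remaining != 0`, in which case the elements that were never yielded still have to be dropped through the `front`/`back` pointer slices before the deque is stitched back together. A sketch of the observable behavior, using only the stable `VecDeque` API (not code from this commit):

use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = (0..8).collect();

    // Drain logical indices 2..6, but only consume two of the four
    // elements before dropping the iterator early.
    let mut drain = deque.drain(2..6);
    assert_eq!(drain.next(), Some(2));
    assert_eq!(drain.next_back(), Some(5));
    drop(drain); // `remaining != 0`: elements 3 and 4 are dropped here

    // The undrained head and tail are moved back together.
    assert_eq!(deque.into_iter().collect::<Vec<_>>(), vec![0, 1, 6, 7]);
}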

library/alloc/src/collections/vec_deque/iter.rs

@@ -39,15 +39,6 @@ impl<T> Clone for Iter<'_, T> {
 impl<'a, T> Iterator for Iter<'a, T> {
     type Item = &'a T;
 
-    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
-        let m = match self.i1.advance_by(n) {
-            Ok(_) => return Ok(()),
-            Err(m) => m,
-        };
-        mem::swap(&mut self.i1, &mut self.i2);
-        self.i1.advance_by(n - m).map_err(|o| o + m)
-    }
-
     #[inline]
     fn next(&mut self) -> Option<&'a T> {
         match self.i1.next() {
@@ -64,6 +55,15 @@ impl<'a, T> Iterator for Iter<'a, T> {
         }
     }
 
+    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+        let m = match self.i1.advance_by(n) {
+            Ok(_) => return Ok(()),
+            Err(m) => m,
+        };
+        mem::swap(&mut self.i1, &mut self.i2);
+        self.i1.advance_by(n - m).map_err(|o| o + m)
+    }
+
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
         let len = self.len();
@@ -75,17 +75,16 @@ impl<'a, T> Iterator for Iter<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i1.fold(accum, &mut f);
-        self.i2.fold(accum, f)
+        self.i2.fold(accum, &mut f)
     }
 
     fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i1.try_fold(init, &mut f)?;
-        self.i2.try_fold(acc, f)
+        self.i2.try_fold(acc, &mut f)
     }
 
     #[inline]
@@ -117,7 +116,7 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
             None => {
                 // most of the time, the iterator will either always
                 // call next(), or always call next_back(). By swapping
-                // the iterators once the first one is empty, we ensure
+                // the iterators once the second one is empty, we ensure
                 // that the first branch is taken as often as possible,
                 // without sacrificing correctness, as i2 is empty anyways
                 mem::swap(&mut self.i1, &mut self.i2);
@@ -141,17 +140,16 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i2.rfold(accum, &mut f);
-        self.i1.rfold(accum, f)
+        self.i1.rfold(accum, &mut f)
     }
 
     fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i2.try_rfold(init, &mut f)?;
-        self.i1.try_rfold(acc, f)
+        self.i1.try_rfold(acc, &mut f)
     }
 }
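`Iter` is a pair of slice iterators, `i1` over the front slice and `i2` over the wrapped-around back slice; the overrides above (the relocated `advance_by`, and the folds passing `&mut f` so the closure survives for the second half) exist so bulk operations cross that seam correctly. A stable-Rust sketch of the behavior being preserved, using `nth` and `fold` (it assumes `with_capacity(8)` allocates exactly eight slots, which the assert checks):

use std::collections::VecDeque;

fn main() {
    // Build a deque whose contents wrap around the buffer end, so that
    // `iter()` really is two slice iterators glued together.
    let mut deque: VecDeque<i32> = VecDeque::with_capacity(8);
    deque.extend(0..8);  // fill the buffer
    deque.drain(..4);    // advance `head` past the middle
    deque.extend(8..12); // these writes wrap to the buffer's start
    let (front, back) = deque.as_slices();
    assert!(!back.is_empty(), "we want a wrapped deque for this demo");

    // `nth` (like `advance_by`) must skip across the seam: skipping more
    // elements than `front` holds continues into `back`.
    let mut it = deque.iter();
    assert_eq!(it.nth(front.len() + 1), Some(&back[1]));

    // `fold` exhausts `i1`, then folds `i2`; passing `&mut f` to the first
    // call is what lets the same closure be reused for the second half.
    let sum: i32 = deque.iter().fold(0, |acc, &x| acc + x);
    assert_eq!(sum, (4..12).sum::<i32>());
}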

library/alloc/src/collections/vec_deque/iter_mut.rs

@@ -67,17 +67,16 @@ impl<'a, T> Iterator for IterMut<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i1.fold(accum, &mut f);
-        self.i2.fold(accum, f)
+        self.i2.fold(accum, &mut f)
     }
 
     fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i1.try_fold(init, &mut f)?;
-        self.i2.try_fold(acc, f)
+        self.i2.try_fold(acc, &mut f)
     }
 
     #[inline]
@@ -133,17 +132,16 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
         F: FnMut(Acc, Self::Item) -> Acc,
     {
         let accum = self.i2.rfold(accum, &mut f);
-        self.i1.rfold(accum, f)
+        self.i1.rfold(accum, &mut f)
     }
 
     fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
     where
-        Self: Sized,
         F: FnMut(B, Self::Item) -> R,
         R: Try<Output = B>,
     {
         let acc = self.i2.try_rfold(init, &mut f)?;
-        self.i1.try_rfold(acc, f)
+        self.i1.try_rfold(acc, &mut f)
     }
 }

library/alloc/src/collections/vec_deque/mod.rs

@@ -91,7 +91,12 @@ pub struct VecDeque<
     T,
     #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
 > {
+    // `self[0]`, if it exists, is `buf[head]`.
+    // `head < buf.capacity()`, unless `buf.capacity() == 0` when `head == 0`.
     head: usize,
+    // the number of initialized elements, starting from the one at `head` and potentially wrapping around.
+    // if `len == 0`, the exact value of `head` is unimportant.
+    // if `T` is zero-sized, then `self.len <= usize::MAX`, otherwise `self.len <= isize::MAX as usize`.
     len: usize,
     buf: RawVec<T, A>,
 }
@@ -106,15 +111,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
     fn clone_from(&mut self, other: &Self) {
         self.clear();
-        self.head = 0;
-        self.reserve(other.len);
-
-        let (a, b) = other.as_slices();
-        unsafe {
-            self.write_iter(0, a.iter().cloned(), &mut 0);
-            self.write_iter(a.len(), b.iter().cloned(), &mut 0);
-        }
-        self.len = other.len;
+        self.extend(other.iter().cloned());
     }
 }
@@ -133,13 +130,11 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for VecDeque<T, A> {
             }
         }
 
-        if mem::needs_drop::<T>() {
-            let (front, back) = self.as_mut_slices();
-            unsafe {
-                let _back_dropper = Dropper(back);
-                // use drop for [T]
-                ptr::drop_in_place(front);
-            }
+        let (front, back) = self.as_mut_slices();
+        unsafe {
+            let _back_dropper = Dropper(back);
+            // use drop for [T]
+            ptr::drop_in_place(front);
         }
 
         // RawVec handles deallocation
@@ -175,6 +170,15 @@ impl<T, A: Allocator> VecDeque<T, A> {
         }
     }
 
+    /// Returns a slice pointer into the buffer.
+    /// `range` must lie inside `0..self.capacity()`.
+    #[inline]
+    unsafe fn buffer_range(&self, range: Range<usize>) -> *mut [T] {
+        unsafe {
+            ptr::slice_from_raw_parts_mut(self.ptr().add(range.start), range.end - range.start)
+        }
+    }
+
     /// Returns `true` if the buffer is at full capacity.
     #[inline]
     fn is_full(&self) -> bool {
@@ -189,7 +193,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     }
 
     #[inline]
-    fn wrap_idx(&self, idx: usize) -> usize {
+    fn to_physical_idx(&self, idx: usize) -> usize {
         self.wrap_add(self.head, idx)
     }
@@ -473,6 +477,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
         debug_assert!(new_capacity >= old_capacity);
 
         // Move the shortest contiguous section of the ring buffer
+        //
+        // H := head
+        // L := last element (`self.to_physical_idx(self.len - 1)`)
+        //
         //    H             L
         //   [o o o o o o o . ]
         //    H             L
@@ -597,7 +605,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn get(&self, index: usize) -> Option<&T> {
         if index < self.len {
-            let idx = self.wrap_idx(index);
+            let idx = self.to_physical_idx(index);
             unsafe { Some(&*self.ptr().add(idx)) }
         } else {
             None
#[stable(feature = "rust1", since = "1.0.0")] #[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> { pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
if index < self.len { if index < self.len {
let idx = self.wrap_idx(index); let idx = self.to_physical_idx(index);
unsafe { Some(&mut *self.ptr().add(idx)) } unsafe { Some(&mut *self.ptr().add(idx)) }
} else { } else {
None None
@@ -660,8 +668,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
     pub fn swap(&mut self, i: usize, j: usize) {
         assert!(i < self.len());
         assert!(j < self.len());
-        let ri = self.wrap_idx(i);
-        let rj = self.wrap_idx(j);
+        let ri = self.to_physical_idx(i);
+        let rj = self.to_physical_idx(j);
         unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
     }
@@ -913,6 +921,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             if self.len == 0 {
                 self.head = 0;
             } else if self.head >= target_cap && tail_outside {
+                // H := head
+                // L := last element
                 //                    H             L
                 //   [. . . . . . . . o o o o o o o . ]
                 //    H             L
@@ -923,6 +933,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
                 }
                 self.head = 0;
             } else if self.head < target_cap && tail_outside {
+                // H := head
+                // L := last element
                 //          H             L
                 //   [. . . o o o o o o o . . . . . . ]
                 //              L   H
@@ -932,6 +944,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
                     self.copy_nonoverlapping(target_cap, 0, len);
                 }
             } else if self.head >= target_cap {
+                // H := head
+                // L := last element
                 //            L                   H
                 //   [o o o o o . . . . . . . . . o o ]
                 //            L   H
@@ -998,11 +1012,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
             return;
         }
 
-        if !mem::needs_drop::<T>() {
-            self.len = len;
-            return;
-        }
-
         let (front, back) = self.as_mut_slices();
         if len > front.len() {
             let begin = len - front.len();
@@ -1102,18 +1111,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[inline]
     #[stable(feature = "deque_extras_15", since = "1.5.0")]
     pub fn as_slices(&self) -> (&[T], &[T]) {
-        if self.is_contiguous() {
-            (unsafe { slice::from_raw_parts(self.ptr().add(self.head), self.len) }, &[])
-        } else {
-            let head_len = self.capacity() - self.head;
-            let tail_len = self.len - head_len;
-            unsafe {
-                (
-                    slice::from_raw_parts(self.ptr().add(self.head), head_len),
-                    slice::from_raw_parts(self.ptr(), tail_len),
-                )
-            }
-        }
+        let (a_range, b_range) = self.slice_ranges(..);
+        // SAFETY: `slice_ranges` always returns valid ranges into
+        // the physical buffer.
+        unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
     }
 
     /// Returns a pair of slices which contain, in order, the contents of the
@@ -1144,18 +1145,10 @@ impl<T, A: Allocator> VecDeque<T, A> {
     #[inline]
     #[stable(feature = "deque_extras_15", since = "1.5.0")]
     pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
-        if self.is_contiguous() {
-            (unsafe { slice::from_raw_parts_mut(self.ptr().add(self.head), self.len) }, &mut [])
-        } else {
-            let head_len = self.capacity() - self.head;
-            let tail_len = self.len - head_len;
-            unsafe {
-                (
-                    slice::from_raw_parts_mut(self.ptr().add(self.head), head_len),
-                    slice::from_raw_parts_mut(self.ptr(), tail_len),
-                )
-            }
-        }
+        let (a_range, b_range) = self.slice_ranges(..);
+        // SAFETY: `slice_ranges` always returns valid ranges into
+        // the physical buffer.
+        unsafe { (&mut *self.buffer_range(a_range), &mut *self.buffer_range(b_range)) }
     }
 
     /// Returns the number of elements in the deque.
@@ -1192,18 +1185,37 @@ impl<T, A: Allocator> VecDeque<T, A> {
         self.len == 0
     }
 
+    /// Given a range into the logical buffer of the deque, this function
+    /// returns two ranges into the physical buffer that correspond to
+    /// the given range.
     fn slice_ranges<R>(&self, range: R) -> (Range<usize>, Range<usize>)
     where
         R: RangeBounds<usize>,
     {
         let Range { start, end } = slice::range(range, ..self.len);
-        let a_len = self.len.min(self.capacity() - self.head);
-        if end <= a_len {
-            (start..end, 0..0)
-        } else if start >= a_len {
-            (0..0, start - a_len..end - a_len)
+        let len = end - start;
+
+        if len == 0 {
+            (0..0, 0..0)
         } else {
-            (start..a_len, 0..end - a_len)
+            // `slice::range` guarantees that `start <= end <= self.len`.
+            // Because `len != 0`, we know that `start < end`, so `start < self.len`
+            // and the indexing is valid.
+            let wrapped_start = self.to_physical_idx(start);
+
+            // This subtraction can never overflow because `wrapped_start` is
+            // at most `self.capacity()` (and if `self.capacity() != 0`, then
+            // `wrapped_start` is strictly less than `self.capacity()`).
+            let head_len = self.capacity() - wrapped_start;
+
+            if head_len >= len {
+                // we know that `len + wrapped_start <= self.capacity() <= usize::MAX`,
+                // so this addition can't overflow
                (wrapped_start..wrapped_start + len, 0..0)
+            } else {
+                // can't overflow because of the if condition
+                let tail_len = len - head_len;
+                (wrapped_start..self.capacity(), 0..tail_len)
+            }
         }
     }
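To see what the rewritten `slice_ranges` computes, here is the same splitting logic pulled out into a standalone function over `(head, capacity)` plus a few worked cases. This free-function form is hypothetical; the real method reads these values from `&self` and uses `wrap_add` instead of `%`:

use std::ops::Range;

fn slice_ranges(
    head: usize,
    capacity: usize,
    start: usize,
    end: usize,
) -> (Range<usize>, Range<usize>) {
    let len = end - start;
    if len == 0 {
        return (0..0, 0..0);
    }
    // logical -> physical, as `to_physical_idx` does via `wrap_add`
    let wrapped_start = (head + start) % capacity;
    let head_len = capacity - wrapped_start;
    if head_len >= len {
        // the range is contiguous in the physical buffer
        (wrapped_start..wrapped_start + len, 0..0)
    } else {
        // the range wraps: a tail of `len - head_len` elements starts at slot 0
        (wrapped_start..capacity, 0..len - head_len)
    }
}

fn main() {
    // capacity 8, head at 6: logical 0..4 occupies physical 6, 7, 0, 1.
    assert_eq!(slice_ranges(6, 8, 0, 4), (6..8, 0..2));
    // a sub-range lying entirely in the wrapped tail is contiguous again.
    assert_eq!(slice_ranges(6, 8, 2, 4), (0..2, 0..0));
    // the empty range never touches the index math, as the code notes.
    assert_eq!(slice_ranges(6, 8, 3, 3), (0..0, 0..0));
}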
@@ -1233,9 +1245,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
     where
         R: RangeBounds<usize>,
     {
-        let (a, b) = self.as_slices();
         let (a_range, b_range) = self.slice_ranges(range);
-        Iter::new(a[a_range].iter(), b[b_range].iter())
+        // SAFETY: The ranges returned by `slice_ranges`
+        // are valid ranges into the physical buffer, so
+        // it's ok to pass them to `buffer_range` and
+        // dereference the result.
+        let a = unsafe { &*self.buffer_range(a_range) };
+        let b = unsafe { &*self.buffer_range(b_range) };
+        Iter::new(a.iter(), b.iter())
     }
 
     /// Creates an iterator that covers the specified mutable range in the deque.
@@ -1269,8 +1286,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
         R: RangeBounds<usize>,
     {
         let (a_range, b_range) = self.slice_ranges(range);
-        let (a, b) = self.as_mut_slices();
-        IterMut::new(a[a_range].iter_mut(), b[b_range].iter_mut())
+        // SAFETY: The ranges returned by `slice_ranges`
+        // are valid ranges into the physical buffer, so
+        // it's ok to pass them to `buffer_range` and
+        // dereference the result.
+        let a = unsafe { &mut *self.buffer_range(a_range) };
+        let b = unsafe { &mut *self.buffer_range(b_range) };
+        IterMut::new(a.iter_mut(), b.iter_mut())
     }
 
     /// Removes the specified range from the deque in bulk, returning all
@@ -1509,7 +1531,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             None
         } else {
             let old_head = self.head;
-            self.head = self.wrap_idx(1);
+            self.head = self.to_physical_idx(1);
             self.len -= 1;
             Some(unsafe { self.buffer_read(old_head) })
         }
@@ -1535,7 +1557,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             None
         } else {
             self.len -= 1;
-            Some(unsafe { self.buffer_read(self.wrap_idx(self.len)) })
+            Some(unsafe { self.buffer_read(self.to_physical_idx(self.len)) })
         }
     }
@@ -1583,7 +1605,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             self.grow();
         }
 
-        unsafe { self.buffer_write(self.wrap_idx(self.len), value) }
+        unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
         self.len += 1;
     }
@@ -1700,8 +1722,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             // and panicked.
             unsafe {
                 // see `remove()` for explanation why this wrap_copy() call is safe.
-                self.wrap_copy(self.wrap_idx(index), self.wrap_idx(index + 1), k);
-                self.buffer_write(self.wrap_idx(index), value);
+                self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k);
+                self.buffer_write(self.to_physical_idx(index), value);
                 self.len += 1;
             }
         } else {
@@ -1709,7 +1731,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             self.head = self.wrap_sub(self.head, 1);
             unsafe {
                 self.wrap_copy(old_head, self.head, index);
-                self.buffer_write(self.wrap_idx(index), value);
+                self.buffer_write(self.to_physical_idx(index), value);
                 self.len += 1;
             }
         }
@@ -1742,7 +1764,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             return None;
         }
 
-        let wrapped_idx = self.wrap_idx(index);
+        let wrapped_idx = self.to_physical_idx(index);
 
         let elem = unsafe { Some(self.buffer_read(wrapped_idx)) };
@@ -1755,7 +1777,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
             self.len -= 1;
         } else {
             let old_head = self.head;
-            self.head = self.wrap_idx(1);
+            self.head = self.to_physical_idx(1);
             unsafe { self.wrap_copy(old_head, self.head, index) };
             self.len -= 1;
         }
@@ -1866,14 +1888,14 @@ impl<T, A: Allocator> VecDeque<T, A> {
         self.reserve(other.len);
         unsafe {
             let (left, right) = other.as_slices();
-            self.copy_slice(self.wrap_idx(self.len), left);
+            self.copy_slice(self.to_physical_idx(self.len), left);
             // no overflow, because self.capacity() >= old_cap + left.len() >= self.len + left.len()
-            self.copy_slice(self.wrap_idx(self.len + left.len()), right);
+            self.copy_slice(self.to_physical_idx(self.len + left.len()), right);
         }
         // SAFETY: Update pointers after copying to avoid leaving doppelganger
         // in case of panics.
         self.len += other.len;
-        // Silently drop values in `other`.
+        // Now that we own its values, forget everything in `other`.
         other.len = 0;
         other.head = 0;
@@ -1982,8 +2004,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
         // buffer without it being full emerge
         debug_assert!(self.is_full());
         let old_cap = self.capacity();
-        // if the cap was 0, we need to reserve at least 1.
-        self.buf.reserve(old_cap, old_cap.max(1));
+        self.buf.reserve_for_push(old_cap);
         unsafe {
             self.handle_capacity_increase(old_cap);
         }
@@ -2265,16 +2286,16 @@ impl<T, A: Allocator> VecDeque<T, A> {
     unsafe fn rotate_left_inner(&mut self, mid: usize) {
         debug_assert!(mid * 2 <= self.len());
         unsafe {
-            self.wrap_copy(self.head, self.wrap_idx(self.len), mid);
+            self.wrap_copy(self.head, self.to_physical_idx(self.len), mid);
         }
-        self.head = self.wrap_idx(mid);
+        self.head = self.to_physical_idx(mid);
     }
 
     unsafe fn rotate_right_inner(&mut self, k: usize) {
         debug_assert!(k * 2 <= self.len());
         self.head = self.wrap_sub(self.head, k);
         unsafe {
-            self.wrap_copy(self.wrap_idx(self.len), self.head, k);
+            self.wrap_copy(self.to_physical_idx(self.len), self.head, k);
         }
     }
@@ -2532,8 +2553,13 @@ impl<T: Clone, A: Allocator> VecDeque<T, A> {
 
 /// Returns the index in the underlying buffer for a given logical element index.
 #[inline]
-fn wrap_index(index: usize, size: usize) -> usize {
-    if index >= size { index - size } else { index }
+fn wrap_index(logical_index: usize, capacity: usize) -> usize {
+    debug_assert!(
+        (logical_index == 0 && capacity == 0)
+            || logical_index < capacity
+            || (logical_index - capacity) < capacity
+    );
+    if logical_index >= capacity { logical_index - capacity } else { logical_index }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
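`wrap_index` can use a conditional subtraction instead of `%` because every caller passes an index below `2 * capacity` (typically `head + idx` with both terms below `capacity`); the new `debug_assert!` encodes exactly that contract. A standalone copy to exercise the three admissible cases (the `main` is illustrative, not part of the commit):

fn wrap_index(logical_index: usize, capacity: usize) -> usize {
    debug_assert!(
        (logical_index == 0 && capacity == 0)
            || logical_index < capacity
            || (logical_index - capacity) < capacity
    );
    if logical_index >= capacity { logical_index - capacity } else { logical_index }
}

fn main() {
    assert_eq!(wrap_index(0, 0), 0);  // empty buffer: only index 0 is legal
    assert_eq!(wrap_index(3, 8), 3);  // in range: returned unchanged
    assert_eq!(wrap_index(11, 8), 3); // wrapped once: 11 - 8 = 3
    // wrap_index(17, 8) would trip the debug_assert: more than one wrap
    // can't happen for indices produced by `wrap_add`/`wrap_sub`.
}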

library/alloc/src/collections/vec_deque/spec_extend.rs

@@ -38,7 +38,7 @@ where
                 // and `room == self.capacity() - self.len`
                 // => `self.len + room <= self.capacity()`
                 self.write_iter_wrapping(
-                    self.wrap_idx(self.len),
+                    self.to_physical_idx(self.len),
                     ByRefSized(&mut iter).take(room),
                     room,
                 );
@@ -63,8 +63,9 @@ where
             );
             self.reserve(additional);
 
-            let written =
-                unsafe { self.write_iter_wrapping(self.wrap_idx(self.len), iter, additional) };
+            let written = unsafe {
+                self.write_iter_wrapping(self.to_physical_idx(self.len), iter, additional)
+            };
 
             debug_assert_eq!(
                 additional, written,
self.reserve(slice.len()); self.reserve(slice.len());
unsafe { unsafe {
self.copy_slice(self.wrap_idx(self.len), slice); self.copy_slice(self.to_physical_idx(self.len), slice);
self.len += slice.len(); self.len += slice.len();
} }
iterator.forget_remaining_elements(); iterator.forget_remaining_elements();
@@ -113,7 +114,7 @@ where
         self.reserve(slice.len());
 
         unsafe {
-            self.copy_slice(self.wrap_idx(self.len), slice);
+            self.copy_slice(self.to_physical_idx(self.len), slice);
             self.len += slice.len();
         }
     }

library/alloc/src/collections/vec_deque/tests.rs

@@ -257,13 +257,14 @@ fn test_swap_panic() {
 #[test]
 fn test_reserve_exact() {
     let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
-    assert!(tester.capacity() == 1);
+    assert_eq!(tester.capacity(), 1);
     tester.reserve_exact(50);
-    assert!(tester.capacity() >= 50);
+    assert_eq!(tester.capacity(), 50);
     tester.reserve_exact(40);
-    assert!(tester.capacity() >= 40);
+    // reserving won't shrink the buffer
+    assert_eq!(tester.capacity(), 50);
     tester.reserve_exact(200);
-    assert!(tester.capacity() >= 200);
+    assert_eq!(tester.capacity(), 200);
 }
 
 #[test]
@@ -479,7 +480,7 @@ fn make_contiguous_big_tail() {
     assert_eq!(tester.capacity(), 15);
     assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
 
-    let expected_start = tester.wrap_idx(tester.len);
+    let expected_start = tester.to_physical_idx(tester.len);
     tester.make_contiguous();
     assert_eq!(tester.head, expected_start);
     assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
@@ -679,7 +680,7 @@ fn test_drain() {
     let cap = tester.capacity();
     for len in 0..=cap {
-        for head in 0..=cap {
+        for head in 0..cap {
             for drain_start in 0..=len {
                 for drain_end in drain_start..=len {
                     tester.head = head;