rollup merge of #19944: steveklabnik/doc_sync_arc
Take the docs from Rc<T>, apply them to Arc<T>, and fix some line lengths.
commit c76590cb14
2 changed files with 329 additions and 88 deletions
src/liballoc/arc.rs

@@ -10,8 +10,61 @@

 #![stable]

-//! Concurrency-enabled mechanisms for sharing mutable and/or immutable state
-//! between tasks.
+//! Threadsafe reference-counted boxes (the `Arc<T>` type).
+//!
+//! The `Arc<T>` type provides shared ownership of an immutable value. Destruction is
+//! deterministic, and will occur as soon as the last owner is gone. It is marked as `Send` because
+//! it uses atomic reference counting.
+//!
+//! If you do not need thread-safety, and just need shared ownership, consider the [`Rc<T>`
+//! type](../rc/struct.Rc.html). It is the same as `Arc<T>`, but does not use atomics, making it
+//! both thread-unsafe as well as significantly faster when updating the reference count.
+//!
+//! The `downgrade` method can be used to create a non-owning `Weak<T>` pointer to the box. A
+//! `Weak<T>` pointer can be upgraded to an `Arc<T>` pointer, but will return `None` if the value
+//! has already been dropped.
+//!
+//! For example, a tree with parent pointers can be represented by putting the nodes behind strong
+//! `Arc<T>` pointers, and then storing the parent pointers as `Weak<T>` pointers.
+//!
+//! # Examples
+//!
+//! Sharing some immutable data between tasks:
+//!
+//! ```
+//! use std::sync::Arc;
+//!
+//! let five = Arc::new(5i);
+//!
+//! for _ in range(0u, 10) {
+//!     let five = five.clone();
+//!
+//!     spawn(move || {
+//!         println!("{}", five);
+//!     });
+//! }
+//! ```
+//!
+//! Sharing mutable data safely between tasks with a `Mutex`:
+//!
+//! ```
+//! use std::sync::Arc;
+//! use std::sync::Mutex;
+//!
+//! let five = Arc::new(Mutex::new(5i));
+//!
+//! for _ in range(0u, 10) {
+//!     let five = five.clone();
+//!
+//!     spawn(move || {
+//!         let mut number = five.lock();
+//!
+//!         *number += 1;
+//!
+//!         println!("{}", *number); // prints 6
+//!     });
+//! }
+//! ```

 use core::atomic;
 use core::borrow::BorrowFrom;
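The two module examples above are written in late-2014 Rust (`5i` suffixes, `range(0u, 10)`, a free `spawn` function). As a rough modern equivalent, this sketch (assuming today's `std::thread` and `Mutex` APIs, with joins added so the program waits for its threads) shows the same two patterns:

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // Immutable data: clone the Arc handle, not the value it points at.
    let five = Arc::new(5);
    let mut handles = Vec::new();
    for _ in 0..10 {
        let five = Arc::clone(&five);
        handles.push(thread::spawn(move || println!("{}", five)));
    }

    // Mutable data: put the value behind a Mutex inside the Arc.
    let counter = Arc::new(Mutex::new(5));
    for _ in 0..10 {
        let counter = Arc::clone(&counter);
        handles.push(thread::spawn(move || {
            let mut number = counter.lock().unwrap();
            *number += 1; // the guard derefs to the inner value
            println!("{}", *number);
        }));
    }

    for handle in handles {
        handle.join().unwrap();
    }
}
```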
@@ -33,9 +86,8 @@ use heap::deallocate;
 ///
 /// # Example
 ///
-/// In this example, a large vector of floats is shared between several tasks.
-/// With simple pipes, without `Arc`, a copy would have to be made for each
-/// task.
+/// In this example, a large vector of floats is shared between several tasks. With simple pipes,
+/// without `Arc`, a copy would have to be made for each task.
 ///
 /// ```rust
 /// use std::sync::Arc;
@@ -66,8 +118,8 @@ pub struct Arc<T> {

 /// A weak pointer to an `Arc`.
 ///
-/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
-/// used to break cycles between `Arc` pointers.
+/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles
+/// between `Arc` pointers.
 #[unsafe_no_drop_flag]
 #[experimental = "Weak pointers may not belong in this module."]
 pub struct Weak<T> {
@@ -83,7 +135,15 @@ struct ArcInner<T> {
 }

 impl<T: Sync + Send> Arc<T> {
-    /// Creates an atomically reference counted wrapper.
+    /// Constructs a new `Arc<T>`.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    /// ```
     #[inline]
     #[stable]
     pub fn new(data: T) -> Arc<T> {
@@ -97,11 +157,17 @@ impl<T: Sync + Send> Arc<T> {
         Arc { _ptr: unsafe { mem::transmute(x) } }
     }

-    /// Downgrades a strong pointer to a weak pointer.
+    /// Downgrades the `Arc<T>` to a `Weak<T>` reference.
     ///
-    /// Weak pointers will not keep the data alive. Once all strong references
-    /// to the underlying data have been dropped, the data itself will be
-    /// destroyed.
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// let weak_five = five.downgrade();
+    /// ```
     #[experimental = "Weak pointers may not belong in this module."]
     pub fn downgrade(&self) -> Weak<T> {
         // See the clone() impl for why this is relaxed
@@ -113,11 +179,10 @@ impl<T: Sync + Send> Arc<T> {
 impl<T> Arc<T> {
     #[inline]
     fn inner(&self) -> &ArcInner<T> {
-        // This unsafety is ok because while this arc is alive we're guaranteed
-        // that the inner pointer is valid. Furthermore, we know that the
-        // `ArcInner` structure itself is `Sync` because the inner data is
-        // `Sync` as well, so we're ok loaning out an immutable pointer to
-        // these contents.
+        // This unsafety is ok because while this arc is alive we're guaranteed that the inner
+        // pointer is valid. Furthermore, we know that the `ArcInner` structure itself is `Sync`
+        // because the inner data is `Sync` as well, so we're ok loaning out an immutable pointer
+        // to these contents.
         unsafe { &*self._ptr }
     }
 }
@@ -134,22 +199,28 @@ pub fn strong_count<T>(this: &Arc<T>) -> uint { this.inner().strong.load(atomic:

 #[unstable = "waiting on stability of Clone"]
 impl<T> Clone for Arc<T> {
-    /// Duplicate an atomically reference counted wrapper.
+    /// Makes a clone of the `Arc<T>`.
     ///
-    /// The resulting two `Arc` objects will point to the same underlying data
-    /// object. However, one of the `Arc` objects can be sent to another task,
-    /// allowing them to share the underlying data.
+    /// This increases the strong reference count.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five.clone();
+    /// ```
     #[inline]
     fn clone(&self) -> Arc<T> {
-        // Using a relaxed ordering is alright here, as knowledge of the
-        // original reference prevents other threads from erroneously deleting
-        // the object.
+        // Using a relaxed ordering is alright here, as knowledge of the original reference
+        // prevents other threads from erroneously deleting the object.
         //
-        // As explained in the [Boost documentation][1], Increasing the
-        // reference counter can always be done with memory_order_relaxed: New
-        // references to an object can only be formed from an existing
-        // reference, and passing an existing reference from one thread to
-        // another must already provide any required synchronization.
+        // As explained in the [Boost documentation][1], Increasing the reference counter can
+        // always be done with memory_order_relaxed: New references to an object can only be formed
+        // from an existing reference, and passing an existing reference from one thread to another
+        // must already provide any required synchronization.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
         self.inner().strong.fetch_add(1, atomic::Relaxed);
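The Relaxed-increment argument in that comment is easy to check in isolation. Here is a minimal sketch with a bare `AtomicUsize` standing in for `ArcInner`'s `strong` field (the names are illustrative, not the library's internals):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // Stand-in for ArcInner's `strong` field; a fresh Arc starts at 1.
    let strong = AtomicUsize::new(1);

    // Cloning only needs a Relaxed increment: the caller already holds
    // a reference, so the count is at least 1 and cannot concurrently
    // reach zero while we bump it.
    strong.fetch_add(1, Ordering::Relaxed);
    assert_eq!(strong.load(Ordering::Relaxed), 2);
}
```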
@@ -172,26 +243,33 @@ impl<T> Deref<T> for Arc<T> {
 }

 impl<T: Send + Sync + Clone> Arc<T> {
-    /// Acquires a mutable pointer to the inner contents by guaranteeing that
-    /// the reference count is one (no sharing is possible).
+    /// Make a mutable reference from the given `Arc<T>`.
     ///
-    /// This is also referred to as a copy-on-write operation because the inner
-    /// data is cloned if the reference count is greater than one.
+    /// This is also referred to as a copy-on-write operation because the inner data is cloned if
+    /// the reference count is greater than one.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let mut five = Arc::new(5i);
+    ///
+    /// let mut_five = five.make_unique();
+    /// ```
     #[inline]
     #[experimental]
     pub fn make_unique(&mut self) -> &mut T {
-        // Note that we hold a strong reference, which also counts as
-        // a weak reference, so we only clone if there is an
-        // additional reference of either kind.
+        // Note that we hold a strong reference, which also counts as a weak reference, so we only
+        // clone if there is an additional reference of either kind.
         if self.inner().strong.load(atomic::SeqCst) != 1 ||
            self.inner().weak.load(atomic::SeqCst) != 1 {
             *self = Arc::new((**self).clone())
         }
-        // This unsafety is ok because we're guaranteed that the pointer
-        // returned is the *only* pointer that will ever be returned to T. Our
-        // reference count is guaranteed to be 1 at this point, and we required
-        // the Arc itself to be `mut`, so we're returning the only possible
-        // reference to the inner data.
+        // This unsafety is ok because we're guaranteed that the pointer returned is the *only*
+        // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
+        // this point, and we required the Arc itself to be `mut`, so we're returning the only
+        // possible reference to the inner data.
         let inner = unsafe { &mut *self._ptr };
         &mut inner.data
     }
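`make_unique` survives in today's standard library as `Arc::make_mut`. A short sketch of the copy-on-write behaviour described above, using the modern API:

```rust
use std::sync::Arc;

fn main() {
    let mut data = Arc::new(5);
    let other = data.clone(); // strong count is now 2

    // Copy-on-write: because `other` still shares the value,
    // make_mut clones the inner data before handing out &mut.
    *Arc::make_mut(&mut data) += 1;

    assert_eq!(*data, 6);
    assert_eq!(*other, 5); // the shared original is untouched
}
```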
@ -200,38 +278,59 @@ impl<T: Send + Sync + Clone> Arc<T> {
|
||||||
#[unsafe_destructor]
|
#[unsafe_destructor]
|
||||||
#[experimental = "waiting on stability of Drop"]
|
#[experimental = "waiting on stability of Drop"]
|
||||||
impl<T: Sync + Send> Drop for Arc<T> {
|
impl<T: Sync + Send> Drop for Arc<T> {
|
||||||
|
/// Drops the `Arc<T>`.
|
||||||
|
///
|
||||||
|
/// This will decrement the strong reference count. If the strong reference count becomes zero
|
||||||
|
/// and the only other references are `Weak<T>` ones, `drop`s the inner value.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::sync::Arc;
|
||||||
|
///
|
||||||
|
/// {
|
||||||
|
/// let five = Arc::new(5i);
|
||||||
|
///
|
||||||
|
/// // stuff
|
||||||
|
///
|
||||||
|
/// drop(five); // explict drop
|
||||||
|
/// }
|
||||||
|
/// {
|
||||||
|
/// let five = Arc::new(5i);
|
||||||
|
///
|
||||||
|
/// // stuff
|
||||||
|
///
|
||||||
|
/// } // implicit drop
|
||||||
|
/// ```
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
// This structure has #[unsafe_no_drop_flag], so this drop glue may run
|
// This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
|
||||||
// more than once (but it is guaranteed to be zeroed after the first if
|
// it is guaranteed to be zeroed after the first if it's run more than once)
|
||||||
// it's run more than once)
|
|
||||||
if self._ptr.is_null() { return }
|
if self._ptr.is_null() { return }
|
||||||
|
|
||||||
// Because `fetch_sub` is already atomic, we do not need to synchronize
|
// Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
|
||||||
// with other threads unless we are going to delete the object. This
|
// unless we are going to delete the object. This same logic applies to the below
|
||||||
// same logic applies to the below `fetch_sub` to the `weak` count.
|
// `fetch_sub` to the `weak` count.
|
||||||
if self.inner().strong.fetch_sub(1, atomic::Release) != 1 { return }
|
if self.inner().strong.fetch_sub(1, atomic::Release) != 1 { return }
|
||||||
|
|
||||||
// This fence is needed to prevent reordering of use of the data and
|
// This fence is needed to prevent reordering of use of the data and deletion of the data.
|
||||||
// deletion of the data. Because it is marked `Release`, the
|
// Because it is marked `Release`, the decreasing of the reference count synchronizes with
|
||||||
// decreasing of the reference count synchronizes with this `Acquire`
|
// this `Acquire` fence. This means that use of the data happens before decreasing the
|
||||||
// fence. This means that use of the data happens before decreasing
|
// reference count, which happens before this fence, which happens before the deletion of
|
||||||
// the reference count, which happens before this fence, which
|
// the data.
|
||||||
// happens before the deletion of the data.
|
|
||||||
//
|
//
|
||||||
// As explained in the [Boost documentation][1],
|
// As explained in the [Boost documentation][1],
|
||||||
//
|
//
|
||||||
// It is important to enforce any possible access to the object in
|
// > It is important to enforce any possible access to the object in one thread (through an
|
||||||
// one thread (through an existing reference) to *happen before*
|
// > existing reference) to *happen before* deleting the object in a different thread. This
|
||||||
// deleting the object in a different thread. This is achieved by a
|
// > is achieved by a "release" operation after dropping a reference (any access to the
|
||||||
// "release" operation after dropping a reference (any access to the
|
// > object through this reference must obviously happened before), and an "acquire"
|
||||||
// object through this reference must obviously happened before),
|
// > operation before deleting the object.
|
||||||
// and an "acquire" operation before deleting the object.
|
|
||||||
//
|
//
|
||||||
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
|
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
|
||||||
atomic::fence(atomic::Acquire);
|
atomic::fence(atomic::Acquire);
|
||||||
|
|
||||||
// Destroy the data at this time, even though we may not free the box
|
// Destroy the data at this time, even though we may not free the box allocation itself
|
||||||
// allocation itself (there may still be weak pointers lying around).
|
// (there may still be weak pointers lying around).
|
||||||
unsafe { drop(ptr::read(&self.inner().data)); }
|
unsafe { drop(ptr::read(&self.inner().data)); }
|
||||||
|
|
||||||
if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
|
if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
|
||||||
|
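The Release-decrement plus Acquire-fence protocol in this destructor can be sketched with a bare counter. This toy `release_ref` is illustrative only, not the real implementation:

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// Mirrors Arc's Drop: Release on the way down, then an Acquire fence
// before the (would-be) deletion, so every use of the data in other
// threads happens-before the destruction.
fn release_ref(strong: &AtomicUsize) -> bool {
    if strong.fetch_sub(1, Ordering::Release) != 1 {
        return false; // other owners remain; nothing to destroy
    }
    fence(Ordering::Acquire);
    true // caller may now destroy the data
}

fn main() {
    let strong = AtomicUsize::new(2);
    assert!(!release_ref(&strong)); // first drop: an owner remains
    assert!(release_ref(&strong));  // last drop: safe to destroy
}
```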
@@ -244,14 +343,26 @@ impl<T: Sync + Send> Drop for Arc<T> {

 #[experimental = "Weak pointers may not belong in this module."]
 impl<T: Sync + Send> Weak<T> {
-    /// Attempts to upgrade this weak reference to a strong reference.
+    /// Upgrades a weak reference to a strong reference.
     ///
-    /// This method will not upgrade this reference if the strong reference count has already
-    /// reached 0, but if there are still other active strong references this function will return
-    /// a new strong reference to the data.
+    /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
+    ///
+    /// Returns `None` if there were no strong references and the data was destroyed.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// let weak_five = five.downgrade();
+    ///
+    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
+    /// ```
     pub fn upgrade(&self) -> Option<Arc<T>> {
-        // We use a CAS loop to increment the strong count instead of a
-        // fetch_add because once the count hits 0 is must never be above 0.
+        // We use a CAS loop to increment the strong count instead of a fetch_add because once the
+        // count hits 0 it must never be above 0.
         let inner = self.inner();
         loop {
             let n = inner.strong.load(atomic::SeqCst);
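The CAS loop mentioned in `upgrade`'s comment has to re-check the count on every iteration, since a plain `fetch_add` could resurrect a count that already hit zero. A standalone sketch of the same loop (toy function, modern `compare_exchange` API):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// A toy version of Weak::upgrade's CAS loop: the strong count may
// only be revived while it is still non-zero.
fn try_upgrade(strong: &AtomicUsize) -> bool {
    loop {
        let n = strong.load(Ordering::SeqCst);
        if n == 0 {
            return false; // the data is gone; upgrading must fail
        }
        // Publish n + 1 only if nobody changed the count meanwhile.
        if strong
            .compare_exchange(n, n + 1, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
        {
            return true;
        }
    }
}

fn main() {
    assert!(try_upgrade(&AtomicUsize::new(1)));
    assert!(!try_upgrade(&AtomicUsize::new(0)));
}
```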
@@ -270,6 +381,19 @@ impl<T: Sync + Send> Weak<T> {

 #[experimental = "Weak pointers may not belong in this module."]
 impl<T: Sync + Send> Clone for Weak<T> {
+    /// Makes a clone of the `Weak<T>`.
+    ///
+    /// This increases the weak reference count.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let weak_five = Arc::new(5i).downgrade();
+    ///
+    /// weak_five.clone();
+    /// ```
     #[inline]
     fn clone(&self) -> Weak<T> {
         // See comments in Arc::clone() for why this is relaxed
@@ -281,13 +405,37 @@ impl<T: Sync + Send> Clone for Weak<T> {
 #[unsafe_destructor]
 #[experimental = "Weak pointers may not belong in this module."]
 impl<T: Sync + Send> Drop for Weak<T> {
+    /// Drops the `Weak<T>`.
+    ///
+    /// This will decrement the weak reference count.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// {
+    ///     let five = Arc::new(5i);
+    ///     let weak_five = five.downgrade();
+    ///
+    ///     // stuff
+    ///
+    ///     drop(weak_five); // explicit drop
+    /// }
+    /// {
+    ///     let five = Arc::new(5i);
+    ///     let weak_five = five.downgrade();
+    ///
+    ///     // stuff
+    ///
+    /// } // implicit drop
+    /// ```
     fn drop(&mut self) {
         // see comments above for why this check is here
         if self._ptr.is_null() { return }

-        // If we find out that we were the last weak pointer, then its time to
-        // deallocate the data entirely. See the discussion in Arc::drop() about
-        // the memory orderings
+        // If we find out that we were the last weak pointer, then it's time to deallocate the data
+        // entirely. See the discussion in Arc::drop() about the memory orderings
         if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
             atomic::fence(atomic::Acquire);
             unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
@@ -298,18 +446,114 @@ impl<T: Sync + Send> Drop for Weak<T> {

 #[unstable = "waiting on PartialEq"]
 impl<T: PartialEq> PartialEq for Arc<T> {
+    /// Equality for two `Arc<T>`s.
+    ///
+    /// Two `Arc<T>`s are equal if their inner values are equal.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five == Arc::new(5i);
+    /// ```
     fn eq(&self, other: &Arc<T>) -> bool { *(*self) == *(*other) }
+
+    /// Inequality for two `Arc<T>`s.
+    ///
+    /// Two `Arc<T>`s are unequal if their inner values are unequal.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five != Arc::new(5i);
+    /// ```
     fn ne(&self, other: &Arc<T>) -> bool { *(*self) != *(*other) }
 }
 #[unstable = "waiting on PartialOrd"]
 impl<T: PartialOrd> PartialOrd for Arc<T> {
+    /// Partial comparison for two `Arc<T>`s.
+    ///
+    /// The two are compared by calling `partial_cmp()` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five.partial_cmp(&Arc::new(5i));
+    /// ```
     fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
         (**self).partial_cmp(&**other)
     }
+
+    /// Less-than comparison for two `Arc<T>`s.
+    ///
+    /// The two are compared by calling `<` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five < Arc::new(5i);
+    /// ```
     fn lt(&self, other: &Arc<T>) -> bool { *(*self) < *(*other) }
+
+    /// 'Less-than or equal to' comparison for two `Arc<T>`s.
+    ///
+    /// The two are compared by calling `<=` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five <= Arc::new(5i);
+    /// ```
     fn le(&self, other: &Arc<T>) -> bool { *(*self) <= *(*other) }
-    fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
+
+    /// Greater-than comparison for two `Arc<T>`s.
+    ///
+    /// The two are compared by calling `>` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five > Arc::new(5i);
+    /// ```
     fn gt(&self, other: &Arc<T>) -> bool { *(*self) > *(*other) }
+
+    /// 'Greater-than or equal to' comparison for two `Arc<T>`s.
+    ///
+    /// The two are compared by calling `>=` on their inner values.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::sync::Arc;
+    ///
+    /// let five = Arc::new(5i);
+    ///
+    /// five >= Arc::new(5i);
+    /// ```
+    fn ge(&self, other: &Arc<T>) -> bool { *(*self) >= *(*other) }
 }
 #[unstable = "waiting on Ord"]
 impl<T: Ord> Ord for Arc<T> {
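All of these comparison impls forward through `Deref` to the inner values, so two independently allocated `Arc`s compare by contents, not by pointer. On a modern toolchain:

```rust
use std::sync::Arc;

fn main() {
    // Comparisons deref to the inner values, so separate allocations
    // holding equal data compare equal.
    assert!(Arc::new(5) == Arc::new(5));
    assert!(Arc::new(4) < Arc::new(5));

    // Pointer identity is a separate question, answered by Arc::ptr_eq.
    let a = Arc::new(5);
    assert!(Arc::ptr_eq(&a, &a.clone()));
}
```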
src/liballoc/rc.rs

@@ -168,12 +168,12 @@ struct RcBox<T> {

 /// An immutable reference-counted pointer type.
 ///
-/// See the [module level documentation](../index.html) for more.
+/// See the [module level documentation](../index.html) for more details.
 #[unsafe_no_drop_flag]
 #[stable]
 pub struct Rc<T> {
-    // FIXME #12808: strange names to try to avoid interfering with
-    // field accesses of the contained type via Deref
+    // FIXME #12808: strange names to try to avoid interfering with field accesses of the contained
+    // type via Deref
     _ptr: *mut RcBox<T>,
     _nosend: marker::NoSend,
     _noshare: marker::NoSync
@@ -193,11 +193,9 @@ impl<T> Rc<T> {
     pub fn new(value: T) -> Rc<T> {
         unsafe {
             Rc {
-                // there is an implicit weak pointer owned by all the
-                // strong pointers, which ensures that the weak
-                // destructor never frees the allocation while the
-                // strong destructor is running, even if the weak
-                // pointer is stored inside the strong one.
+                // there is an implicit weak pointer owned by all the strong pointers, which
+                // ensures that the weak destructor never frees the allocation while the strong
+                // destructor is running, even if the weak pointer is stored inside the strong one.
                 _ptr: transmute(box RcBox {
                     value: value,
                     strong: Cell::new(1),
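The "implicit weak pointer" bookkeeping in that comment can be made concrete with a toy mirror of `RcBox` (hypothetical types, not the real ones):

```rust
// Toy mirror of RcBox's initialization: a fresh Rc starts with
// strong = 1 (the new owner) and weak = 1 (the implicit weak pointer
// owned collectively by all strong pointers). The allocation is only
// freed when that implicit weak reference is finally released.
struct ToyRcBox<T> {
    value: T,
    strong: usize,
    weak: usize,
}

fn toy_new<T>(value: T) -> ToyRcBox<T> {
    ToyRcBox { value, strong: 1, weak: 1 }
}

fn main() {
    let b = toy_new(5);
    assert_eq!((b.strong, b.weak), (1, 1));
    println!("boxed value = {}", b.value);
}
```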
@@ -341,11 +339,10 @@ impl<T: Clone> Rc<T> {
         if !is_unique(self) {
             *self = Rc::new((**self).clone())
         }
-        // This unsafety is ok because we're guaranteed that the pointer
-        // returned is the *only* pointer that will ever be returned to T. Our
-        // reference count is guaranteed to be 1 at this point, and we required
-        // the `Rc<T>` itself to be `mut`, so we're returning the only possible
-        // reference to the inner value.
+        // This unsafety is ok because we're guaranteed that the pointer returned is the *only*
+        // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
+        // this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only
+        // possible reference to the inner value.
         let inner = unsafe { &mut *self._ptr };
         &mut inner.value
     }
@@ -399,8 +396,8 @@ impl<T> Drop for Rc<T> {
                 if self.strong() == 0 {
                     ptr::read(&**self); // destroy the contained object

-                    // remove the implicit "strong weak" pointer now
-                    // that we've destroyed the contents.
+                    // remove the implicit "strong weak" pointer now that we've destroyed the
+                    // contents.
                     self.dec_weak();

                     if self.weak() == 0 {
@@ -687,8 +684,8 @@ impl<T> Drop for Weak<T> {
         unsafe {
             if !self._ptr.is_null() {
                 self.dec_weak();
-                // the weak count starts at 1, and will only go to
-                // zero if all the strong pointers have disappeared.
+                // the weak count starts at 1, and will only go to zero if all the strong pointers
+                // have disappeared.
                 if self.weak() == 0 {
                     deallocate(self._ptr as *mut u8, size_of::<RcBox<T>>(),
                                min_align_of::<RcBox<T>>())