stabilize atomics (now atomic)
This commit stabilizes the `std::sync::atomics` module, renaming it to `std::sync::atomic` to match library precedent elsewhere, and tightening up behavior around incorrect memory ordering annotations.

The vast majority of the module is now `stable`. However, the `AtomicOption` type has been deprecated, since it is essentially unused and is not truly a primitive atomic type. It will eventually be replaced by a higher-level abstraction like MVars.

Due to deprecations, this is a:

[breaking-change]
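For downstream users the rename is mechanical: every `std::sync::atomics` (or `core::atomics`) path becomes `atomic`, exactly as the per-file changes below show. A minimal before/after sketch (the counter itself is illustrative, not part of this commit):

```rust
// Before: use std::sync::atomics::{AtomicUint, SeqCst};
// After:
use std::sync::atomic::{AtomicUint, SeqCst};

fn main() {
    let hits = AtomicUint::new(0);  // a simple shared counter
    hits.fetch_add(1, SeqCst);      // returns the previous value, 0
    assert_eq!(hits.load(SeqCst), 1);
}
```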
parent 9de20198ae
commit 68bde0a073

36 changed files with 366 additions and 308 deletions
@@ -13,7 +13,7 @@
 //! Concurrency-enabled mechanisms for sharing mutable and/or immutable state
 //! between tasks.

-use core::atomics;
+use core::atomic;
 use core::clone::Clone;
 use core::kinds::{Share, Send};
 use core::mem::{min_align_of, size_of, drop};
@@ -71,8 +71,8 @@ pub struct Weak<T> {
 }

 struct ArcInner<T> {
-    strong: atomics::AtomicUint,
-    weak: atomics::AtomicUint,
+    strong: atomic::AtomicUint,
+    weak: atomic::AtomicUint,
     data: T,
 }

@@ -84,8 +84,8 @@ impl<T: Share + Send> Arc<T> {
         // Start the weak pointer count as 1 which is the weak pointer that's
         // held by all the strong pointers (kinda), see std/rc.rs for more info
         let x = box ArcInner {
-            strong: atomics::AtomicUint::new(1),
-            weak: atomics::AtomicUint::new(1),
+            strong: atomic::AtomicUint::new(1),
+            weak: atomic::AtomicUint::new(1),
             data: data,
         };
         Arc { _ptr: unsafe { mem::transmute(x) } }
@@ -109,7 +109,7 @@ impl<T: Share + Send> Arc<T> {
     #[experimental = "Weak pointers may not belong in this module."]
     pub fn downgrade(&self) -> Weak<T> {
         // See the clone() impl for why this is relaxed
-        self.inner().weak.fetch_add(1, atomics::Relaxed);
+        self.inner().weak.fetch_add(1, atomic::Relaxed);
         Weak { _ptr: self._ptr }
     }
 }
@@ -134,7 +134,7 @@ impl<T: Share + Send> Clone for Arc<T> {
         // another must already provide any required synchronization.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        self.inner().strong.fetch_add(1, atomics::Relaxed);
+        self.inner().strong.fetch_add(1, atomic::Relaxed);
         Arc { _ptr: self._ptr }
     }
 }
@@ -159,8 +159,8 @@ impl<T: Send + Share + Clone> Arc<T> {
         // Note that we hold a strong reference, which also counts as
         // a weak reference, so we only clone if there is an
         // additional reference of either kind.
-        if self.inner().strong.load(atomics::SeqCst) != 1 ||
-           self.inner().weak.load(atomics::SeqCst) != 1 {
+        if self.inner().strong.load(atomic::SeqCst) != 1 ||
+           self.inner().weak.load(atomic::SeqCst) != 1 {
             *self = Arc::new(self.deref().clone())
         }
         // This unsafety is ok because we're guaranteed that the pointer
@@ -185,7 +185,7 @@ impl<T: Share + Send> Drop for Arc<T> {
         // Because `fetch_sub` is already atomic, we do not need to synchronize
         // with other threads unless we are going to delete the object. This
         // same logic applies to the below `fetch_sub` to the `weak` count.
-        if self.inner().strong.fetch_sub(1, atomics::Release) != 1 { return }
+        if self.inner().strong.fetch_sub(1, atomic::Release) != 1 { return }

         // This fence is needed to prevent reordering of use of the data and
         // deletion of the data. Because it is marked `Release`, the
@@ -204,14 +204,14 @@ impl<T: Share + Send> Drop for Arc<T> {
         // and an "acquire" operation before deleting the object.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
-        atomics::fence(atomics::Acquire);
+        atomic::fence(atomic::Acquire);

         // Destroy the data at this time, even though we may not free the box
         // allocation itself (there may still be weak pointers lying around).
         unsafe { drop(ptr::read(&self.inner().data)); }

-        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
-            atomics::fence(atomics::Acquire);
+        if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
+            atomic::fence(atomic::Acquire);
             unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
                                 min_align_of::<ArcInner<T>>()) }
         }
@@ -230,9 +230,9 @@ impl<T: Share + Send> Weak<T> {
         // fetch_add because once the count hits 0 is must never be above 0.
         let inner = self.inner();
         loop {
-            let n = inner.strong.load(atomics::SeqCst);
+            let n = inner.strong.load(atomic::SeqCst);
             if n == 0 { return None }
-            let old = inner.strong.compare_and_swap(n, n + 1, atomics::SeqCst);
+            let old = inner.strong.compare_and_swap(n, n + 1, atomic::SeqCst);
             if old == n { return Some(Arc { _ptr: self._ptr }) }
         }
     }
@@ -249,7 +249,7 @@ impl<T: Share + Send> Clone for Weak<T> {
     #[inline]
     fn clone(&self) -> Weak<T> {
         // See comments in Arc::clone() for why this is relaxed
-        self.inner().weak.fetch_add(1, atomics::Relaxed);
+        self.inner().weak.fetch_add(1, atomic::Relaxed);
         Weak { _ptr: self._ptr }
     }
 }
@@ -264,8 +264,8 @@ impl<T: Share + Send> Drop for Weak<T> {
         // If we find out that we were the last weak pointer, then its time to
         // deallocate the data entirely. See the discussion in Arc::drop() about
         // the memory orderings
-        if self.inner().weak.fetch_sub(1, atomics::Release) == 1 {
-            atomics::fence(atomics::Acquire);
+        if self.inner().weak.fetch_sub(1, atomic::Release) == 1 {
+            atomic::fence(atomic::Acquire);
             unsafe { deallocate(self._ptr as *mut u8, size_of::<ArcInner<T>>(),
                                 min_align_of::<ArcInner<T>>()) }
         }
@@ -281,13 +281,13 @@ mod tests {
     use std::mem::drop;
     use std::ops::Drop;
     use std::option::{Option, Some, None};
-    use std::sync::atomics;
+    use std::sync::atomic;
     use std::task;
     use std::vec::Vec;
     use super::{Arc, Weak};
     use std::sync::Mutex;

-    struct Canary(*mut atomics::AtomicUint);
+    struct Canary(*mut atomic::AtomicUint);

     impl Drop for Canary
     {
@@ -295,7 +295,7 @@ mod tests {
         unsafe {
             match *self {
                 Canary(c) => {
-                    (*c).fetch_add(1, atomics::SeqCst);
+                    (*c).fetch_add(1, atomic::SeqCst);
                 }
             }
         }
@@ -413,20 +413,20 @@ mod tests {

     #[test]
     fn drop_arc() {
-        let mut canary = atomics::AtomicUint::new(0);
-        let x = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
+        let mut canary = atomic::AtomicUint::new(0);
+        let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
         drop(x);
-        assert!(canary.load(atomics::Acquire) == 1);
+        assert!(canary.load(atomic::Acquire) == 1);
     }

     #[test]
     fn drop_arc_weak() {
-        let mut canary = atomics::AtomicUint::new(0);
-        let arc = Arc::new(Canary(&mut canary as *mut atomics::AtomicUint));
+        let mut canary = atomic::AtomicUint::new(0);
+        let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUint));
         let arc_weak = arc.downgrade();
-        assert!(canary.load(atomics::Acquire) == 0);
+        assert!(canary.load(atomic::Acquire) == 0);
         drop(arc);
-        assert!(canary.load(atomics::Acquire) == 1);
+        assert!(canary.load(atomic::Acquire) == 1);
         drop(arc_weak);
     }
 }
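The ordering comments above are the classic Boost-style reference-count recipe: relaxed increments, release decrements, and an acquire fence taken only by the thread that sees the count hit zero. A freestanding sketch of just that recipe under the renamed module (the `Counted` type is illustrative, not part of this diff):

```rust
use std::sync::atomic::{AtomicUint, Relaxed, Release, Acquire, fence};

struct Counted { refs: AtomicUint }

impl Counted {
    fn new() -> Counted { Counted { refs: AtomicUint::new(1) } }

    // Taking another reference needs no ordering: the reference being
    // cloned already keeps the data alive (see Arc::clone above).
    fn retain(&self) { self.refs.fetch_add(1, Relaxed); }

    // Dropping a reference publishes this thread's writes (Release);
    // whoever drops the count to zero then synchronizes with all prior
    // owners (Acquire fence) before any destruction may run.
    fn release(&self) -> bool {
        if self.refs.fetch_sub(1, Release) != 1 { return false }
        fence(Acquire);
        true // sole owner: safe to destroy the payload now
    }
}
```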
@@ -10,29 +10,35 @@

 //! Core atomic primitives

+#![stable]
+
 use intrinsics;
 use std::kinds::marker;
 use cell::UnsafeCell;

 /// An atomic boolean type.
+#[stable]
 pub struct AtomicBool {
     v: UnsafeCell<uint>,
     nocopy: marker::NoCopy
 }

 /// A signed atomic integer type, supporting basic atomic arithmetic operations
+#[stable]
 pub struct AtomicInt {
     v: UnsafeCell<int>,
     nocopy: marker::NoCopy
 }

 /// An unsigned atomic integer type, supporting basic atomic arithmetic operations
+#[stable]
 pub struct AtomicUint {
     v: UnsafeCell<uint>,
     nocopy: marker::NoCopy
 }

 /// An unsafe atomic pointer. Only supports basic atomic operations
+#[stable]
 pub struct AtomicPtr<T> {
     p: UnsafeCell<uint>,
     nocopy: marker::NoCopy
@@ -49,6 +55,7 @@ pub struct AtomicPtr<T> {
 /// Rust's memory orderings are the same as in C++[1].
 ///
 /// 1: http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync
+#[stable]
 pub enum Ordering {
     /// No ordering constraints, only atomic operations
     Relaxed,
@@ -69,18 +76,22 @@ pub enum Ordering {
 }

 /// An `AtomicBool` initialized to `false`
+#[unstable = "may be renamed, pending conventions for static initalizers"]
 pub static INIT_ATOMIC_BOOL: AtomicBool =
         AtomicBool { v: UnsafeCell { value: 0 }, nocopy: marker::NoCopy };
 /// An `AtomicInt` initialized to `0`
+#[unstable = "may be renamed, pending conventions for static initalizers"]
 pub static INIT_ATOMIC_INT: AtomicInt =
         AtomicInt { v: UnsafeCell { value: 0 }, nocopy: marker::NoCopy };
 /// An `AtomicUint` initialized to `0`
+#[unstable = "may be renamed, pending conventions for static initalizers"]
 pub static INIT_ATOMIC_UINT: AtomicUint =
         AtomicUint { v: UnsafeCell { value: 0, }, nocopy: marker::NoCopy };

 // NB: Needs to be -1 (0b11111111...) to make fetch_nand work correctly
 static UINT_TRUE: uint = -1;

+#[stable]
 impl AtomicBool {
     /// Create a new `AtomicBool`
     pub fn new(v: bool) -> AtomicBool {
@@ -89,12 +100,20 @@ impl AtomicBool {
     }

     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> bool {
         unsafe { atomic_load(self.v.get() as *const uint, order) > 0 }
     }

     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, val: bool, order: Ordering) {
         let val = if val { UINT_TRUE } else { 0 };
@@ -120,7 +139,7 @@ impl AtomicBool {
     ///
     /// ```rust
     /// use std::sync::Arc;
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     /// use std::task::deschedule;
     ///
     /// fn main() {
@@ -170,7 +189,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_and(false, SeqCst));
@@ -200,7 +219,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_nand(false, SeqCst));
@@ -231,7 +250,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_or(false, SeqCst));
@@ -261,7 +280,7 @@ impl AtomicBool {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicBool, SeqCst};
+    /// use std::sync::atomic::{AtomicBool, SeqCst};
     ///
     /// let foo = AtomicBool::new(true);
     /// assert_eq!(true, foo.fetch_xor(false, SeqCst));
@@ -283,6 +302,7 @@ impl AtomicBool {
     }
 }

+#[stable]
 impl AtomicInt {
     /// Create a new `AtomicInt`
     pub fn new(v: int) -> AtomicInt {
@@ -290,12 +310,20 @@ impl AtomicInt {
     }

     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> int {
         unsafe { atomic_load(self.v.get() as *const int, order) }
     }

     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, val: int, order: Ordering) {
         unsafe { atomic_store(self.v.get(), val, order); }
@@ -322,7 +350,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicInt, SeqCst};
+    /// use std::sync::atomic::{AtomicInt, SeqCst};
     ///
     /// let foo = AtomicInt::new(0);
     /// assert_eq!(0, foo.fetch_add(10, SeqCst));
@@ -338,7 +366,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicInt, SeqCst};
+    /// use std::sync::atomic::{AtomicInt, SeqCst};
     ///
     /// let foo = AtomicInt::new(0);
     /// assert_eq!(0, foo.fetch_sub(10, SeqCst));
@@ -354,7 +382,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
@@ -369,7 +397,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
@@ -384,7 +412,7 @@ impl AtomicInt {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
@@ -395,6 +423,7 @@ impl AtomicInt {
     }
 }

+#[stable]
 impl AtomicUint {
     /// Create a new `AtomicUint`
     pub fn new(v: uint) -> AtomicUint {
@@ -402,12 +431,20 @@ impl AtomicUint {
     }

     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> uint {
         unsafe { atomic_load(self.v.get() as *const uint, order) }
     }

     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, val: uint, order: Ordering) {
         unsafe { atomic_store(self.v.get(), val, order); }
@@ -434,7 +471,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0);
     /// assert_eq!(0, foo.fetch_add(10, SeqCst));
@@ -450,7 +487,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(10);
     /// assert_eq!(10, foo.fetch_sub(10, SeqCst));
@@ -466,7 +503,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_and(0b110011, SeqCst));
@@ -481,7 +518,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_or(0b110011, SeqCst));
@@ -496,7 +533,7 @@ impl AtomicUint {
     /// # Examples
     ///
     /// ```
-    /// use std::sync::atomics::{AtomicUint, SeqCst};
+    /// use std::sync::atomic::{AtomicUint, SeqCst};
     ///
     /// let foo = AtomicUint::new(0b101101);
     /// assert_eq!(0b101101, foo.fetch_xor(0b110011, SeqCst));
@@ -507,6 +544,7 @@ impl AtomicUint {
     }
 }

+#[stable]
 impl<T> AtomicPtr<T> {
     /// Create a new `AtomicPtr`
     pub fn new(p: *mut T) -> AtomicPtr<T> {
@@ -514,6 +552,10 @@ impl<T> AtomicPtr<T> {
     }

     /// Load the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Release` or `AcqRel`.
     #[inline]
     pub fn load(&self, order: Ordering) -> *mut T {
         unsafe {
@@ -522,6 +564,10 @@ impl<T> AtomicPtr<T> {
     }

     /// Store the value
+    ///
+    /// # Failure
+    ///
+    /// Fails if `order` is `Acquire` or `AcqRel`.
     #[inline]
     pub fn store(&self, ptr: *mut T, order: Ordering) {
         unsafe { atomic_store(self.p.get(), ptr as uint, order); }
@@ -552,7 +598,9 @@ unsafe fn atomic_store<T>(dst: *mut T, val: T, order:Ordering) {
     match order {
         Release => intrinsics::atomic_store_rel(dst, val),
         Relaxed => intrinsics::atomic_store_relaxed(dst, val),
-        _ => intrinsics::atomic_store(dst, val)
+        SeqCst => intrinsics::atomic_store(dst, val),
+        Acquire => fail!("there is no such thing as an acquire store"),
+        AcqRel => fail!("there is no such thing as an acquire/release store"),
     }
 }

@@ -561,7 +609,9 @@ unsafe fn atomic_load<T>(dst: *const T, order:Ordering) -> T {
     match order {
         Acquire => intrinsics::atomic_load_acq(dst),
         Relaxed => intrinsics::atomic_load_relaxed(dst),
-        _ => intrinsics::atomic_load(dst)
+        SeqCst => intrinsics::atomic_load(dst),
+        Release => fail!("there is no such thing as a release load"),
+        AcqRel => fail!("there is no such thing as an acquire/release load"),
     }
 }

@@ -572,7 +622,7 @@ unsafe fn atomic_swap<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xchg_rel(dst, val),
         AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
-        _ => intrinsics::atomic_xchg(dst, val)
+        SeqCst => intrinsics::atomic_xchg(dst, val)
     }
 }

@@ -584,7 +634,7 @@ unsafe fn atomic_add<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xadd_rel(dst, val),
         AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
-        _ => intrinsics::atomic_xadd(dst, val)
+        SeqCst => intrinsics::atomic_xadd(dst, val)
     }
 }

@@ -596,7 +646,7 @@ unsafe fn atomic_sub<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xsub_rel(dst, val),
         AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
-        _ => intrinsics::atomic_xsub(dst, val)
+        SeqCst => intrinsics::atomic_xsub(dst, val)
     }
 }

@@ -607,7 +657,7 @@ unsafe fn atomic_compare_and_swap<T>(dst: *mut T, old:T, new:T, order: Ordering)
         Release => intrinsics::atomic_cxchg_rel(dst, old, new),
         AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new),
         Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new),
-        _ => intrinsics::atomic_cxchg(dst, old, new),
+        SeqCst => intrinsics::atomic_cxchg(dst, old, new),
     }
 }

@@ -618,7 +668,7 @@ unsafe fn atomic_and<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_and_rel(dst, val),
         AcqRel => intrinsics::atomic_and_acqrel(dst, val),
         Relaxed => intrinsics::atomic_and_relaxed(dst, val),
-        _ => intrinsics::atomic_and(dst, val)
+        SeqCst => intrinsics::atomic_and(dst, val)
     }
 }

@@ -629,7 +679,7 @@ unsafe fn atomic_nand<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_nand_rel(dst, val),
         AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
         Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
-        _ => intrinsics::atomic_nand(dst, val)
+        SeqCst => intrinsics::atomic_nand(dst, val)
     }
 }

@@ -641,7 +691,7 @@ unsafe fn atomic_or<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_or_rel(dst, val),
         AcqRel => intrinsics::atomic_or_acqrel(dst, val),
         Relaxed => intrinsics::atomic_or_relaxed(dst, val),
-        _ => intrinsics::atomic_or(dst, val)
+        SeqCst => intrinsics::atomic_or(dst, val)
     }
 }

@@ -653,7 +703,7 @@ unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
         Release => intrinsics::atomic_xor_rel(dst, val),
         AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
         Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
-        _ => intrinsics::atomic_xor(dst, val)
+        SeqCst => intrinsics::atomic_xor(dst, val)
     }
 }

@@ -679,6 +729,7 @@ unsafe fn atomic_xor<T>(dst: *mut T, val: T, order: Ordering) -> T {
 ///
 /// Fails if `order` is `Relaxed`
 #[inline]
+#[stable]
 pub fn fence(order: Ordering) {
     unsafe {
         match order {
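With `atomic_load` and `atomic_store` now matching every `Ordering` variant explicitly, a nonsensical annotation fails at runtime instead of being silently promoted to the `SeqCst` intrinsic. A small sketch of what callers can and cannot write after this change (illustrative; the commented-out lines are the ones that would `fail!`):

```rust
use std::sync::atomic::{AtomicUint, Acquire, Release};

fn main() {
    let flag = AtomicUint::new(0);
    let _seen = flag.load(Acquire);  // ok: acquire loads exist
    flag.store(1, Release);          // ok: release stores exist
    // flag.store(2, Acquire);       // now fails: "there is no such thing
    //                               // as an acquire store"
    // let _ = flag.load(Release);   // now fails: release loads don't exist
}
```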
@@ -113,7 +113,7 @@ pub mod ty {
 /* Core types and methods on primitives */

 pub mod any;
-pub mod atomics;
+pub mod atomic;
 pub mod bool;
 pub mod cell;
 pub mod char;
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use core::atomics::*;
+use core::atomic::*;

 #[test]
 fn bool_() {
@@ -14,7 +14,7 @@ extern crate test;
 extern crate libc;

 mod any;
-mod atomics;
+mod atomic;
 mod cell;
 mod char;
 mod cmp;
@@ -16,7 +16,7 @@
 //! loop if no other one is provided (and M:N scheduling is desired).

 use alloc::arc::Arc;
-use std::sync::atomics;
+use std::sync::atomic;
 use std::mem;
 use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback};
 use std::rt::rtio::{PausableIdleCallback, Callback};
@@ -33,7 +33,7 @@ struct BasicLoop {
     next_remote: uint,
     messages: Arc<Exclusive<Vec<Message>>>,
     idle: Option<Box<Callback + Send>>,
-    idle_active: Option<Arc<atomics::AtomicBool>>,
+    idle_active: Option<Arc<atomic::AtomicBool>>,
 }

 enum Message { RunRemote(uint), RemoveRemote(uint) }
@@ -89,7 +89,7 @@ impl BasicLoop {
     fn idle(&mut self) {
         match self.idle {
             Some(ref mut idle) => {
-                if self.idle_active.get_ref().load(atomics::SeqCst) {
+                if self.idle_active.get_ref().load(atomic::SeqCst) {
                     idle.call();
                 }
             }
@@ -98,7 +98,7 @@ impl BasicLoop {
     }

     fn has_idle(&self) -> bool {
-        self.idle.is_some() && self.idle_active.get_ref().load(atomics::SeqCst)
+        self.idle.is_some() && self.idle_active.get_ref().load(atomic::SeqCst)
     }
 }

@@ -136,7 +136,7 @@ impl EventLoop for BasicLoop {
                               -> Box<PausableIdleCallback + Send> {
         rtassert!(self.idle.is_none());
         self.idle = Some(cb);
-        let a = Arc::new(atomics::AtomicBool::new(true));
+        let a = Arc::new(atomic::AtomicBool::new(true));
         self.idle_active = Some(a.clone());
         box BasicPausable { active: a } as Box<PausableIdleCallback + Send>
     }
@@ -183,21 +183,21 @@ impl Drop for BasicRemote {
 }

 struct BasicPausable {
-    active: Arc<atomics::AtomicBool>,
+    active: Arc<atomic::AtomicBool>,
 }

 impl PausableIdleCallback for BasicPausable {
     fn pause(&mut self) {
-        self.active.store(false, atomics::SeqCst);
+        self.active.store(false, atomic::SeqCst);
     }
     fn resume(&mut self) {
-        self.active.store(true, atomics::SeqCst);
+        self.active.store(true, atomic::SeqCst);
     }
 }

 impl Drop for BasicPausable {
     fn drop(&mut self) {
-        self.active.store(false, atomics::SeqCst);
+        self.active.store(false, atomic::SeqCst);
     }
 }

@@ -232,7 +232,7 @@ use std::rt::rtio;
 use std::rt::thread::Thread;
 use std::rt::task::TaskOpts;
 use std::rt;
-use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
+use std::sync::atomic::{SeqCst, AtomicUint, INIT_ATOMIC_UINT};
 use std::sync::deque;
 use std::task::{TaskBuilder, Spawner};
@@ -9,7 +9,7 @@
 // except according to those terms.

 use std::ptr;
-use std::sync::atomics;
+use std::sync::atomic;
 use std::os::{errno, page_size, MemoryMap, MapReadable, MapWritable,
               MapNonStandardFlags, getenv};
 use libc;
@@ -158,8 +158,8 @@ impl StackPool {
 }

 fn max_cached_stacks() -> uint {
-    static mut AMT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-    match unsafe { AMT.load(atomics::SeqCst) } {
+    static mut AMT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+    match unsafe { AMT.load(atomic::SeqCst) } {
         0 => {}
         n => return n - 1,
     }
@@ -169,7 +169,7 @@ fn max_cached_stacks() -> uint {
     let amt = amt.unwrap_or(10);
     // 0 is our sentinel value, so ensure that we'll never see 0 after
     // initialization has run
-    unsafe { AMT.store(amt + 1, atomics::SeqCst); }
+    unsafe { AMT.store(amt + 1, atomic::SeqCst); }
     return amt;
 }

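`max_cached_stacks` above doubles as an example of the static-initializer idiom this module relies on: a `static mut` atomic starts from `INIT_ATOMIC_UINT`, and `0` is reserved as the "not yet computed" sentinel, so the real value is stored shifted by one. A condensed, hypothetical sketch of the same idiom:

```rust
use std::sync::atomic;

// 0 means "uninitialized", so the cached value is stored as value + 1.
static mut CACHED: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

fn cached_value() -> uint {
    match unsafe { CACHED.load(atomic::SeqCst) } {
        0 => {}             // first call: fall through and compute
        n => return n - 1,  // already computed: undo the +1 shift
    }
    let value = 10u;        // stand-in for the real computation
    unsafe { CACHED.store(value + 1, atomic::SeqCst); }
    value
}
```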
@@ -92,7 +92,7 @@ use std::os;
 use std::ptr;
 use std::rt::rtio;
 use std::rt::rtio::{IoResult, IoError};
-use std::sync::atomics;
+use std::sync::atomic;
 use std::rt::mutex;

 use super::c;
@@ -128,8 +128,8 @@ impl Drop for Event {
 struct Inner {
     handle: libc::HANDLE,
     lock: mutex::NativeMutex,
-    read_closed: atomics::AtomicBool,
-    write_closed: atomics::AtomicBool,
+    read_closed: atomic::AtomicBool,
+    write_closed: atomic::AtomicBool,
 }

 impl Inner {
@@ -137,8 +137,8 @@ impl Inner {
         Inner {
             handle: handle,
             lock: unsafe { mutex::NativeMutex::new() },
-            read_closed: atomics::AtomicBool::new(false),
-            write_closed: atomics::AtomicBool::new(false),
+            read_closed: atomic::AtomicBool::new(false),
+            write_closed: atomic::AtomicBool::new(false),
         }
     }
 }
@@ -326,11 +326,11 @@ impl UnixStream {
     fn handle(&self) -> libc::HANDLE { self.inner.handle }

     fn read_closed(&self) -> bool {
-        self.inner.read_closed.load(atomics::SeqCst)
+        self.inner.read_closed.load(atomic::SeqCst)
     }

     fn write_closed(&self) -> bool {
-        self.inner.write_closed.load(atomics::SeqCst)
+        self.inner.write_closed.load(atomic::SeqCst)
     }

     fn cancel_io(&self) -> IoResult<()> {
@@ -525,14 +525,14 @@ impl rtio::RtioPipe for UnixStream {
         // and 2 with a lock with respect to close_read(), we're guaranteed that
         // no thread will erroneously sit in a read forever.
         let _guard = unsafe { self.inner.lock.lock() };
-        self.inner.read_closed.store(true, atomics::SeqCst);
+        self.inner.read_closed.store(true, atomic::SeqCst);
         self.cancel_io()
     }

     fn close_write(&mut self) -> IoResult<()> {
         // see comments in close_read() for why this lock is necessary
         let _guard = unsafe { self.inner.lock.lock() };
-        self.inner.write_closed.store(true, atomics::SeqCst);
+        self.inner.write_closed.store(true, atomic::SeqCst);
         self.cancel_io()
     }

@@ -52,7 +52,7 @@ use std::os;
 use std::ptr;
 use std::rt::rtio;
 use std::rt::rtio::IoResult;
-use std::sync::atomics;
+use std::sync::atomic;
 use std::comm;

 use io::c;
@@ -207,8 +207,8 @@ impl Timer {
         // instead of ()
         unsafe { HELPER.boot(|| {}, helper); }

-        static mut ID: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-        let id = unsafe { ID.fetch_add(1, atomics::Relaxed) };
+        static mut ID: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+        let id = unsafe { ID.fetch_add(1, atomic::Relaxed) };
         Ok(Timer {
             id: id,
             inner: Some(box Inner {
@@ -17,21 +17,21 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use collections::MutableSeq;
 use collections::vec::Vec;
-use core::atomics;
+use core::atomic;
 use core::mem;

 use exclusive::Exclusive;

 type Queue = Exclusive<Vec<proc():Send>>;

-static mut QUEUE: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-static mut RUNNING: atomics::AtomicBool = atomics::INIT_ATOMIC_BOOL;
+static mut QUEUE: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+static mut RUNNING: atomic::AtomicBool = atomic::INIT_ATOMIC_BOOL;

 pub fn init() {
     let state: Box<Queue> = box Exclusive::new(Vec::new());
     unsafe {
-        rtassert!(!RUNNING.load(atomics::SeqCst));
-        assert!(QUEUE.swap(mem::transmute(state), atomics::SeqCst) == 0);
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        assert!(QUEUE.swap(mem::transmute(state), atomic::SeqCst) == 0);
     }
 }

@@ -41,8 +41,8 @@ pub fn push(f: proc():Send) {
     // all with respect to `run`, meaning that this could theoretically be a
     // use-after-free. There's not much we can do to protect against that,
     // however. Let's just assume a well-behaved runtime and go from there!
-    rtassert!(!RUNNING.load(atomics::SeqCst));
-    let queue = QUEUE.load(atomics::SeqCst);
+    rtassert!(!RUNNING.load(atomic::SeqCst));
+    let queue = QUEUE.load(atomic::SeqCst);
     rtassert!(queue != 0);
     (*(queue as *const Queue)).lock().push(f);
 }
@@ -50,8 +50,8 @@ pub fn push(f: proc():Send) {

 pub fn run() {
     let cur = unsafe {
-        rtassert!(!RUNNING.load(atomics::SeqCst));
-        let queue = QUEUE.swap(0, atomics::SeqCst);
+        rtassert!(!RUNNING.load(atomic::SeqCst));
+        let queue = QUEUE.swap(0, atomic::SeqCst);
         rtassert!(queue != 0);

         let queue: Box<Queue> = mem::transmute(queue);
@@ -18,12 +18,12 @@
 //! each respective runtime to make sure that they call increment() and
 //! decrement() manually.

-use core::atomics;
+use core::atomic;
 use core::ops::Drop;

 use mutex::{StaticNativeMutex, NATIVE_MUTEX_INIT};

-static mut TASK_COUNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+static mut TASK_COUNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
 static mut TASK_LOCK: StaticNativeMutex = NATIVE_MUTEX_INIT;

 pub struct Token { _private: () }
@@ -35,13 +35,13 @@ impl Drop for Token {
 /// Increment the number of live tasks, returning a token which will decrement
 /// the count when dropped.
 pub fn increment() -> Token {
-    let _ = unsafe { TASK_COUNT.fetch_add(1, atomics::SeqCst) };
+    let _ = unsafe { TASK_COUNT.fetch_add(1, atomic::SeqCst) };
     Token { _private: () }
 }

 pub fn decrement() {
     unsafe {
-        if TASK_COUNT.fetch_sub(1, atomics::SeqCst) == 1 {
+        if TASK_COUNT.fetch_sub(1, atomic::SeqCst) == 1 {
             let guard = TASK_LOCK.lock();
             guard.signal();
         }
@@ -53,7 +53,7 @@ pub fn decrement() {
 pub fn wait_for_other_tasks() {
     unsafe {
         let guard = TASK_LOCK.lock();
-        while TASK_COUNT.load(atomics::SeqCst) > 0 {
+        while TASK_COUNT.load(atomic::SeqCst) > 0 {
             guard.wait();
         }
     }
@@ -519,7 +519,7 @@ mod imp {
 #[cfg(windows)]
 mod imp {
     use alloc::libc_heap::malloc_raw;
-    use core::atomics;
+    use core::atomic;
     use core::ptr;
     use libc::{HANDLE, BOOL, LPSECURITY_ATTRIBUTES, c_void, DWORD, LPCSTR};
     use libc;
@@ -533,20 +533,20 @@ mod imp {

     pub struct Mutex {
         // pointers for the lock/cond handles, atomically updated
-        lock: atomics::AtomicUint,
-        cond: atomics::AtomicUint,
+        lock: atomic::AtomicUint,
+        cond: atomic::AtomicUint,
     }

     pub static MUTEX_INIT: Mutex = Mutex {
-        lock: atomics::INIT_ATOMIC_UINT,
-        cond: atomics::INIT_ATOMIC_UINT,
+        lock: atomic::INIT_ATOMIC_UINT,
+        cond: atomic::INIT_ATOMIC_UINT,
     };

     impl Mutex {
         pub unsafe fn new() -> Mutex {
             Mutex {
-                lock: atomics::AtomicUint::new(init_lock()),
-                cond: atomics::AtomicUint::new(init_cond()),
+                lock: atomic::AtomicUint::new(init_lock()),
+                cond: atomic::AtomicUint::new(init_cond()),
             }
         }
         pub unsafe fn lock(&self) {
@@ -573,38 +573,38 @@ mod imp {
         /// that no other thread is currently holding the lock or waiting on the
         /// condition variable contained inside.
         pub unsafe fn destroy(&self) {
-            let lock = self.lock.swap(0, atomics::SeqCst);
-            let cond = self.cond.swap(0, atomics::SeqCst);
+            let lock = self.lock.swap(0, atomic::SeqCst);
+            let cond = self.cond.swap(0, atomic::SeqCst);
             if lock != 0 { free_lock(lock) }
             if cond != 0 { free_cond(cond) }
         }

         unsafe fn getlock(&self) -> *mut c_void {
-            match self.lock.load(atomics::SeqCst) {
+            match self.lock.load(atomic::SeqCst) {
                 0 => {}
                 n => return n as *mut c_void
             }
             let lock = init_lock();
-            match self.lock.compare_and_swap(0, lock, atomics::SeqCst) {
+            match self.lock.compare_and_swap(0, lock, atomic::SeqCst) {
                 0 => return lock as *mut c_void,
                 _ => {}
             }
             free_lock(lock);
-            return self.lock.load(atomics::SeqCst) as *mut c_void;
+            return self.lock.load(atomic::SeqCst) as *mut c_void;
         }

         unsafe fn getcond(&self) -> *mut c_void {
-            match self.cond.load(atomics::SeqCst) {
+            match self.cond.load(atomic::SeqCst) {
                 0 => {}
                 n => return n as *mut c_void
             }
             let cond = init_cond();
-            match self.cond.compare_and_swap(0, cond, atomics::SeqCst) {
+            match self.cond.compare_and_swap(0, cond, atomic::SeqCst) {
                 0 => return cond as *mut c_void,
                 _ => {}
             }
             free_cond(cond);
-            return self.cond.load(atomics::SeqCst) as *mut c_void;
+            return self.cond.load(atomic::SeqCst) as *mut c_void;
         }
     }

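`getlock` and `getcond` above are a race-tolerant lazy-initialization idiom: build the OS object optimistically, try to publish it with `compare_and_swap`, and free it again if another thread won the race. A stripped-down sketch of that idiom under the new module name (`make_handle`/`free_handle` are hypothetical stand-ins for `init_lock`/`free_lock`):

```rust
use std::sync::atomic;

static mut HANDLE: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

unsafe fn make_handle() -> uint { 0xbeef }  // stand-in: allocate an OS object
unsafe fn free_handle(_h: uint) {}          // stand-in: release it

unsafe fn get_handle() -> uint {
    match HANDLE.load(atomic::SeqCst) {
        0 => {}         // not initialized yet
        n => return n,  // fast path: already published
    }
    let fresh = make_handle();
    // Publish only if the slot is still empty; seeing 0 back means we won.
    match HANDLE.compare_and_swap(0, fresh, atomic::SeqCst) {
        0 => return fresh,
        _ => {}
    }
    free_handle(fresh);          // lost the race: discard ours
    HANDLE.load(atomic::SeqCst)  // and use the winner's handle
}
```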
@@ -18,7 +18,7 @@ use core::prelude::*;
 use alloc::arc::Arc;
 use alloc::boxed::{BoxAny, Box};
 use core::any::Any;
-use core::atomics::{AtomicUint, SeqCst};
+use core::atomic::{AtomicUint, SeqCst};
 use core::iter::Take;
 use core::kinds::marker;
 use core::mem;
@ -63,7 +63,7 @@ use alloc::boxed::Box;
|
||||||
use collections::string::String;
|
use collections::string::String;
|
||||||
use collections::vec::Vec;
|
use collections::vec::Vec;
|
||||||
use core::any::Any;
|
use core::any::Any;
|
||||||
use core::atomics;
|
use core::atomic;
|
||||||
use core::cmp;
|
use core::cmp;
|
||||||
use core::fmt;
|
use core::fmt;
|
||||||
use core::intrinsics;
|
use core::intrinsics;
|
||||||
|
@ -91,16 +91,16 @@ pub type Callback = fn(msg: &Any + Send, file: &'static str, line: uint);
|
||||||
//
|
//
|
||||||
// For more information, see below.
|
// For more information, see below.
|
||||||
static MAX_CALLBACKS: uint = 16;
|
 static MAX_CALLBACKS: uint = 16;
-static mut CALLBACKS: [atomics::AtomicUint, ..MAX_CALLBACKS] =
-        [atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT,
-         atomics::INIT_ATOMIC_UINT, atomics::INIT_ATOMIC_UINT];
-static mut CALLBACK_CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+static mut CALLBACKS: [atomic::AtomicUint, ..MAX_CALLBACKS] =
+        [atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
+         atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
+static mut CALLBACK_CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

 impl Unwinder {
     pub fn new() -> Unwinder {
@@ -466,11 +466,11 @@ fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) ->
     // callback. Additionally, CALLBACK_CNT may briefly be higher than
     // MAX_CALLBACKS, so we're sure to clamp it as necessary.
     let callbacks = unsafe {
-        let amt = CALLBACK_CNT.load(atomics::SeqCst);
+        let amt = CALLBACK_CNT.load(atomic::SeqCst);
         CALLBACKS.slice_to(cmp::min(amt, MAX_CALLBACKS))
     };
     for cb in callbacks.iter() {
-        match cb.load(atomics::SeqCst) {
+        match cb.load(atomic::SeqCst) {
             0 => {}
             n => {
                 let f: Callback = unsafe { mem::transmute(n) };
@@ -518,18 +518,18 @@ fn begin_unwind_inner(msg: Box<Any + Send>, file_line: &(&'static str, uint)) ->
 /// currently possible to unregister a callback once it has been registered.
 #[experimental]
 pub unsafe fn register(f: Callback) -> bool {
-    match CALLBACK_CNT.fetch_add(1, atomics::SeqCst) {
+    match CALLBACK_CNT.fetch_add(1, atomic::SeqCst) {
         // The invocation code has knowledge of this window where the count has
         // been incremented, but the callback has not been stored. We're
         // guaranteed that the slot we're storing into is 0.
         n if n < MAX_CALLBACKS => {
-            let prev = CALLBACKS[n].swap(mem::transmute(f), atomics::SeqCst);
+            let prev = CALLBACKS[n].swap(mem::transmute(f), atomic::SeqCst);
            rtassert!(prev == 0);
            true
         }
         // If we accidentally bumped the count too high, pull it back.
         _ => {
-            CALLBACK_CNT.store(MAX_CALLBACKS, atomics::SeqCst);
+            CALLBACK_CNT.store(MAX_CALLBACKS, atomic::SeqCst);
             false
         }
     }
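A standalone sketch of the reserve-then-publish pattern that `register` uses above: `fetch_add` reserves a slot index before the value is stored, and readers clamp the count, so the brief over-increment is harmless. `SLOTS`, `CNT`, and `publish` are hypothetical names, with a plain `uint` standing in for the transmuted callback pointer; the atomic API is the renamed `atomic` module as of this commit.

```rust
use std::sync::atomic;

static MAX: uint = 4;
static mut SLOTS: [atomic::AtomicUint, ..MAX] =
    [atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT,
     atomic::INIT_ATOMIC_UINT, atomic::INIT_ATOMIC_UINT];
static mut CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

// Reserve an index first, publish the value second. Readers that load
// CNT concurrently clamp it to MAX, mirroring the invocation path.
unsafe fn publish(value: uint) -> bool {
    match CNT.fetch_add(1, atomic::SeqCst) {
        n if n < MAX => {
            // The reserved slot is guaranteed to still hold 0.
            let prev = SLOTS[n].swap(value, atomic::SeqCst);
            assert_eq!(prev, 0);
            true
        }
        // Overshot: clamp the count back down and report failure.
        _ => { CNT.store(MAX, atomic::SeqCst); false }
    }
}

fn main() {
    unsafe { assert!(publish(1)); }
}
```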
@@ -19,7 +19,7 @@ use option::{Option, None, Some};
 use os;
 use path::{Path, GenericPath};
 use result::{Ok, Err};
-use sync::atomics;
+use sync::atomic;

 #[cfg(stage0)]
 use iter::Iterator; // NOTE(stage0): Remove after snapshot.
@@ -42,13 +42,13 @@ impl TempDir {
             return TempDir::new_in(&os::make_absolute(tmpdir), suffix);
         }

-        static mut CNT: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
+        static mut CNT: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

         for _ in range(0u, 1000) {
             let filename =
                 format!("rs-{}-{}-{}",
                         unsafe { libc::getpid() },
-                        unsafe { CNT.fetch_add(1, atomics::SeqCst) },
+                        unsafe { CNT.fetch_add(1, atomic::SeqCst) },
                         suffix);
             let p = tmpdir.join(filename);
             match fs::mkdir(&p, io::UserRWX) {
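The `CNT` static above is a process-wide monotonic counter: `fetch_add` returns the previous value, so every caller, in any task, sees a distinct id for its candidate directory name. A minimal sketch of the idiom (hypothetical names, same renamed API):

```rust
use std::sync::atomic;

static mut NEXT_ID: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

fn unique_name(prefix: &str) -> String {
    // fetch_add returns the old value, so ids never repeat.
    let id = unsafe { NEXT_ID.fetch_add(1, atomic::SeqCst) };
    format!("{}-{}", prefix, id)
}

fn main() {
    assert!(unique_name("rs") != unique_name("rs"));
}
```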
@@ -16,7 +16,7 @@ use libc;
 use os;
 use prelude::*;
 use std::io::net::ip::*;
-use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
+use sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};

 macro_rules! iotest (
     { fn $name:ident() $b:block $(#[$a:meta])* } => (
@@ -48,7 +48,7 @@ use result::{Err, Ok, Result};
 use slice::{Vector, ImmutableVector, MutableVector, ImmutableEqVector};
 use str::{Str, StrSlice, StrAllocating};
 use string::String;
-use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
+use sync::atomic::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 use vec::Vec;

 #[cfg(unix)]
@@ -20,7 +20,7 @@ use option::{Some, None};
 use os;
 use result::{Ok, Err};
 use str::StrSlice;
-use sync::atomics;
+use sync::atomic;
 use unicode::char::UnicodeChar;

 pub use self::imp::write;
@@ -28,9 +28,9 @@ pub use self::imp::write;
 // For now logging is turned off by default, and this function checks to see
 // whether the magical environment variable is present to see if it's turned on.
 pub fn log_enabled() -> bool {
-    static mut ENABLED: atomics::AtomicInt = atomics::INIT_ATOMIC_INT;
+    static mut ENABLED: atomic::AtomicInt = atomic::INIT_ATOMIC_INT;
     unsafe {
-        match ENABLED.load(atomics::SeqCst) {
+        match ENABLED.load(atomic::SeqCst) {
             1 => return false,
             2 => return true,
             _ => {}
@@ -41,7 +41,7 @@ pub fn log_enabled() -> bool {
         Some(..) => 2,
         None => 1,
     };
-    unsafe { ENABLED.store(val, atomics::SeqCst); }
+    unsafe { ENABLED.store(val, atomic::SeqCst); }
     val == 2
 }
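`log_enabled` caches an environment lookup in a tri-state atomic: 0 means unchecked, 1 off, 2 on, so the environment is consulted at most once per racing initializer and every later call touches only the atomic. A standalone sketch; `CACHED` and the `MY_FEATURE` variable are hypothetical:

```rust
use std::os;
use std::sync::atomic;

static mut CACHED: atomic::AtomicInt = atomic::INIT_ATOMIC_INT;

fn feature_enabled() -> bool {
    unsafe {
        match CACHED.load(atomic::SeqCst) {
            1 => return false,
            2 => return true,
            _ => {} // first call (or a racing first call): compute below
        }
        let val = match os::getenv("MY_FEATURE") {
            Some(..) => 2,
            None => 1,
        };
        // A racing task may store the same answer; that's benign.
        CACHED.store(val, atomic::SeqCst);
        val == 2
    }
}

fn main() { let _ = feature_enabled(); }
```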
@@ -14,7 +14,7 @@ use libc::uintptr_t;
 use option::{Some, None, Option};
 use os;
 use str::Str;
-use sync::atomics;
+use sync::atomic;

 /// Dynamically inquire about whether we're running under V.
 /// You should usually not use this unless your test definitely
@@ -41,8 +41,8 @@ pub fn limit_thread_creation_due_to_osx_and_valgrind() -> bool {
 }

 pub fn min_stack() -> uint {
-    static mut MIN: atomics::AtomicUint = atomics::INIT_ATOMIC_UINT;
-    match unsafe { MIN.load(atomics::SeqCst) } {
+    static mut MIN: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;
+    match unsafe { MIN.load(atomic::SeqCst) } {
         0 => {}
         n => return n - 1,
     }
@@ -50,7 +50,7 @@ pub fn min_stack() -> uint {
     let amt = amt.unwrap_or(2 * 1024 * 1024);
     // 0 is our sentinel value, so ensure that we'll never see 0 after
     // initialization has run
-    unsafe { MIN.store(amt + 1, atomics::SeqCst); }
+    unsafe { MIN.store(amt + 1, atomic::SeqCst); }
     return amt;
 }
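`min_stack` adds one more wrinkle: 0 doubles as the "uncomputed" sentinel, so the computed value is stored biased by +1 and unbiased on read, and a legitimate cached value can never be mistaken for "not yet initialized". A sketch with hypothetical names:

```rust
use std::sync::atomic;

static mut CACHE: atomic::AtomicUint = atomic::INIT_ATOMIC_UINT;

fn expensive_default() -> uint { 2 * 1024 * 1024 }

fn cached_value() -> uint {
    match unsafe { CACHE.load(atomic::SeqCst) } {
        0 => {}            // sentinel: not computed yet
        n => return n - 1, // undo the +1 bias
    }
    let amt = expensive_default();
    // Store amt + 1 so the stored value is always non-zero.
    unsafe { CACHE.store(amt + 1, atomic::SeqCst); }
    amt
}

fn main() { assert_eq!(cached_value(), 2 * 1024 * 1024); }
```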
@@ -17,12 +17,18 @@

 #![experimental]

-pub use core_sync::{atomics, deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
+#[stable]
+pub use core_sync::atomic;
+
+pub use core_sync::{deque, mpmc_bounded_queue, mpsc_queue, spsc_queue};
 pub use core_sync::{Arc, Weak, Mutex, MutexGuard, Condvar, Barrier};
 pub use core_sync::{RWLock, RWLockReadGuard, RWLockWriteGuard};
 pub use core_sync::{Semaphore, SemaphoreGuard};
 pub use core_sync::one::{Once, ONCE_INIT};

+#[deprecated = "use atomic instead"]
+pub use atomics = core_sync::atomic;
+
 pub use self::future::Future;
 pub use self::task_pool::TaskPool;
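For downstream code the visible effect of this hunk is the rename plus a compatibility shim: imports should move to `std::sync::atomic`, while the old `std::sync::atomics` path keeps resolving through the deprecated re-export above, with a warning. A minimal sketch of the new spelling:

```rust
// New spelling; `use std::sync::atomics::...` still compiles via the
// deprecated re-export, but warns.
use std::sync::atomic::{AtomicUint, INIT_ATOMIC_UINT, SeqCst};

static mut COUNTER: AtomicUint = INIT_ATOMIC_UINT;

fn main() {
    unsafe {
        COUNTER.fetch_add(1, SeqCst);
        assert_eq!(COUNTER.load(SeqCst), 1);
    }
}
```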
@@ -41,7 +41,7 @@
 //!
 //! ```
 //! use std::sync::Arc;
-//! use std::sync::atomics::{AtomicUint, SeqCst};
+//! use std::sync::atomic::{AtomicUint, SeqCst};
 //! use std::task::deschedule;
 //!
 //! fn main() {
@@ -67,7 +67,7 @@
 //!
 //! ```
 //! use std::sync::Arc;
-//! use std::sync::atomics::{AtomicOption, SeqCst};
+//! use std::sync::atomic::{AtomicOption, SeqCst};
 //!
 //! fn main() {
 //!     struct BigObject;
@@ -91,7 +91,7 @@
 //! Keep a global count of live tasks:
 //!
 //! ```
-//! use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+//! use std::sync::atomic::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
 //!
 //! static mut GLOBAL_TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
 //!
@@ -106,16 +106,18 @@ use core::prelude::*;
 use alloc::boxed::Box;
 use core::mem;

-pub use core::atomics::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
-pub use core::atomics::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
-pub use core::atomics::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
-pub use core::atomics::fence;
+pub use core::atomic::{AtomicBool, AtomicInt, AtomicUint, AtomicPtr};
+pub use core::atomic::{Ordering, Relaxed, Release, Acquire, AcqRel, SeqCst};
+pub use core::atomic::{INIT_ATOMIC_BOOL, INIT_ATOMIC_INT, INIT_ATOMIC_UINT};
+pub use core::atomic::fence;

 /// An atomic, nullable unique pointer
 ///
 /// This can be used as the concurrency primitive for operations that transfer
 /// owned heap objects across tasks.
 #[unsafe_no_drop_flag]
+#[deprecated = "no longer used; will eventually be replaced by a higher-level\
+                concept like MVar"]
 pub struct AtomicOption<T> {
     p: AtomicUint,
 }
@@ -227,4 +229,3 @@ mod test {
         assert!(p.take(SeqCst) == Some(box 2));
     }
 }
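`AtomicOption` itself survives the move but is deprecated, per the commit message. For code still depending on it, the surface that matters is visible in the test tail above: `swap` and `take` move owned boxes atomically. A sketch based on that usage (the `new`/`swap`/`take` shapes are inferred from this file's doc examples and tests, not re-checked against the full source):

```rust
use std::sync::atomic::{AtomicOption, SeqCst};

fn main() {
    // An atomically swappable, nullable, owned slot.
    let p = AtomicOption::new(box 1);
    // Replace the contents, receiving the previous box back.
    assert!(p.swap(box 2, SeqCst) == Some(box 1));
    // Drain the slot; a second take would yield None.
    assert!(p.take(SeqCst) == Some(box 2));
}
```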
@@ -39,7 +39,7 @@ use core::mem;
 use rustrt::local::Local;
 use rustrt::task::{Task, BlockedTask};

-use atomics;
+use atomic;
 use comm::Receiver;

 // Various states you can find a port in.
@@ -49,7 +49,7 @@ static DISCONNECTED: uint = 2;

 pub struct Packet<T> {
     // Internal state of the chan/port pair (stores the blocked task as well)
-    state: atomics::AtomicUint,
+    state: atomic::AtomicUint,
     // One-shot data slot location
     data: Option<T>,
     // when used for the second time, a oneshot channel must be upgraded, and
@@ -86,7 +86,7 @@ impl<T: Send> Packet<T> {
         Packet {
             data: None,
             upgrade: NothingSent,
-            state: atomics::AtomicUint::new(EMPTY),
+            state: atomic::AtomicUint::new(EMPTY),
         }
     }
@@ -100,7 +100,7 @@ impl<T: Send> Packet<T> {
         self.data = Some(t);
         self.upgrade = SendUsed;

-        match self.state.swap(DATA, atomics::SeqCst) {
+        match self.state.swap(DATA, atomic::SeqCst) {
             // Sent the data, no one was waiting
             EMPTY => Ok(()),

@@ -136,11 +136,11 @@ impl<T: Send> Packet<T> {
     pub fn recv(&mut self) -> Result<T, Failure<T>> {
         // Attempt to not block the task (it's a little expensive). If it looks
         // like we're not empty, then immediately go through to `try_recv`.
-        if self.state.load(atomics::SeqCst) == EMPTY {
+        if self.state.load(atomic::SeqCst) == EMPTY {
             let t: Box<Task> = Local::take();
             t.deschedule(1, |task| {
                 let n = unsafe { task.cast_to_uint() };
-                match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+                match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
                     // Nothing on the channel, we legitimately block
                     EMPTY => Ok(()),
@@ -160,7 +160,7 @@ impl<T: Send> Packet<T> {
     }

     pub fn try_recv(&mut self) -> Result<T, Failure<T>> {
-        match self.state.load(atomics::SeqCst) {
+        match self.state.load(atomic::SeqCst) {
             EMPTY => Err(Empty),

             // We saw some data on the channel, but the channel can be used
@@ -170,7 +170,7 @@ impl<T: Send> Packet<T> {
             // the state changes under our feet we'd rather just see that state
             // change.
             DATA => {
-                self.state.compare_and_swap(DATA, EMPTY, atomics::SeqCst);
+                self.state.compare_and_swap(DATA, EMPTY, atomic::SeqCst);
                 match self.data.take() {
                     Some(data) => Ok(data),
                     None => unreachable!(),
@@ -207,7 +207,7 @@ impl<T: Send> Packet<T> {
         };
         self.upgrade = GoUp(up);

-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             // If the channel is empty or has data on it, then we're good to go.
             // Senders will check the data before the upgrade (in case we
             // plastered over the DATA state).
@@ -223,7 +223,7 @@ impl<T: Send> Packet<T> {
     }

     pub fn drop_chan(&mut self) {
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             DATA | DISCONNECTED | EMPTY => {}

             // If someone's waiting, we gotta wake them up
@@ -235,7 +235,7 @@ impl<T: Send> Packet<T> {
     }

     pub fn drop_port(&mut self) {
-        match self.state.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.state.swap(DISCONNECTED, atomic::SeqCst) {
             // An empty channel has nothing to do, and a remotely disconnected
             // channel also has nothing to do b/c we're about to run the drop
             // glue
@@ -258,7 +258,7 @@ impl<T: Send> Packet<T> {
     // If Ok, the value is whether this port has data, if Err, then the upgraded
     // port needs to be checked instead of this one.
     pub fn can_recv(&mut self) -> Result<bool, Receiver<T>> {
-        match self.state.load(atomics::SeqCst) {
+        match self.state.load(atomic::SeqCst) {
             EMPTY => Ok(false), // Welp, we tried
             DATA => Ok(true), // we have some un-acquired data
             DISCONNECTED if self.data.is_some() => Ok(true), // we have data
@@ -283,7 +283,7 @@ impl<T: Send> Packet<T> {
     // because there is data, or fail because there is an upgrade pending.
     pub fn start_selection(&mut self, task: BlockedTask) -> SelectionResult<T> {
         let n = unsafe { task.cast_to_uint() };
-        match self.state.compare_and_swap(EMPTY, n, atomics::SeqCst) {
+        match self.state.compare_and_swap(EMPTY, n, atomic::SeqCst) {
             EMPTY => SelSuccess,
             DATA => SelCanceled(unsafe { BlockedTask::cast_from_uint(n) }),
             DISCONNECTED if self.data.is_some() => {
@@ -317,7 +317,7 @@ impl<T: Send> Packet<T> {
     //
     // The return value indicates whether there's data on this port.
     pub fn abort_selection(&mut self) -> Result<bool, Receiver<T>> {
-        let state = match self.state.load(atomics::SeqCst) {
+        let state = match self.state.load(atomic::SeqCst) {
             // Each of these states means that no further activity will happen
             // with regard to abortion selection
             s @ EMPTY |
@@ -326,7 +326,7 @@ impl<T: Send> Packet<T> {

             // If we've got a blocked task, then use an atomic to gain ownership
             // of it (may fail)
-            n => self.state.compare_and_swap(n, EMPTY, atomics::SeqCst)
+            n => self.state.compare_and_swap(n, EMPTY, atomic::SeqCst)
         };

         // Now that we've got ownership of our state, figure out what to do
@@ -367,6 +367,6 @@ impl<T: Send> Packet<T> {
 #[unsafe_destructor]
 impl<T: Send> Drop for Packet<T> {
     fn drop(&mut self) {
-        assert_eq!(self.state.load(atomics::SeqCst), DISCONNECTED);
+        assert_eq!(self.state.load(atomic::SeqCst), DISCONNECTED);
     }
 }
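The oneshot packet is a small state machine: `EMPTY`, `DATA`, `DISCONNECTED`, plus a fourth implicit state, since any other value is a parked task's pointer cast to `uint`. A receiver installs itself with `compare_and_swap`, and a sender learns from `swap`'s return value whether anyone was parked. A reduced single-threaded sketch with a plain `uint` in place of the task pointer:

```rust
use std::sync::atomic::{AtomicUint, SeqCst};

static EMPTY: uint = 0;
static DATA: uint = 1;

fn main() {
    let state = AtomicUint::new(EMPTY);

    // Receiver: publish a fake "blocked task" (42 here) only if the
    // channel is still empty; the returned value tells us if we raced.
    let parked = 42u;
    assert_eq!(state.compare_and_swap(EMPTY, parked, SeqCst), EMPTY);

    // Sender: unconditionally move to DATA and inspect what was there.
    match state.swap(DATA, SeqCst) {
        n if n == EMPTY => {}       // nobody was waiting
        n => assert_eq!(n, parked), // a parked receiver: wake it
    }
}
```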
@@ -28,7 +28,7 @@ use rustrt::mutex::NativeMutex;
 use rustrt::task::{Task, BlockedTask};
 use rustrt::thread::Thread;

-use atomics;
+use atomic;
 use mpsc = mpsc_queue;

 static DISCONNECTED: int = int::MIN;
@@ -40,17 +40,17 @@ static MAX_STEALS: int = 1 << 20;

 pub struct Packet<T> {
     queue: mpsc::Queue<T>,
-    cnt: atomics::AtomicInt, // How many items are on this channel
+    cnt: atomic::AtomicInt, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: atomics::AtomicUint, // Task to wake up
+    to_wake: atomic::AtomicUint, // Task to wake up

     // The number of channels which are currently using this packet.
-    channels: atomics::AtomicInt,
+    channels: atomic::AtomicInt,

     // See the discussion in Port::drop and the channel send methods for what
     // these are used for
-    port_dropped: atomics::AtomicBool,
-    sender_drain: atomics::AtomicInt,
+    port_dropped: atomic::AtomicBool,
+    sender_drain: atomic::AtomicInt,

     // this lock protects various portions of this implementation during
     // select()
@@ -68,12 +68,12 @@ impl<T: Send> Packet<T> {
     pub fn new() -> Packet<T> {
         let p = Packet {
             queue: mpsc::Queue::new(),
-            cnt: atomics::AtomicInt::new(0),
+            cnt: atomic::AtomicInt::new(0),
             steals: 0,
-            to_wake: atomics::AtomicUint::new(0),
-            channels: atomics::AtomicInt::new(2),
-            port_dropped: atomics::AtomicBool::new(false),
-            sender_drain: atomics::AtomicInt::new(0),
+            to_wake: atomic::AtomicUint::new(0),
+            channels: atomic::AtomicInt::new(2),
+            port_dropped: atomic::AtomicBool::new(false),
+            sender_drain: atomic::AtomicInt::new(0),
             select_lock: unsafe { NativeMutex::new() },
         };
         return p;
@@ -96,11 +96,11 @@ impl<T: Send> Packet<T> {
     pub fn inherit_blocker(&mut self, task: Option<BlockedTask>) {
         match task {
             Some(task) => {
-                assert_eq!(self.cnt.load(atomics::SeqCst), 0);
-                assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+                assert_eq!(self.cnt.load(atomic::SeqCst), 0);
+                assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
                 self.to_wake.store(unsafe { task.cast_to_uint() },
-                                   atomics::SeqCst);
-                self.cnt.store(-1, atomics::SeqCst);
+                                   atomic::SeqCst);
+                self.cnt.store(-1, atomic::SeqCst);

                 // This store is a little sketchy. What's happening here is
                 // that we're transferring a blocker from a oneshot or stream
@@ -138,7 +138,7 @@ impl<T: Send> Packet<T> {

     pub fn send(&mut self, t: T) -> Result<(), T> {
         // See Port::drop for what's going on
-        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+        if self.port_dropped.load(atomic::SeqCst) { return Err(t) }

         // Note that the multiple sender case is a little trickier
         // semantically than the single sender case. The logic for
@@ -165,12 +165,12 @@ impl<T: Send> Packet<T> {
         // preflight check serves as the definitive "this will never be
         // received". Once we get beyond this check, we have permanently
         // entered the realm of "this may be received"
-        if self.cnt.load(atomics::SeqCst) < DISCONNECTED + FUDGE {
+        if self.cnt.load(atomic::SeqCst) < DISCONNECTED + FUDGE {
             return Err(t)
         }

         self.queue.push(t);
-        match self.cnt.fetch_add(1, atomics::SeqCst) {
+        match self.cnt.fetch_add(1, atomic::SeqCst) {
             -1 => {
                 self.take_to_wake().wake().map(|t| t.reawaken());
             }
@@ -187,9 +187,9 @@ impl<T: Send> Packet<T> {
             n if n < DISCONNECTED + FUDGE => {
                 // see the comment in 'try' for a shared channel for why this
                 // window of "not disconnected" is ok.
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);

-                if self.sender_drain.fetch_add(1, atomics::SeqCst) == 0 {
+                if self.sender_drain.fetch_add(1, atomic::SeqCst) == 0 {
                     loop {
                         // drain the queue, for info on the thread yield see the
                         // discussion in try_recv
@@ -202,7 +202,7 @@ impl<T: Send> Packet<T> {
                         }
                         // maybe we're done, if we're not the last ones
                         // here, then we need to go try again.
-                        if self.sender_drain.fetch_sub(1, atomics::SeqCst) == 1 {
+                        if self.sender_drain.fetch_sub(1, atomic::SeqCst) == 1 {
                             break
                         }
                     }
@@ -242,15 +242,15 @@ impl<T: Send> Packet<T> {

     // Essentially the exact same thing as the stream decrement function.
     fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
         let n = unsafe { task.cast_to_uint() };
-        self.to_wake.store(n, atomics::SeqCst);
+        self.to_wake.store(n, atomic::SeqCst);

         let steals = self.steals;
         self.steals = 0;

-        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
-            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+        match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
             // If we factor in our steals and notice that the channel has no
             // data, we successfully sleep
             n => {
@@ -259,7 +259,7 @@ impl<T: Send> Packet<T> {
             }
         }

-        self.to_wake.store(0, atomics::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         Err(unsafe { BlockedTask::cast_from_uint(n) })
     }

@@ -311,9 +311,9 @@ impl<T: Send> Packet<T> {
             // might decrement steals.
             Some(data) => {
                 if self.steals > MAX_STEALS {
-                    match self.cnt.swap(0, atomics::SeqCst) {
+                    match self.cnt.swap(0, atomic::SeqCst) {
                         DISCONNECTED => {
-                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                            self.cnt.store(DISCONNECTED, atomic::SeqCst);
                         }
                         n => {
                             let m = cmp::min(n, self.steals);
@@ -330,7 +330,7 @@ impl<T: Send> Packet<T> {
             // See the discussion in the stream implementation for why we try
             // again.
             None => {
                match self.cnt.load(atomic::SeqCst) {
                     n if n != DISCONNECTED => Err(Empty),
                     _ => {
                         match self.queue.pop() {
@@ -348,20 +348,20 @@ impl<T: Send> Packet<T> {
     // Prepares this shared packet for a channel clone, essentially just bumping
     // a refcount.
     pub fn clone_chan(&mut self) {
-        self.channels.fetch_add(1, atomics::SeqCst);
+        self.channels.fetch_add(1, atomic::SeqCst);
     }

     // Decrement the reference count on a channel. This is called whenever a
     // Chan is dropped and may end up waking up a receiver. It's the receiver's
     // responsibility on the other end to figure out that we've disconnected.
     pub fn drop_chan(&mut self) {
-        match self.channels.fetch_sub(1, atomics::SeqCst) {
+        match self.channels.fetch_sub(1, atomic::SeqCst) {
             1 => {}
             n if n > 1 => return,
             n => fail!("bad number of channels left {}", n),
         }

-        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
             -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
             DISCONNECTED => {}
             n => { assert!(n >= 0); }
@@ -371,11 +371,11 @@ impl<T: Send> Packet<T> {
     // See the long discussion inside of stream.rs for why the queue is drained,
     // and why it is done in this fashion.
     pub fn drop_port(&mut self) {
-        self.port_dropped.store(true, atomics::SeqCst);
+        self.port_dropped.store(true, atomic::SeqCst);
         let mut steals = self.steals;
         while {
             let cnt = self.cnt.compare_and_swap(
-                steals, DISCONNECTED, atomics::SeqCst);
+                steals, DISCONNECTED, atomic::SeqCst);
             cnt != DISCONNECTED && cnt != steals
         } {
             // See the discussion in 'try_recv' for why we yield
@@ -391,8 +391,8 @@ impl<T: Send> Packet<T> {

     // Consumes ownership of the 'to_wake' field.
     fn take_to_wake(&mut self) -> BlockedTask {
-        let task = self.to_wake.load(atomics::SeqCst);
-        self.to_wake.store(0, atomics::SeqCst);
+        let task = self.to_wake.load(atomic::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         assert!(task != 0);
         unsafe { BlockedTask::cast_from_uint(task) }
     }
@@ -407,15 +407,15 @@ impl<T: Send> Packet<T> {
     // This is different than the stream version because there's no need to peek
     // at the queue, we can just look at the local count.
     pub fn can_recv(&mut self) -> bool {
-        let cnt = self.cnt.load(atomics::SeqCst);
+        let cnt = self.cnt.load(atomic::SeqCst);
         cnt == DISCONNECTED || cnt - self.steals > 0
     }

     // increment the count on the channel (used for selection)
     fn bump(&mut self, amt: int) -> int {
-        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+        match self.cnt.fetch_add(amt, atomic::SeqCst) {
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 DISCONNECTED
             }
             n => n
@@ -460,13 +460,13 @@ impl<T: Send> Packet<T> {
         // the channel count and figure out what we should do to make it
         // positive.
         let steals = {
-            let cnt = self.cnt.load(atomics::SeqCst);
+            let cnt = self.cnt.load(atomic::SeqCst);
             if cnt < 0 && cnt != DISCONNECTED {-cnt} else {0}
         };
         let prev = self.bump(steals + 1);

         if prev == DISCONNECTED {
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             true
         } else {
             let cur = prev + steals + 1;
@@ -474,7 +474,7 @@ impl<T: Send> Packet<T> {
             if prev < 0 {
                 self.take_to_wake().trash();
             } else {
-                while self.to_wake.load(atomics::SeqCst) != 0 {
+                while self.to_wake.load(atomic::SeqCst) != 0 {
                     Thread::yield_now();
                 }
             }
@@ -495,8 +495,8 @@ impl<T: Send> Drop for Packet<T> {
         // disconnection, but also a proper fence before the read of
         // `to_wake`, so this assert cannot be removed with also removing
        // the `to_wake` assert.
-        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
-        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
+        assert_eq!(self.channels.load(atomic::SeqCst), 0);
     }
 }
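`clone_chan`/`drop_chan` above are plain atomic reference counting: `fetch_sub` returns the previous count, so exactly one dropper observes 1 and performs the disconnect. A standalone sketch (`drop_handle` is a hypothetical name):

```rust
use std::sync::atomic::{AtomicInt, SeqCst};

// Returns true for exactly one caller: the one dropping the last handle.
fn drop_handle(channels: &AtomicInt) -> bool {
    match channels.fetch_sub(1, SeqCst) {
        1 => true,           // we were last: run the disconnect path
        n if n > 1 => false, // other handles remain
        n => fail!("bad number of channels left {}", n),
    }
}

fn main() {
    let channels = AtomicInt::new(2);
    assert!(!drop_handle(&channels));
    assert!(drop_handle(&channels));
}
```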
@@ -26,7 +26,7 @@ use rustrt::local::Local;
 use rustrt::task::{Task, BlockedTask};
 use rustrt::thread::Thread;

-use atomics;
+use atomic;
 use comm::Receiver;
 use spsc = spsc_queue;

@@ -39,11 +39,11 @@ static MAX_STEALS: int = 1 << 20;
 pub struct Packet<T> {
     queue: spsc::Queue<Message<T>>, // internal queue for all message

-    cnt: atomics::AtomicInt, // How many items are on this channel
+    cnt: atomic::AtomicInt, // How many items are on this channel
     steals: int, // How many times has a port received without blocking?
-    to_wake: atomics::AtomicUint, // Task to wake up
+    to_wake: atomic::AtomicUint, // Task to wake up

-    port_dropped: atomics::AtomicBool, // flag if the channel has been destroyed.
+    port_dropped: atomic::AtomicBool, // flag if the channel has been destroyed.
 }

 pub enum Failure<T> {
@@ -76,11 +76,11 @@ impl<T: Send> Packet<T> {
         Packet {
             queue: unsafe { spsc::Queue::new(128) },

-            cnt: atomics::AtomicInt::new(0),
+            cnt: atomic::AtomicInt::new(0),
             steals: 0,
-            to_wake: atomics::AtomicUint::new(0),
+            to_wake: atomic::AtomicUint::new(0),

-            port_dropped: atomics::AtomicBool::new(false),
+            port_dropped: atomic::AtomicBool::new(false),
         }
     }

@@ -89,7 +89,7 @@ impl<T: Send> Packet<T> {
         // If the other port has deterministically gone away, then definitely
         // must return the data back up the stack. Otherwise, the data is
         // considered as being sent.
-        if self.port_dropped.load(atomics::SeqCst) { return Err(t) }
+        if self.port_dropped.load(atomic::SeqCst) { return Err(t) }

         match self.do_send(Data(t)) {
             UpSuccess | UpDisconnected => {},
@@ -100,14 +100,14 @@ impl<T: Send> Packet<T> {
     pub fn upgrade(&mut self, up: Receiver<T>) -> UpgradeResult {
         // If the port has gone away, then there's no need to proceed any
         // further.
-        if self.port_dropped.load(atomics::SeqCst) { return UpDisconnected }
+        if self.port_dropped.load(atomic::SeqCst) { return UpDisconnected }

         self.do_send(GoUp(up))
     }

     fn do_send(&mut self, t: Message<T>) -> UpgradeResult {
         self.queue.push(t);
-        match self.cnt.fetch_add(1, atomics::SeqCst) {
+        match self.cnt.fetch_add(1, atomic::SeqCst) {
             // As described in the mod's doc comment, -1 == wakeup
             -1 => UpWoke(self.take_to_wake()),
             // As as described before, SPSC queues must be >= -2
@@ -121,7 +121,7 @@ impl<T: Send> Packet<T> {
             // will never remove this data. We can only have at most one item to
             // drain (the port drains the rest).
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 let first = self.queue.pop();
                 let second = self.queue.pop();
                 assert!(second.is_none());
@@ -140,8 +140,8 @@ impl<T: Send> Packet<T> {

     // Consumes ownership of the 'to_wake' field.
     fn take_to_wake(&mut self) -> BlockedTask {
-        let task = self.to_wake.load(atomics::SeqCst);
-        self.to_wake.store(0, atomics::SeqCst);
+        let task = self.to_wake.load(atomic::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         assert!(task != 0);
         unsafe { BlockedTask::cast_from_uint(task) }
     }
@@ -150,15 +150,15 @@ impl<T: Send> Packet<T> {
     // back if it shouldn't sleep. Note that this is the location where we take
     // steals into account.
     fn decrement(&mut self, task: BlockedTask) -> Result<(), BlockedTask> {
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
         let n = unsafe { task.cast_to_uint() };
-        self.to_wake.store(n, atomics::SeqCst);
+        self.to_wake.store(n, atomic::SeqCst);

         let steals = self.steals;
         self.steals = 0;

-        match self.cnt.fetch_sub(1 + steals, atomics::SeqCst) {
-            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomics::SeqCst); }
+        match self.cnt.fetch_sub(1 + steals, atomic::SeqCst) {
+            DISCONNECTED => { self.cnt.store(DISCONNECTED, atomic::SeqCst); }
             // If we factor in our steals and notice that the channel has no
             // data, we successfully sleep
             n => {
@@ -167,7 +167,7 @@ impl<T: Send> Packet<T> {
             }
         }

-        self.to_wake.store(0, atomics::SeqCst);
+        self.to_wake.store(0, atomic::SeqCst);
         Err(unsafe { BlockedTask::cast_from_uint(n) })
     }

@@ -214,9 +214,9 @@ impl<T: Send> Packet<T> {
             // adding back in whatever we couldn't factor into steals.
             Some(data) => {
                 if self.steals > MAX_STEALS {
-                    match self.cnt.swap(0, atomics::SeqCst) {
+                    match self.cnt.swap(0, atomic::SeqCst) {
                         DISCONNECTED => {
-                            self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                            self.cnt.store(DISCONNECTED, atomic::SeqCst);
                         }
                         n => {
                             let m = cmp::min(n, self.steals);
@@ -234,7 +234,7 @@ impl<T: Send> Packet<T> {
             }

             None => {
-                match self.cnt.load(atomics::SeqCst) {
+                match self.cnt.load(atomic::SeqCst) {
                     n if n != DISCONNECTED => Err(Empty),

                     // This is a little bit of a tricky case. We failed to pop
@@ -263,7 +263,7 @@ impl<T: Send> Packet<T> {
     pub fn drop_chan(&mut self) {
         // Dropping a channel is pretty simple, we just flag it as disconnected
         // and then wakeup a blocker if there is one.
-        match self.cnt.swap(DISCONNECTED, atomics::SeqCst) {
+        match self.cnt.swap(DISCONNECTED, atomic::SeqCst) {
             -1 => { self.take_to_wake().wake().map(|t| t.reawaken()); }
             DISCONNECTED => {}
             n => { assert!(n >= 0); }
@@ -290,7 +290,7 @@ impl<T: Send> Packet<T> {
         // sends are gated on this flag, so we're immediately guaranteed that
         // there are a bounded number of active sends that we'll have to deal
         // with.
-        self.port_dropped.store(true, atomics::SeqCst);
+        self.port_dropped.store(true, atomic::SeqCst);

         // Now that we're guaranteed to deal with a bounded number of senders,
         // we need to drain the queue. This draining process happens atomically
@@ -303,7 +303,7 @@ impl<T: Send> Packet<T> {
         let mut steals = self.steals;
         while {
             let cnt = self.cnt.compare_and_swap(
-                steals, DISCONNECTED, atomics::SeqCst);
+                steals, DISCONNECTED, atomic::SeqCst);
             cnt != DISCONNECTED && cnt != steals
         } {
             loop {
@@ -348,9 +348,9 @@ impl<T: Send> Packet<T> {

     // increment the count on the channel (used for selection)
     fn bump(&mut self, amt: int) -> int {
-        match self.cnt.fetch_add(amt, atomics::SeqCst) {
+        match self.cnt.fetch_add(amt, atomic::SeqCst) {
             DISCONNECTED => {
-                self.cnt.store(DISCONNECTED, atomics::SeqCst);
+                self.cnt.store(DISCONNECTED, atomic::SeqCst);
                 DISCONNECTED
             }
             n => n
@@ -400,7 +400,7 @@ impl<T: Send> Packet<T> {
         // of time until the data is actually sent.
         if was_upgrade {
             assert_eq!(self.steals, 0);
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             return Ok(true)
         }

@@ -413,7 +413,7 @@ impl<T: Send> Packet<T> {
         // If we were previously disconnected, then we know for sure that there
         // is no task in to_wake, so just keep going
         let has_data = if prev == DISCONNECTED {
-            assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+            assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
             true // there is data, that data is that we're disconnected
         } else {
             let cur = prev + steals + 1;
@@ -436,7 +436,7 @@ impl<T: Send> Packet<T> {
             if prev < 0 {
                 self.take_to_wake().trash();
             } else {
-                while self.to_wake.load(atomics::SeqCst) != 0 {
+                while self.to_wake.load(atomic::SeqCst) != 0 {
                     Thread::yield_now();
                 }
             }
@@ -475,7 +475,7 @@ impl<T: Send> Drop for Packet<T> {
         // disconnection, but also a proper fence before the read of
         // `to_wake`, so this assert cannot be removed with also removing
         // the `to_wake` assert.
-        assert_eq!(self.cnt.load(atomics::SeqCst), DISCONNECTED);
-        assert_eq!(self.to_wake.load(atomics::SeqCst), 0);
+        assert_eq!(self.cnt.load(atomic::SeqCst), DISCONNECTED);
+        assert_eq!(self.to_wake.load(atomic::SeqCst), 0);
     }
 }
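Both the stream and shared flavors hang off the same `cnt` protocol: a receiver that decides to block drives the count negative, so a sender's `fetch_add(1, ...)` returning -1 is the signal that a task is parked in `to_wake` and must be woken. A reduced sketch of that handshake, run sequentially:

```rust
use std::sync::atomic::{AtomicInt, SeqCst};

fn main() {
    let cnt = AtomicInt::new(0);

    // Receiver, about to block: the queue looked empty, so the
    // decrement takes cnt to -1.
    assert_eq!(cnt.fetch_sub(1, SeqCst), 0);

    // Sender, after pushing onto the queue: seeing -1 from the
    // increment means a receiver parked and needs a wakeup.
    match cnt.fetch_add(1, SeqCst) {
        -1 => { /* take_to_wake() and reawaken the task */ }
        n => assert!(n >= 0), // receiver still running; nothing to do
    }
}
```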
@@ -44,12 +44,12 @@ use rustrt::local::Local;
 use rustrt::mutex::{NativeMutex, LockGuard};
 use rustrt::task::{Task, BlockedTask};

-use atomics;
+use atomic;

 pub struct Packet<T> {
     /// Only field outside of the mutex. Just done for kicks, but mainly because
     /// the other shared channel already had the code implemented
-    channels: atomics::AtomicUint,
+    channels: atomic::AtomicUint,

     /// The state field is protected by this mutex
     lock: NativeMutex,
@@ -131,7 +131,7 @@ fn wakeup(task: BlockedTask, guard: LockGuard) {
 impl<T: Send> Packet<T> {
     pub fn new(cap: uint) -> Packet<T> {
         Packet {
-            channels: atomics::AtomicUint::new(1),
+            channels: atomic::AtomicUint::new(1),
             lock: unsafe { NativeMutex::new() },
             state: UnsafeCell::new(State {
                 disconnected: false,
@@ -303,12 +303,12 @@ impl<T: Send> Packet<T> {
     // Prepares this shared packet for a channel clone, essentially just bumping
     // a refcount.
     pub fn clone_chan(&self) {
-        self.channels.fetch_add(1, atomics::SeqCst);
+        self.channels.fetch_add(1, atomic::SeqCst);
     }

     pub fn drop_chan(&self) {
         // Only flag the channel as disconnected if we're the last channel
-        match self.channels.fetch_sub(1, atomics::SeqCst) {
+        match self.channels.fetch_sub(1, atomic::SeqCst) {
             1 => {}
             _ => return
         }
@@ -411,7 +411,7 @@ impl<T: Send> Packet<T> {
 #[unsafe_destructor]
 impl<T: Send> Drop for Packet<T> {
     fn drop(&mut self) {
-        assert_eq!(self.channels.load(atomics::SeqCst), 0);
+        assert_eq!(self.channels.load(atomic::SeqCst), 0);
         let (_g, state) = self.lock();
         assert!(state.queue.dequeue().is_none());
         assert!(state.canceled.is_none());
@@ -61,7 +61,7 @@ use core::mem::{forget, min_align_of, size_of, transmute};
 use core::ptr;
 use rustrt::exclusive::Exclusive;

-use atomics::{AtomicInt, AtomicPtr, SeqCst};
+use atomic::{AtomicInt, AtomicPtr, SeqCst};

 // Once the queue is less than 1/K full, then it will be downsized. Note that
 // the deque requires that this number be less than 2.
@@ -414,7 +414,7 @@ mod tests {
     use std::rt::thread::Thread;
     use std::rand;
     use std::rand::Rng;
-    use atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
-                  AtomicUint, INIT_ATOMIC_UINT};
+    use atomic::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+                 AtomicUint, INIT_ATOMIC_UINT};
     use std::vec;
@@ -49,7 +49,7 @@ pub use raw::{Semaphore, SemaphoreGuard};

 // Core building blocks for all primitives in this crate

-pub mod atomics;
+pub mod atomic;

 // Concurrent data structures
@@ -37,7 +37,7 @@ use collections::Vec;
 use core::num::next_power_of_two;
 use core::cell::UnsafeCell;

-use atomics::{AtomicUint,Relaxed,Release,Acquire};
+use atomic::{AtomicUint,Relaxed,Release,Acquire};

 struct Node<T> {
     sequence: AtomicUint,
@@ -37,7 +37,7 @@

 use core::prelude::*;

-use core::atomics;
+use core::atomic;
 use core::mem;
 use core::cell::UnsafeCell;

@@ -45,16 +45,16 @@ use core::cell::UnsafeCell;
 // initialization.

 pub struct Node<T> {
-    pub next: atomics::AtomicUint,
+    pub next: atomic::AtomicUint,
     pub data: T,
 }

 pub struct DummyNode {
-    pub next: atomics::AtomicUint,
+    pub next: atomic::AtomicUint,
 }

 pub struct Queue<T> {
-    pub head: atomics::AtomicUint,
+    pub head: atomic::AtomicUint,
     pub tail: UnsafeCell<*mut Node<T>>,
     pub stub: DummyNode,
 }
@@ -62,26 +62,26 @@ pub struct Queue<T> {
 impl<T: Send> Queue<T> {
     pub fn new() -> Queue<T> {
         Queue {
-            head: atomics::AtomicUint::new(0),
+            head: atomic::AtomicUint::new(0),
             tail: UnsafeCell::new(0 as *mut Node<T>),
             stub: DummyNode {
-                next: atomics::AtomicUint::new(0),
+                next: atomic::AtomicUint::new(0),
             },
         }
     }

     pub unsafe fn push(&self, node: *mut Node<T>) {
-        (*node).next.store(0, atomics::Release);
-        let prev = self.head.swap(node as uint, atomics::AcqRel);
+        (*node).next.store(0, atomic::Release);
+        let prev = self.head.swap(node as uint, atomic::AcqRel);

         // Note that this code is slightly modified to allow static
         // initialization of these queues with rust's flavor of static
         // initialization.
         if prev == 0 {
-            self.stub.next.store(node as uint, atomics::Release);
+            self.stub.next.store(node as uint, atomic::Release);
         } else {
             let prev = prev as *mut Node<T>;
-            (*prev).next.store(node as uint, atomics::Release);
+            (*prev).next.store(node as uint, atomic::Release);
         }
     }

@@ -103,26 +103,26 @@ impl<T: Send> Queue<T> {
         let mut tail = if !tail.is_null() {tail} else {
             mem::transmute(&self.stub)
         };
-        let mut next = (*tail).next(atomics::Relaxed);
+        let mut next = (*tail).next(atomic::Relaxed);
         if tail as uint == &self.stub as *const DummyNode as uint {
             if next.is_null() {
                 return None;
             }
             *self.tail.get() = next;
             tail = next;
-            next = (*next).next(atomics::Relaxed);
+            next = (*next).next(atomic::Relaxed);
         }
         if !next.is_null() {
             *self.tail.get() = next;
             return Some(tail);
         }
-        let head = self.head.load(atomics::Acquire) as *mut Node<T>;
+        let head = self.head.load(atomic::Acquire) as *mut Node<T>;
         if tail != head {
             return None;
         }
         let stub = mem::transmute(&self.stub);
         self.push(stub);
-        next = (*tail).next(atomics::Relaxed);
+        next = (*tail).next(atomic::Relaxed);
         if !next.is_null() {
             *self.tail.get() = next;
             return Some(tail);
@@ -135,10 +135,10 @@ impl<T: Send> Node<T> {
     pub fn new(t: T) -> Node<T> {
         Node {
             data: t,
-            next: atomics::AtomicUint::new(0),
+            next: atomic::AtomicUint::new(0),
         }
     }
-    pub unsafe fn next(&self, ord: atomics::Ordering) -> *mut Node<T> {
+    pub unsafe fn next(&self, ord: atomic::Ordering) -> *mut Node<T> {
         mem::transmute::<uint, *mut Node<T>>(self.next.load(ord))
     }
 }
@@ -46,7 +46,7 @@ use alloc::boxed::Box;
 use core::mem;
 use core::cell::UnsafeCell;

-use atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
+use atomic::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};

 /// A result of the `pop` function.
 pub enum PopResult<T> {
@@ -60,7 +60,7 @@
 use core::prelude::*;

 use alloc::boxed::Box;
-use core::atomics;
+use core::atomic;
 use core::mem;
 use core::cell::UnsafeCell;
 use rustrt::local::Local;
@@ -137,7 +137,7 @@ enum Flavor {
 /// ```
 pub struct StaticMutex {
     /// Current set of flags on this mutex
-    state: atomics::AtomicUint,
+    state: atomic::AtomicUint,
     /// an OS mutex used by native threads
     lock: mutex::StaticNativeMutex,
@@ -151,7 +151,7 @@ pub struct StaticMutex {
     /// A concurrent mpsc queue used by green threads, along with a count used
     /// to figure out when to dequeue and enqueue.
     q: q::Queue<uint>,
-    green_cnt: atomics::AtomicUint,
+    green_cnt: atomic::AtomicUint,
 }

 /// An RAII implementation of a "scoped lock" of a mutex. When this structure is
@@ -165,16 +165,16 @@ pub struct Guard<'a> {
 /// other mutex constants.
 pub static MUTEX_INIT: StaticMutex = StaticMutex {
     lock: mutex::NATIVE_MUTEX_INIT,
-    state: atomics::INIT_ATOMIC_UINT,
+    state: atomic::INIT_ATOMIC_UINT,
     flavor: UnsafeCell { value: Unlocked },
     green_blocker: UnsafeCell { value: 0 },
     native_blocker: UnsafeCell { value: 0 },
-    green_cnt: atomics::INIT_ATOMIC_UINT,
+    green_cnt: atomic::INIT_ATOMIC_UINT,
     q: q::Queue {
-        head: atomics::INIT_ATOMIC_UINT,
+        head: atomic::INIT_ATOMIC_UINT,
         tail: UnsafeCell { value: 0 as *mut q::Node<uint> },
         stub: q::DummyNode {
-            next: atomics::INIT_ATOMIC_UINT,
+            next: atomic::INIT_ATOMIC_UINT,
         }
     }
 };
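For context, `MUTEX_INIT` exists so a `StaticMutex` can live in a `static` with no runtime setup, which is also why the embedded queue is built from `INIT_ATOMIC_UINT` constants rather than constructor calls. A hedged usage sketch in this commit's pre-1.0 dialect (the `sync::mutex` import path is an assumption from the surrounding crate layout):

    use sync::mutex::{StaticMutex, MUTEX_INIT};

    static LOCK: StaticMutex = MUTEX_INIT;

    fn with_lock() {
        let _g = LOCK.lock(); // guard releases the lock when dropped
        // ... critical section ...
    }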
@@ -185,7 +185,7 @@ impl StaticMutex {
         // Attempt to steal the mutex from an unlocked state.
         //
         // FIXME: this can mess up the fairness of the mutex, seems bad
-        match self.state.compare_and_swap(0, LOCKED, atomics::SeqCst) {
+        match self.state.compare_and_swap(0, LOCKED, atomic::SeqCst) {
             0 => {
                 // After acquiring the mutex, we can safely access the inner
                 // fields.
@@ -230,7 +230,7 @@ impl StaticMutex {
         // allow threads coming out of the native_lock function to try their
         // best to not hit a cvar in deschedule.
         let mut old = match self.state.compare_and_swap(0, LOCKED,
-                                                        atomics::SeqCst) {
+                                                        atomic::SeqCst) {
             0 => {
                 let flavor = if can_block {
                     NativeAcquisition
@@ -272,7 +272,7 @@ impl StaticMutex {
         if old & LOCKED != 0 {
             old = match self.state.compare_and_swap(old,
                                                     old | native_bit,
-                                                    atomics::SeqCst) {
+                                                    atomic::SeqCst) {
                 n if n == old => return Ok(()),
                 n => n
             };
@@ -280,7 +280,7 @@ impl StaticMutex {
             assert_eq!(old, 0);
             old = match self.state.compare_and_swap(old,
                                                     old | LOCKED,
-                                                    atomics::SeqCst) {
+                                                    atomic::SeqCst) {
                 n if n == old => {
                     // After acquiring the lock, we have access to the
                     // flavor field, and we've regained access to our
@@ -330,7 +330,7 @@ impl StaticMutex {
         //
         // FIXME: There isn't a cancellation currently of an enqueue, forcing
         // the unlocker to spin for a bit.
-        if self.green_cnt.fetch_add(1, atomics::SeqCst) == 0 {
+        if self.green_cnt.fetch_add(1, atomic::SeqCst) == 0 {
             Local::put(t);
             return
         }
@@ -348,7 +348,7 @@ impl StaticMutex {
     fn green_unlock(&self) {
         // If we're the only green thread, then no need to check the queue,
         // otherwise the fixme above forces us to spin for a bit.
-        if self.green_cnt.fetch_sub(1, atomics::SeqCst) == 1 { return }
+        if self.green_cnt.fetch_sub(1, atomic::SeqCst) == 1 { return }
         let node;
         loop {
             match unsafe { self.q.pop() } {
@@ -380,7 +380,7 @@ impl StaticMutex {
         // of the outer mutex.
         let flavor = unsafe { mem::replace(&mut *self.flavor.get(), Unlocked) };

-        let mut state = self.state.load(atomics::SeqCst);
+        let mut state = self.state.load(atomic::SeqCst);
         let mut unlocked = false;
         let task;
         loop {
@@ -412,7 +412,7 @@ impl StaticMutex {
                 }
                 unlocked = true;
             }
-            match self.state.compare_and_swap(LOCKED, 0, atomics::SeqCst) {
+            match self.state.compare_and_swap(LOCKED, 0, atomic::SeqCst) {
                 LOCKED => return,
                 n => { state = n; }
             }
@@ -435,7 +435,7 @@ impl StaticMutex {
         loop {
             assert!(state & bit != 0);
             let new = state ^ bit;
-            match self.state.compare_and_swap(state, new, atomics::SeqCst) {
+            match self.state.compare_and_swap(state, new, atomic::SeqCst) {
                 n if n == state => break,
                 n => { state = n; }
             }
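The hunks above all lean on the same retry idiom: `compare_and_swap` returns the value it actually observed, so a mismatch both signals failure and supplies the fresh state for the next attempt. A hedged standalone sketch of the idiom in this commit's dialect (the `clear_bit` helper is hypothetical, not from the diff):

    use std::sync::atomic;

    // Hypothetical helper showing the retry loop used throughout mutex.rs.
    fn clear_bit(state: &atomic::AtomicUint, bit: uint) {
        let mut old = state.load(atomic::SeqCst);
        loop {
            let new = old & !bit;
            match state.compare_and_swap(old, new, atomic::SeqCst) {
                n if n == old => break, // our snapshot was current; swap applied
                n => old = n,           // lost a race; retry with the fresh value
            }
        }
    }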
@@ -462,11 +462,11 @@ impl Mutex {
     pub fn new() -> Mutex {
         Mutex {
             lock: box StaticMutex {
-                state: atomics::AtomicUint::new(0),
+                state: atomic::AtomicUint::new(0),
                 flavor: UnsafeCell::new(Unlocked),
                 green_blocker: UnsafeCell::new(0),
                 native_blocker: UnsafeCell::new(0),
-                green_cnt: atomics::AtomicUint::new(0),
+                green_cnt: atomic::AtomicUint::new(0),
                 q: q::Queue::new(),
                 lock: unsafe { mutex::StaticNativeMutex::new() },
             }
@@ -498,7 +498,7 @@ impl<'a> Guard<'a> {
         if cfg!(debug) {
             // once we've acquired a lock, it's ok to access the flavor
             assert!(unsafe { *lock.flavor.get() != Unlocked });
-            assert!(lock.state.load(atomics::SeqCst) & LOCKED != 0);
+            assert!(lock.state.load(atomic::SeqCst) & LOCKED != 0);
         }
         Guard { lock: lock }
     }
@@ -16,7 +16,7 @@
 use core::prelude::*;

 use core::int;
-use core::atomics;
+use core::atomic;

 use mutex::{StaticMutex, MUTEX_INIT};
@@ -40,15 +40,15 @@ use mutex::{StaticMutex, MUTEX_INIT};
 /// ```
 pub struct Once {
     mutex: StaticMutex,
-    cnt: atomics::AtomicInt,
-    lock_cnt: atomics::AtomicInt,
+    cnt: atomic::AtomicInt,
+    lock_cnt: atomic::AtomicInt,
 }

 /// Initialization value for static `Once` values.
 pub static ONCE_INIT: Once = Once {
     mutex: MUTEX_INIT,
-    cnt: atomics::INIT_ATOMIC_INT,
-    lock_cnt: atomics::INIT_ATOMIC_INT,
+    cnt: atomic::INIT_ATOMIC_INT,
+    lock_cnt: atomic::INIT_ATOMIC_INT,
 };

 impl Once {
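A hedged usage sketch of the `Once` API touched here, in this commit's dialect (the `sync::one` import path is an assumption about the era's crate layout):

    use sync::one::{Once, ONCE_INIT};

    static START: Once = ONCE_INIT;

    fn init_logging() {
        START.doit(|| {
            // Runs at most once no matter how many tasks race to call it;
            // every caller returns only after some call has completed.
        });
    }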
@@ -63,7 +63,7 @@ impl Once {
     /// has run and completed (it may not be the closure specified).
     pub fn doit(&self, f: ||) {
         // Optimize common path: load is much cheaper than fetch_add.
-        if self.cnt.load(atomics::SeqCst) < 0 {
+        if self.cnt.load(atomic::SeqCst) < 0 {
             return
         }
@@ -94,11 +94,11 @@ impl Once {
         // calling `doit` will return immediately before the initialization has
         // completed.

-        let prev = self.cnt.fetch_add(1, atomics::SeqCst);
+        let prev = self.cnt.fetch_add(1, atomic::SeqCst);
         if prev < 0 {
             // Make sure we never overflow, we'll never have int::MIN
             // simultaneous calls to `doit` to make this value go back to 0
-            self.cnt.store(int::MIN, atomics::SeqCst);
+            self.cnt.store(int::MIN, atomic::SeqCst);
             return
         }
@@ -106,15 +106,15 @@ impl Once {
         // otherwise we run the job and record how many people will try to grab
         // this lock
         let guard = self.mutex.lock();
-        if self.cnt.load(atomics::SeqCst) > 0 {
+        if self.cnt.load(atomic::SeqCst) > 0 {
             f();
-            let prev = self.cnt.swap(int::MIN, atomics::SeqCst);
-            self.lock_cnt.store(prev, atomics::SeqCst);
+            let prev = self.cnt.swap(int::MIN, atomic::SeqCst);
+            self.lock_cnt.store(prev, atomic::SeqCst);
         }
         drop(guard);

         // Last one out cleans up after everyone else, no leaks!
-        if self.lock_cnt.fetch_add(-1, atomics::SeqCst) == 1 {
+        if self.lock_cnt.fetch_add(-1, atomic::SeqCst) == 1 {
             unsafe { self.mutex.destroy() }
         }
     }
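To make the two counters concrete, a worked trace with three racing callers (hypothetical interleaving, not from the diff):

    // cnt starts at 0; negative cnt means "already initialized".
    // T1: cnt.fetch_add(1) -> prev 0
    // T2: cnt.fetch_add(1) -> prev 1
    // T3: cnt.fetch_add(1) -> prev 2        (cnt == 3)
    // T1 takes the mutex first, sees cnt > 0, runs f(), then does
    //     cnt.swap(int::MIN) -> prev 3 and lock_cnt.store(3).
    // T1, T2, T3 each run lock_cnt.fetch_add(-1); whichever observes
    //     prev == 1 is the last one out and destroys the mutex.
    // Any later caller sees cnt < 0 on the fast path and returns at once.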
@@ -17,7 +17,7 @@

 use core::prelude::*;

-use core::atomics;
+use core::atomic;
 use core::finally::Finally;
 use core::kinds::marker;
 use core::mem;
@@ -458,7 +458,7 @@ pub struct RWLock {
     //
     // FIXME(#6598): The atomics module has no relaxed ordering flag, so I use
     // acquire/release orderings superfluously. Change these someday.
-    read_count: atomics::AtomicUint,
+    read_count: atomic::AtomicUint,
 }

 /// An RAII helper which is created by acquiring a read lock on an RWLock. When
@@ -490,7 +490,7 @@ impl RWLock {
         RWLock {
             order_lock: Semaphore::new(1),
             access_lock: Sem::new_and_signal(1, num_condvars),
-            read_count: atomics::AtomicUint::new(0),
+            read_count: atomic::AtomicUint::new(0),
         }
     }
@@ -499,7 +499,7 @@ impl RWLock {
     /// this one.
     pub fn read<'a>(&'a self) -> RWLockReadGuard<'a> {
         let _guard = self.order_lock.access();
-        let old_count = self.read_count.fetch_add(1, atomics::Acquire);
+        let old_count = self.read_count.fetch_add(1, atomic::Acquire);
         if old_count == 0 {
             self.access_lock.acquire();
         }
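The reader-count protocol in this hunk pairs with the `Drop` impl for `RWLockReadGuard` further down: only the boundary readers ever touch `access_lock`. A worked sketch (hypothetical interleaving):

    // reader A: read_count.fetch_add(1, Acquire) -> old 0
    //           A is the first reader, so A acquires access_lock,
    //           holding writers out for the whole "reader cloud".
    // reader B: fetch_add(1, Acquire) -> old 1; enters without blocking.
    // on drop:  each reader does read_count.fetch_sub(1, Release);
    //           whoever observes old == 1 is the last reader out and
    //           releases access_lock, letting a waiting writer proceed.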
@@ -575,7 +575,7 @@ impl<'a> RWLockWriteGuard<'a> {
         // things from now on
         unsafe { mem::forget(self) }

-        let old_count = lock.read_count.fetch_add(1, atomics::Release);
+        let old_count = lock.read_count.fetch_add(1, atomic::Release);
         // If another reader was already blocking, we need to hand-off
         // the "reader cloud" access lock to them.
         if old_count != 0 {
@@ -600,7 +600,7 @@ impl<'a> Drop for RWLockWriteGuard<'a> {
 #[unsafe_destructor]
 impl<'a> Drop for RWLockReadGuard<'a> {
     fn drop(&mut self) {
-        let old_count = self.lock.read_count.fetch_sub(1, atomics::Release);
+        let old_count = self.lock.read_count.fetch_sub(1, atomic::Release);
         assert!(old_count > 0);
         if old_count == 1 {
             // Note: this release used to be outside of a locked access
@@ -42,7 +42,7 @@ use core::mem;
 use core::cell::UnsafeCell;
 use alloc::arc::Arc;

-use atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
+use atomic::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};

 // Node within the linked list queue of messages to send
 struct Node<T> {
@@ -12,7 +12,7 @@

 #![feature(globs)]

-use std::sync::atomics::*;
+use std::sync::atomic::*;
 use std::ptr;

 fn main() {
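After this commit the public path is `std::sync::atomic`; a minimal sketch of code compiled against the renamed module (this-era dialect, mirroring the test above):

    use std::sync::atomic::{AtomicUint, SeqCst};

    fn main() {
        let a = AtomicUint::new(0);
        a.fetch_add(1, SeqCst);        // returns the previous value, 0
        assert_eq!(a.load(SeqCst), 1);
    }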