diff --git a/src/libcore/comm.rs b/src/libcore/comm.rs
index b1f60ec4690..322584f8df1 100644
--- a/src/libcore/comm.rs
+++ b/src/libcore/comm.rs
@@ -19,8 +19,8 @@
 use option::{Option, Some, None};
 use uint;
 use unstable;
 use vec;
-use unstable::Exclusive;
 use util::replace;
+use unstable::sync::{Exclusive, exclusive};
 
 use pipes::{recv, try_recv, wait_many, peek, PacketHeader};
@@ -304,7 +304,7 @@ pub struct SharedChan<T> {
 impl<T: Owned> SharedChan<T> {
     /// Converts a `chan` into a `shared_chan`.
     pub fn new(c: Chan<T>) -> SharedChan<T> {
-        SharedChan { ch: unstable::exclusive(c) }
+        SharedChan { ch: exclusive(c) }
     }
 }
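Annotation (not part of the patch): the `SharedChan::new` hunk above only re-routes an import, but the pattern it touches — wrapping a single-owner channel endpoint in a lock-protected ARC so several tasks can share one sender — is worth seeing in isolation. A rough modern-Rust analogue, where the clonable `mpsc::Sender` plays the role of the `Exclusive`-wrapped `Chan` (a sketch for orientation, not code from this patch):

    use std::sync::mpsc;
    use std::thread;

    fn main() {
        // One receiver, many senders: each thread clones the sending end,
        // which is what SharedChan emulated by locking a single Chan.
        let (tx, rx) = mpsc::channel();
        for i in 0..4 {
            let tx = tx.clone();
            thread::spawn(move || tx.send(i).unwrap());
        }
        drop(tx); // close the channel so rx.iter() terminates
        let total: i32 = rx.iter().sum();
        assert_eq!(total, 6); // 0 + 1 + 2 + 3
    }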
diff --git a/src/libcore/os.rs b/src/libcore/os.rs
index 030b7ec3e42..1a8d996cc46 100644
--- a/src/libcore/os.rs
+++ b/src/libcore/os.rs
@@ -152,7 +152,7 @@ FIXME #4726: It would probably be appropriate to make this a real global
 */
 fn with_env_lock<T>(f: &fn() -> T) -> T {
     use unstable::global::global_data_clone_create;
-    use unstable::{Exclusive, exclusive};
+    use unstable::sync::{Exclusive, exclusive};
 
     struct SharedValue(());
     type ValueMutex = Exclusive<SharedValue>;
@@ -860,7 +860,7 @@ pub fn change_dir(p: &Path) -> bool {
 /// is otherwise unsuccessful.
 pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
     use unstable::global::global_data_clone_create;
-    use unstable::{Exclusive, exclusive};
+    use unstable::sync::{Exclusive, exclusive};
 
     fn key(_: Exclusive<()>) { }
 
diff --git a/src/libcore/task/spawn.rs b/src/libcore/task/spawn.rs
index 327b7a988aa..545f1ac8ada 100644
--- a/src/libcore/task/spawn.rs
+++ b/src/libcore/task/spawn.rs
@@ -90,6 +90,7 @@
 use task::{ExistingScheduler, SchedulerHandle};
 use task::unkillable;
 use uint;
 use util;
+use unstable::sync::{Exclusive, exclusive};
 
 #[cfg(test)] use task::default_task_opts;
@@ -128,7 +129,7 @@ struct TaskGroupData {
     // tasks in this group.
     descendants: TaskSet,
 }
-type TaskGroupArc = unstable::Exclusive<Option<TaskGroupData>>;
+type TaskGroupArc = Exclusive<Option<TaskGroupData>>;
 
 type TaskGroupInner<'self> = &'self mut Option<TaskGroupData>;
 
@@ -158,7 +159,7 @@ struct AncestorNode {
     ancestors: AncestorList,
 }
 
-struct AncestorList(Option<unstable::Exclusive<AncestorNode>>);
+struct AncestorList(Option<Exclusive<AncestorNode>>);
 
 // Accessors for taskgroup arcs and ancestor arcs that wrap the unsafety.
 #[inline(always)]
@@ -167,7 +168,7 @@ fn access_group<U>(x: &TaskGroupArc, blk: &fn(TaskGroupInner) -> U) -> U {
 }
 
 #[inline(always)]
-fn access_ancestors<U>(x: &unstable::Exclusive<AncestorNode>,
+fn access_ancestors<U>(x: &Exclusive<AncestorNode>,
                        blk: &fn(x: &mut AncestorNode) -> U) -> U {
     x.with(blk)
 }
@@ -479,7 +480,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
             // here.
             let mut members = new_taskset();
             taskset_insert(&mut members, spawner);
-            let tasks = unstable::exclusive(Some(TaskGroupData {
+            let tasks = exclusive(Some(TaskGroupData {
                 members: members,
                 descendants: new_taskset(),
             }));
@@ -508,7 +509,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
             (g, a, spawner_group.is_main)
         } else {
             // Child is in a separate group from spawner.
-            let g = unstable::exclusive(Some(TaskGroupData {
+            let g = exclusive(Some(TaskGroupData {
                 members: new_taskset(),
                 descendants: new_taskset(),
             }));
@@ -528,7 +529,7 @@ fn gen_child_taskgroup(linked: bool, supervised: bool)
             };
             assert!(new_generation < uint::max_value);
             // Build a new node in the ancestor list.
-            AncestorList(Some(unstable::exclusive(AncestorNode {
+            AncestorList(Some(exclusive(AncestorNode {
                 generation: new_generation,
                 parent_group: Some(spawner_group.tasks.clone()),
                 ancestors: old_ancestors,
diff --git a/src/libcore/unstable/global.rs b/src/libcore/unstable/global.rs
index eac686e28d1..27a003e0414 100644
--- a/src/libcore/unstable/global.rs
+++ b/src/libcore/unstable/global.rs
@@ -31,14 +31,14 @@
 use kinds::Owned;
 use libc::{c_void};
 use option::{Option, Some, None};
 use ops::Drop;
-use unstable::{Exclusive, exclusive};
+use unstable::sync::{Exclusive, exclusive};
 use unstable::at_exit::at_exit;
 use unstable::intrinsics::atomic_cxchg;
 use hashmap::HashMap;
 use sys::Closure;
 
-#[cfg(test)] use unstable::{SharedMutableState, shared_mutable_state};
-#[cfg(test)] use unstable::get_shared_immutable_state;
+#[cfg(test)] use unstable::sync::{SharedMutableState, shared_mutable_state};
+#[cfg(test)] use unstable::sync::get_shared_immutable_state;
 #[cfg(test)] use task::spawn;
 #[cfg(test)] use uint;
diff --git a/src/libcore/unstable/mod.rs b/src/libcore/unstable/mod.rs
index ef7d70783c8..bef7a7f87d3 100644
--- a/src/libcore/unstable/mod.rs
+++ b/src/libcore/unstable/mod.rs
@@ -10,13 +10,10 @@
 
 #[doc(hidden)];
 
-use cast;
 use libc;
 use comm::{GenericChan, GenericPort};
 use prelude::*;
 use task;
-use task::atomically;
-use self::finally::Finally;
 
 pub mod at_exit;
 pub mod global;
@@ -28,23 +25,7 @@
 pub mod simd;
 pub mod extfmt;
 #[cfg(not(test))] pub mod lang;
-
-mod rustrt {
-    use unstable::{raw_thread, rust_little_lock};
-
-    pub extern {
-        pub unsafe fn rust_create_little_lock() -> rust_little_lock;
-        pub unsafe fn rust_destroy_little_lock(lock: rust_little_lock);
-        pub unsafe fn rust_lock_little_lock(lock: rust_little_lock);
-        pub unsafe fn rust_unlock_little_lock(lock: rust_little_lock);
-
-        pub unsafe fn rust_raw_thread_start(f: &(&fn())) -> *raw_thread;
-        pub unsafe fn rust_raw_thread_join_delete(thread: *raw_thread);
-    }
-}
-
-#[allow(non_camel_case_types)] // runtime type
-pub type raw_thread = libc::c_void;
+pub mod sync;
 
 
 /**
@@ -63,8 +44,8 @@ pub fn run_in_bare_thread(f: ~fn()) {
         let closure: &fn() = || {
             f()
         };
-        let thread = rustrt::rust_raw_thread_start(&closure);
-        rustrt::rust_raw_thread_join_delete(thread);
+        let thread = rust_raw_thread_start(&closure);
+        rust_raw_thread_join_delete(thread);
         chan.send(());
     }
 }
@@ -88,258 +69,10 @@ fn test_run_in_bare_thread_exchange() {
     }
 }
 
-fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
-    unsafe {
-        let old = intrinsics::atomic_cxchg(address, oldval, newval);
-        old == oldval
-    }
-}
-
-/****************************************************************************
- * Shared state & exclusive ARC
- ****************************************************************************/
-
-struct ArcData<T> {
-    count: libc::intptr_t,
-    // FIXME(#3224) should be able to make this non-option to save memory
-    data: Option<T>,
-}
-
-struct ArcDestruct<T> {
-    data: *libc::c_void,
-}
-
-#[unsafe_destructor]
-impl<T> Drop for ArcDestruct<T>{
-    fn finalize(&self) {
-        unsafe {
-            do task::unkillable {
-                let mut data: ~ArcData<T> = cast::transmute(self.data);
-                let new_count =
-                    intrinsics::atomic_xsub(&mut data.count, 1) - 1;
-                assert!(new_count >= 0);
-                if new_count == 0 {
-                    // drop glue takes over.
-                } else {
-                    cast::forget(data);
-                }
-            }
-        }
-    }
-}
-
-fn ArcDestruct<T>(data: *libc::c_void) -> ArcDestruct<T> {
-    ArcDestruct {
-        data: data
-    }
-}
-
-/**
- * COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
- *
- * Data races between tasks can result in crashes and, with sufficient
- * cleverness, arbitrary type coercion.
- */
-pub type SharedMutableState<T> = ArcDestruct<T>;
-
-pub unsafe fn shared_mutable_state<T:Owned>(data: T) ->
-        SharedMutableState<T> {
-    let data = ~ArcData { count: 1, data: Some(data) };
-    let ptr = cast::transmute(data);
-    ArcDestruct(ptr)
-}
-
-#[inline(always)]
-pub unsafe fn get_shared_mutable_state<T:Owned>(
-        rc: *SharedMutableState<T>) -> *mut T
-{
-    let ptr: ~ArcData<T> = cast::transmute((*rc).data);
-    assert!(ptr.count > 0);
-    let r = cast::transmute(ptr.data.get_ref());
-    cast::forget(ptr);
-    return r;
-}
-#[inline(always)]
-pub unsafe fn get_shared_immutable_state<'a,T:Owned>(
-        rc: &'a SharedMutableState<T>) -> &'a T {
-    let ptr: ~ArcData<T> = cast::transmute((*rc).data);
-    assert!(ptr.count > 0);
-    // Cast us back into the correct region
-    let r = cast::transmute_region(ptr.data.get_ref());
-    cast::forget(ptr);
-    return r;
-}
-
-pub unsafe fn clone_shared_mutable_state<T:Owned>(rc: &SharedMutableState<T>)
-        -> SharedMutableState<T> {
-    let mut ptr: ~ArcData<T> = cast::transmute((*rc).data);
-    let new_count = intrinsics::atomic_xadd(&mut ptr.count, 1) + 1;
-    assert!(new_count >= 2);
-    cast::forget(ptr);
-    ArcDestruct((*rc).data)
-}
-
-impl<T:Owned> Clone for SharedMutableState<T> {
-    fn clone(&self) -> SharedMutableState<T> {
-        unsafe {
-            clone_shared_mutable_state(self)
-        }
-    }
-}
-
-/****************************************************************************/
-
 #[allow(non_camel_case_types)] // runtime type
-pub type rust_little_lock = *libc::c_void;
+pub type raw_thread = libc::c_void;
 
-struct LittleLock {
-    l: rust_little_lock,
-}
-
-impl Drop for LittleLock {
-    fn finalize(&self) {
-        unsafe {
-            rustrt::rust_destroy_little_lock(self.l);
-        }
-    }
-}
-
-fn LittleLock() -> LittleLock {
-    unsafe {
-        LittleLock {
-            l: rustrt::rust_create_little_lock()
-        }
-    }
-}
-
-pub impl LittleLock {
-    #[inline(always)]
-    unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
-        do atomically {
-            rustrt::rust_lock_little_lock(self.l);
-            do (|| {
-                f()
-            }).finally {
-                rustrt::rust_unlock_little_lock(self.l);
-            }
-        }
-    }
-}
-
-struct ExData<T> {
-    lock: LittleLock,
-    failed: bool,
-    data: T,
-}
-
-/**
- * An arc over mutable data that is protected by a lock. For library use only.
- */
-pub struct Exclusive<T> {
-    x: SharedMutableState<ExData<T>>
-}
-
-pub fn exclusive<T:Owned>(user_data: T) -> Exclusive<T> {
-    let data = ExData {
-        lock: LittleLock(),
-        failed: false,
-        data: user_data
-    };
-    Exclusive {
-        x: unsafe {
-            shared_mutable_state(data)
-        }
-    }
-}
-
-impl<T:Owned> Clone for Exclusive<T> {
-    // Duplicate an exclusive ARC, as std::arc::clone.
-    fn clone(&self) -> Exclusive<T> {
-        Exclusive { x: unsafe { clone_shared_mutable_state(&self.x) } }
-    }
-}
-
-pub impl<T:Owned> Exclusive<T> {
-    // Exactly like std::arc::mutex_arc,access(), but with the little_lock
-    // instead of a proper mutex. Same reason for being unsafe.
-    //
-    // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
-    // accessing the provided condition variable) are prohibited while inside
-    // the exclusive. Supporting that is a work in progress.
-    #[inline(always)]
-    unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
-        let rec = get_shared_mutable_state(&self.x);
-        do (*rec).lock.lock {
-            if (*rec).failed {
-                fail!(
-                    ~"Poisoned exclusive - another task failed inside!");
-            }
-            (*rec).failed = true;
-            let result = f(&mut (*rec).data);
-            (*rec).failed = false;
-            result
-        }
-    }
-
-    #[inline(always)]
-    unsafe fn with_imm<U>(&self, f: &fn(x: &T) -> U) -> U {
-        do self.with |x| {
-            f(cast::transmute_immut(x))
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use comm;
-    use super::exclusive;
-    use task;
-    use uint;
-
-    #[test]
-    fn exclusive_arc() {
-        let mut futures = ~[];
-
-        let num_tasks = 10;
-        let count = 10;
-
-        let total = exclusive(~0);
-
-        for uint::range(0, num_tasks) |_i| {
-            let total = total.clone();
-            let (port, chan) = comm::stream();
-            futures.push(port);
-
-            do task::spawn || {
-                for uint::range(0, count) |_i| {
-                    do total.with |count| {
-                        **count += 1;
-                    }
-                }
-                chan.send(());
-            }
-        };
-
-        for futures.each |f| { f.recv() }
-
-        do total.with |total| {
-            assert!(**total == num_tasks * count)
-        };
-    }
-
-    #[test] #[should_fail] #[ignore(cfg(windows))]
-    fn exclusive_poison() {
-        // Tests that if one task fails inside of an exclusive, subsequent
-        // accesses will also fail.
-        let x = exclusive(1);
-        let x2 = x.clone();
-        do task::try || {
-            do x2.with |one| {
-                assert!(*one == 2);
-            }
-        };
-        do x.with |one| {
-            assert!(*one == 1);
-        }
-    }
+extern {
+    fn rust_raw_thread_start(f: &(&fn())) -> *raw_thread;
+    fn rust_raw_thread_join_delete(thread: *raw_thread);
 }
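Annotation (not part of the patch): the `ArcDestruct` finalizer deleted above (and re-added verbatim in the new `unstable/sync.rs` below) is manual atomic reference counting: `atomic_xsub` returns the counter's previous value, so subtracting one yields the new count, and only the handle that drops the count to zero lets the drop glue free the box. A minimal modern-Rust sketch of that drop path (illustrative names, not the patch's API):

    use std::sync::atomic::{AtomicIsize, Ordering};

    struct ArcData<T> {
        count: AtomicIsize, // starts at 1 for the first handle
        data: T,
    }

    // Sketch of ArcDestruct::finalize: fetch_sub returns the old count,
    // so the last owner (old == 1) frees the allocation, and every other
    // owner simply forgets its handle.
    unsafe fn release<T>(ptr: *mut ArcData<T>) {
        let old = (*ptr).count.fetch_sub(1, Ordering::AcqRel);
        assert!(old >= 1);
        if old == 1 {
            drop(Box::from_raw(ptr)); // "drop glue takes over"
        }
    }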
diff --git a/src/libcore/unstable/sync.rs b/src/libcore/unstable/sync.rs
new file mode 100644
index 00000000000..691f893c4fc
--- /dev/null
+++ b/src/libcore/unstable/sync.rs
@@ -0,0 +1,283 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use cast;
+use libc;
+use option::*;
+use task;
+use task::atomically;
+use unstable::finally::Finally;
+use unstable::intrinsics;
+use ops::Drop;
+use clone::Clone;
+use kinds::Owned;
+
+/****************************************************************************
+ * Shared state & exclusive ARC
+ ****************************************************************************/
+
+struct ArcData<T> {
+    count: libc::intptr_t,
+    // FIXME(#3224) should be able to make this non-option to save memory
+    data: Option<T>,
+}
+
+struct ArcDestruct<T> {
+    data: *libc::c_void,
+}
+
+#[unsafe_destructor]
+impl<T> Drop for ArcDestruct<T>{
+    fn finalize(&self) {
+        unsafe {
+            do task::unkillable {
+                let mut data: ~ArcData<T> = cast::transmute(self.data);
+                let new_count =
+                    intrinsics::atomic_xsub(&mut data.count, 1) - 1;
+                assert!(new_count >= 0);
+                if new_count == 0 {
+                    // drop glue takes over.
+                } else {
+                    cast::forget(data);
+                }
+            }
+        }
+    }
+}
+
+fn ArcDestruct<T>(data: *libc::c_void) -> ArcDestruct<T> {
+    ArcDestruct {
+        data: data
+    }
+}
+
+/**
+ * COMPLETELY UNSAFE. Used as a primitive for the safe versions in std::arc.
+ *
+ * Data races between tasks can result in crashes and, with sufficient
+ * cleverness, arbitrary type coercion.
+ */
+pub type SharedMutableState<T> = ArcDestruct<T>;
+
+pub unsafe fn shared_mutable_state<T:Owned>(data: T) ->
+        SharedMutableState<T> {
+    let data = ~ArcData { count: 1, data: Some(data) };
+    let ptr = cast::transmute(data);
+    ArcDestruct(ptr)
+}
+
+#[inline(always)]
+pub unsafe fn get_shared_mutable_state<T:Owned>(
+        rc: *SharedMutableState<T>) -> *mut T
+{
+    let ptr: ~ArcData<T> = cast::transmute((*rc).data);
+    assert!(ptr.count > 0);
+    let r = cast::transmute(ptr.data.get_ref());
+    cast::forget(ptr);
+    return r;
+}
+#[inline(always)]
+pub unsafe fn get_shared_immutable_state<'a,T:Owned>(
+        rc: &'a SharedMutableState<T>) -> &'a T {
+    let ptr: ~ArcData<T> = cast::transmute((*rc).data);
+    assert!(ptr.count > 0);
+    // Cast us back into the correct region
+    let r = cast::transmute_region(ptr.data.get_ref());
+    cast::forget(ptr);
+    return r;
+}
+
+pub unsafe fn clone_shared_mutable_state<T:Owned>(rc: &SharedMutableState<T>)
+        -> SharedMutableState<T> {
+    let mut ptr: ~ArcData<T> = cast::transmute((*rc).data);
+    let new_count = intrinsics::atomic_xadd(&mut ptr.count, 1) + 1;
+    assert!(new_count >= 2);
+    cast::forget(ptr);
+    ArcDestruct((*rc).data)
+}
+
+impl<T:Owned> Clone for SharedMutableState<T> {
+    fn clone(&self) -> SharedMutableState<T> {
+        unsafe {
+            clone_shared_mutable_state(self)
+        }
+    }
+}
+
+/****************************************************************************/
+
+#[allow(non_camel_case_types)] // runtime type
+pub type rust_little_lock = *libc::c_void;
+
+struct LittleLock {
+    l: rust_little_lock,
+}
+
+impl Drop for LittleLock {
+    fn finalize(&self) {
+        unsafe {
+            rust_destroy_little_lock(self.l);
+        }
+    }
+}
+
+fn LittleLock() -> LittleLock {
+    unsafe {
+        LittleLock {
+            l: rust_create_little_lock()
+        }
+    }
+}
+
+pub impl LittleLock {
+    #[inline(always)]
+    unsafe fn lock<T>(&self, f: &fn() -> T) -> T {
+        do atomically {
+            rust_lock_little_lock(self.l);
+            do (|| {
+                f()
+            }).finally {
+                rust_unlock_little_lock(self.l);
+            }
+        }
+    }
+}
+
+struct ExData<T> {
+    lock: LittleLock,
+    failed: bool,
+    data: T,
+}
+
+/**
+ * An arc over mutable data that is protected by a lock. For library use only.
+ */
+pub struct Exclusive<T> {
+    x: SharedMutableState<ExData<T>>
+}
+
+pub fn exclusive<T:Owned>(user_data: T) -> Exclusive<T> {
+    let data = ExData {
+        lock: LittleLock(),
+        failed: false,
+        data: user_data
+    };
+    Exclusive {
+        x: unsafe {
+            shared_mutable_state(data)
+        }
+    }
+}
+
+impl<T:Owned> Clone for Exclusive<T> {
+    // Duplicate an exclusive ARC, as std::arc::clone.
+    fn clone(&self) -> Exclusive<T> {
+        Exclusive { x: unsafe { clone_shared_mutable_state(&self.x) } }
+    }
+}
+
+pub impl<T:Owned> Exclusive<T> {
+    // Exactly like std::arc::mutex_arc,access(), but with the little_lock
+    // instead of a proper mutex. Same reason for being unsafe.
+    //
+    // Currently, scheduling operations (i.e., yielding, receiving on a pipe,
+    // accessing the provided condition variable) are prohibited while inside
+    // the exclusive. Supporting that is a work in progress.
+    #[inline(always)]
+    unsafe fn with<U>(&self, f: &fn(x: &mut T) -> U) -> U {
+        let rec = get_shared_mutable_state(&self.x);
+        do (*rec).lock.lock {
+            if (*rec).failed {
+                fail!(
+                    ~"Poisoned exclusive - another task failed inside!");
+            }
+            (*rec).failed = true;
+            let result = f(&mut (*rec).data);
+            (*rec).failed = false;
+            result
+        }
+    }
+
+    #[inline(always)]
+    unsafe fn with_imm<U>(&self, f: &fn(x: &T) -> U) -> U {
+        do self.with |x| {
+            f(cast::transmute_immut(x))
+        }
+    }
+}
+
+fn compare_and_swap(address: &mut int, oldval: int, newval: int) -> bool {
+    unsafe {
+        let old = intrinsics::atomic_cxchg(address, oldval, newval);
+        old == oldval
+    }
+}
+
+extern {
+    fn rust_create_little_lock() -> rust_little_lock;
+    fn rust_destroy_little_lock(lock: rust_little_lock);
+    fn rust_lock_little_lock(lock: rust_little_lock);
+    fn rust_unlock_little_lock(lock: rust_little_lock);
+}
+
+#[cfg(test)]
+mod tests {
+    use comm;
+    use super::exclusive;
+    use task;
+    use uint;
+
+    #[test]
+    fn exclusive_arc() {
+        let mut futures = ~[];
+
+        let num_tasks = 10;
+        let count = 10;
+
+        let total = exclusive(~0);
+
+        for uint::range(0, num_tasks) |_i| {
+            let total = total.clone();
+            let (port, chan) = comm::stream();
+            futures.push(port);
+
+            do task::spawn || {
+                for uint::range(0, count) |_i| {
+                    do total.with |count| {
+                        **count += 1;
+                    }
+                }
+                chan.send(());
+            }
+        };
+
+        for futures.each |f| { f.recv() }
+
+        do total.with |total| {
+            assert!(**total == num_tasks * count)
+        };
+    }
+
+    #[test] #[should_fail] #[ignore(cfg(windows))]
+    fn exclusive_poison() {
+        // Tests that if one task fails inside of an exclusive, subsequent
+        // accesses will also fail.
+        let x = exclusive(1);
+        let x2 = x.clone();
+        do task::try || {
+            do x2.with |one| {
+                assert!(*one == 2);
+            }
+        };
+        do x.with |one| {
+            assert!(*one == 1);
+        }
+    }
+}
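Annotation (not part of the patch): `Exclusive::with` above implements failure poisoning by hand — it sets `failed` before running the caller's closure and clears it afterwards, so a task that fails while holding the lock leaves the flag set for the next accessor. The same flag protocol condensed into modern Rust for comparison (a sketch under that analogy; today's `std::sync::Mutex` also has built-in poisoning that would trip the `unwrap` first):

    use std::sync::{Arc, Mutex};

    struct ExData<T> {
        failed: bool,
        data: T,
    }

    // Flag-based poisoning as in Exclusive::with: if f panics between
    // the two assignments, `failed` stays true for the next caller.
    fn with<T, U>(ex: &Arc<Mutex<ExData<T>>>, f: impl FnOnce(&mut T) -> U) -> U {
        let mut rec = ex.lock().unwrap();
        if rec.failed {
            panic!("Poisoned exclusive - another task failed inside!");
        }
        rec.failed = true;
        let result = f(&mut rec.data);
        rec.failed = false;
        result
    }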
diff --git a/src/libcore/vec.rs b/src/libcore/vec.rs
index 604f0297b64..e56144ebc0a 100644
--- a/src/libcore/vec.rs
+++ b/src/libcore/vec.rs
@@ -3298,8 +3298,9 @@ mod tests {
     #[test]
     fn test_swap_remove_noncopyable() {
         // Tests that we don't accidentally run destructors twice.
-        let mut v = ~[::unstable::exclusive(()), ::unstable::exclusive(()),
-                      ::unstable::exclusive(())];
+        let mut v = ~[::unstable::sync::exclusive(()),
+                      ::unstable::sync::exclusive(()),
+                      ::unstable::sync::exclusive(())];
         let mut _e = v.swap_remove(0);
         assert!(v.len() == 2);
         _e = v.swap_remove(1);
diff --git a/src/libstd/arc.rs b/src/libstd/arc.rs
index 7af68f3321d..86a77f36fb6 100644
--- a/src/libstd/arc.rs
+++ b/src/libstd/arc.rs
@@ -17,9 +17,9 @@
 use sync;
 use sync::{Mutex, mutex_with_condvars, RWlock, rwlock_with_condvars};
 
 use core::cast;
-use core::unstable::{SharedMutableState, shared_mutable_state};
-use core::unstable::{clone_shared_mutable_state};
-use core::unstable::{get_shared_mutable_state, get_shared_immutable_state};
+use core::unstable::sync::{SharedMutableState, shared_mutable_state};
+use core::unstable::sync::{clone_shared_mutable_state};
+use core::unstable::sync::{get_shared_mutable_state, get_shared_immutable_state};
 use core::ptr;
 use core::task;
diff --git a/src/libstd/sync.rs b/src/libstd/sync.rs
index 17d051518a9..108f24d60dc 100644
--- a/src/libstd/sync.rs
+++ b/src/libstd/sync.rs
@@ -15,7 +15,7 @@
  * in std.
  */
 
-use core::unstable::{Exclusive, exclusive};
+use core::unstable::sync::{Exclusive, exclusive};
 use core::ptr;
 use core::task;
 use core::util;
diff --git a/src/test/compile-fail/noncopyable-match-pattern.rs b/src/test/compile-fail/noncopyable-match-pattern.rs
index e8b01765a44..155b3981483 100644
--- a/src/test/compile-fail/noncopyable-match-pattern.rs
+++ b/src/test/compile-fail/noncopyable-match-pattern.rs
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 fn main() {
-    let x = Some(unstable::exclusive(false));
+    let x = Some(unstable::sync::exclusive(false));
     match x {
         Some(copy z) => { //~ ERROR copying a value of non-copyable type
             do z.with |b| { assert!(!*b); }
diff --git a/src/test/run-pass/alt-ref-binding-in-guard-3256.rs b/src/test/run-pass/alt-ref-binding-in-guard-3256.rs
index 1ece3b5fd93..ed7a6316374 100644
--- a/src/test/run-pass/alt-ref-binding-in-guard-3256.rs
+++ b/src/test/run-pass/alt-ref-binding-in-guard-3256.rs
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 pub fn main() {
-    let x = Some(unstable::exclusive(true));
+    let x = Some(unstable::sync::exclusive(true));
     match x {
         Some(ref z) if z.with(|b| *b) => {
             do z.with |b| { assert!(*b); }