diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs index afb4cf3943a..1952c35eb9d 100644 --- a/src/libextra/sync.rs +++ b/src/libextra/sync.rs @@ -135,9 +135,7 @@ impl<Q:Send> Sem<Q> { do task::unkillable { do (|| { self.acquire(); - unsafe { - do task::rekillable { blk() } - } + do task::rekillable { blk() } }).finally { self.release(); } @@ -234,10 +232,8 @@ impl<'self> Condvar<'self> { // signaller already sent -- I mean 'unconditionally' in contrast // with acquire().) do (|| { - unsafe { - do task::rekillable { - let _ = WaitEnd.take_unwrap().recv(); - } + do task::rekillable { + let _ = WaitEnd.take_unwrap().recv(); } }).finally { // Reacquire the condvar. Note this is back in the unkillable @@ -516,14 +512,12 @@ impl RWLock { * 'write' from other tasks will run concurrently with this one. */ pub fn write<U>(&self, blk: &fn() -> U) -> U { - unsafe { - do task::unkillable { - (&self.order_lock).acquire(); - do (&self.access_lock).access { - (&self.order_lock).release(); - do task::rekillable { - blk() - } + do task::unkillable { + (&self.order_lock).acquire(); + do (&self.access_lock).access { + (&self.order_lock).release(); + do task::rekillable { + blk() } } } @@ -562,16 +556,14 @@ impl RWLock { // which can't happen until T2 finishes the downgrade-read entirely. // The astute reader will also note that making waking writers use the // order_lock is better for not starving readers. 
- unsafe { - do task::unkillable { - (&self.order_lock).acquire(); - do (&self.access_lock).access_cond |cond| { - (&self.order_lock).release(); - do task::rekillable { - let opt_lock = Just(&self.order_lock); - blk(&Condvar { sem: cond.sem, order: opt_lock, - token: NonCopyable::new() }) - } + do task::unkillable { + (&self.order_lock).acquire(); + do (&self.access_lock).access_cond |cond| { + (&self.order_lock).release(); + do task::rekillable { + let opt_lock = Just(&self.order_lock); + blk(&Condvar { sem: cond.sem, order: opt_lock, + token: NonCopyable::new() }) } } } @@ -606,10 +598,8 @@ impl RWLock { (&self.access_lock).acquire(); (&self.order_lock).release(); do (|| { - unsafe { - do task::rekillable { - blk(RWLockWriteMode { lock: self, token: NonCopyable::new() }) - } + do task::rekillable { + blk(RWLockWriteMode { lock: self, token: NonCopyable::new() }) } }).finally { let writer_or_last_reader; diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs index e76b81a904d..bac4991d858 100644 --- a/src/libstd/task/mod.rs +++ b/src/libstd/task/mod.rs @@ -597,21 +597,35 @@ pub fn unkillable<U>(f: &fn() -> U) -> U { } } -/// The inverse of unkillable. Only ever to be used nested in unkillable(). -pub unsafe fn rekillable<U>(f: &fn() -> U) -> U { +/** + * Makes a task marked as unkillable killable again + * + * # Example + * + * ~~~ + * do task::unkillable { + * do task::rekillable { + * // Task is killable + * } + * } + * ~~~ + */ +pub fn rekillable<U>(f: &fn() -> U) -> U { use rt::task::Task; - if in_green_task_context() { - let t = Local::unsafe_borrow::<Task>(); - do (|| { - (*t).death.allow_kill((*t).unwinder.unwinding); + unsafe { + if in_green_task_context() { + let t = Local::unsafe_borrow::<Task>(); + do (|| { + (*t).death.allow_kill((*t).unwinder.unwinding); + f() + }).finally { + (*t).death.inhibit_kill((*t).unwinder.unwinding); + } + } else { + // FIXME(#3095): As in unkillable(). 
f() - }).finally { - (*t).death.inhibit_kill((*t).unwinder.unwinding); } - } else { - // FIXME(#3095): As in unkillable(). - f() } } @@ -646,11 +660,9 @@ fn test_kill_rekillable_task() { do run_in_newsched_task { do task::try { do task::unkillable { - unsafe { - do task::rekillable { - do task::spawn { - fail!(); - } + do task::rekillable { + do task::spawn { + fail!(); } } }