Rollup merge of #110543 - joboet:reentrant_lock, r=m-ou-se

Make `ReentrantLock` public

Implements the ACP rust-lang/libs-team#193.

`@rustbot` label +T-libs-api +S-waiting-on-ACP

commit 332b9be7a1
5 changed files with 375 additions and 207 deletions
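As context for the change, here is a minimal sketch (not part of the diff) of how downstream code can use the newly public type on a nightly toolchain; the `events` binding and the `RefCell<Vec<_>>` payload are illustrative only:

```rust
// Sketch only: exercises the API surface made public by this PR
// (nightly compiler, `reentrant_lock` feature gate).
#![feature(reentrant_lock)]

use std::cell::RefCell;
use std::sync::ReentrantLock;

fn main() {
    // `ReentrantLock::new` is `const`, so a `static` would work as well.
    let events = ReentrantLock::new(RefCell::new(Vec::new()));

    let outer = events.lock();
    outer.borrow_mut().push("outer");

    // Re-locking on the owning thread does not deadlock; the lock count
    // is incremented instead (see `lock_count` in the implementation below).
    let inner = events.lock();
    inner.borrow_mut().push("inner");
    drop(inner);
    drop(outer);

    assert_eq!(*events.lock().borrow(), ["outer", "inner"]);
}
```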
library/std/src/io/stdio.rs

@@ -11,8 +11,9 @@ use crate::fs::File;
 use crate::io::{
     self, BorrowedCursor, BufReader, IoSlice, IoSliceMut, LineWriter, Lines, SpecReadByte,
 };
+use crate::panic::{RefUnwindSafe, UnwindSafe};
 use crate::sync::atomic::{AtomicBool, Ordering};
-use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantMutex, ReentrantMutexGuard};
+use crate::sync::{Arc, Mutex, MutexGuard, OnceLock, ReentrantLock, ReentrantLockGuard};
 use crate::sys::stdio;
 
 type LocalStream = Arc<Mutex<Vec<u8>>>;
@@ -545,7 +546,7 @@ pub struct Stdout {
     // FIXME: this should be LineWriter or BufWriter depending on the state of
     // stdout (tty or not). Note that if this is not line buffered it
     // should also flush-on-panic or some form of flush-on-abort.
-    inner: &'static ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>,
+    inner: &'static ReentrantLock<RefCell<LineWriter<StdoutRaw>>>,
 }
 
 /// A locked reference to the [`Stdout`] handle.
@@ -567,10 +568,10 @@ pub struct Stdout {
 #[must_use = "if unused stdout will immediately unlock"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct StdoutLock<'a> {
-    inner: ReentrantMutexGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
+    inner: ReentrantLockGuard<'a, RefCell<LineWriter<StdoutRaw>>>,
 }
 
-static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
+static STDOUT: OnceLock<ReentrantLock<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
 
 /// Constructs a new handle to the standard output of the current process.
 ///
@@ -624,7 +625,7 @@ static STDOUT: OnceLock<ReentrantMutex<RefCell<LineWriter<StdoutRaw>>>> = OnceLock::new();
 pub fn stdout() -> Stdout {
     Stdout {
         inner: STDOUT
-            .get_or_init(|| ReentrantMutex::new(RefCell::new(LineWriter::new(stdout_raw())))),
+            .get_or_init(|| ReentrantLock::new(RefCell::new(LineWriter::new(stdout_raw())))),
     }
 }
 
@@ -635,7 +636,7 @@ pub fn cleanup() {
     let mut initialized = false;
     let stdout = STDOUT.get_or_init(|| {
         initialized = true;
-        ReentrantMutex::new(RefCell::new(LineWriter::with_capacity(0, stdout_raw())))
+        ReentrantLock::new(RefCell::new(LineWriter::with_capacity(0, stdout_raw())))
     });
 
     if !initialized {
@@ -678,6 +679,12 @@ impl Stdout {
     }
 }
 
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for Stdout {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for Stdout {}
+
 #[stable(feature = "std_debug", since = "1.16.0")]
 impl fmt::Debug for Stdout {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -737,6 +744,12 @@ impl Write for &Stdout {
     }
 }
 
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for StdoutLock<'_> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for StdoutLock<'_> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StdoutLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -786,7 +799,7 @@ impl fmt::Debug for StdoutLock<'_> {
 /// standard library or via raw Windows API calls, will fail.
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Stderr {
-    inner: &'static ReentrantMutex<RefCell<StderrRaw>>,
+    inner: &'static ReentrantLock<RefCell<StderrRaw>>,
 }
 
 /// A locked reference to the [`Stderr`] handle.
@@ -808,7 +821,7 @@ pub struct Stderr {
 #[must_use = "if unused stderr will immediately unlock"]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct StderrLock<'a> {
-    inner: ReentrantMutexGuard<'a, RefCell<StderrRaw>>,
+    inner: ReentrantLockGuard<'a, RefCell<StderrRaw>>,
 }
 
 /// Constructs a new handle to the standard error of the current process.
@@ -862,8 +875,8 @@ pub fn stderr() -> Stderr {
     // Note that unlike `stdout()` we don't use `at_exit` here to register a
     // destructor. Stderr is not buffered, so there's no need to run a
     // destructor for flushing the buffer
-    static INSTANCE: ReentrantMutex<RefCell<StderrRaw>> =
-        ReentrantMutex::new(RefCell::new(stderr_raw()));
+    static INSTANCE: ReentrantLock<RefCell<StderrRaw>> =
+        ReentrantLock::new(RefCell::new(stderr_raw()));
 
     Stderr { inner: &INSTANCE }
 }
@@ -898,6 +911,12 @@ impl Stderr {
     }
 }
 
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for Stderr {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for Stderr {}
+
 #[stable(feature = "std_debug", since = "1.16.0")]
 impl fmt::Debug for Stderr {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -957,6 +976,12 @@ impl Write for &Stderr {
     }
 }
 
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl UnwindSafe for StderrLock<'_> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl RefUnwindSafe for StderrLock<'_> {}
+
 #[stable(feature = "rust1", since = "1.0.0")]
 impl Write for StderrLock<'_> {
     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
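The switch above keeps the property stdio depends on: the stdout handle can be locked again by the thread that already holds it. A small sketch (not part of the diff) of that behaviour, using only stable `std::io` APIs:

```rust
// Sketch: holding `StdoutLock` and then printing (which locks stdout again
// on the same thread) must not deadlock; this is exactly what the re-entrant
// lock inside `Stdout` provides.
use std::io::Write;

fn main() {
    let mut out = std::io::stdout().lock();
    writeln!(out, "locked once").unwrap();

    // `println!` acquires the same stdout lock internally; with a re-entrant
    // lock this only bumps the lock count instead of blocking forever.
    println!("locked again on the same thread");

    writeln!(out, "still holding the outer guard").unwrap();
}
```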
library/std/src/sync/mod.rs

@@ -184,7 +184,8 @@ pub use self::lazy_lock::LazyLock;
 #[stable(feature = "once_cell", since = "1.70.0")]
 pub use self::once_lock::OnceLock;
 
-pub(crate) use self::remutex::{ReentrantMutex, ReentrantMutexGuard};
+#[unstable(feature = "reentrant_lock", issue = "121440")]
+pub use self::reentrant_lock::{ReentrantLock, ReentrantLockGuard};
 
 pub mod mpsc;
 
@@ -196,5 +197,5 @@ mod mutex;
 pub(crate) mod once;
 mod once_lock;
 mod poison;
-mod remutex;
+mod reentrant_lock;
 mod rwlock;
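With the re-export above, the lock and its guard become nameable outside `std` behind the `reentrant_lock` feature gate. A short sketch (not part of the diff; the `hold` helper is made up for illustration):

```rust
// Sketch: the unstable re-export lets user code name both the lock and its
// guard type in signatures (nightly only).
#![feature(reentrant_lock)]

use std::sync::{ReentrantLock, ReentrantLockGuard};

fn hold(lock: &ReentrantLock<String>) -> ReentrantLockGuard<'_, String> {
    lock.lock()
}

fn main() {
    let lock = ReentrantLock::new(String::from("shared"));
    let guard = hold(&lock);
    assert_eq!(guard.as_str(), "shared");
}
```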
library/std/src/sync/reentrant_lock.rs (new file, 320 lines)

@@ -0,0 +1,320 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

use crate::cell::UnsafeCell;
use crate::fmt;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::locks as sys;

/// A re-entrant mutual exclusion lock
///
/// This lock will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// # Examples
///
/// Allow recursively calling a function needing synchronization from within
/// a callback (this is how [`StdoutLock`](crate::io::StdoutLock) is currently
/// implemented):
///
/// ```
/// #![feature(reentrant_lock)]
///
/// use std::cell::RefCell;
/// use std::sync::ReentrantLock;
///
/// pub struct Log {
///     data: RefCell<String>,
/// }
///
/// impl Log {
///     pub fn append(&self, msg: &str) {
///         self.data.borrow_mut().push_str(msg);
///     }
/// }
///
/// static LOG: ReentrantLock<Log> = ReentrantLock::new(Log { data: RefCell::new(String::new()) });
///
/// pub fn with_log<R>(f: impl FnOnce(&Log) -> R) -> R {
///     let log = LOG.lock();
///     f(&*log)
/// }
///
/// with_log(|log| {
///     log.append("Hello");
///     with_log(|log| log.append(" there!"));
/// });
/// ```
///
// # Implementation details
//
// The 'owner' field tracks which thread has locked the mutex.
//
// We use current_thread_unique_ptr() as the thread identifier,
// which is just the address of a thread local variable.
//
// If `owner` is set to the identifier of the current thread,
// we assume the mutex is already locked and instead of locking it again,
// we increment `lock_count`.
//
// When unlocking, we decrement `lock_count`, and only unlock the mutex when
// it reaches zero.
//
// `lock_count` is protected by the mutex and only accessed by the thread that has
// locked the mutex, so needs no synchronization.
//
// `owner` can be checked by other threads that want to see if they already
// hold the lock, so needs to be atomic. If it compares equal, we're on the
// same thread that holds the mutex and memory access can use relaxed ordering
// since we're not dealing with multiple threads. If it's not equal,
// synchronization is left to the mutex, making relaxed memory ordering for
// the `owner` field fine in all cases.
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLock<T: ?Sized> {
    mutex: sys::Mutex,
    owner: AtomicUsize,
    lock_count: UnsafeCell<u32>,
    data: T,
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Sync for ReentrantLock<T> {}

// Because of the `UnsafeCell`, these traits are not implemented automatically
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: UnwindSafe + ?Sized> UnwindSafe for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for ReentrantLock<T> {}

/// An RAII implementation of a "scoped lock" of a re-entrant lock. When this
/// structure is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] implementation.
///
/// This structure is created by the [`lock`](ReentrantLock::lock) method on
/// [`ReentrantLock`].
///
/// # Mutability
///
/// Unlike [`MutexGuard`](super::MutexGuard), `ReentrantLockGuard` does not
/// implement [`DerefMut`](crate::ops::DerefMut), because implementation of
/// the trait would violate Rust’s reference aliasing rules. Use interior
/// mutability (usually [`RefCell`](crate::cell::RefCell)) in order to mutate
/// the guarded data.
#[must_use = "if unused the ReentrantLock will immediately unlock"]
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLockGuard<'a, T: ?Sized + 'a> {
    lock: &'a ReentrantLock<T>,
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> !Send for ReentrantLockGuard<'_, T> {}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T> ReentrantLock<T> {
    /// Creates a new re-entrant lock in an unlocked state ready for use.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// ```
    pub const fn new(t: T) -> ReentrantLock<T> {
        ReentrantLock {
            mutex: sys::Mutex::new(),
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
    }

    /// Consumes this lock, returning the underlying data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    ///
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// assert_eq!(lock.into_inner(), 0);
    /// ```
    pub fn into_inner(self) -> T {
        self.data
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> ReentrantLock<T> {
    /// Acquires the lock, blocking the current thread until it is able to do
    /// so.
    ///
    /// This function will block the caller until it is available to acquire
    /// the lock. Upon returning, the thread is the only thread with the lock
    /// held. When the thread calling this method already holds the lock, the
    /// call succeeds without blocking.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::cell::Cell;
    /// use std::sync::{Arc, ReentrantLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(ReentrantLock::new(Cell::new(0)));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// thread::spawn(move || {
    ///     c_lock.lock().set(10);
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(lock.lock().get(), 10);
    /// ```
    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
            } else {
                self.mutex.lock();
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantLockGuard { lock: self }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantLock` mutably, no actual locking
    /// needs to take place -- the mutable borrow statically guarantees no locks
    /// exist.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let mut lock = ReentrantLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    pub(crate) fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count()?;
                Some(ReentrantLockGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantLockGuard { lock: self })
            } else {
                None
            }
        }
    }

    unsafe fn increment_lock_count(&self) -> Option<()> {
        *self.lock_count.get() = (*self.lock_count.get()).checked_add(1)?;
        Some(())
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("ReentrantLock");
        match self.try_lock() {
            Some(v) => d.field("data", &&*v),
            None => d.field("data", &format_args!("<locked>")),
        };
        d.finish_non_exhaustive()
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: Default> Default for ReentrantLock<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T> From<T> for ReentrantLock<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Deref for ReentrantLockGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.lock.data
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLockGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Display + ?Sized> fmt::Display for ReentrantLockGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.unlock();
            }
        }
    }
}

/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
pub(crate) fn current_thread_unique_ptr() -> usize {
    // Use a non-drop type to make sure it's still available during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| <*const _>::addr(x))
}
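The `# Mutability` section above is the part of the API that tends to surprise people, so here is a sketch of the reasoning (not part of the diff): two guards can be alive on one thread at the same time, which is why the guard can never hand out `&mut T` and why `get_mut` requires `&mut self`.

```rust
// Sketch: why `ReentrantLockGuard` has no `DerefMut`. Both guards below alias
// the same data on the same thread, so only shared access is sound; interior
// mutability (`Cell` here) supplies the mutation. Nightly + feature gate.
#![feature(reentrant_lock)]

use std::cell::Cell;
use std::sync::ReentrantLock;

fn main() {
    let counter = ReentrantLock::new(Cell::new(0));

    let outer = counter.lock();
    let inner = counter.lock(); // second guard on the same thread

    outer.set(outer.get() + 1);
    inner.set(inner.get() + 1);

    drop(inner);
    drop(outer);

    // `get_mut` needs no locking: the `&mut self` borrow proves statically
    // that no guards exist.
    let mut counter = counter;
    assert_eq!(counter.get_mut().get(), 2);
}
```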
library/std/src/sync/reentrant_lock/tests.rs

@@ -1,17 +1,17 @@
-use super::{ReentrantMutex, ReentrantMutexGuard};
+use super::{ReentrantLock, ReentrantLockGuard};
 use crate::cell::RefCell;
 use crate::sync::Arc;
 use crate::thread;
 
 #[test]
 fn smoke() {
-    let m = ReentrantMutex::new(());
+    let l = ReentrantLock::new(());
     {
-        let a = m.lock();
+        let a = l.lock();
         {
-            let b = m.lock();
+            let b = l.lock();
             {
-                let c = m.lock();
+                let c = l.lock();
                 assert_eq!(*c, ());
             }
             assert_eq!(*b, ());
@@ -22,15 +22,15 @@ fn smoke() {
 
 #[test]
 fn is_mutex() {
-    let m = Arc::new(ReentrantMutex::new(RefCell::new(0)));
-    let m2 = m.clone();
-    let lock = m.lock();
+    let l = Arc::new(ReentrantLock::new(RefCell::new(0)));
+    let l2 = l.clone();
+    let lock = l.lock();
     let child = thread::spawn(move || {
-        let lock = m2.lock();
+        let lock = l2.lock();
         assert_eq!(*lock.borrow(), 4950);
     });
     for i in 0..100 {
-        let lock = m.lock();
+        let lock = l.lock();
         *lock.borrow_mut() += i;
     }
     drop(lock);
@@ -39,20 +39,20 @@ fn is_mutex() {
 
 #[test]
 fn trylock_works() {
-    let m = Arc::new(ReentrantMutex::new(()));
-    let m2 = m.clone();
-    let _lock = m.try_lock();
-    let _lock2 = m.try_lock();
+    let l = Arc::new(ReentrantLock::new(()));
+    let l2 = l.clone();
+    let _lock = l.try_lock();
+    let _lock2 = l.try_lock();
     thread::spawn(move || {
-        let lock = m2.try_lock();
+        let lock = l2.try_lock();
         assert!(lock.is_none());
     })
     .join()
     .unwrap();
-    let _lock3 = m.try_lock();
+    let _lock3 = l.try_lock();
 }
 
-pub struct Answer<'a>(pub ReentrantMutexGuard<'a, RefCell<u32>>);
+pub struct Answer<'a>(pub ReentrantLockGuard<'a, RefCell<u32>>);
 impl Drop for Answer<'_> {
     fn drop(&mut self) {
         *self.0.borrow_mut() = 42;
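The tests above exercise re-entrancy, cross-thread exclusion and `try_lock`. One property they rely on implicitly, sketched below (not part of the diff), is that guards taken on the same thread may be dropped in any order: each drop only decrements `lock_count`, and the underlying mutex is released once the count reaches zero.

```rust
// Sketch: out-of-order guard drops on one thread are fine; only the final
// drop (count reaching zero) unlocks the underlying OS mutex.
#![feature(reentrant_lock)]

use std::sync::ReentrantLock;

fn main() {
    let lock = ReentrantLock::new(());
    let a = lock.lock(); // count = 1
    let b = lock.lock(); // count = 2
    drop(a);             // count = 1, still held by this thread
    let c = lock.lock(); // count = 2
    drop(b);             // count = 1
    drop(c);             // count = 0, lock released
}
```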
library/std/src/sync/remutex.rs (deleted, 178 lines)

@@ -1,178 +0,0 @@
#[cfg(all(test, not(target_os = "emscripten")))]
mod tests;

use crate::cell::UnsafeCell;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use crate::sys::locks as sys;

/// A reentrant mutual exclusion
///
/// This mutex will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// This is used by stdout().lock() and friends.
///
/// ## Implementation details
///
/// The 'owner' field tracks which thread has locked the mutex.
///
/// We use current_thread_unique_ptr() as the thread identifier,
/// which is just the address of a thread local variable.
///
/// If `owner` is set to the identifier of the current thread,
/// we assume the mutex is already locked and instead of locking it again,
/// we increment `lock_count`.
///
/// When unlocking, we decrement `lock_count`, and only unlock the mutex when
/// it reaches zero.
///
/// `lock_count` is protected by the mutex and only accessed by the thread that has
/// locked the mutex, so needs no synchronization.
///
/// `owner` can be checked by other threads that want to see if they already
/// hold the lock, so needs to be atomic. If it compares equal, we're on the
/// same thread that holds the mutex and memory access can use relaxed ordering
/// since we're not dealing with multiple threads. If it's not equal,
/// synchronization is left to the mutex, making relaxed memory ordering for
/// the `owner` field fine in all cases.
pub struct ReentrantMutex<T> {
    mutex: sys::Mutex,
    owner: AtomicUsize,
    lock_count: UnsafeCell<u32>,
    data: T,
}

unsafe impl<T: Send> Send for ReentrantMutex<T> {}
unsafe impl<T: Send> Sync for ReentrantMutex<T> {}

impl<T> UnwindSafe for ReentrantMutex<T> {}
impl<T> RefUnwindSafe for ReentrantMutex<T> {}

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// Deref implementation.
///
/// # Mutability
///
/// Unlike `MutexGuard`, `ReentrantMutexGuard` does not implement `DerefMut`,
/// because implementation of the trait would violate Rust’s reference aliasing
/// rules. Use interior mutability (usually `RefCell`) in order to mutate the
/// guarded data.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, T: 'a> {
    lock: &'a ReentrantMutex<T>,
}

impl<T> !Send for ReentrantMutexGuard<'_, T> {}

impl<T> ReentrantMutex<T> {
    /// Creates a new reentrant mutex in an unlocked state.
    pub const fn new(t: T) -> ReentrantMutex<T> {
        ReentrantMutex {
            mutex: sys::Mutex::new(),
            owner: AtomicUsize::new(0),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
    }

    /// Acquires a mutex, blocking the current thread until it is able to do so.
    ///
    /// This function will block the caller until it is available to acquire the mutex.
    /// Upon returning, the thread is the only thread with the mutex held. When the thread
    /// calling this method already holds the lock, the call shall succeed without
    /// blocking.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return failure if the mutex would otherwise be
    /// acquired.
    pub fn lock(&self) -> ReentrantMutexGuard<'_, T> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
            } else {
                self.mutex.lock();
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantMutexGuard { lock: self }
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `Err` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
    ///
    /// # Errors
    ///
    /// If another user of this mutex panicked while holding the mutex, then
    /// this call will return failure if the mutex would otherwise be
    /// acquired.
    pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, T>> {
        let this_thread = current_thread_unique_ptr();
        // Safety: We only touch lock_count when we own the lock.
        unsafe {
            if self.owner.load(Relaxed) == this_thread {
                self.increment_lock_count();
                Some(ReentrantMutexGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.store(this_thread, Relaxed);
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantMutexGuard { lock: self })
            } else {
                None
            }
        }
    }

    unsafe fn increment_lock_count(&self) {
        *self.lock_count.get() = (*self.lock_count.get())
            .checked_add(1)
            .expect("lock count overflow in reentrant mutex");
    }
}

impl<T> Deref for ReentrantMutexGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.lock.data
    }
}

impl<T> Drop for ReentrantMutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.store(0, Relaxed);
                self.lock.mutex.unlock();
            }
        }
    }
}

/// Get an address that is unique per running thread.
///
/// This can be used as a non-null usize-sized ID.
pub fn current_thread_unique_ptr() -> usize {
    // Use a non-drop type to make sure it's still available during thread destruction.
    thread_local! { static X: u8 = const { 0 } }
    X.with(|x| <*const _>::addr(x))
}
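Both the removed and the new implementation use the address of a thread-local as a cheap, non-null per-thread identifier. A stable-Rust sketch of the same idea (names are illustrative, not from the diff):

```rust
// Sketch: a `thread_local!`'s address is distinct for every live thread,
// so it can serve as a usize-sized thread ID without any OS call.
fn thread_id_addr() -> usize {
    thread_local! { static MARKER: u8 = const { 0 } }
    MARKER.with(|m| m as *const u8 as usize)
}

fn main() {
    let main_id = thread_id_addr();
    let child_id = std::thread::spawn(thread_id_addr).join().unwrap();
    assert_ne!(main_id, child_id);
}
```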