
Auto merge of #76645 - fusion-engineering-forks:windows-lock, r=kennytm

Small cleanups in Windows Mutex.

 - Move `held` into the boxed part, since the SRW lock implementation does not use it. This makes the Mutex 50% smaller.
 - Use `Cell` instead of `UnsafeCell` for `held`, so that `.replace()` can be used (a minimal sketch of this pattern follows the diffstat below).
 - Add some comments.
 - Avoid creating multiple `&mut`s to the critical section object in `ReentrantMutex` (see the sketch after the diff).
bors 2020-09-17 19:23:58 +00:00
commit f3c923a13a
2 changed files with 42 additions and 43 deletions
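
Before the diff itself, a minimal, self-contained sketch of the `Cell`-based held flag described in the second bullet; `ToyInner`, `try_acquire_flag`, and `release_flag` are invented names for the illustration, not the real `std::sys` internals:

```rust
use std::cell::Cell;

/// Toy stand-in for the boxed fallback state: in the real code `held` sits in
/// `Inner` next to the `ReentrantMutex` and records whether the lock is
/// already held, so a recursive lock attempt can be detected.
struct ToyInner {
    held: Cell<bool>,
}

impl ToyInner {
    /// `Cell::replace(true)` stores `true` and returns the previous value in
    /// one safe call, so "was it already held?" needs no `unsafe` at all.
    fn try_acquire_flag(&self) -> bool {
        !self.held.replace(true)
    }

    fn release_flag(&self) {
        self.held.set(false);
    }
}

fn main() {
    let inner = ToyInner { held: Cell::new(false) };
    assert!(inner.try_acquire_flag());  // first acquisition succeeds
    assert!(!inner.try_acquire_flag()); // second one is recursive: rejected
    inner.release_flag();
    assert!(inner.try_acquire_flag());  // fine again after release
}
```

With the old `UnsafeCell<bool>`, the same test-and-set needed separate reads and writes through a raw pointer inside `unsafe` (the `flag_locked` helper that the diff removes); `Cell::replace` collapses it into one safe call.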

library/std/src/lib.rs

@@ -315,6 +315,7 @@
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![feature(unsafe_block_in_unsafe_fn)]
+#![feature(unsafe_cell_raw_get)]
 #![feature(untagged_unions)]
 #![feature(unwind_attributes)]
 #![feature(vec_into_raw_parts)]

library/std/src/sys/windows/mutex.rs

@@ -19,20 +19,25 @@
 //! CriticalSection is used and we keep track of who's holding the mutex to
 //! detect recursive locks.
 
-use crate::cell::UnsafeCell;
+use crate::cell::{Cell, UnsafeCell};
 use crate::mem::{self, MaybeUninit};
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sys::c;
 use crate::sys::compat;
 
 pub struct Mutex {
+    // This is either directly an SRWLOCK (if supported), or a Box<Inner> otherwise.
     lock: AtomicUsize,
-    held: UnsafeCell<bool>,
 }
 
 unsafe impl Send for Mutex {}
 unsafe impl Sync for Mutex {}
 
+struct Inner {
+    remutex: ReentrantMutex,
+    held: Cell<bool>,
+}
+
 #[derive(Clone, Copy)]
 enum Kind {
     SRWLock = 1,
@@ -51,7 +56,6 @@ impl Mutex {
             // This works because SRWLOCK_INIT is 0 (wrapped in a struct), so we are also properly
             // initializing an SRWLOCK here.
             lock: AtomicUsize::new(0),
-            held: UnsafeCell::new(false),
         }
     }
     #[inline]
@@ -60,10 +64,11 @@ impl Mutex {
         match kind() {
             Kind::SRWLock => c::AcquireSRWLockExclusive(raw(self)),
             Kind::CriticalSection => {
-                let re = self.remutex();
-                (*re).lock();
-                if !self.flag_locked() {
-                    (*re).unlock();
+                let inner = &*self.inner();
+                inner.remutex.lock();
+                if inner.held.replace(true) {
+                    // It was already locked, so we got a recursive lock which we do not want.
+                    inner.remutex.unlock();
                     panic!("cannot recursively lock a mutex");
                 }
             }
@@ -73,23 +78,27 @@ impl Mutex {
         match kind() {
             Kind::SRWLock => c::TryAcquireSRWLockExclusive(raw(self)) != 0,
             Kind::CriticalSection => {
-                let re = self.remutex();
-                if !(*re).try_lock() {
+                let inner = &*self.inner();
+                if !inner.remutex.try_lock() {
                     false
-                } else if self.flag_locked() {
-                    true
+                } else if inner.held.replace(true) {
+                    // It was already locked, so we got a recursive lock which we do not want.
+                    inner.remutex.unlock();
+                    false
                 } else {
-                    (*re).unlock();
-                    false
+                    true
                 }
             }
         }
     }
 
     pub unsafe fn unlock(&self) {
-        *self.held.get() = false;
         match kind() {
             Kind::SRWLock => c::ReleaseSRWLockExclusive(raw(self)),
-            Kind::CriticalSection => (*self.remutex()).unlock(),
+            Kind::CriticalSection => {
+                let inner = &*(self.lock.load(Ordering::SeqCst) as *const Inner);
+                inner.held.set(false);
+                inner.remutex.unlock();
+            }
         }
     }
 
     pub unsafe fn destroy(&self) {
@@ -97,38 +106,27 @@ impl Mutex {
             Kind::SRWLock => {}
             Kind::CriticalSection => match self.lock.load(Ordering::SeqCst) {
                 0 => {}
-                n => {
-                    Box::from_raw(n as *mut ReentrantMutex).destroy();
-                }
+                n => Box::from_raw(n as *mut Inner).remutex.destroy(),
             },
         }
     }
 
-    unsafe fn remutex(&self) -> *mut ReentrantMutex {
+    unsafe fn inner(&self) -> *const Inner {
         match self.lock.load(Ordering::SeqCst) {
             0 => {}
-            n => return n as *mut _,
+            n => return n as *const _,
         }
-        let re = box ReentrantMutex::uninitialized();
-        re.init();
-        let re = Box::into_raw(re);
-        match self.lock.compare_and_swap(0, re as usize, Ordering::SeqCst) {
-            0 => re,
+        let inner = box Inner { remutex: ReentrantMutex::uninitialized(), held: Cell::new(false) };
+        inner.remutex.init();
+        let inner = Box::into_raw(inner);
+        match self.lock.compare_and_swap(0, inner as usize, Ordering::SeqCst) {
+            0 => inner,
             n => {
-                Box::from_raw(re).destroy();
-                n as *mut _
+                Box::from_raw(inner).remutex.destroy();
+                n as *const _
             }
         }
     }
-
-    unsafe fn flag_locked(&self) -> bool {
-        if *self.held.get() {
-            false
-        } else {
-            *self.held.get() = true;
-            true
-        }
-    }
 }
 
 fn kind() -> Kind {
@@ -150,7 +148,7 @@ fn kind() -> Kind {
 }
 
 pub struct ReentrantMutex {
-    inner: UnsafeCell<MaybeUninit<c::CRITICAL_SECTION>>,
+    inner: MaybeUninit<UnsafeCell<c::CRITICAL_SECTION>>,
 }
 
 unsafe impl Send for ReentrantMutex {}
@@ -158,27 +156,27 @@ unsafe impl Sync for ReentrantMutex {}
 
 impl ReentrantMutex {
     pub const fn uninitialized() -> ReentrantMutex {
-        ReentrantMutex { inner: UnsafeCell::new(MaybeUninit::uninit()) }
+        ReentrantMutex { inner: MaybeUninit::uninit() }
     }
 
     pub unsafe fn init(&self) {
-        c::InitializeCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::InitializeCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 
     pub unsafe fn lock(&self) {
-        c::EnterCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::EnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 
     #[inline]
     pub unsafe fn try_lock(&self) -> bool {
-        c::TryEnterCriticalSection((&mut *self.inner.get()).as_mut_ptr()) != 0
+        c::TryEnterCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr())) != 0
    }
 
     pub unsafe fn unlock(&self) {
-        c::LeaveCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::LeaveCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 
     pub unsafe fn destroy(&self) {
-        c::DeleteCriticalSection((&mut *self.inner.get()).as_mut_ptr());
+        c::DeleteCriticalSection(UnsafeCell::raw_get(self.inner.as_ptr()));
     }
 }
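
The `ReentrantMutex` hunks above are what the last bullet of the commit message refers to: the old code materialized a `&mut c::CRITICAL_SECTION` via `&mut *self.inner.get()` even though the kernel may mutate that object concurrently, while the new code nests the types as `MaybeUninit<UnsafeCell<_>>` and uses `UnsafeCell::raw_get` (then unstable, hence the new feature gate in `lib.rs`; stable since Rust 1.56) to go from `&self` straight to a raw `*mut` without ever creating a reference to the OS object. A minimal sketch of that pointer path, with `OsObject` and `Wrapper` as invented stand-ins for `c::CRITICAL_SECTION` and `ReentrantMutex`:

```rust
use std::cell::UnsafeCell;
use std::mem::MaybeUninit;

/// Invented stand-in for an OS-managed object such as CRITICAL_SECTION: the
/// kernel may write to it at any time, so Rust code should never hold a
/// `&mut` to it.
#[repr(C)]
struct OsObject {
    state: u64,
}

/// Invented stand-in for `ReentrantMutex` with the new field nesting.
struct Wrapper {
    inner: MaybeUninit<UnsafeCell<OsObject>>,
}

impl Wrapper {
    const fn uninitialized() -> Wrapper {
        Wrapper { inner: MaybeUninit::uninit() }
    }

    /// `&self` -> `*const UnsafeCell<OsObject>` -> `*mut OsObject`,
    /// with no reference to the OS object created along the way.
    fn as_raw(&self) -> *mut OsObject {
        UnsafeCell::raw_get(self.inner.as_ptr())
    }
}

fn main() {
    let w = Wrapper::uninitialized();
    let p = w.as_raw();
    // In the real code this pointer is handed to FFI calls such as
    // InitializeCriticalSection; here we just write through it.
    unsafe {
        p.write(OsObject { state: 0 });
        (*p).state += 1;
        assert_eq!((*p).state, 1);
    }
}
```

Writing through `p` is fine even though `w` is never declared `mut`, because the destination sits inside the `UnsafeCell`; that interior mutability is exactly what the FFI calls rely on.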