
Add optimized lock methods for Sharded

John Kåre Alsaker 2023-08-16 13:50:31 +02:00
parent 3d249706aa
commit 8fc160b742
5 changed files with 133 additions and 33 deletions
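The diff below gives `Sharded` lock methods that pick the locking strategy up front: the `Single` variant (no cross-thread synchronization) takes a `lock_assume_no_sync` fast path, while the `Shards` variant goes through `lock_assume_sync`. A minimal, self-contained sketch of that dispatch idea, with illustrative names only (not the rustc types):

use std::cell::RefCell;
use std::sync::Mutex;

// Illustrative stand-in for the Single/Shards split: the locking strategy is
// chosen once at construction time, then branched on at lock time.
enum MaybeSharded<T> {
    // Single-threaded: a RefCell's borrow flag stands in for a lock.
    Single(RefCell<T>),
    // Multi-threaded: a real mutex per shard.
    Sharded(Vec<Mutex<T>>),
}

impl<T> MaybeSharded<T> {
    fn with_locked<R>(&self, shard: usize, f: impl FnOnce(&mut T) -> R) -> R {
        match self {
            // Fast path: no atomic operations, just the borrow-flag check.
            MaybeSharded::Single(cell) => f(&mut *cell.borrow_mut()),
            // Slow path: reduce the index into range and take that shard's mutex.
            MaybeSharded::Sharded(shards) => {
                f(&mut *shards[shard % shards.len()].lock().unwrap())
            }
        }
    }
}

fn main() {
    let single = MaybeSharded::Single(RefCell::new(0u32));
    single.with_locked(0, |v| *v += 1);

    let sharded = MaybeSharded::Sharded((0..4).map(|_| Mutex::new(0u32)).collect());
    sharded.with_locked(7, |v| *v += 1);
}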

View file

@@ -73,6 +73,53 @@ impl<T> Sharded<T> {
}
}
/// The shard is selected by hashing `val` with `FxHasher`.
#[inline]
pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
match self {
Self::Single(single) => {
// Synchronization is disabled, so use the `lock_assume_no_sync` method optimized
// for that case.
// SAFETY: We know `is_dyn_thread_safe` was false when creating the lock, thus
// `might_be_dyn_thread_safe` was also false.
unsafe { single.lock_assume_no_sync() }
}
#[cfg(parallel_compiler)]
Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
}
}
#[inline]
pub fn lock_shard_by_hash(&self, hash: u64) -> LockGuard<'_, T> {
self.lock_shard_by_index(get_shard_hash(hash))
}
#[inline]
pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
match self {
Self::Single(single) => {
// Synchronization is disabled, so use the `lock_assume_no_sync` method optimized
// for that case.
// SAFETY: We know `is_dyn_thread_safe` was false when creating the lock, thus
// `might_be_dyn_thread_safe` was also false.
unsafe { single.lock_assume_no_sync() }
}
#[cfg(parallel_compiler)]
Self::Shards(shards) => {
// Synchronization is enabled, so use the `lock_assume_sync` method optimized
// for that case.
// SAFETY (get_unchecked): The index gets ANDed with the shard mask, ensuring it is
// always in bounds.
// SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
// the lock, thus `might_be_dyn_thread_safe` was also true.
unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume_sync() }
}
}
}
#[inline]
pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
match self {
@@ -124,7 +171,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
Q: Hash + Eq,
{
let hash = make_hash(value);
let mut shard = self.get_shard_by_hash(hash).lock();
let mut shard = self.lock_shard_by_hash(hash);
let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
match entry {
@@ -144,7 +191,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
Q: Hash + Eq,
{
let hash = make_hash(&value);
let mut shard = self.get_shard_by_hash(hash).lock();
let mut shard = self.lock_shard_by_hash(hash);
let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
match entry {
@@ -166,7 +213,7 @@ pub trait IntoPointer {
impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
let hash = make_hash(&value);
let shard = self.get_shard_by_hash(hash).lock();
let shard = self.lock_shard_by_hash(hash);
let value = value.into_pointer();
shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
}

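The SAFETY argument for the `get_unchecked` call in `lock_shard_by_index` above rests on the shard count being a power of two, so that masking the index can never produce an out-of-bounds value. A small standalone illustration of that bound (the SHARDS value here is an arbitrary example, not the rustc constant):

// With a power-of-two shard count, `index & (SHARDS - 1)` is equivalent to
// `index % SHARDS`, so the result is always in bounds for an array of length
// SHARDS -- which is what makes the unchecked indexing sound.
const SHARDS: usize = 16;

fn shard_index(hash: u64) -> usize {
    (hash as usize) & (SHARDS - 1)
}

fn main() {
    for h in [0u64, 15, 16, 12345, u64::MAX] {
        let i = shard_index(h);
        assert!(i < SHARDS);
        println!("hash {h:>20} -> shard {i}");
    }
}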
View file

@@ -49,6 +49,20 @@ impl<T> Lock<T> {
self.0.try_borrow_mut().ok()
}
#[inline(always)]
#[track_caller]
// This is unsafe to match the API for the `parallel_compiler` case.
pub unsafe fn lock_assume_no_sync(&self) -> LockGuard<'_, T> {
self.0.borrow_mut()
}
#[inline(always)]
#[track_caller]
// This is unsafe to match the API for the `parallel_compiler` case.
pub unsafe fn lock_assume_sync(&self) -> LockGuard<'_, T> {
self.0.borrow_mut()
}
#[inline(always)]
#[track_caller]
pub fn lock(&self) -> LockGuard<'_, T> {
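The two wrapper methods added above are marked unsafe even though this non-parallel `Lock` implementation cannot misuse them; as the comments note, this is purely to match the `parallel_compiler` API so call sites compile identically under either configuration. A hedged sketch of that pattern, using a made-up stand-in type:

use std::cell::{RefCell, RefMut};

struct SimpleLock(RefCell<u32>);

impl SimpleLock {
    /// # Safety
    /// Mirrors the synchronized variant's contract (lock created in non-sync
    /// mode); this single-threaded stand-in has nothing to violate, but an
    /// identical signature lets callers be written once for both configurations.
    unsafe fn lock_assume_no_sync(&self) -> RefMut<'_, u32> {
        self.0.borrow_mut()
    }
}

fn main() {
    let lock = SimpleLock(RefCell::new(0));
    // SAFETY: this stand-in is always in non-sync mode by construction.
    unsafe { *lock.lock_assume_no_sync() += 1 };
    assert_eq!(*lock.0.borrow(), 1);
}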
@@ -150,24 +164,45 @@ impl LockRaw {
#[inline(always)]
fn lock(&self) {
if super::ERROR_CHECKING {
// We're in debugging mode, so assert that the lock is not already held,
// turning a would-be wait into a panic.
assert_eq!(self.try_lock(), true, "lock must not be held");
} else {
// SAFETY: This is safe since the union fields are used in accordance with `self.sync`.
unsafe {
if likely(!self.sync) {
if unlikely(self.opt.cell.replace(true)) {
cold_path(|| panic!("lock was already held"))
}
} else {
self.opt.lock.lock();
}
// SAFETY: This is safe since `self.sync` is used in accordance with the preconditions of
// `lock_assume_no_sync` and `lock_assume_sync`.
unsafe {
if likely(!self.sync) {
self.lock_assume_no_sync()
} else {
self.lock_assume_sync();
}
}
}
/// This acquires the lock, assuming no synchronization is required.
///
/// # Safety
/// This method must only be called if `might_be_dyn_thread_safe` was false on lock creation.
#[inline(always)]
unsafe fn lock_assume_no_sync(&self) {
// SAFETY: This is safe since `self.opt.cell` is the union field in use, per the
// precondition on this function.
unsafe {
if unlikely(self.opt.cell.replace(true)) {
cold_path(|| panic!("lock was already held"))
}
}
}
/// This acquires the lock, assuming synchronization is required.
///
/// # Safety
/// This method must only be called if `might_be_dyn_thread_safe` was true on lock creation.
#[inline(always)]
unsafe fn lock_assume_sync(&self) {
// SAFETY: This is safe since `self.opt.lock` is the union field in use, per the
// precondition on this function.
unsafe {
self.opt.lock.lock();
}
}
/// This unlocks the lock.
///
/// Safety
@@ -217,6 +252,30 @@ impl<T> Lock<T> {
if self.raw.try_lock() { Some(LockGuard { lock: self, marker: PhantomData }) } else { None }
}
/// This acquires the lock, assuming no synchronization is required.
///
/// # Safety
/// This method must only be called if `might_be_dyn_thread_safe` was false on lock creation.
#[inline(always)]
pub(crate) unsafe fn lock_assume_no_sync(&self) -> LockGuard<'_, T> {
unsafe {
self.raw.lock_assume_no_sync();
}
LockGuard { lock: self, marker: PhantomData }
}
/// This acquires the lock, assuming synchronization is required.
///
/// # Safety
/// This method must only be called if `might_be_dyn_thread_safe` was true on lock creation.
#[inline(always)]
pub(crate) unsafe fn lock_assume_sync(&self) -> LockGuard<'_, T> {
unsafe {
self.raw.lock_assume_sync();
}
LockGuard { lock: self, marker: PhantomData }
}
#[inline(always)]
pub fn lock(&self) -> LockGuard<'_, T> {
self.raw.lock();