Make the stdlib largely conform to strict provenance.
Some things, like the unwinders and system APIs, are not fully conformant; this commit only covers a lot of low-hanging fruit.
This commit is contained in:
parent
5167b6891c
commit
c7de289e1c
30 changed files with 100 additions and 81 deletions
|
@ -91,9 +91,12 @@ use crate::cell::Cell;
|
|||
use crate::fmt;
|
||||
use crate::marker;
|
||||
use crate::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use crate::ptr;
|
||||
use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
|
||||
use crate::thread::{self, Thread};
|
||||
|
||||
type Masked = ();
|
||||
|
||||
/// A synchronization primitive which can be used to run a one-time global
|
||||
/// initialization. Useful for one-time initialization for FFI or related
|
||||
/// functionality. This type can only be constructed with [`Once::new()`].
|
||||
|
@ -113,7 +116,7 @@ use crate::thread::{self, Thread};
|
|||
pub struct Once {
|
||||
// `state_and_queue` is actually a pointer to a `Waiter` with extra state
|
||||
// bits, so we add the `PhantomData` appropriately.
|
||||
state_and_queue: AtomicUsize,
|
||||
state_and_queue: AtomicPtr<Masked>,
|
||||
_marker: marker::PhantomData<*const Waiter>,
|
||||
}
|
||||
|
||||
|
@ -136,7 +139,7 @@ impl RefUnwindSafe for Once {}
|
|||
#[derive(Debug)]
|
||||
pub struct OnceState {
|
||||
poisoned: bool,
|
||||
set_state_on_drop_to: Cell<usize>,
|
||||
set_state_on_drop_to: Cell<*mut Masked>,
|
||||
}
|
||||
|
||||
/// Initialization value for static [`Once`] values.
|
||||
|
@ -184,8 +187,8 @@ struct Waiter {
|
|||
// Every node is a struct on the stack of a waiting thread.
|
||||
// Will wake up the waiters when it gets dropped, i.e. also on panic.
|
||||
struct WaiterQueue<'a> {
|
||||
state_and_queue: &'a AtomicUsize,
|
||||
set_state_on_drop_to: usize,
|
||||
state_and_queue: &'a AtomicPtr<Masked>,
|
||||
set_state_on_drop_to: *mut Masked,
|
||||
}
|
||||
|
||||
impl Once {
|
||||
|
@ -195,7 +198,10 @@ impl Once {
|
|||
#[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
|
||||
#[must_use]
|
||||
pub const fn new() -> Once {
|
||||
Once { state_and_queue: AtomicUsize::new(INCOMPLETE), _marker: marker::PhantomData }
|
||||
Once {
|
||||
state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)),
|
||||
_marker: marker::PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Performs an initialization routine once and only once. The given closure
|
||||
|
@ -376,7 +382,7 @@ impl Once {
|
|||
// operations visible to us, and, this being a fast path, weaker
|
||||
// ordering helps with performance. This `Acquire` synchronizes with
|
||||
// `Release` operations on the slow path.
|
||||
self.state_and_queue.load(Ordering::Acquire) == COMPLETE
|
||||
self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
|
||||
}
|
||||
|
||||
// This is a non-generic function to reduce the monomorphization cost of
|
||||
|
@ -395,7 +401,7 @@ impl Once {
|
|||
fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
|
||||
let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
|
||||
loop {
|
||||
match state_and_queue {
|
||||
match state_and_queue.addr() {
|
||||
COMPLETE => break,
|
||||
POISONED if !ignore_poisoning => {
|
||||
// Panic to propagate the poison.
|
||||
|
@ -405,7 +411,7 @@ impl Once {
|
|||
// Try to register this thread as the one RUNNING.
|
||||
let exchange_result = self.state_and_queue.compare_exchange(
|
||||
state_and_queue,
|
||||
RUNNING,
|
||||
ptr::invalid_mut(RUNNING),
|
||||
Ordering::Acquire,
|
||||
Ordering::Acquire,
|
||||
);
|
||||
|
@ -417,13 +423,13 @@ impl Once {
|
|||
// wake them up on drop.
|
||||
let mut waiter_queue = WaiterQueue {
|
||||
state_and_queue: &self.state_and_queue,
|
||||
set_state_on_drop_to: POISONED,
|
||||
set_state_on_drop_to: ptr::invalid_mut(POISONED),
|
||||
};
|
||||
// Run the initialization function, letting it know if we're
|
||||
// poisoned or not.
|
||||
let init_state = OnceState {
|
||||
poisoned: state_and_queue == POISONED,
|
||||
set_state_on_drop_to: Cell::new(COMPLETE),
|
||||
poisoned: state_and_queue.addr() == POISONED,
|
||||
set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
|
||||
};
|
||||
init(&init_state);
|
||||
waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
|
||||
|
@ -432,7 +438,7 @@ impl Once {
|
|||
_ => {
|
||||
// All other values must be RUNNING with possibly a
|
||||
// pointer to the waiter queue in the more significant bits.
|
||||
assert!(state_and_queue & STATE_MASK == RUNNING);
|
||||
assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
|
||||
wait(&self.state_and_queue, state_and_queue);
|
||||
state_and_queue = self.state_and_queue.load(Ordering::Acquire);
|
||||
}
|
||||
|
@ -441,13 +447,13 @@ impl Once {
|
|||
}
|
||||
}
|
||||
|
||||
fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
|
||||
fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
|
||||
// Note: the following code was carefully written to avoid creating a
|
||||
// mutable reference to `node` that gets aliased.
|
||||
loop {
|
||||
// Don't queue this thread if the status is no longer running,
|
||||
// otherwise we will not be woken up.
|
||||
if current_state & STATE_MASK != RUNNING {
|
||||
if current_state.addr() & STATE_MASK != RUNNING {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -455,15 +461,15 @@ fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
|
|||
let node = Waiter {
|
||||
thread: Cell::new(Some(thread::current())),
|
||||
signaled: AtomicBool::new(false),
|
||||
next: (current_state & !STATE_MASK) as *const Waiter,
|
||||
next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
|
||||
};
|
||||
let me = &node as *const Waiter as usize;
|
||||
let me = &node as *const Waiter as *const Masked as *mut Masked;
|
||||
|
||||
// Try to slide in the node at the head of the linked list, making sure
|
||||
// that another thread didn't just replace the head of the linked list.
|
||||
let exchange_result = state_and_queue.compare_exchange(
|
||||
current_state,
|
||||
me | RUNNING,
|
||||
me.with_addr(me.addr() | RUNNING),
|
||||
Ordering::Release,
|
||||
Ordering::Relaxed,
|
||||
);
|
||||
|
@ -502,7 +508,7 @@ impl Drop for WaiterQueue<'_> {
|
|||
self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
|
||||
|
||||
// We should only ever see an old state which was RUNNING.
|
||||
assert_eq!(state_and_queue & STATE_MASK, RUNNING);
|
||||
assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
|
||||
|
||||
// Walk the entire linked list of waiters and wake them up (in lifo
|
||||
// order, last to register is first to wake up).
|
||||
|
@ -511,7 +517,8 @@ impl Drop for WaiterQueue<'_> {
|
|||
// free `node` if there happens to be has a spurious wakeup.
|
||||
// So we have to take out the `thread` field and copy the pointer to
|
||||
// `next` first.
|
||||
let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter;
|
||||
let mut queue =
|
||||
state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
|
||||
while !queue.is_null() {
|
||||
let next = (*queue).next;
|
||||
let thread = (*queue).thread.take().unwrap();
|
||||
|
@ -568,6 +575,6 @@ impl OnceState {
|
|||
/// Poison the associated [`Once`] without explicitly panicking.
|
||||
// NOTE: This is currently only exposed for the `lazy` module
|
||||
pub(crate) fn poison(&self) {
|
||||
self.set_state_on_drop_to.set(POISONED);
|
||||
self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue