Auto merge of #2699 - RalfJung:schedule-refactor, r=RalfJung
refactor scheduler

Refactors the scheduler to use something akin to a generator: a callback that is invoked whenever the stack of a thread is empty. The callback has the chance to push a new stack frame or do other things, and then indicates whether this thread is done or should be scheduled again. (Unfortunately I think we [cannot use actual generators](https://rust-lang.zulipchat.com/#narrow/stream/213817-t-lang/topic/Generators.20that.20borrow.20on.20each.20resume.3F) here.)

The interpreter loop is now a proper infinite loop; the only way to leave it is for some kind of interrupt to be triggered (represented as `InterpError`). This unifies how we handle 'exit when calling `process::exit`' and 'exit when the main thread quits'.

The last commit implements an alternative approach to https://github.com/rust-lang/miri/pull/2660 using this new structure.

Fixes https://github.com/rust-lang/miri/issues/2629.
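To make the shape of this refactor concrete, here is a minimal, self-contained sketch of the pattern, not Miri's actual code: the `Thread` struct, the `run` driver, and the toy string "frames" below are made up for illustration, though the `on_stack_empty` take/call/put-back dance mirrors what the diff does. Each thread stores a boxed `FnMut` that is called whenever its stack runs empty, and the callback's `Poll` result tells the driver whether to keep scheduling the thread or to treat it as terminated.

```rust
use std::task::Poll;

/// Made-up stand-in for Miri's per-thread state (illustration only).
struct Thread {
    /// Pretend stack frames (the real thing holds interpreter frames).
    stack: Vec<&'static str>,
    /// Invoked when `stack` runs empty. `Poll::Pending` means "I pushed more work,
    /// schedule me again"; `Poll::Ready(())` means the thread is done for good.
    /// `None` only while the callback itself is running (taken out, then put back).
    on_stack_empty: Option<Box<dyn FnMut(&mut Thread) -> Poll<()>>>,
}

fn run(thread: &mut Thread) {
    loop {
        if let Some(frame) = thread.stack.pop() {
            println!("executing frame {frame}");
            continue;
        }
        // The stack ran empty: take the callback out so it can mutably borrow the thread.
        let mut cb = thread
            .on_stack_empty
            .take()
            .expect("`on_stack_empty` not set up, or already running");
        let res = cb(thread);
        thread.on_stack_empty = Some(cb);
        match res {
            Poll::Pending => {}       // the callback pushed new frames; keep going
            Poll::Ready(()) => break, // nothing left to do on this thread
        }
    }
}

fn main() {
    // A callback that, the first time the stack is empty, schedules one more "frame"
    // (think: TLS destructors), and the second time declares the thread finished.
    let mut phase = 0;
    let mut thread = Thread {
        stack: vec!["main"],
        on_stack_empty: Some(Box::new(move |t| {
            phase += 1;
            if phase == 1 {
                t.stack.push("tls-dtor");
                Poll::Pending
            } else {
                Poll::Ready(())
            }
        })),
    };
    run(&mut thread);
}
```

In the actual diff below, the callback receives the whole interpreter context (`&mut MiriInterpCx`) rather than the thread itself, `run_on_stack_empty` performs the take/call/put-back, and a `Poll::Ready(())` result makes the scheduler call `terminate_active_thread`.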
Commit: 623b4aba6c

15 changed files with 412 additions and 327 deletions
@@ -99,12 +99,10 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
// `config.toml`.
command.env("RUSTC_WRAPPER", "");

if only_setup {
if print_sysroot {
// Be extra sure there is no noise on stdout.
command.stdout(process::Stdio::null());
}
if only_setup && !print_sysroot {
// Forward output.
} else {
// Supress output.
command.stdout(process::Stdio::null());
command.stderr(process::Stdio::null());
}

@@ -120,7 +118,9 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
std::env::set_var("MIRI_SYSROOT", &sysroot_dir);

// Do the build.
if only_setup {
if print_sysroot {
// Be silent.
} else if only_setup {
// We want to be explicit.
eprintln!("Preparing a sysroot for Miri (target: {target})...");
} else {

@@ -143,7 +143,9 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
)
}
});
if only_setup {
if print_sysroot {
// Be silent.
} else if only_setup {
eprintln!("A sysroot for Miri is now available in `{}`.", sysroot_dir.display());
} else {
eprintln!("done");
@@ -40,10 +40,15 @@ function run_tests
./miri test
if [ -z "${MIRI_TEST_TARGET+exists}" ]; then
# Only for host architecture: tests with optimizations (`-O` is what cargo passes, but crank MIR
# optimizations up all the way).
# Optimizations change diagnostics (mostly backtraces), so we don't check them
#FIXME(#2155): we want to only run the pass and panic tests here, not the fail tests.
# optimizations up all the way, too).
# Optimizations change diagnostics (mostly backtraces), so we don't check
# them. Also error locations change so we don't run the failing tests.
MIRIFLAGS="${MIRIFLAGS:-} -O -Zmir-opt-level=4" MIRI_SKIP_UI_CHECKS=1 ./miri test -- tests/{pass,panic}

# Also run some many-seeds tests. 64 seeds means this takes around a minute per test.
for FILE in tests/many-seeds/*.rs; do
MIRI_SEEDS=64 CARGO_EXTRA_FLAGS="$CARGO_EXTRA_FLAGS -q" ./miri many-seeds ./miri run "$FILE"
done
fi

## test-cargo-miri
@@ -36,7 +36,8 @@ Mainly meant to be invoked by rust-analyzer.
./miri many-seeds <command>:
Runs <command> over and over again with different seeds for Miri. The MIRIFLAGS
variable is set to its original value appended with ` -Zmiri-seed=$SEED` for
many different seeds.
many different seeds. The MIRI_SEEDS variable controls how many seeds are being
tried; MIRI_SEED_START controls the first seed to try.

./miri bench <benches>:
Runs the benchmarks from bench-cargo-miri in hyperfine. hyperfine needs to be installed.

@@ -174,7 +175,9 @@ rustc-push)
fi
;;
many-seeds)
for SEED in $(seq 0 255); do
MIRI_SEED_START=${MIRI_SEED_START:-0} # default to 0
MIRI_SEEDS=${MIRI_SEEDS:-256} # default to 256
for SEED in $(seq $MIRI_SEED_START $(( $MIRI_SEED_START + $MIRI_SEEDS - 1 )) ); do
echo "Trying seed: $SEED"
MIRIFLAGS="$MIRIFLAGS -Zlayout-seed=$SEED -Zmiri-seed=$SEED" $@ || { echo "Failing seed: $SEED"; break; }
done

@@ -249,6 +252,8 @@ export RUSTFLAGS="-C link-args=-Wl,-rpath,$LIBDIR $RUSTFLAGS"
# Build a sysroot and set MIRI_SYSROOT to use it. Arguments are passed to `cargo miri setup`.
build_sysroot() {
if ! MIRI_SYSROOT="$($CARGO run $CARGO_EXTRA_FLAGS --manifest-path "$MIRIDIR"/cargo-miri/Cargo.toml -- miri setup --print-sysroot "$@")"; then
# Run it again so the user can see the error.
$CARGO run $CARGO_EXTRA_FLAGS --manifest-path "$MIRIDIR"/cargo-miri/Cargo.toml -- miri setup "$@"
echo "'cargo miri setup' failed"
exit 1
fi
@@ -3,6 +3,7 @@
use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::num::TryFromIntError;
use std::task::Poll;
use std::time::{Duration, SystemTime};

use log::trace;

@@ -16,18 +17,17 @@ use rustc_target::spec::abi::Abi;

use crate::concurrency::data_race;
use crate::concurrency::sync::SynchronizationState;
use crate::shims::tls;
use crate::*;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SchedulingAction {
enum SchedulingAction {
/// Execute step on the active thread.
ExecuteStep,
/// Execute a timeout callback.
ExecuteTimeoutCallback,
/// Execute destructors of the active thread.
ExecuteDtors,
/// Stop the program.
Stop,
/// Wait for a bit, until there is a timeout to be called.
Sleep(Duration),
}

/// Trait for callbacks that can be executed when some event happens, such as after a timeout.
@@ -41,9 +41,6 @@ type TimeoutCallback<'mir, 'tcx> = Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ThreadId(u32);

/// The main thread. When it terminates, the whole application terminates.
const MAIN_THREAD: ThreadId = ThreadId(0);

impl ThreadId {
pub fn to_u32(self) -> u32 {
self.0

@@ -118,6 +115,12 @@ pub struct Thread<'mir, 'tcx> {
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,

/// The function to call when the stack ran empty, to figure out what to do next.
/// Conceptually, this is the interpreter implementation of the things that happen 'after' the
/// Rust language entry point for this thread returns (usually implemented by the C or OS runtime).
/// (`None` is an error, it means the callback has not been set up yet or is actively running.)
pub(crate) on_stack_empty: Option<StackEmptyCallback<'mir, 'tcx>>,

/// The index of the topmost user-relevant frame in `stack`. This field must contain
/// the value produced by `get_top_user_relevant_frame`.
/// The `None` state here represents

@@ -137,19 +140,10 @@ pub struct Thread<'mir, 'tcx> {
pub(crate) last_error: Option<MPlaceTy<'tcx, Provenance>>,
}

impl<'mir, 'tcx> Thread<'mir, 'tcx> {
/// Check if the thread is done executing (no more stack frames). If yes,
/// change the state to terminated and return `true`.
fn check_terminated(&mut self) -> bool {
if self.state == ThreadState::Enabled {
if self.stack.is_empty() {
self.state = ThreadState::Terminated;
return true;
}
}
false
}
pub type StackEmptyCallback<'mir, 'tcx> =
Box<dyn FnMut(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx, Poll<()>>>;

impl<'mir, 'tcx> Thread<'mir, 'tcx> {
/// Get the name of the current thread, or `<unnamed>` if it was not set.
fn thread_name(&self) -> &[u8] {
if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
@@ -202,28 +196,21 @@ impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
}
}

impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
fn default() -> Self {
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
fn new(name: Option<&str>, on_stack_empty: Option<StackEmptyCallback<'mir, 'tcx>>) -> Self {
Self {
state: ThreadState::Enabled,
thread_name: None,
thread_name: name.map(|name| Vec::from(name.as_bytes())),
stack: Vec::new(),
top_user_relevant_frame: None,
join_status: ThreadJoinStatus::Joinable,
panic_payload: None,
last_error: None,
on_stack_empty,
}
}
}

impl<'mir, 'tcx> Thread<'mir, 'tcx> {
fn new(name: &str) -> Self {
let mut thread = Thread::default();
thread.thread_name = Some(Vec::from(name.as_bytes()));
thread
}
}

impl VisitTags for Thread<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let Thread {

@@ -234,6 +221,7 @@ impl VisitTags for Thread<'_, '_> {
state: _,
thread_name: _,
join_status: _,
on_stack_empty: _, // we assume the closure captures no GC-relevant state
} = self;

panic_payload.visit_tags(visit);
@@ -327,22 +315,6 @@ pub struct ThreadManager<'mir, 'tcx> {
timeout_callbacks: FxHashMap<ThreadId, TimeoutCallbackInfo<'mir, 'tcx>>,
}

impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
fn default() -> Self {
let mut threads = IndexVec::new();
// Create the main thread and add it to the list of threads.
threads.push(Thread::new("main"));
Self {
active_thread: ThreadId::new(0),
threads,
sync: SynchronizationState::default(),
thread_local_alloc_ids: Default::default(),
yield_active_thread: false,
timeout_callbacks: FxHashMap::default(),
}
}
}

impl VisitTags for ThreadManager<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let ThreadManager {

@@ -367,8 +339,28 @@ impl VisitTags for ThreadManager<'_, '_> {
}
}

impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
fn default() -> Self {
let mut threads = IndexVec::new();
// Create the main thread and add it to the list of threads.
threads.push(Thread::new(Some("main"), None));
Self {
active_thread: ThreadId::new(0),
threads,
sync: SynchronizationState::default(),
thread_local_alloc_ids: Default::default(),
yield_active_thread: false,
timeout_callbacks: FxHashMap::default(),
}
}
}

impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
pub(crate) fn init(ecx: &mut MiriInterpCx<'mir, 'tcx>) {
pub(crate) fn init(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
) {
ecx.machine.threads.threads[ThreadId::new(0)].on_stack_empty = Some(on_main_stack_empty);
if ecx.tcx.sess.target.os.as_ref() != "windows" {
// The main thread can *not* be joined on except on windows.
ecx.machine.threads.threads[ThreadId::new(0)].join_status = ThreadJoinStatus::Detached;

@@ -411,9 +403,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}

/// Create a new thread and returns its id.
fn create_thread(&mut self) -> ThreadId {
fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'mir, 'tcx>) -> ThreadId {
let new_thread_id = ThreadId::new(self.threads.len());
self.threads.push(Default::default());
self.threads.push(Thread::new(None, Some(on_stack_empty)));
new_thread_id
}

@@ -458,7 +450,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}

/// Get a mutable borrow of the currently active thread.
fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
pub fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
&mut self.threads[self.active_thread]
}

@@ -669,18 +661,6 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
fn schedule(&mut self, clock: &Clock) -> InterpResult<'tcx, SchedulingAction> {
// Check whether the thread has **just** terminated (`check_terminated`
// checks whether the thread has popped all its stack and if yes, sets
// the thread state to terminated).
if self.threads[self.active_thread].check_terminated() {
return Ok(SchedulingAction::ExecuteDtors);
}
// If we get here again and the thread is *still* terminated, there are no more dtors to run.
if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
// The main thread terminated; stop the program.
// We do *not* run TLS dtors of remaining threads, which seems to match rustc behavior.
return Ok(SchedulingAction::Stop);
}
// This thread and the program can keep going.
if self.threads[self.active_thread].state == ThreadState::Enabled
&& !self.yield_active_thread
@@ -688,18 +668,18 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
// The currently active thread is still enabled, just continue with it.
return Ok(SchedulingAction::ExecuteStep);
}
// The active thread yielded. Let's see if there are any timeouts to take care of. We do
// this *before* running any other thread, to ensure that timeouts "in the past" fire before
// any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an
// error is returned if [...] the absolute time specified by abstime has already been passed
// at the time of the call".
// The active thread yielded or got terminated. Let's see if there are any timeouts to take
// care of. We do this *before* running any other thread, to ensure that timeouts "in the
// past" fire before any other thread can take an action. This ensures that for
// `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
// abstime has already been passed at the time of the call".
// <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
let potential_sleep_time =
self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time(clock)).min();
if potential_sleep_time == Some(Duration::new(0, 0)) {
return Ok(SchedulingAction::ExecuteTimeoutCallback);
}
// No callbacks scheduled, pick a regular thread to execute.
// No callbacks immediately scheduled, pick a regular thread to execute.
// The active thread blocked or yielded. So we go search for another enabled thread.
// Crucially, we start searching at the current active thread ID, rather than at 0, since we
// want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2.

@@ -730,15 +710,58 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
// All threads are currently blocked, but we have unexecuted
// timeout_callbacks, which may unblock some of the threads. Hence,
// sleep until the first callback.

clock.sleep(sleep_time);
Ok(SchedulingAction::ExecuteTimeoutCallback)
Ok(SchedulingAction::Sleep(sleep_time))
} else {
throw_machine_stop!(TerminationInfo::Deadlock);
}
}
}

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
/// Execute a timeout callback on the callback's thread.
#[inline]
fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (thread, callback) = if let Some((thread, callback)) =
this.machine.threads.get_ready_callback(&this.machine.clock)
{
(thread, callback)
} else {
// get_ready_callback can return None if the computer's clock
// was shifted after calling the scheduler and before the call
// to get_ready_callback (see issue
// https://github.com/rust-lang/miri/issues/1763). In this case,
// just do nothing, which effectively just returns to the
// scheduler.
return Ok(());
};
// This back-and-forth with `set_active_thread` is here because of two
// design decisions:
// 1. Make the caller and not the callback responsible for changing
// thread.
// 2. Make the scheduler the only place that can change the active
// thread.
let old_thread = this.set_active_thread(thread);
callback.call(this)?;
this.set_active_thread(old_thread);
Ok(())
}

#[inline]
fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
let mut callback = this
.active_thread_mut()
.on_stack_empty
.take()
.expect("`on_stack_empty` not set up, or already running");
let res = callback(this)?;
this.active_thread_mut().on_stack_empty = Some(callback);
Ok(res)
}
}

// Public interface to thread management.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
@@ -773,18 +796,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}

/// Start a regular (non-main) thread.
#[inline]
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(&this.machine.threads, id);
}
id
}

#[inline]
fn start_thread(
fn start_regular_thread(
&mut self,
thread: Option<MPlaceTy<'tcx, Provenance>>,
start_routine: Pointer<Option<Provenance>>,

@@ -795,7 +809,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let this = self.eval_context_mut();

// Create the new thread
let new_thread_id = this.create_thread();
let new_thread_id = this.machine.threads.create_thread({
let mut state = tls::TlsDtorsState::default();
Box::new(move |m| state.on_stack_empty(m))
});
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(&this.machine.threads, new_thread_id);
}

// Write the current thread-id, switch to the next thread later
// to treat this write operation as occuring on the current thread.

@@ -888,12 +908,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.machine.threads.get_total_thread_count()
}

#[inline]
fn has_terminated(&self, thread_id: ThreadId) -> bool {
let this = self.eval_context_ref();
this.machine.threads.has_terminated(thread_id)
}

#[inline]
fn have_all_terminated(&self) -> bool {
let this = self.eval_context_ref();

@@ -943,26 +957,22 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
where
'mir: 'c,
{
let this = self.eval_context_ref();
this.machine.threads.get_thread_name(thread)
self.eval_context_ref().machine.threads.get_thread_name(thread)
}

#[inline]
fn block_thread(&mut self, thread: ThreadId) {
let this = self.eval_context_mut();
this.machine.threads.block_thread(thread);
self.eval_context_mut().machine.threads.block_thread(thread);
}

#[inline]
fn unblock_thread(&mut self, thread: ThreadId) {
let this = self.eval_context_mut();
this.machine.threads.unblock_thread(thread);
self.eval_context_mut().machine.threads.unblock_thread(thread);
}

#[inline]
fn yield_active_thread(&mut self) {
let this = self.eval_context_mut();
this.machine.threads.yield_active_thread();
self.eval_context_mut().machine.threads.yield_active_thread();
}

#[inline]
@@ -995,49 +1005,42 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.machine.threads.unregister_timeout_callback_if_exists(thread);
}

/// Execute a timeout callback on the callback's thread.
#[inline]
fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
/// Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
/// termination).
fn run_threads(&mut self) -> InterpResult<'tcx, !> {
let this = self.eval_context_mut();
let (thread, callback) = if let Some((thread, callback)) =
this.machine.threads.get_ready_callback(&this.machine.clock)
{
(thread, callback)
} else {
// get_ready_callback can return None if the computer's clock
// was shifted after calling the scheduler and before the call
// to get_ready_callback (see issue
// https://github.com/rust-lang/miri/issues/1763). In this case,
// just do nothing, which effectively just returns to the
// scheduler.
return Ok(());
};
// This back-and-forth with `set_active_thread` is here because of two
// design decisions:
// 1. Make the caller and not the callback responsible for changing
// thread.
// 2. Make the scheduler the only place that can change the active
// thread.
let old_thread = this.set_active_thread(thread);
callback.call(this)?;
this.set_active_thread(old_thread);
Ok(())
}

/// Decide which action to take next and on which thread.
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
this.machine.threads.schedule(&this.machine.clock)
loop {
match this.machine.threads.schedule(&this.machine.clock)? {
SchedulingAction::ExecuteStep => {
if !this.step()? {
// See if this thread can do something else.
match this.run_on_stack_empty()? {
Poll::Pending => {} // keep going
Poll::Ready(()) => this.terminate_active_thread()?,
}
}
}
SchedulingAction::ExecuteTimeoutCallback => {
this.run_timeout_callback()?;
}
SchedulingAction::Sleep(duration) => {
this.machine.clock.sleep(duration);
}
}
}
}

/// Handles thread termination of the active thread: wakes up threads joining on this one,
/// and deallocated thread-local statics.
///
/// This is called from `tls.rs` after handling the TLS dtors.
/// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
fn terminate_active_thread(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let thread = this.active_thread_mut();
assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
thread.state = ThreadState::Terminated;

for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
}
@@ -11,7 +11,10 @@ use crate::*;

/// Details of premature program termination.
pub enum TerminationInfo {
Exit(i64),
Exit {
code: i64,
leak_check: bool,
},
Abort(String),
UnsupportedInIsolation(String),
StackedBorrowsUb {

@@ -38,7 +41,7 @@ impl fmt::Display for TerminationInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use TerminationInfo::*;
match self {
Exit(code) => write!(f, "the evaluated program completed with exit code {code}"),
Exit { code, .. } => write!(f, "the evaluated program completed with exit code {code}"),
Abort(msg) => write!(f, "{msg}"),
UnsupportedInIsolation(msg) => write!(f, "{msg}"),
Int2PtrWithStrictProvenance =>

@@ -148,11 +151,11 @@ fn prune_stacktrace<'tcx>(

/// Emit a custom diagnostic without going through the miri-engine machinery.
///
/// Returns `Some` if this was regular program termination with a given exit code, `None` otherwise.
/// Returns `Some` if this was regular program termination with a given exit code and a `bool` indicating whether a leak check should happen; `None` otherwise.
pub fn report_error<'tcx, 'mir>(
ecx: &InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>,
e: InterpErrorInfo<'tcx>,
) -> Option<i64> {
) -> Option<(i64, bool)> {
use InterpError::*;

let mut msg = vec![];

@@ -161,7 +164,7 @@ pub fn report_error<'tcx, 'mir>(
let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
use TerminationInfo::*;
let title = match info {
Exit(code) => return Some(*code),
Exit { code, leak_check } => return Some((*code, *leak_check)),
Abort(_) => Some("abnormal termination"),
UnsupportedInIsolation(_) | Int2PtrWithStrictProvenance =>
Some("unsupported operation"),
|
|||
use std::iter;
|
||||
use std::panic::{self, AssertUnwindSafe};
|
||||
use std::path::PathBuf;
|
||||
use std::task::Poll;
|
||||
use std::thread;
|
||||
|
||||
use log::info;
|
||||
|
@ -20,8 +21,14 @@ use rustc_target::spec::abi::Abi;
|
|||
|
||||
use rustc_session::config::EntryFnType;
|
||||
|
||||
use crate::shims::tls;
|
||||
use crate::*;
|
||||
|
||||
/// When the main thread would exit, we will yield to any other thread that is ready to execute.
|
||||
/// But we must only do that a finite number of times, or a background thread running `loop {}`
|
||||
/// will hang the program.
|
||||
const MAIN_THREAD_YIELDS_AT_SHUTDOWN: u32 = 256;
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq)]
|
||||
pub enum AlignmentCheck {
|
||||
/// Do not check alignment.
|
||||
|
@ -172,17 +179,79 @@ impl Default for MiriConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns a freshly created `InterpCx`, along with an `MPlaceTy` representing
|
||||
/// the location where the return value of the `start` function will be
|
||||
/// written to.
|
||||
/// The state of the main thread. Implementation detail of `on_main_stack_empty`.
|
||||
#[derive(Default, Debug)]
|
||||
enum MainThreadState {
|
||||
#[default]
|
||||
Running,
|
||||
TlsDtors(tls::TlsDtorsState),
|
||||
Yield {
|
||||
remaining: u32,
|
||||
},
|
||||
Done,
|
||||
}
|
||||
|
||||
impl MainThreadState {
|
||||
fn on_main_stack_empty<'tcx>(
|
||||
&mut self,
|
||||
this: &mut MiriInterpCx<'_, 'tcx>,
|
||||
) -> InterpResult<'tcx, Poll<()>> {
|
||||
use MainThreadState::*;
|
||||
match self {
|
||||
Running => {
|
||||
*self = TlsDtors(Default::default());
|
||||
}
|
||||
TlsDtors(state) =>
|
||||
match state.on_stack_empty(this)? {
|
||||
Poll::Pending => {} // just keep going
|
||||
Poll::Ready(()) => {
|
||||
// Give background threads a chance to finish by yielding the main thread a
|
||||
// couple of times -- but only if we would also preempt threads randomly.
|
||||
if this.machine.preemption_rate > 0.0 {
|
||||
// There is a non-zero chance they will yield back to us often enough to
|
||||
// make Miri terminate eventually.
|
||||
*self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN };
|
||||
} else {
|
||||
// The other threads did not get preempted, so no need to yield back to
|
||||
// them.
|
||||
*self = Done;
|
||||
}
|
||||
}
|
||||
},
|
||||
Yield { remaining } =>
|
||||
match remaining.checked_sub(1) {
|
||||
None => *self = Done,
|
||||
Some(new_remaining) => {
|
||||
*remaining = new_remaining;
|
||||
this.yield_active_thread();
|
||||
}
|
||||
},
|
||||
Done => {
|
||||
// Figure out exit code.
|
||||
let ret_place = MPlaceTy::from_aligned_ptr(
|
||||
this.machine.main_fn_ret_place.unwrap().ptr,
|
||||
this.machine.layouts.isize,
|
||||
);
|
||||
let exit_code = this.read_scalar(&ret_place.into())?.to_machine_isize(this)?;
|
||||
// Need to call this ourselves since we are not going to return to the scheduler
|
||||
// loop, and we want the main thread TLS to not show up as memory leaks.
|
||||
this.terminate_active_thread()?;
|
||||
// Stop interpreter loop.
|
||||
throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true });
|
||||
}
|
||||
}
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a freshly created `InterpCx`.
|
||||
/// Public because this is also used by `priroda`.
|
||||
pub fn create_ecx<'mir, 'tcx: 'mir>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
entry_id: DefId,
|
||||
entry_type: EntryFnType,
|
||||
config: &MiriConfig,
|
||||
) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>, MPlaceTy<'tcx, Provenance>)>
|
||||
{
|
||||
) -> InterpResult<'tcx, InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>> {
|
||||
let param_env = ty::ParamEnv::reveal_all();
|
||||
let layout_cx = LayoutCx { tcx, param_env };
|
||||
let mut ecx = InterpCx::new(
|
||||
|
@ -193,7 +262,11 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
|
|||
);
|
||||
|
||||
// Some parts of initialization require a full `InterpCx`.
|
||||
MiriMachine::late_init(&mut ecx, config)?;
|
||||
MiriMachine::late_init(&mut ecx, config, {
|
||||
let mut state = MainThreadState::default();
|
||||
// Cannot capture anything GC-relevant here.
|
||||
Box::new(move |m| state.on_main_stack_empty(m))
|
||||
})?;
|
||||
|
||||
// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
|
||||
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS);
|
||||
|
@ -274,6 +347,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
|
|||
|
||||
// Return place (in static memory so that it does not count as leak).
|
||||
let ret_place = ecx.allocate(ecx.machine.layouts.isize, MiriMemoryKind::Machine.into())?;
|
||||
ecx.machine.main_fn_ret_place = Some(*ret_place);
|
||||
// Call start function.
|
||||
|
||||
match entry_type {
|
||||
|
@ -321,7 +395,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
|
|||
}
|
||||
}
|
||||
|
||||
Ok((ecx, ret_place))
|
||||
Ok(ecx)
|
||||
}
|
||||
|
||||
/// Evaluates the entry function specified by `entry_id`.
|
||||
|
@ -337,7 +411,7 @@ pub fn eval_entry<'tcx>(
|
|||
// Copy setting before we move `config`.
|
||||
let ignore_leaks = config.ignore_leaks;
|
||||
|
||||
let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, &config) {
|
||||
let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config) {
|
||||
Ok(v) => v,
|
||||
Err(err) => {
|
||||
err.print_backtrace();
|
||||
|
@ -346,34 +420,17 @@ pub fn eval_entry<'tcx>(
|
|||
};
|
||||
|
||||
// Perform the main execution.
|
||||
let res: thread::Result<InterpResult<'_, i64>> = panic::catch_unwind(AssertUnwindSafe(|| {
|
||||
// Main loop.
|
||||
loop {
|
||||
match ecx.schedule()? {
|
||||
SchedulingAction::ExecuteStep => {
|
||||
assert!(ecx.step()?, "a terminated thread was scheduled for execution");
|
||||
}
|
||||
SchedulingAction::ExecuteTimeoutCallback => {
|
||||
ecx.run_timeout_callback()?;
|
||||
}
|
||||
SchedulingAction::ExecuteDtors => {
|
||||
// This will either enable the thread again (so we go back
|
||||
// to `ExecuteStep`), or determine that this thread is done
|
||||
// for good.
|
||||
ecx.schedule_next_tls_dtor_for_active_thread()?;
|
||||
}
|
||||
SchedulingAction::Stop => {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
let return_code = ecx.read_scalar(&ret_place.into())?.to_machine_isize(&ecx)?;
|
||||
Ok(return_code)
|
||||
}));
|
||||
let res: thread::Result<InterpResult<'_, !>> =
|
||||
panic::catch_unwind(AssertUnwindSafe(|| ecx.run_threads()));
|
||||
let res = res.unwrap_or_else(|panic_payload| {
|
||||
ecx.handle_ice();
|
||||
panic::resume_unwind(panic_payload)
|
||||
});
|
||||
let res = match res {
|
||||
Err(res) => res,
|
||||
// `Ok` can never happen
|
||||
Ok(never) => match never {},
|
||||
};
|
||||
|
||||
// Machine cleanup. Only do this if all threads have terminated; threads that are still running
|
||||
// might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396).
|
||||
|
@ -386,32 +443,26 @@ pub fn eval_entry<'tcx>(
|
|||
}
|
||||
|
||||
// Process the result.
|
||||
match res {
|
||||
Ok(return_code) => {
|
||||
if !ignore_leaks {
|
||||
// Check for thread leaks.
|
||||
if !ecx.have_all_terminated() {
|
||||
tcx.sess.err(
|
||||
"the main thread terminated without waiting for all remaining threads",
|
||||
);
|
||||
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
|
||||
return None;
|
||||
}
|
||||
// Check for memory leaks.
|
||||
info!("Additonal static roots: {:?}", ecx.machine.static_roots);
|
||||
let leaks = ecx.leak_report(&ecx.machine.static_roots);
|
||||
if leaks != 0 {
|
||||
tcx.sess.err("the evaluated program leaked memory");
|
||||
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
|
||||
// Ignore the provided return code - let the reported error
|
||||
// determine the return code.
|
||||
return None;
|
||||
}
|
||||
}
|
||||
Some(return_code)
|
||||
let (return_code, leak_check) = report_error(&ecx, res)?;
|
||||
if leak_check && !ignore_leaks {
|
||||
// Check for thread leaks.
|
||||
if !ecx.have_all_terminated() {
|
||||
tcx.sess.err("the main thread terminated without waiting for all remaining threads");
|
||||
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
|
||||
return None;
|
||||
}
|
||||
// Check for memory leaks.
|
||||
info!("Additonal static roots: {:?}", ecx.machine.static_roots);
|
||||
let leaks = ecx.leak_report(&ecx.machine.static_roots);
|
||||
if leaks != 0 {
|
||||
tcx.sess.err("the evaluated program leaked memory");
|
||||
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
|
||||
// Ignore the provided return code - let the reported error
|
||||
// determine the return code.
|
||||
return None;
|
||||
}
|
||||
Err(e) => report_error(&ecx, e),
|
||||
}
|
||||
Some(return_code)
|
||||
}
|
||||
|
||||
/// Turns an array of arguments into a Windows command line string.
|
||||
|
|
|
@ -81,7 +81,7 @@ pub use crate::shims::intrinsics::EvalContextExt as _;
|
|||
pub use crate::shims::os_str::EvalContextExt as _;
|
||||
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
|
||||
pub use crate::shims::time::EvalContextExt as _;
|
||||
pub use crate::shims::tls::{EvalContextExt as _, TlsData};
|
||||
pub use crate::shims::tls::TlsData;
|
||||
pub use crate::shims::EvalContextExt as _;
|
||||
|
||||
pub use crate::clock::{Clock, Instant};
|
||||
|
@ -89,7 +89,7 @@ pub use crate::concurrency::{
|
|||
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
|
||||
init_once::{EvalContextExt as _, InitOnceId},
|
||||
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
|
||||
thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time},
|
||||
thread::{EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, Time},
|
||||
};
|
||||
pub use crate::diagnostics::{
|
||||
report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,
|
||||
|
@ -107,7 +107,7 @@ pub use crate::mono_hash_map::MonoHashMap;
|
|||
pub use crate::operator::EvalContextExt as _;
|
||||
pub use crate::range_map::RangeMap;
|
||||
pub use crate::stacked_borrows::{
|
||||
CallId, EvalContextExt as _, Item, Permission, RetagFields, SbTag, Stack, Stacks,
|
||||
CallId, EvalContextExt as _, Item, Permission, RetagFields, SbTag,
|
||||
};
|
||||
pub use crate::tag_gc::{EvalContextExt as _, VisitTags};
|
||||
|
||||
|
|
|
@ -363,6 +363,9 @@ pub struct MiriMachine<'mir, 'tcx> {
|
|||
/// Miri does not expose env vars from the host to the emulated program.
|
||||
pub(crate) env_vars: EnvVars<'tcx>,
|
||||
|
||||
/// Return place of the main function.
|
||||
pub(crate) main_fn_ret_place: Option<MemPlace<Provenance>>,
|
||||
|
||||
/// Program arguments (`Option` because we can only initialize them after creating the ecx).
|
||||
/// These are *pointers* to argc/argv because macOS.
|
||||
/// We also need the full command line as one string because of Windows.
|
||||
|
@ -492,6 +495,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
|
|||
intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)),
|
||||
// `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
|
||||
env_vars: EnvVars::default(),
|
||||
main_fn_ret_place: None,
|
||||
argc: None,
|
||||
argv: None,
|
||||
cmd_line: None,
|
||||
|
@ -556,10 +560,11 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
|
|||
pub(crate) fn late_init(
|
||||
this: &mut MiriInterpCx<'mir, 'tcx>,
|
||||
config: &MiriConfig,
|
||||
on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
|
||||
) -> InterpResult<'tcx> {
|
||||
EnvVars::init(this, config)?;
|
||||
MiriMachine::init_extern_statics(this)?;
|
||||
ThreadManager::init(this);
|
||||
ThreadManager::init(this, on_main_stack_empty);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -657,6 +662,7 @@ impl VisitTags for MiriMachine<'_, '_> {
|
|||
threads,
|
||||
tls,
|
||||
env_vars,
|
||||
main_fn_ret_place,
|
||||
argc,
|
||||
argv,
|
||||
cmd_line,
|
||||
|
@ -702,6 +708,7 @@ impl VisitTags for MiriMachine<'_, '_> {
|
|||
data_race.visit_tags(visit);
|
||||
stacked_borrows.visit_tags(visit);
|
||||
intptrcast.visit_tags(visit);
|
||||
main_fn_ret_place.visit_tags(visit);
|
||||
argc.visit_tags(visit);
|
||||
argv.visit_tags(visit);
|
||||
cmd_line.visit_tags(visit);
|
||||
|
@ -901,7 +908,13 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
|||
|
||||
let alloc = alloc.into_owned();
|
||||
let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| {
|
||||
Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind, &ecx.machine)
|
||||
stacked_borrows::Stacks::new_allocation(
|
||||
id,
|
||||
alloc.size(),
|
||||
stacked_borrows,
|
||||
kind,
|
||||
&ecx.machine,
|
||||
)
|
||||
});
|
||||
let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| {
|
||||
data_race::AllocExtra::new_allocation(
|
||||
|
|
|
@ -286,7 +286,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
|||
let [code] = this.check_shim(abi, exp_abi, link_name, args)?;
|
||||
// it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
|
||||
let code = this.read_scalar(code)?.to_i32()?;
|
||||
throw_machine_stop!(TerminationInfo::Exit(code.into()));
|
||||
throw_machine_stop!(TerminationInfo::Exit { code: code.into(), leak_check: false });
|
||||
}
|
||||
"abort" => {
|
||||
let [] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
|
|
|
@ -1,12 +1,11 @@
|
|||
//! Implement thread-local storage.
|
||||
|
||||
use std::collections::btree_map::Entry as BTreeEntry;
|
||||
use std::collections::hash_map::Entry as HashMapEntry;
|
||||
use std::collections::BTreeMap;
|
||||
use std::task::Poll;
|
||||
|
||||
use log::trace;
|
||||
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_middle::ty;
|
||||
use rustc_target::abi::{HasDataLayout, Size};
|
||||
use rustc_target::spec::abi::Abi;
|
||||
|
@ -23,12 +22,12 @@ pub struct TlsEntry<'tcx> {
|
|||
dtor: Option<ty::Instance<'tcx>>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
struct RunningDtorsState {
|
||||
#[derive(Default, Debug)]
|
||||
struct RunningDtorState {
|
||||
/// The last TlsKey used to retrieve a TLS destructor. `None` means that we
|
||||
/// have not tried to retrieve a TLS destructor yet or that we already tried
|
||||
/// all keys.
|
||||
last_dtor_key: Option<TlsKey>,
|
||||
last_key: Option<TlsKey>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
|
@ -42,11 +41,6 @@ pub struct TlsData<'tcx> {
|
|||
/// A single per thread destructor of the thread local storage (that's how
|
||||
/// things work on macOS) with a data argument.
|
||||
macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Provenance>)>,
|
||||
|
||||
/// State for currently running TLS dtors. If this map contains a key for a
|
||||
/// specific thread, it means that we are in the "destruct" phase, during
|
||||
/// which some operations are UB.
|
||||
dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
|
||||
}
|
||||
|
||||
impl<'tcx> Default for TlsData<'tcx> {
|
||||
|
@ -55,7 +49,6 @@ impl<'tcx> Default for TlsData<'tcx> {
|
|||
next_key: 1, // start with 1 as we must not use 0 on Windows
|
||||
keys: Default::default(),
|
||||
macos_thread_dtors: Default::default(),
|
||||
dtors_running: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -143,12 +136,6 @@ impl<'tcx> TlsData<'tcx> {
|
|||
dtor: ty::Instance<'tcx>,
|
||||
data: Scalar<Provenance>,
|
||||
) -> InterpResult<'tcx> {
|
||||
if self.dtors_running.contains_key(&thread) {
|
||||
// UB, according to libstd docs.
|
||||
throw_ub_format!(
|
||||
"setting thread's local storage destructor while destructors are already running"
|
||||
);
|
||||
}
|
||||
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
|
||||
throw_unsup_format!(
|
||||
"setting more than one thread local storage destructor for the same thread is not supported"
|
||||
|
@ -211,21 +198,6 @@ impl<'tcx> TlsData<'tcx> {
|
|||
None
|
||||
}
|
||||
|
||||
/// Set that dtors are running for `thread`. It is guaranteed not to change
|
||||
/// the existing values stored in `dtors_running` for this thread. Returns
|
||||
/// `true` if dtors for `thread` are already running.
|
||||
fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
|
||||
match self.dtors_running.entry(thread) {
|
||||
HashMapEntry::Occupied(_) => true,
|
||||
HashMapEntry::Vacant(entry) => {
|
||||
// We cannot just do `self.dtors_running.insert` because that
|
||||
// would overwrite `last_dtor_key` with `None`.
|
||||
entry.insert(RunningDtorsState { last_dtor_key: None });
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Delete all TLS entries for the given thread. This function should be
|
||||
/// called after all TLS destructors have already finished.
|
||||
fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
|
||||
|
@ -237,7 +209,7 @@ impl<'tcx> TlsData<'tcx> {
|
|||
|
||||
impl VisitTags for TlsData<'_> {
|
||||
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
|
||||
let TlsData { keys, macos_thread_dtors, next_key: _, dtors_running: _ } = self;
|
||||
let TlsData { keys, macos_thread_dtors, next_key: _ } = self;
|
||||
|
||||
for scalar in keys.values().flat_map(|v| v.data.values()) {
|
||||
scalar.visit_tags(visit);
|
||||
|
@ -248,13 +220,77 @@ impl VisitTags for TlsData<'_> {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct TlsDtorsState(TlsDtorsStatePriv);
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
enum TlsDtorsStatePriv {
|
||||
#[default]
|
||||
Init,
|
||||
PthreadDtors(RunningDtorState),
|
||||
Done,
|
||||
}
|
||||
|
||||
impl TlsDtorsState {
|
||||
pub fn on_stack_empty<'tcx>(
|
||||
&mut self,
|
||||
this: &mut MiriInterpCx<'_, 'tcx>,
|
||||
) -> InterpResult<'tcx, Poll<()>> {
|
||||
use TlsDtorsStatePriv::*;
|
||||
match &mut self.0 {
|
||||
Init => {
|
||||
match this.tcx.sess.target.os.as_ref() {
|
||||
"linux" | "freebsd" | "android" => {
|
||||
// Run the pthread dtors.
|
||||
self.0 = PthreadDtors(Default::default());
|
||||
}
|
||||
"macos" => {
|
||||
// The macOS thread wide destructor runs "before any TLS slots get
|
||||
// freed", so do that first.
|
||||
this.schedule_macos_tls_dtor()?;
|
||||
// When the stack is empty again, go on with the pthread dtors.
|
||||
self.0 = PthreadDtors(Default::default());
|
||||
}
|
||||
"windows" => {
|
||||
// Run the special magic hook.
|
||||
this.schedule_windows_tls_dtors()?;
|
||||
// And move to the final state.
|
||||
self.0 = Done;
|
||||
}
|
||||
"wasi" | "none" => {
|
||||
// No OS, no TLS dtors.
|
||||
// FIXME: should we do something on wasi?
|
||||
self.0 = Done;
|
||||
}
|
||||
os => {
|
||||
throw_unsup_format!(
|
||||
"the TLS machinery does not know how to handle OS `{os}`"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
PthreadDtors(state) => {
|
||||
match this.schedule_next_pthread_tls_dtor(state)? {
|
||||
Poll::Pending => {} // just keep going
|
||||
Poll::Ready(()) => self.0 = Done,
|
||||
}
|
||||
}
|
||||
Done => {
|
||||
this.machine.tls.delete_all_thread_tls(this.get_active_thread());
|
||||
return Ok(Poll::Ready(()));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Poll::Pending)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
|
||||
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
/// Schedule TLS destructors for Windows.
|
||||
/// On windows, TLS destructors are managed by std.
|
||||
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
let active_thread = this.get_active_thread();
|
||||
|
||||
// Windows has a special magic linker section that is run on certain events.
|
||||
// Instead of searching for that section and supporting arbitrary hooks in there
|
||||
|
@ -284,16 +320,12 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
|||
None,
|
||||
StackPopCleanup::Root { cleanup: true },
|
||||
)?;
|
||||
|
||||
this.enable_thread(active_thread);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Schedule the MacOS thread destructor of the thread local storage to be
|
||||
/// executed. Returns `true` if scheduled.
|
||||
///
|
||||
/// Note: It is safe to call this function also on other Unixes.
|
||||
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
|
||||
/// executed.
|
||||
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
let thread_id = this.get_active_thread();
|
||||
if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
|
||||
|
@ -306,35 +338,27 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
|||
None,
|
||||
StackPopCleanup::Root { cleanup: true },
|
||||
)?;
|
||||
|
||||
// Enable the thread so that it steps through the destructor which
|
||||
// we just scheduled. Since we deleted the destructor, it is
|
||||
// guaranteed that we will schedule it again. The `dtors_running`
|
||||
// flag will prevent the code from adding the destructor again.
|
||||
this.enable_thread(thread_id);
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Schedule a pthread TLS destructor. Returns `true` if found
|
||||
/// a destructor to schedule, and `false` otherwise.
|
||||
fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
|
||||
fn schedule_next_pthread_tls_dtor(
|
||||
&mut self,
|
||||
state: &mut RunningDtorState,
|
||||
) -> InterpResult<'tcx, Poll<()>> {
|
||||
let this = self.eval_context_mut();
|
||||
let active_thread = this.get_active_thread();
|
||||
|
||||
assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
|
||||
// Fetch next dtor after `key`.
|
||||
let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key;
|
||||
let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
|
||||
let dtor = match this.machine.tls.fetch_tls_dtor(state.last_key, active_thread) {
|
||||
dtor @ Some(_) => dtor,
|
||||
// We ran each dtor once, start over from the beginning.
|
||||
None => this.machine.tls.fetch_tls_dtor(None, active_thread),
|
||||
};
|
||||
if let Some((instance, ptr, key)) = dtor {
|
||||
this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key =
|
||||
Some(key);
|
||||
state.last_key = Some(key);
|
||||
trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
|
||||
assert!(
|
||||
!ptr.to_machine_usize(this).unwrap() != 0,
|
||||
|
@ -349,64 +373,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
|||
StackPopCleanup::Root { cleanup: true },
|
||||
)?;
|
||||
|
||||
this.enable_thread(active_thread);
|
||||
return Ok(true);
|
||||
return Ok(Poll::Pending);
|
||||
}
|
||||
this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
|
||||
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
|
||||
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
/// Schedule an active thread's TLS destructor to run on the active thread.
|
||||
/// Note that this function does not run the destructors itself, it just
|
||||
/// schedules them one by one each time it is called and reenables the
|
||||
/// thread so that it can be executed normally by the main execution loop.
|
||||
///
|
||||
/// Note: we consistently run TLS destructors for all threads, including the
|
||||
/// main thread. However, it is not clear that we should run the TLS
|
||||
/// destructors for the main thread. See issue:
|
||||
/// <https://github.com/rust-lang/rust/issues/28129>.
|
||||
fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
let active_thread = this.get_active_thread();
|
||||
trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread);
|
||||
|
||||
if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
|
||||
// This is the first time we got asked to schedule a destructor. The
|
||||
// Windows schedule destructor function must be called exactly once,
|
||||
// this is why it is in this block.
|
||||
if this.tcx.sess.target.os == "windows" {
|
||||
// On Windows, we signal that the thread quit by starting the
|
||||
// relevant function, reenabling the thread, and going back to
|
||||
// the scheduler.
|
||||
this.schedule_windows_tls_dtors()?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
// The remaining dtors make some progress each time around the scheduler loop,
|
||||
// until they return `false` to indicate that they are done.
|
||||
|
||||
// The macOS thread wide destructor runs "before any TLS slots get
|
||||
// freed", so do that first.
|
||||
if this.schedule_macos_tls_dtor()? {
|
||||
// We have scheduled a MacOS dtor to run on the thread. Execute it
|
||||
// to completion and come back here. Scheduling a destructor
|
||||
// destroys it, so we will not enter this branch again.
|
||||
return Ok(());
|
||||
}
|
||||
if this.schedule_next_pthread_tls_dtor()? {
|
||||
// We have scheduled a pthread destructor and removed it from the
|
||||
// destructors list. Run it to completion and come back here.
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// All dtors done!
|
||||
this.machine.tls.delete_all_thread_tls(active_thread);
|
||||
this.thread_terminated()?;
|
||||
|
||||
Ok(())
|
||||
Ok(Poll::Ready(()))
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -19,7 +19,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {

let func_arg = this.read_immediate(arg)?;

this.start_thread(
this.start_regular_thread(
Some(thread_info_place),
start_routine,
Abi::C { unwind: false },
|
|||
throw_unsup_format!("non-null `lpThreadAttributes` in `CreateThread`")
|
||||
}
|
||||
|
||||
this.start_thread(
|
||||
this.start_regular_thread(
|
||||
thread,
|
||||
start_routine,
|
||||
Abi::System { unwind: false },
|
||||
|
|
|
@ -5,7 +5,9 @@ use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange};
|
|||
use rustc_span::{Span, SpanData};
|
||||
use rustc_target::abi::Size;
|
||||
|
||||
use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind};
|
||||
use crate::stacked_borrows::{
|
||||
err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind, Stack,
|
||||
};
|
||||
use crate::*;
|
||||
|
||||
use rustc_middle::mir::interpret::InterpError;
|
||||
|
|
src/tools/miri/tests/many-seeds/scoped-thread-leak.rs (new file, 8 lines)

@@ -0,0 +1,8 @@
//! Regression test for https://github.com/rust-lang/miri/issues/2629
use std::thread;

fn main() {
    thread::scope(|s| {
        s.spawn(|| {});
    });
}

src/tools/miri/tests/pass/concurrency/scope.rs (new file, 24 lines)

@@ -0,0 +1,24 @@
use std::thread;

fn main() {
    let mut a = vec![1, 2, 3];
    let mut x = 0;

    thread::scope(|s| {
        s.spawn(|| {
            // We can borrow `a` here.
            let _s = format!("hello from the first scoped thread: {a:?}");
        });
        s.spawn(|| {
            let _s = format!("hello from the second scoped thread");
            // We can even mutably borrow `x` here,
            // because no other threads are using it.
            x += a[0] + a[2];
        });
        let _s = format!("hello from the main thread");
    });

    // After the scope, we can modify and access our variables again:
    a.push(4);
    assert_eq!(x, a.len());
}