Rollup merge of #105214 - RalfJung:miri, r=RalfJung

update Miri

r? `@ghost`
Matthias Krüger 2022-12-03 17:37:46 +01:00 committed by GitHub
commit 8be329de01
43 changed files with 1076 additions and 910 deletions


@ -5346,9 +5346,9 @@ checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c"
[[package]]
name = "ui_test"
version = "0.4.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4559da3fe6b481f8674a29379677cb9606cd6f75fc254a2c9834c55638503d"
checksum = "54ddb6f31025943e2f9d59237f433711c461a43d9415974c3eb3a4902edc1c1f"
dependencies = [
"bstr 1.0.1",
"cargo_metadata 0.15.0",


@ -203,65 +203,32 @@ for more information about configuring VS Code and `rust-analyzer`.
[rdg-r-a]: https://rustc-dev-guide.rust-lang.org/building/suggested.html#configuring-rust-analyzer-for-rustc

Removed:

## Advanced topic: other build environments

We described above the simplest way to get a working build environment for Miri,
which is to use the version of rustc indicated by `rustc-version`. But
sometimes, that is not enough.

### Building Miri with a locally built rustc

[building Miri with a locally built rustc]: #building-miri-with-a-locally-built-rustc

A big part of the Miri driver lives in rustc, so working on Miri will sometimes
require using a locally built rustc. The bug you want to fix may actually be on
the rustc side, or you may just need a more detailed trace of the execution
than what is possible with release builds -- in both cases, you should develop
Miri against a rustc you compiled yourself, with debug assertions (and hence
tracing) enabled.

The setup for a local rustc works as follows:

```sh
# Clone the rust-lang/rust repo.
git clone https://github.com/rust-lang/rust rustc
cd rustc
# Create a config.toml with defaults for working on Miri.
./x.py setup compiler
# Now edit `config.toml` and under `[rust]` set `debug-assertions = true`.

# Build a stage 2 rustc, and build the rustc libraries with that rustc.
# This step can take 30 minutes or more.
./x.py build --stage 2 compiler/rustc
# If you change something, you can get a faster rebuild by doing
./x.py build --keep-stage 0 --stage 2 compiler/rustc
# You may have to change the architecture in the next command
rustup toolchain link stage2 build/x86_64-unknown-linux-gnu/stage2
# Now cd to your Miri directory, then configure rustup
rustup override set stage2
```

Note: When you are working with a locally built rustc or any other toolchain that
is not the same as the one in `rust-version`, you should not have `.auto-everything` or
`.auto-toolchain` as that will keep resetting your toolchain.

```sh
rm -f .auto-everything .auto-toolchain
```

Important: You need to delete the Miri cache when you change the stdlib; otherwise the
old, cached version will be used. On Linux, the cache is located at `~/.cache/miri`,
and on Windows, it is located at `%LOCALAPPDATA%\rust-lang\miri\cache`; the exact
location is printed after the library build: "A libstd for Miri is now available in ...".

Note: `./x.py --stage 2 compiler/rustc` currently errors with `thread 'main'
panicked at 'fs::read(stamp) failed with No such file or directory (os error 2)`.
You can simply ignore that error; Miri will build anyway.

For more information about building and configuring a local compiler,
see <https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html>.

With this, you should now have a working development setup! See
[above](#building-and-testing-miri) for how to proceed working on Miri.

Added:

## Advanced topic: Working on Miri in the rustc tree

A big part of the Miri driver is shared with rustc, so working on Miri will
sometimes require also working on rustc itself. In this case, you should *not*
work in a clone of the Miri repository, but in a clone of the
[main Rust repository](https://github.com/rust-lang/rust/). There is a copy of
Miri located at `src/tools/miri` that you can work on directly. A maintainer
will eventually sync those changes back into this repository.

When working on Miri in the rustc tree, here's how you can run tests:

```sh
./x.py test miri --stage 0
```

`--bless` will work, too.

You can also directly run Miri on a Rust source file:

```sh
./x.py run miri --stage 0 --args src/tools/miri/tests/pass/hello.rs
```
## Advanced topic: Syncing with the rustc repo


@ -724,9 +724,9 @@ dependencies = [
[[package]]
name = "ui_test"
version = "0.4.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4559da3fe6b481f8674a29379677cb9606cd6f75fc254a2c9834c55638503d"
checksum = "54ddb6f31025943e2f9d59237f433711c461a43d9415974c3eb3a4902edc1c1f"
dependencies = [
"bstr",
"cargo_metadata",


@ -39,7 +39,7 @@ libloading = "0.7"
[dev-dependencies]
colored = "2"
ui_test = "0.4"
ui_test = "0.5"
rustc_version = "0.4"
# Features chosen to match those required by env_logger, to avoid rebuilds
regex = { version = "1.5.5", default-features = false, features = ["perf", "std"] }


@ -94,7 +94,7 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
let target = target.as_ref().unwrap_or(host);
// We always setup.
setup(&subcommand, target, &rustc_version);
setup(&subcommand, target, &rustc_version, verbose);
// Invoke actual cargo for the job, but with different flags.
// We re-use `cargo test` and `cargo run`, which makes target and binary handling very easy but
@ -486,8 +486,7 @@ pub fn phase_runner(mut binary_args: impl Iterator<Item = String>, phase: Runner
continue;
} else if verbose > 0 {
eprintln!(
"[cargo-miri runner] Overwriting run-time env var {:?}={:?} with build-time value {:?}",
name, old_val, val
"[cargo-miri runner] Overwriting run-time env var {name:?}={old_val:?} with build-time value {val:?}"
);
}
}
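
A side note for readers: the change above is one instance of a cleanup applied throughout this PR, moving from positional format arguments to inline captured identifiers (stable since Rust 1.58). A minimal sketch with made-up values, showing the two forms render identically:

```rust
fn main() {
    let (name, old_val, val) = ("RUSTFLAGS", "a", "b"); // illustrative values
    // Positional arguments (the style being replaced):
    let before = format!(
        "Overwriting run-time env var {:?}={:?} with build-time value {:?}",
        name, old_val, val
    );
    // Inline captured identifiers (the new style):
    let after =
        format!("Overwriting run-time env var {name:?}={old_val:?} with build-time value {val:?}");
    assert_eq!(before, after); // same output, less repetition
}
```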


@ -13,7 +13,7 @@ use crate::util::*;
/// Performs the setup required to make `cargo miri` work: Getting a custom-built libstd. Then sets
/// `MIRI_SYSROOT`. Skipped if `MIRI_SYSROOT` is already set, in which case we expect the user has
/// done all this already.
pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta) {
pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta, verbose: usize) {
let only_setup = matches!(subcommand, MiriCommand::Setup);
let ask_user = !only_setup;
let print_sysroot = only_setup && has_arg_flag("--print-sysroot"); // whether we just print the sysroot path
@ -99,12 +99,13 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
// `config.toml`.
command.env("RUSTC_WRAPPER", "");
if only_setup {
if print_sysroot {
// Be extra sure there is no noise on stdout.
command.stdout(process::Stdio::null());
if only_setup && !print_sysroot {
// Forward output. Even make it verbose, if requested.
for _ in 0..verbose {
command.arg("-v");
}
} else {
// Suppress output.
command.stdout(process::Stdio::null());
command.stderr(process::Stdio::null());
}
@ -120,7 +121,9 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
std::env::set_var("MIRI_SYSROOT", &sysroot_dir);
// Do the build.
if only_setup {
if print_sysroot {
// Be silent.
} else if only_setup {
// We want to be explicit.
eprintln!("Preparing a sysroot for Miri (target: {target})...");
} else {
@ -143,7 +146,9 @@ pub fn setup(subcommand: &MiriCommand, target: &str, rustc_version: &VersionMeta
)
}
});
if only_setup {
if print_sysroot {
// Be silent.
} else if only_setup {
eprintln!("A sysroot for Miri is now available in `{}`.", sysroot_dir.display());
} else {
eprintln!("done");


@ -40,10 +40,15 @@ function run_tests {
./miri test
if [ -z "${MIRI_TEST_TARGET+exists}" ]; then
# Only for host architecture: tests with optimizations (`-O` is what cargo passes, but crank MIR
# optimizations up all the way).
# Optimizations change diagnostics (mostly backtraces), so we don't check them
#FIXME(#2155): we want to only run the pass and panic tests here, not the fail tests.
# optimizations up all the way, too).
# Optimizations change diagnostics (mostly backtraces), so we don't check
# them. Also error locations change so we don't run the failing tests.
MIRIFLAGS="${MIRIFLAGS:-} -O -Zmir-opt-level=4" MIRI_SKIP_UI_CHECKS=1 ./miri test -- tests/{pass,panic}
# Also run some many-seeds tests. 64 seeds means this takes around a minute per test.
for FILE in tests/many-seeds/*.rs; do
MIRI_SEEDS=64 CARGO_EXTRA_FLAGS="$CARGO_EXTRA_FLAGS -q" ./miri many-seeds ./miri run "$FILE"
done
fi
## test-cargo-miri


@ -36,7 +36,8 @@ Mainly meant to be invoked by rust-analyzer.
./miri many-seeds <command>:
Runs <command> over and over again with different seeds for Miri. The MIRIFLAGS
variable is set to its original value appended with ` -Zmiri-seed=$SEED` for
many different seeds.
many different seeds. The MIRI_SEEDS variable controls how many seeds are being
tried; MIRI_SEED_START controls the first seed to try.
./miri bench <benches>:
Runs the benchmarks from bench-cargo-miri in hyperfine. hyperfine needs to be installed.
@ -174,7 +175,9 @@ rustc-push)
fi
;;
many-seeds)
for SEED in $(seq 0 255); do
MIRI_SEED_START=${MIRI_SEED_START:-0} # default to 0
MIRI_SEEDS=${MIRI_SEEDS:-256} # default to 256
for SEED in $(seq $MIRI_SEED_START $(( $MIRI_SEED_START + $MIRI_SEEDS - 1 )) ); do
echo "Trying seed: $SEED"
MIRIFLAGS="$MIRIFLAGS -Zlayout-seed=$SEED -Zmiri-seed=$SEED" $@ || { echo "Failing seed: $SEED"; break; }
done
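
For clarity, here is a Rust sketch of what the `seq` bounds above compute. This models the shell arithmetic only; it is not code from the PR:

```rust
// Models the loop bounds above: MIRI_SEED_START and MIRI_SEEDS
// (defaulting to 0 and 256) define an inclusive range of seeds to try.
fn seed_range(start: u64, count: u64) -> std::ops::RangeInclusive<u64> {
    start..=start + count - 1
}

fn main() {
    // The defaults reproduce the previously hard-coded `seq 0 255`.
    assert_eq!(seed_range(0, 256), 0..=255);
    // MIRI_SEEDS=64, as used in the CI change above, tries 64 seeds.
    assert_eq!(seed_range(0, 64).count(), 64);
}
```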
@ -249,6 +252,8 @@ export RUSTFLAGS="-C link-args=-Wl,-rpath,$LIBDIR $RUSTFLAGS"
# Build a sysroot and set MIRI_SYSROOT to use it. Arguments are passed to `cargo miri setup`.
build_sysroot() {
if ! MIRI_SYSROOT="$($CARGO run $CARGO_EXTRA_FLAGS --manifest-path "$MIRIDIR"/cargo-miri/Cargo.toml -- miri setup --print-sysroot "$@")"; then
# Run it again so the user can see the error.
$CARGO run $CARGO_EXTRA_FLAGS --manifest-path "$MIRIDIR"/cargo-miri/Cargo.toml -- miri setup "$@"
echo "'cargo miri setup' failed"
exit 1
fi


@ -1 +1 @@
454784afba5bf35b5ff14ada0e31265ad1d75e73
cef44f53034eac46be3a0e3eec7b2b3d4ef5140b


@ -192,10 +192,7 @@ fn init_late_loggers(tcx: TyCtxt<'_>) {
if log::Level::from_str(&var).is_ok() {
env::set_var(
"RUSTC_LOG",
format!(
"rustc_middle::mir::interpret={0},rustc_const_eval::interpret={0}",
var
),
format!("rustc_middle::mir::interpret={var},rustc_const_eval::interpret={var}"),
);
} else {
env::set_var("RUSTC_LOG", &var);
@ -317,7 +314,7 @@ fn main() {
} else if arg == "-Zmiri-disable-validation" {
miri_config.validate = false;
} else if arg == "-Zmiri-disable-stacked-borrows" {
miri_config.stacked_borrows = false;
miri_config.borrow_tracker = None;
} else if arg == "-Zmiri-disable-data-race-detector" {
miri_config.data_race_detector = false;
miri_config.weak_memory_emulation = false;
@ -413,7 +410,7 @@ fn main() {
err
),
};
for id in ids.into_iter().map(miri::SbTag::new) {
for id in ids.into_iter().map(miri::BorTag::new) {
if let Some(id) = id {
miri_config.tracked_pointer_tags.insert(id);
} else {


@ -0,0 +1,371 @@
use std::cell::RefCell;
use std::fmt;
use std::num::NonZeroU64;
use log::trace;
use smallvec::SmallVec;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::mir::RetagKind;
use rustc_target::abi::Size;
use crate::*;
pub mod stacked_borrows;
use stacked_borrows::diagnostics::RetagCause;
pub type CallId = NonZeroU64;
/// Tracking pointer provenance
#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct BorTag(NonZeroU64);
impl BorTag {
pub fn new(i: u64) -> Option<Self> {
NonZeroU64::new(i).map(BorTag)
}
pub fn get(&self) -> u64 {
self.0.get()
}
pub fn inner(&self) -> NonZeroU64 {
self.0
}
pub fn succ(self) -> Option<Self> {
self.0.checked_add(1).map(Self)
}
/// The minimum representable tag
pub fn one() -> Self {
Self::new(1).unwrap()
}
}
impl std::default::Default for BorTag {
/// The default to be used when borrow tracking is disabled
fn default() -> Self {
Self::one()
}
}
impl fmt::Debug for BorTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "<{}>", self.0)
}
}
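
For readers following the new API, here is a self-contained sketch of how `BorTag` behaves. The type below is a stand-in that mirrors the definitions above, not the real Miri type:

```rust
use std::num::NonZeroU64;

// Stand-in mirroring the BorTag newtype above (illustrative only).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct BorTag(NonZeroU64);

impl BorTag {
    fn new(i: u64) -> Option<Self> {
        NonZeroU64::new(i).map(BorTag) // 0 is rejected: tags are non-zero
    }
    fn get(self) -> u64 {
        self.0.get()
    }
    fn succ(self) -> Option<Self> {
        self.0.checked_add(1).map(Self) // None on u64 overflow
    }
    fn one() -> Self {
        Self::new(1).unwrap() // the minimum representable tag
    }
}

fn main() {
    let t = BorTag::one();
    let u = t.succ().unwrap(); // tags are handed out by incrementing
    assert_eq!((t.get(), u.get()), (1, 2));
    assert!(BorTag::new(0).is_none());
}
```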
/// Per-call-stack-frame data for borrow tracking
#[derive(Debug)]
pub struct FrameState {
/// The ID of the call this frame corresponds to.
pub call_id: CallId,
/// If this frame is protecting any tags, they are listed here. We use this list to do
/// incremental updates of the global list of protected tags stored in the
/// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected
/// tag, to identify which call is responsible for protecting the tag.
/// See `Stack::item_popped` for more explanation.
///
/// This will contain one tag per reference passed to the function, so
/// a size of 2 is enough for the vast majority of functions.
pub protected_tags: SmallVec<[BorTag; 2]>,
}
impl VisitTags for FrameState {
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
// `protected_tags` are fine to GC.
}
}
/// Extra global state, available to the memory access hooks.
#[derive(Debug)]
pub struct GlobalStateInner {
/// Borrow tracker method currently in use.
pub borrow_tracker_method: BorrowTrackerMethod,
/// Next unused pointer ID (tag).
pub next_ptr_tag: BorTag,
/// Table storing the "base" tag for each allocation.
/// The base tag is the one used for the initial pointer.
/// We need this in a separate table to handle cyclic statics.
pub base_ptr_tags: FxHashMap<AllocId, BorTag>,
/// Next unused call ID (for protectors).
pub next_call_id: CallId,
/// All currently protected tags.
/// An item is protected if its tag is in this set, *and* it has the "protected" bit set.
/// We add tags to this when they are created with a protector in `reborrow`, and
/// we remove tags from this when the call which is protecting them returns, in
/// `GlobalStateInner::end_call`. See `Stack::item_popped` for more details.
pub protected_tags: FxHashMap<BorTag, ProtectorKind>,
/// The pointer ids to trace
pub tracked_pointer_tags: FxHashSet<BorTag>,
/// The call ids to trace
pub tracked_call_ids: FxHashSet<CallId>,
/// Whether to recurse into datatypes when searching for pointers to retag.
pub retag_fields: RetagFields,
}
impl VisitTags for GlobalStateInner {
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
// The only candidate is base_ptr_tags, and that does not need visiting since we don't ever
// GC the bottommost tag.
}
}
/// We need interior mutable access to the global state.
pub type GlobalState = RefCell<GlobalStateInner>;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum AccessKind {
Read,
Write,
}
impl fmt::Display for AccessKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AccessKind::Read => write!(f, "read access"),
AccessKind::Write => write!(f, "write access"),
}
}
}
/// Policy on whether to recurse into fields to retag
#[derive(Copy, Clone, Debug)]
pub enum RetagFields {
/// Don't retag any fields.
No,
/// Retag all fields.
Yes,
/// Only retag fields of types with Scalar and ScalarPair layout,
/// to match the LLVM `noalias` we generate.
OnlyScalar,
}
/// The flavor of the protector.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ProtectorKind {
/// Protected against aliasing violations from other pointers.
///
/// Items protected like this cause UB when they are invalidated, *but* the pointer itself may
/// still be used to issue a deallocation.
///
/// This is required for LLVM IR pointers that are `noalias` but *not* `dereferenceable`.
WeakProtector,
/// Protected against any kind of invalidation.
///
/// Items protected like this cause UB when they are invalidated or the memory is deallocated.
/// This is strictly stronger protection than `WeakProtector`.
///
/// This is required for LLVM IR pointers that are `dereferenceable` (and also allows `noalias`).
StrongProtector,
}
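
A small illustrative model of the rule the two variants encode. This is not Miri's actual check, just the doc comments above turned into a function:

```rust
// Invalidating a protected item is always UB; deallocation is UB
// only under a strong protector.
#[derive(Copy, Clone, PartialEq)]
enum ProtectorKind {
    WeakProtector,
    StrongProtector,
}

fn invalidation_is_ub(kind: ProtectorKind, is_deallocation: bool) -> bool {
    if is_deallocation {
        // A weakly protected pointer may still be used to deallocate.
        kind == ProtectorKind::StrongProtector
    } else {
        // Any other invalidation of a protected item is UB.
        true
    }
}

fn main() {
    assert!(!invalidation_is_ub(ProtectorKind::WeakProtector, true));
    assert!(invalidation_is_ub(ProtectorKind::StrongProtector, true));
    assert!(invalidation_is_ub(ProtectorKind::WeakProtector, false));
}
```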
/// Utilities for initialization and ID generation
impl GlobalStateInner {
pub fn new(
borrow_tracker_method: BorrowTrackerMethod,
tracked_pointer_tags: FxHashSet<BorTag>,
tracked_call_ids: FxHashSet<CallId>,
retag_fields: RetagFields,
) -> Self {
GlobalStateInner {
borrow_tracker_method,
next_ptr_tag: BorTag::one(),
base_ptr_tags: FxHashMap::default(),
next_call_id: NonZeroU64::new(1).unwrap(),
protected_tags: FxHashMap::default(),
tracked_pointer_tags,
tracked_call_ids,
retag_fields,
}
}
/// Generates a new pointer tag. Remember to also check track_pointer_tags and log its creation!
pub fn new_ptr(&mut self) -> BorTag {
let id = self.next_ptr_tag;
self.next_ptr_tag = id.succ().unwrap();
id
}
pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameState {
let call_id = self.next_call_id;
trace!("new_frame: Assigning call ID {}", call_id);
if self.tracked_call_ids.contains(&call_id) {
machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id));
}
self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap();
FrameState { call_id, protected_tags: SmallVec::new() }
}
pub fn end_call(&mut self, frame: &machine::FrameExtra<'_>) {
for tag in &frame
.borrow_tracker
.as_ref()
.expect("we should have borrow tracking data")
.protected_tags
{
self.protected_tags.remove(tag);
}
}
pub fn base_ptr_tag(&mut self, id: AllocId, machine: &MiriMachine<'_, '_>) -> BorTag {
self.base_ptr_tags.get(&id).copied().unwrap_or_else(|| {
let tag = self.new_ptr();
if self.tracked_pointer_tags.contains(&tag) {
machine.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
tag.inner(),
None,
None,
));
}
trace!("New allocation {:?} has base tag {:?}", id, tag);
self.base_ptr_tags.try_insert(id, tag).unwrap();
tag
})
}
}
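
The base-tag table above works as a lazy map: the first lookup for an allocation mints a fresh tag, and every later lookup returns the same one. A simplified sketch with stand-in types (`u32` for `AllocId`, `u64` for `BorTag`):

```rust
use std::collections::HashMap;

// Simplified model of the base-tag table; AllocId and BorTag are stand-ins.
struct State {
    next_ptr_tag: u64,
    base_ptr_tags: HashMap<u32, u64>,
}

impl State {
    fn base_ptr_tag(&mut self, id: u32) -> u64 {
        if let Some(&tag) = self.base_ptr_tags.get(&id) {
            return tag; // an allocation keeps its base tag forever
        }
        let tag = self.next_ptr_tag; // mint a fresh tag on first use
        self.next_ptr_tag += 1;
        self.base_ptr_tags.insert(id, tag);
        tag
    }
}

fn main() {
    let mut s = State { next_ptr_tag: 1, base_ptr_tags: HashMap::new() };
    let a = s.base_ptr_tag(7);
    let b = s.base_ptr_tag(9);
    assert_eq!((a, b), (1, 2)); // fresh tag per allocation
    assert_eq!(s.base_ptr_tag(7), a); // stable on repeated lookup
}
```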
/// Which borrow tracking method to use
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BorrowTrackerMethod {
/// Stacked Borrows, as implemented in borrow_tracker/stacked
StackedBorrows,
}
impl BorrowTrackerMethod {
pub fn instanciate_global_state(self, config: &MiriConfig) -> GlobalState {
RefCell::new(GlobalStateInner::new(
self,
config.tracked_pointer_tags.clone(),
config.tracked_call_ids.clone(),
config.retag_fields,
))
}
}
impl GlobalStateInner {
pub fn new_allocation(
&mut self,
id: AllocId,
alloc_size: Size,
kind: MemoryKind<machine::MiriMemoryKind>,
machine: &MiriMachine<'_, '_>,
) -> AllocState {
match self.borrow_tracker_method {
BorrowTrackerMethod::StackedBorrows =>
AllocState::StackedBorrows(Box::new(RefCell::new(Stacks::new_allocation(
id, alloc_size, self, kind, machine,
)))),
}
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let method = this.machine.borrow_tracker.as_ref().unwrap().borrow().borrow_tracker_method;
match method {
BorrowTrackerMethod::StackedBorrows => this.sb_retag(kind, place),
}
}
fn retag_return_place(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let method = this.machine.borrow_tracker.as_ref().unwrap().borrow().borrow_tracker_method;
match method {
BorrowTrackerMethod::StackedBorrows => this.sb_retag_return_place(),
}
}
fn expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let method = this.machine.borrow_tracker.as_ref().unwrap().borrow().borrow_tracker_method;
match method {
BorrowTrackerMethod::StackedBorrows => this.sb_expose_tag(alloc_id, tag),
}
}
}
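
All three hook methods above follow the same shape: read the configured `BorrowTrackerMethod`, then forward to the matching implementation. A minimal model of that dispatch pattern (simplified names; only Stacked Borrows exists at this point in the diff):

```rust
// One enum picks the borrow tracker, and every hook matches on it.
// Adding a second tracker later only means a new variant and match arm.
enum BorrowTrackerMethod {
    StackedBorrows,
}

struct Ctx {
    method: BorrowTrackerMethod,
    retags: u32,
}

impl Ctx {
    fn retag(&mut self) {
        match self.method {
            BorrowTrackerMethod::StackedBorrows => self.sb_retag(),
        }
    }
    fn sb_retag(&mut self) {
        self.retags += 1; // placeholder for the Stacked Borrows logic
    }
}

fn main() {
    let mut ctx = Ctx { method: BorrowTrackerMethod::StackedBorrows, retags: 0 };
    ctx.retag();
    assert_eq!(ctx.retags, 1);
}
```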
/// Extra per-allocation data for borrow tracking
#[derive(Debug, Clone)]
pub enum AllocState {
/// Data corresponding to Stacked Borrows
StackedBorrows(Box<RefCell<stacked_borrows::AllocState>>),
}
impl machine::AllocExtra {
#[track_caller]
pub fn borrow_tracker_sb(&self) -> &RefCell<stacked_borrows::AllocState> {
match self.borrow_tracker {
Some(AllocState::StackedBorrows(ref sb)) => sb,
_ => panic!("expected Stacked Borrows borrow tracking, got something else"),
}
}
#[track_caller]
pub fn borrow_tracker_sb_mut(&mut self) -> &mut RefCell<stacked_borrows::AllocState> {
match self.borrow_tracker {
Some(AllocState::StackedBorrows(ref mut sb)) => sb,
_ => panic!("expected Stacked Borrows borrow tracking, got something else"),
}
}
}
impl AllocState {
pub fn before_memory_read<'tcx>(
&self,
alloc_id: AllocId,
prov_extra: ProvenanceExtra,
range: AllocRange,
machine: &MiriMachine<'_, 'tcx>,
) -> InterpResult<'tcx> {
match self {
AllocState::StackedBorrows(sb) =>
sb.borrow_mut().before_memory_read(alloc_id, prov_extra, range, machine),
}
}
pub fn before_memory_write<'tcx>(
&mut self,
alloc_id: AllocId,
prov_extra: ProvenanceExtra,
range: AllocRange,
machine: &mut MiriMachine<'_, 'tcx>,
) -> InterpResult<'tcx> {
match self {
AllocState::StackedBorrows(sb) =>
sb.get_mut().before_memory_write(alloc_id, prov_extra, range, machine),
}
}
pub fn before_memory_deallocation<'tcx>(
&mut self,
alloc_id: AllocId,
prov_extra: ProvenanceExtra,
range: AllocRange,
machine: &mut MiriMachine<'_, 'tcx>,
) -> InterpResult<'tcx> {
match self {
AllocState::StackedBorrows(sb) =>
sb.get_mut().before_memory_deallocation(alloc_id, prov_extra, range, machine),
}
}
pub fn remove_unreachable_tags(&self, tags: &FxHashSet<BorTag>) {
match self {
AllocState::StackedBorrows(sb) => sb.borrow_mut().remove_unreachable_tags(tags),
}
}
}
impl VisitTags for AllocState {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
match self {
AllocState::StackedBorrows(sb) => sb.visit_tags(visit),
}
}
}


@ -1,15 +1,16 @@
use smallvec::SmallVec;
use std::fmt;
use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange};
use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange, InterpError};
use rustc_span::{Span, SpanData};
use rustc_target::abi::Size;
use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind};
use crate::borrow_tracker::{
stacked_borrows::{err_sb_ub, Permission},
AccessKind, GlobalStateInner, ProtectorKind,
};
use crate::*;
use rustc_middle::mir::interpret::InterpError;
#[derive(Clone, Debug)]
pub struct AllocHistory {
id: AllocId,
@ -51,7 +52,7 @@ impl Creation {
#[derive(Clone, Debug)]
struct Invalidation {
tag: SbTag,
tag: BorTag,
range: AllocRange,
span: Span,
cause: InvalidationCause,
@ -98,7 +99,7 @@ impl fmt::Display for InvalidationCause {
#[derive(Clone, Debug)]
struct Protection {
tag: SbTag,
tag: BorTag,
span: Span,
}
@ -133,7 +134,7 @@ impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
pub fn retag(
machine: &'ecx MiriMachine<'mir, 'tcx>,
cause: RetagCause,
new_tag: SbTag,
new_tag: BorTag,
orig_tag: ProvenanceExtra,
range: AllocRange,
) -> Self {
@ -183,7 +184,7 @@ enum Operation {
#[derive(Debug, Clone)]
struct RetagOp {
cause: RetagCause,
new_tag: SbTag,
new_tag: BorTag,
orig_tag: ProvenanceExtra,
range: AllocRange,
permission: Option<Permission>,
@ -255,7 +256,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
.push(Creation { retag: op.clone(), span: self.machine.current_span() });
}
pub fn log_invalidation(&mut self, tag: SbTag) {
pub fn log_invalidation(&mut self, tag: BorTag) {
let mut span = self.machine.current_span();
let (range, cause) = match &self.operation {
Operation::Retag(RetagOp { cause, range, permission, .. }) => {
@ -286,8 +287,8 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
pub fn get_logs_relevant_to(
&self,
tag: SbTag,
protector_tag: Option<SbTag>,
tag: BorTag,
protector_tag: Option<BorTag>,
) -> Option<TagHistory> {
let Some(created) = self.history
.creations
@ -408,7 +409,7 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
.all_stacks()
.flatten()
.map(|frame| {
frame.extra.stacked_borrows.as_ref().expect("we should have Stacked Borrows data")
frame.extra.borrow_tracker.as_ref().expect("we should have borrow tracking data")
})
.find(|frame| frame.protected_tags.contains(&item.tag()))
.map(|frame| frame.call_id)
@ -454,23 +455,18 @@ impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
if !global.tracked_pointer_tags.contains(&item.tag()) {
return;
}
let summary = match self.operation {
Operation::Dealloc(_) => None,
Operation::Access(AccessOp { kind, tag, .. }) => Some((tag, kind)),
let cause = match self.operation {
Operation::Dealloc(_) => format!(" due to deallocation"),
Operation::Access(AccessOp { kind, tag, .. }) =>
format!(" due to {kind:?} access for {tag:?}"),
Operation::Retag(RetagOp { orig_tag, permission, .. }) => {
let kind = match permission
.expect("start_grant should set the current permission before popping a tag")
{
Permission::SharedReadOnly => AccessKind::Read,
Permission::Unique => AccessKind::Write,
Permission::SharedReadWrite | Permission::Disabled => {
panic!("Only SharedReadOnly and Unique retags can pop tags");
}
};
Some((orig_tag, kind))
let permission = permission
.expect("start_grant should set the current permission before popping a tag");
format!(" due to {permission:?} retag from {orig_tag:?}")
}
};
self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, cause));
}
}


@ -1,13 +1,13 @@
use crate::stacked_borrows::SbTag;
use std::fmt;
use std::num::NonZeroU64;
use crate::borrow_tracker::BorTag;
/// An item in the per-location borrow stack.
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub struct Item(u64);
// An Item contains 3 bitfields:
// * Bits 0-60 store an SbTag
// * Bits 0-60 store a BorTag
// * Bits 61-62 store a Permission
// * Bit 63 stores a flag which indicates if we have a protector
const TAG_MASK: u64 = u64::MAX >> 3;
@ -18,9 +18,9 @@ const PERM_SHIFT: u64 = 61;
const PROTECTED_SHIFT: u64 = 63;
impl Item {
pub fn new(tag: SbTag, perm: Permission, protected: bool) -> Self {
assert!(tag.0.get() <= TAG_MASK);
let packed_tag = tag.0.get();
pub fn new(tag: BorTag, perm: Permission, protected: bool) -> Self {
assert!(tag.get() <= TAG_MASK);
let packed_tag = tag.get();
let packed_perm = perm.to_bits() << PERM_SHIFT;
let packed_protected = u64::from(protected) << PROTECTED_SHIFT;
@ -34,8 +34,8 @@ impl Item {
}
/// The pointers the permission is granted to.
pub fn tag(self) -> SbTag {
SbTag(NonZeroU64::new(self.0 & TAG_MASK).unwrap())
pub fn tag(self) -> BorTag {
BorTag::new(self.0 & TAG_MASK).unwrap()
}
/// The permission this item grants.
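
To make the bitfield layout concrete, here is a runnable sketch of the packing scheme implied by the constants above, using free functions instead of the real `Item` type:

```rust
// Low 61 bits hold the tag, two bits the permission, the top bit the
// protector flag, matching TAG_MASK = u64::MAX >> 3 above.
const TAG_MASK: u64 = u64::MAX >> 3; // bits 0-60
const PERM_SHIFT: u64 = 61; // bits 61-62
const PROTECTED_SHIFT: u64 = 63; // bit 63

fn pack(tag: u64, perm: u64, protected: bool) -> u64 {
    assert!(tag <= TAG_MASK && perm <= 0b11);
    tag | (perm << PERM_SHIFT) | (u64::from(protected) << PROTECTED_SHIFT)
}

fn main() {
    let item = pack(42, 0b01, true);
    assert_eq!(item & TAG_MASK, 42); // recover the tag
    assert_eq!((item >> PERM_SHIFT) & 0b11, 0b01); // recover the permission
    assert_eq!(item >> PROTECTED_SHIFT, 1); // protector flag is set
}
```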


@ -2,81 +2,30 @@
//! for further information.
use log::trace;
use std::cell::RefCell;
use std::cmp;
use std::fmt;
use std::fmt::Write;
use std::num::NonZeroU64;
use std::fmt::{self, Write};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir::Mutability;
use rustc_middle::mir::RetagKind;
use rustc_data_structures::fx::FxHashSet;
use rustc_middle::mir::{Mutability, RetagKind};
use rustc_middle::ty::{
self,
layout::{HasParamEnv, LayoutOf},
};
use rustc_target::abi::Abi;
use rustc_target::abi::Size;
use smallvec::SmallVec;
use rustc_target::abi::{Abi, Size};
use crate::borrow_tracker::{
stacked_borrows::diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, TagHistory},
AccessKind, GlobalStateInner, ProtectorKind, RetagCause, RetagFields,
};
use crate::*;
pub mod diagnostics;
use diagnostics::{AllocHistory, DiagnosticCx, DiagnosticCxBuilder, RetagCause, TagHistory};
mod item;
pub use item::{Item, Permission};
mod stack;
pub use stack::Stack;
pub mod diagnostics;
pub type CallId = NonZeroU64;
// Even reading memory can have effects on the stack, so we need a `RefCell` here.
pub type AllocExtra = RefCell<Stacks>;
/// Tracking pointer provenance
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub struct SbTag(NonZeroU64);
impl SbTag {
pub fn new(i: u64) -> Option<Self> {
NonZeroU64::new(i).map(SbTag)
}
// The default to be used when SB is disabled
#[allow(clippy::should_implement_trait)]
pub fn default() -> Self {
Self::new(1).unwrap()
}
}
impl fmt::Debug for SbTag {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "<{}>", self.0)
}
}
#[derive(Debug)]
pub struct FrameExtra {
/// The ID of the call this frame corresponds to.
call_id: CallId,
/// If this frame is protecting any tags, they are listed here. We use this list to do
/// incremental updates of the global list of protected tags stored in the
/// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected
/// tag, to identify which call is responsible for protecting the tag.
/// See `Stack::item_invalidated` for more explanation.
///
/// This will contain one tag per reference passed to the function, so
/// a size of 2 is enough for the vast majority of functions.
protected_tags: SmallVec<[SbTag; 2]>,
}
impl VisitTags for FrameExtra {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
// `protected_tags` are fine to GC.
}
}
pub type AllocState = Stacks;
/// Extra per-allocation state.
#[derive(Clone, Debug)]
@ -86,98 +35,16 @@ pub struct Stacks {
/// Stores past operations on this allocation
history: AllocHistory,
/// The set of tags that have been exposed inside this allocation.
exposed_tags: FxHashSet<SbTag>,
exposed_tags: FxHashSet<BorTag>,
/// Whether this memory has been modified since the last time the tag GC ran
modified_since_last_gc: bool,
}
/// The flavor of the protector.
#[derive(Copy, Clone, Debug)]
enum ProtectorKind {
/// Protected against aliasing violations from other pointers.
///
/// Items protected like this cause UB when they are invalidated, *but* the pointer itself may
/// still be used to issue a deallocation.
///
/// This is required for LLVM IR pointers that are `noalias` but *not* `dereferenceable`.
WeakProtector,
/// Protected against any kind of invalidation.
///
/// Items protected like this cause UB when they are invalidated or the memory is deallocated.
/// This is strictly stronger protection than `WeakProtector`.
///
/// This is required for LLVM IR pointers that are `dereferenceable` (and also allows `noalias`).
StrongProtector,
}
/// Extra global state, available to the memory access hooks.
#[derive(Debug)]
pub struct GlobalStateInner {
/// Next unused pointer ID (tag).
next_ptr_tag: SbTag,
/// Table storing the "base" tag for each allocation.
/// The base tag is the one used for the initial pointer.
/// We need this in a separate table to handle cyclic statics.
base_ptr_tags: FxHashMap<AllocId, SbTag>,
/// Next unused call ID (for protectors).
next_call_id: CallId,
/// All currently protected tags, and the status of their protection.
/// An item is protected if its tag is in this set, *and* it has the "protected" bit set.
/// We add tags to this when they are created with a protector in `reborrow`, and
/// we remove tags from this when the call which is protecting them returns, in
/// `GlobalStateInner::end_call`. See `Stack::item_invalidated` for more details.
protected_tags: FxHashMap<SbTag, ProtectorKind>,
/// The pointer ids to trace
tracked_pointer_tags: FxHashSet<SbTag>,
/// The call ids to trace
tracked_call_ids: FxHashSet<CallId>,
/// Whether to recurse into datatypes when searching for pointers to retag.
retag_fields: RetagFields,
}
#[derive(Copy, Clone, Debug)]
pub enum RetagFields {
/// Don't retag any fields.
No,
/// Retag all fields.
Yes,
/// Only retag fields of types with Scalar and ScalarPair layout,
/// to match the LLVM `noalias` we generate.
OnlyScalar,
}
impl VisitTags for GlobalStateInner {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
// The only candidate is base_ptr_tags, and that does not need visiting since we don't ever
// GC the bottommost tag.
}
}
/// We need interior mutable access to the global state.
pub type GlobalState = RefCell<GlobalStateInner>;
/// Indicates which kind of access is being performed.
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub enum AccessKind {
Read,
Write,
}
impl fmt::Display for AccessKind {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AccessKind::Read => write!(f, "read access"),
AccessKind::Write => write!(f, "write access"),
}
}
}
/// Indicates which kind of reference is being created.
/// Used by high-level `reborrow` to compute which permissions to grant to the
/// new pointer.
#[derive(Copy, Clone, Hash, PartialEq, Eq)]
pub enum RefKind {
enum RefKind {
/// `&mut` and `Box`.
Unique { two_phase: bool },
/// `&` with or without interior mutability.
@ -198,65 +65,6 @@ impl fmt::Display for RefKind {
}
}
/// Utilities for initialization and ID generation
impl GlobalStateInner {
pub fn new(
tracked_pointer_tags: FxHashSet<SbTag>,
tracked_call_ids: FxHashSet<CallId>,
retag_fields: RetagFields,
) -> Self {
GlobalStateInner {
next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
base_ptr_tags: FxHashMap::default(),
next_call_id: NonZeroU64::new(1).unwrap(),
protected_tags: FxHashMap::default(),
tracked_pointer_tags,
tracked_call_ids,
retag_fields,
}
}
/// Generates a new pointer tag. Remember to also check track_pointer_tags and log its creation!
fn new_ptr(&mut self) -> SbTag {
let id = self.next_ptr_tag;
self.next_ptr_tag = SbTag(NonZeroU64::new(id.0.get() + 1).unwrap());
id
}
pub fn new_frame(&mut self, machine: &MiriMachine<'_, '_>) -> FrameExtra {
let call_id = self.next_call_id;
trace!("new_frame: Assigning call ID {}", call_id);
if self.tracked_call_ids.contains(&call_id) {
machine.emit_diagnostic(NonHaltingDiagnostic::CreatedCallId(call_id));
}
self.next_call_id = NonZeroU64::new(call_id.get() + 1).unwrap();
FrameExtra { call_id, protected_tags: SmallVec::new() }
}
pub fn end_call(&mut self, frame: &machine::FrameData<'_>) {
for tag in &frame
.stacked_borrows
.as_ref()
.expect("we should have Stacked Borrows data")
.protected_tags
{
self.protected_tags.remove(tag);
}
}
pub fn base_ptr_tag(&mut self, id: AllocId, machine: &MiriMachine<'_, '_>) -> SbTag {
self.base_ptr_tags.get(&id).copied().unwrap_or_else(|| {
let tag = self.new_ptr();
if self.tracked_pointer_tags.contains(&tag) {
machine.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(tag.0, None, None));
}
trace!("New allocation {:?} has base tag {:?}", id, tag);
self.base_ptr_tags.try_insert(id, tag).unwrap();
tag
})
}
}
/// Error reporting
pub fn err_sb_ub<'tcx>(
msg: String,
@ -329,14 +137,7 @@ impl<'tcx> Stack {
}
}
/// Check if the given item is protected.
///
/// The `provoking_access` argument is only used to produce diagnostics.
/// It is `Some` when we are granting the contained access for said tag, and it is
/// `None` during a deallocation.
/// Within `provoking_access`, the `AllocRange` refers to the entire operation, and
/// the `Size` refers to the specific location in the `AllocRange` that we are
/// currently checking.
/// The given item was invalidated -- check its protectors for whether that will cause UB.
fn item_invalidated(
item: &Item,
global: &GlobalStateInner,
@ -386,7 +187,7 @@ impl<'tcx> Stack {
tag: ProvenanceExtra,
global: &GlobalStateInner,
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
exposed_tags: &FxHashSet<SbTag>,
exposed_tags: &FxHashSet<BorTag>,
) -> InterpResult<'tcx> {
// Two main steps: Find granting item, remove incompatible items above.
@ -442,23 +243,24 @@ impl<'tcx> Stack {
if granting_idx.is_none() || matches!(tag, ProvenanceExtra::Wildcard) {
// Compute the upper bound of the items that remain.
// (This is why we did all the work above: to reduce the items we have to consider here.)
let mut max = NonZeroU64::new(1).unwrap();
let mut max = BorTag::one();
for i in 0..self.len() {
let item = self.get(i).unwrap();
// Skip disabled items, they cannot be matched anyway.
if !matches!(item.perm(), Permission::Disabled) {
// We are looking for a strict upper bound, so add 1 to this tag.
max = cmp::max(item.tag().0.checked_add(1).unwrap(), max);
max = cmp::max(item.tag().succ().unwrap(), max);
}
}
if let Some(unk) = self.unknown_bottom() {
max = cmp::max(unk.0, max);
max = cmp::max(unk, max);
}
// Use `max` as new strict upper bound for everything.
trace!(
"access: forgetting stack to upper bound {max} due to wildcard or unknown access"
"access: forgetting stack to upper bound {max} due to wildcard or unknown access",
max = max.get(),
);
self.set_unknown_bottom(SbTag(max));
self.set_unknown_bottom(max);
}
// Done.
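
A worked version of the upper-bound computation above, with tags as plain integers. This sketch assumes `Disabled` items have already been filtered out, which the real loop does inline:

```rust
// After a wildcard/unknown access, everything below a strict upper bound
// on the surviving tags is forgotten via set_unknown_bottom.
fn new_unknown_bottom(stack_tags: &[u64], old_unknown: Option<u64>) -> u64 {
    let mut max = 1; // corresponds to BorTag::one()
    for &tag in stack_tags {
        max = max.max(tag + 1); // strict bound, hence succ(tag)
    }
    if let Some(unk) = old_unknown {
        max = max.max(unk); // the old bound is already strict
    }
    max
}

fn main() {
    // Stack holds tags 3 and 7, no unknown bottom yet: new bound is 8.
    assert_eq!(new_unknown_bottom(&[3, 7], None), 8);
    // An existing unknown bottom of 10 dominates.
    assert_eq!(new_unknown_bottom(&[3, 7], Some(10)), 10);
}
```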
@ -472,7 +274,7 @@ impl<'tcx> Stack {
tag: ProvenanceExtra,
global: &GlobalStateInner,
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
exposed_tags: &FxHashSet<SbTag>,
exposed_tags: &FxHashSet<BorTag>,
) -> InterpResult<'tcx> {
// Step 1: Make a write access.
// As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
@ -497,7 +299,7 @@ impl<'tcx> Stack {
access: Option<AccessKind>,
global: &GlobalStateInner,
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
exposed_tags: &FxHashSet<SbTag>,
exposed_tags: &FxHashSet<BorTag>,
) -> InterpResult<'tcx> {
dcx.start_grant(new.perm());
@ -550,9 +352,9 @@ impl<'tcx> Stack {
}
// # Stacked Borrows Core End
/// Integration with the SbTag garbage collector
/// Integration with the BorTag garbage collector
impl Stacks {
pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<SbTag>) {
pub fn remove_unreachable_tags(&mut self, live_tags: &FxHashSet<BorTag>) {
if self.modified_since_last_gc {
for stack in self.stacks.iter_mut_all() {
if stack.len() > 64 {
@ -565,7 +367,7 @@ impl Stacks {
}
impl VisitTags for Stacks {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
for tag in self.exposed_tags.iter().copied() {
visit(tag);
}
@ -579,7 +381,7 @@ impl<'tcx> Stacks {
fn new(
size: Size,
perm: Permission,
tag: SbTag,
tag: BorTag,
id: AllocId,
machine: &MiriMachine<'_, '_>,
) -> Self {
@ -602,7 +404,7 @@ impl<'tcx> Stacks {
mut f: impl FnMut(
&mut Stack,
&mut DiagnosticCx<'_, '_, '_, 'tcx>,
&mut FxHashSet<SbTag>,
&mut FxHashSet<BorTag>,
) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
self.modified_since_last_gc = true;
@ -620,20 +422,19 @@ impl Stacks {
pub fn new_allocation(
id: AllocId,
size: Size,
state: &GlobalState,
state: &mut GlobalStateInner,
kind: MemoryKind<MiriMemoryKind>,
machine: &MiriMachine<'_, '_>,
) -> Self {
let mut extra = state.borrow_mut();
let (base_tag, perm) = match kind {
// New unique borrow. This tag is not accessible by the program,
// so it will only ever be used when using the local directly (i.e.,
// not through a pointer). That is, whenever we directly write to a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers.
MemoryKind::Stack => (extra.base_ptr_tag(id, machine), Permission::Unique),
MemoryKind::Stack => (state.base_ptr_tag(id, machine), Permission::Unique),
// Everything else is shared by default.
_ => (extra.base_ptr_tag(id, machine), Permission::SharedReadWrite),
_ => (state.base_ptr_tag(id, machine), Permission::SharedReadWrite),
};
Stacks::new(size, perm, base_tag, id, machine)
}
@ -656,7 +457,7 @@ impl Stacks {
range.size.bytes()
);
let dcx = DiagnosticCxBuilder::read(machine, tag, range);
let state = machine.stacked_borrows.as_ref().unwrap().borrow();
let state = machine.borrow_tracker.as_ref().unwrap().borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
})
@ -677,7 +478,7 @@ impl Stacks {
range.size.bytes()
);
let dcx = DiagnosticCxBuilder::write(machine, tag, range);
let state = machine.stacked_borrows.as_ref().unwrap().borrow();
let state = machine.borrow_tracker.as_ref().unwrap().borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
})
@ -693,7 +494,7 @@ impl Stacks {
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
let state = machine.stacked_borrows.as_ref().unwrap().borrow();
let state = machine.borrow_tracker.as_ref().unwrap().borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.dealloc(tag, &state, dcx, exposed_tags)
})?;
@ -710,13 +511,13 @@ impl<'mir: 'ecx, 'tcx: 'mir, 'ecx> EvalContextPrivExt<'mir, 'tcx, 'ecx>
trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Returns the `AllocId` the reborrow was done in, if some actual borrow stack manipulation
/// happened.
fn reborrow(
fn sb_reborrow(
&mut self,
place: &MPlaceTy<'tcx, Provenance>,
size: Size,
kind: RefKind,
retag_cause: RetagCause, // What caused this retag, for diagnostics only
new_tag: SbTag,
new_tag: BorTag,
protect: Option<ProtectorKind>,
) -> InterpResult<'tcx, Option<AllocId>> {
let this = self.eval_context_mut();
@ -725,7 +526,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
let log_creation = |this: &MiriInterpCx<'mir, 'tcx>,
loc: Option<(AllocId, Size, ProvenanceExtra)>| // alloc_id, base_offset, orig_tag
-> InterpResult<'tcx> {
let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
let ty = place.layout.ty;
if global.tracked_pointer_tags.contains(&new_tag) {
let mut kind_str = format!("{kind}");
@ -743,7 +544,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
_ => write!(kind_str, " (pointee type {ty})").unwrap(),
};
this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag(
new_tag.0,
new_tag.inner(),
Some(kind_str),
loc.map(|(alloc_id, base_offset, orig_tag)| (alloc_id, alloc_range(base_offset, size), orig_tag)),
));
@ -762,9 +563,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
// uncovers a non-supported `extern static`.
let extra = this.get_alloc_extra(alloc_id)?;
let mut stacked_borrows = extra
.stacked_borrows
.as_ref()
.expect("we should have Stacked Borrows data")
.borrow_tracker_sb()
.borrow_mut();
// Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
// FIXME: can this be done cleaner?
@ -780,7 +579,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
if protect.is_some() {
dcx.log_protector();
}
}
},
AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
// No stacked borrows on these allocations.
}
@ -839,9 +638,9 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
if let Some(protect) = protect {
// See comment in `Stack::item_invalidated` for why we store the tag twice.
this.frame_mut().extra.stacked_borrows.as_mut().unwrap().protected_tags.push(new_tag);
this.frame_mut().extra.borrow_tracker.as_mut().unwrap().protected_tags.push(new_tag);
this.machine
.stacked_borrows
.borrow_tracker
.as_mut()
.unwrap()
.get_mut()
@ -875,11 +674,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
// We have to use shared references to alloc/memory_extra here since
// `visit_freeze_sensitive` needs to access the global state.
let alloc_extra = this.get_alloc_extra(alloc_id)?;
let mut stacked_borrows = alloc_extra
.stacked_borrows
.as_ref()
.expect("we should have Stacked Borrows data")
.borrow_mut();
let mut stacked_borrows = alloc_extra.borrow_tracker_sb().borrow_mut();
this.visit_freeze_sensitive(place, size, |mut range, frozen| {
// Adjust range.
range.start += base_offset;
@ -900,7 +695,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
false
};
let item = Item::new(new_tag, perm, protected);
let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
let global = this.machine.borrow_tracker.as_ref().unwrap().borrow();
let dcx = DiagnosticCxBuilder::retag(
&this.machine,
retag_cause,
@ -929,14 +724,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
// Note that this asserts that the allocation is mutable -- but since we are creating a
// mutable pointer, that seems reasonable.
let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
let stacked_borrows = alloc_extra
.stacked_borrows
.as_mut()
.expect("we should have Stacked Borrows data")
.get_mut();
let stacked_borrows = alloc_extra.borrow_tracker_sb_mut().get_mut();
let item = Item::new(new_tag, perm, protect.is_some());
let range = alloc_range(base_offset, size);
let global = machine.stacked_borrows.as_ref().unwrap().borrow();
let global = machine.borrow_tracker.as_ref().unwrap().borrow();
let dcx = DiagnosticCxBuilder::retag(
machine,
retag_cause,
@ -960,8 +751,8 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
}
/// Retags an individual pointer, returning the retagged version.
/// `mutbl` can be `None` to make this a raw pointer.
fn retag_reference(
/// `kind` indicates what kind of reference is being created.
fn sb_retag_reference(
&mut self,
val: &ImmTy<'tcx, Provenance>,
kind: RefKind,
@ -981,10 +772,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
};
// Compute new borrow.
let new_tag = this.machine.stacked_borrows.as_mut().unwrap().get_mut().new_ptr();
let new_tag = this.machine.borrow_tracker.as_mut().unwrap().get_mut().new_ptr();
// Reborrow.
let alloc_id = this.reborrow(&place, size, kind, retag_cause, new_tag, protect)?;
let alloc_id = this.sb_reborrow(&place, size, kind, retag_cause, new_tag, protect)?;
// Adjust pointer.
let new_place = place.map_provenance(|p| {
@ -993,7 +784,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
Some(alloc_id) => {
// If `reborrow` could figure out the AllocId of this ptr, hard-code it into the new one.
// Even if we started out with a wildcard, this newly retagged pointer is tied to that allocation.
Provenance::Concrete { alloc_id, sb: new_tag }
Provenance::Concrete { alloc_id, tag: new_tag }
}
None => {
// Looks like this has to stay a wildcard pointer.
@ -1011,9 +802,13 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn retag(&mut self, kind: RetagKind, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
fn sb_retag(
&mut self,
kind: RetagKind,
place: &PlaceTy<'tcx, Provenance>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let retag_fields = this.machine.stacked_borrows.as_mut().unwrap().get_mut().retag_fields;
let retag_fields = this.machine.borrow_tracker.as_mut().unwrap().get_mut().retag_fields;
let retag_cause = match kind {
RetagKind::TwoPhase { .. } => RetagCause::TwoPhase,
RetagKind::FnEntry => RetagCause::FnEntry,
@ -1039,7 +834,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
protector: Option<ProtectorKind>,
) -> InterpResult<'tcx> {
let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
let val = self.ecx.retag_reference(&val, ref_kind, retag_cause, protector)?;
let val = self.ecx.sb_retag_reference(&val, ref_kind, retag_cause, protector)?;
self.ecx.write_immediate(*val, place)?;
Ok(())
}
@ -1138,7 +933,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
///
/// This is a HACK because there is nothing in MIR that would make the retag
/// explicit. Also see <https://github.com/rust-lang/rust/issues/71117>.
fn retag_return_place(&mut self) -> InterpResult<'tcx> {
fn sb_retag_return_place(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let return_place = &this.frame().return_place;
if return_place.layout.is_zst() {
@ -1153,7 +948,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
// Reborrow it. With protection! That is part of the point.
let val = this.retag_reference(
let val = this.sb_retag_reference(
&val,
RefKind::Unique { two_phase: false },
RetagCause::FnReturn,
@ -1167,7 +962,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
/// Mark the given tag as exposed. It was found on a pointer with the given AllocId.
fn expose_tag(&mut self, alloc_id: AllocId, tag: SbTag) -> InterpResult<'tcx> {
fn sb_expose_tag(&mut self, alloc_id: AllocId, tag: BorTag) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// Function pointers and dead objects don't have an alloc_extra so we ignore them.
@ -1181,7 +976,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// uncovers a non-supported `extern static`.
let alloc_extra = this.get_alloc_extra(alloc_id)?;
trace!("Stacked Borrows tag {tag:?} exposed in {alloc_id:?}");
alloc_extra.stacked_borrows.as_ref().unwrap().borrow_mut().exposed_tags.insert(tag);
alloc_extra.borrow_tracker_sb().borrow_mut().exposed_tags.insert(tag);
}
AllocKind::Function | AllocKind::VTable | AllocKind::Dead => {
// No stacked borrows on these allocations.
@ -1193,7 +988,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn print_stacks(&mut self, alloc_id: AllocId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let alloc_extra = this.get_alloc_extra(alloc_id)?;
let stacks = alloc_extra.stacked_borrows.as_ref().unwrap().borrow();
let stacks = alloc_extra.borrow_tracker_sb().borrow();
for (range, stack) in stacks.stacks.iter_all() {
print!("{range:?}: [");
if let Some(bottom) = stack.unknown_bottom() {


@ -3,11 +3,14 @@ use std::ops::Range;
use rustc_data_structures::fx::FxHashSet;
use crate::stacked_borrows::{AccessKind, Item, Permission, SbTag};
use crate::borrow_tracker::{
stacked_borrows::{Item, Permission},
AccessKind, BorTag,
};
use crate::ProvenanceExtra;
/// Exactly what cache size we should use is a difficult tradeoff. There will always be some
/// workload which has a `SbTag` working set which exceeds the size of the cache, and ends up
/// workload which has a `BorTag` working set which exceeds the size of the cache, and ends up
/// falling back to linear searches of the borrow stack very often.
/// The cost of making this value too large is that the loop in `Stack::insert` which ensures the
/// entries in the cache stay correct after an insert becomes expensive.
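The tradeoff described above is between a small cache consulted first and the linear scan it short-circuits. A toy model of that lookup order, with an invented cache size and plain integer tags:

```rust
// Check recently-hit stack indices before scanning the whole borrow
// stack; on a slow-path hit, remember the index for next time.
fn find_granting(stack: &[u64], cache: &mut Vec<usize>, tag: u64) -> Option<usize> {
    // Fast path: cached indices whose entry still matches.
    if let Some(&idx) = cache.iter().find(|&&i| stack.get(i) == Some(&tag)) {
        return Some(idx);
    }
    // Slow path: linear search from the top of the stack.
    let idx = stack.iter().rposition(|&t| t == tag)?;
    cache.insert(0, idx); // most-recently-used first
    cache.truncate(32); // the fixed cache size is the tradeoff discussed above
    Some(idx)
}

fn main() {
    let stack = vec![1, 4, 9];
    let mut cache = Vec::new();
    assert_eq!(find_granting(&stack, &mut cache, 4), Some(1)); // slow path
    assert_eq!(find_granting(&stack, &mut cache, 4), Some(1)); // cache hit
    assert_eq!(find_granting(&stack, &mut cache, 5), None);
}
```
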
@ -28,7 +31,7 @@ pub struct Stack {
/// than `id`.
/// When the bottom is unknown, `borrows` always has a `SharedReadOnly` or `Unique` at the bottom;
/// we never have the unknown-to-known boundary in an SRW group.
unknown_bottom: Option<SbTag>,
unknown_bottom: Option<BorTag>,
/// A small LRU cache of searches of the borrow stack.
#[cfg(feature = "stack-cache")]
@ -40,7 +43,7 @@ pub struct Stack {
}
impl Stack {
pub fn retain(&mut self, tags: &FxHashSet<SbTag>) {
pub fn retain(&mut self, tags: &FxHashSet<BorTag>) {
let mut first_removed = None;
// We never consider removing the bottom-most tag. For stacks without an unknown
@ -185,7 +188,7 @@ impl<'tcx> Stack {
&mut self,
access: AccessKind,
tag: ProvenanceExtra,
exposed_tags: &FxHashSet<SbTag>,
exposed_tags: &FxHashSet<BorTag>,
) -> Result<Option<usize>, ()> {
#[cfg(all(feature = "stack-cache", debug_assertions))]
self.verify_cache_consistency();
@ -219,12 +222,12 @@ impl<'tcx> Stack {
// Couldn't find it in the stack; but if there is an unknown bottom it might be there.
let found = self.unknown_bottom.is_some_and(|unknown_limit| {
tag.0 < unknown_limit.0 // unknown_limit is an upper bound for what can be in the unknown bottom.
tag < unknown_limit // unknown_limit is an upper bound for what can be in the unknown bottom.
});
if found { Ok(None) } else { Err(()) }
}
fn find_granting_tagged(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
fn find_granting_tagged(&mut self, access: AccessKind, tag: BorTag) -> Option<usize> {
#[cfg(feature = "stack-cache")]
if let Some(idx) = self.find_granting_cache(access, tag) {
return Some(idx);
@ -243,7 +246,7 @@ impl<'tcx> Stack {
}
#[cfg(feature = "stack-cache")]
fn find_granting_cache(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
fn find_granting_cache(&mut self, access: AccessKind, tag: BorTag) -> Option<usize> {
// This looks like a common-sense optimization; we're going to do a linear search of the
// cache or the borrow stack to scan the shorter of the two. This optimization is minuscule
// and this check actually ensures we do not access an invalid cache.
@ -349,11 +352,11 @@ impl<'tcx> Stack {
self.borrows.len()
}
pub fn unknown_bottom(&self) -> Option<SbTag> {
pub fn unknown_bottom(&self) -> Option<BorTag> {
self.unknown_bottom
}
pub fn set_unknown_bottom(&mut self, tag: SbTag) {
pub fn set_unknown_bottom(&mut self, tag: BorTag) {
// We clear the borrow stack but the lookup cache doesn't support clearing per se. Instead,
// there is a check explained in `find_granting_cache` which protects against accessing the
// cache when it has been cleared and not yet refilled.


@ -59,7 +59,7 @@ use super::{
weak_memory::EvalContextExt as _,
};
pub type AllocExtra = VClockAlloc;
pub type AllocState = VClockAlloc;
/// Valid atomic read-write orderings, alias of atomic::Ordering (not non-exhaustive).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
@ -670,7 +670,7 @@ pub struct VClockAlloc {
}
impl VisitTags for VClockAlloc {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
// No tags here.
}
}
@ -1220,7 +1220,7 @@ pub struct GlobalState {
}
impl VisitTags for GlobalState {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
// We don't have any tags.
}
}


@ -45,7 +45,7 @@ pub(super) struct InitOnce<'mir, 'tcx> {
}
impl<'mir, 'tcx> VisitTags for InitOnce<'mir, 'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
for waiter in self.waiters.iter() {
waiter.callback.visit_tags(visit);
}


@ -181,7 +181,7 @@ pub(crate) struct SynchronizationState<'mir, 'tcx> {
}
impl<'mir, 'tcx> VisitTags for SynchronizationState<'mir, 'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
for init_once in self.init_onces.iter() {
init_once.visit_tags(visit);
}


@ -3,6 +3,7 @@
use std::cell::RefCell;
use std::collections::hash_map::Entry;
use std::num::TryFromIntError;
use std::task::Poll;
use std::time::{Duration, SystemTime};
use log::trace;
@ -16,18 +17,17 @@ use rustc_target::spec::abi::Abi;
use crate::concurrency::data_race;
use crate::concurrency::sync::SynchronizationState;
use crate::shims::tls;
use crate::*;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SchedulingAction {
enum SchedulingAction {
/// Execute step on the active thread.
ExecuteStep,
/// Execute a timeout callback.
ExecuteTimeoutCallback,
/// Execute destructors of the active thread.
ExecuteDtors,
/// Stop the program.
Stop,
/// Wait for a bit, until there is a timeout to be called.
Sleep(Duration),
}
/// Trait for callbacks that can be executed when some event happens, such as after a timeout.
@ -41,9 +41,6 @@ type TimeoutCallback<'mir, 'tcx> = Box<dyn MachineCallback<'mir, 'tcx> + 'tcx>;
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
pub struct ThreadId(u32);
/// The main thread. When it terminates, the whole application terminates.
const MAIN_THREAD: ThreadId = ThreadId(0);
impl ThreadId {
pub fn to_u32(self) -> u32 {
self.0
@ -116,7 +113,13 @@ pub struct Thread<'mir, 'tcx> {
thread_name: Option<Vec<u8>>,
/// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,
stack: Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>>,
/// The function to call when the stack ran empty, to figure out what to do next.
/// Conceptually, this is the interpreter implementation of the things that happen 'after' the
/// Rust language entry point for this thread returns (usually implemented by the C or OS runtime).
/// (`None` is an error, it means the callback has not been set up yet or is actively running.)
pub(crate) on_stack_empty: Option<StackEmptyCallback<'mir, 'tcx>>,
/// The index of the topmost user-relevant frame in `stack`. This field must contain
/// the value produced by `get_top_user_relevant_frame`.
@ -137,19 +140,10 @@ pub struct Thread<'mir, 'tcx> {
pub(crate) last_error: Option<MPlaceTy<'tcx, Provenance>>,
}
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
/// Check if the thread is done executing (no more stack frames). If yes,
/// change the state to terminated and return `true`.
fn check_terminated(&mut self) -> bool {
if self.state == ThreadState::Enabled {
if self.stack.is_empty() {
self.state = ThreadState::Terminated;
return true;
}
}
false
}
pub type StackEmptyCallback<'mir, 'tcx> =
Box<dyn FnMut(&mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx, Poll<()>>>;
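
The new `on_stack_empty` hook returns a `Poll` to say whether the thread is truly finished or has scheduled more work (such as TLS destructors). A self-contained sketch of that control flow with a stand-in interpreter type; the real callback receives `&mut MiriInterpCx` and returns `InterpResult<'tcx, Poll<()>>`:

```rust
use std::task::Poll;

// Stand-in for the interpreter context (illustrative only).
struct Interp {
    pending_dtors: u32,
}

type StackEmptyCallback = Box<dyn FnMut(&mut Interp) -> Poll<()>>;

fn main() {
    let mut on_stack_empty: StackEmptyCallback = Box::new(|interp| {
        if interp.pending_dtors > 0 {
            interp.pending_dtors -= 1; // "push" one more frame to run
            Poll::Pending // thread is not done yet
        } else {
            Poll::Ready(()) // nothing left: thread terminates
        }
    });
    let mut interp = Interp { pending_dtors: 2 };
    let mut rounds = 0;
    while on_stack_empty(&mut interp).is_pending() {
        rounds += 1; // the interpreter would run the new frames here
    }
    assert_eq!(rounds, 2);
}
```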
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
/// Get the name of the current thread, or `<unnamed>` if it was not set.
fn thread_name(&self) -> &[u8] {
if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
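
The `on_stack_empty` mechanism replaces the old `check_terminated` polling: whenever a thread's stack runs empty, its callback is polled and decides whether to push more work (TLS destructors, shutdown phases) or declare the thread done. A minimal standalone sketch of that contract, with an illustrative `Interp` stand-in for Miri's `MiriInterpCx` and the `InterpResult` error channel omitted:

use std::task::Poll;

// Stand-in for the interpreter context; the real type is `MiriInterpCx`.
struct Interp;

// Same shape as `StackEmptyCallback`: a boxed `FnMut` that is polled every
// time the thread's stack runs empty.
type StackEmptyCallback = Box<dyn FnMut(&mut Interp) -> Poll<()>>;

fn main() {
    let mut phases_left = 3u32; // pretend there are three shutdown phases
    let mut on_stack_empty: StackEmptyCallback = Box::new(move |_interp| {
        if phases_left == 0 {
            Poll::Ready(()) // nothing more to do: the thread can terminate
        } else {
            phases_left -= 1; // would push cleanup frames, then resume later
            Poll::Pending
        }
    });

    let mut interp = Interp;
    let mut polls = 0;
    while on_stack_empty(&mut interp).is_pending() {
        polls += 1; // the interpreter would execute the pushed frames here
    }
    println!("thread terminated after {polls} polls");
}
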
@ -202,30 +196,23 @@ impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
}
}
impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
fn default() -> Self {
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
fn new(name: Option<&str>, on_stack_empty: Option<StackEmptyCallback<'mir, 'tcx>>) -> Self {
Self {
state: ThreadState::Enabled,
thread_name: None,
thread_name: name.map(|name| Vec::from(name.as_bytes())),
stack: Vec::new(),
top_user_relevant_frame: None,
join_status: ThreadJoinStatus::Joinable,
panic_payload: None,
last_error: None,
on_stack_empty,
}
}
}
impl<'mir, 'tcx> Thread<'mir, 'tcx> {
fn new(name: &str) -> Self {
let mut thread = Thread::default();
thread.thread_name = Some(Vec::from(name.as_bytes()));
thread
}
}
impl VisitTags for Thread<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Thread {
panic_payload,
last_error,
@ -234,6 +221,7 @@ impl VisitTags for Thread<'_, '_> {
state: _,
thread_name: _,
join_status: _,
on_stack_empty: _, // we assume the closure captures no GC-relevant state
} = self;
panic_payload.visit_tags(visit);
@ -244,8 +232,8 @@ impl VisitTags for Thread<'_, '_> {
}
}
impl VisitTags for Frame<'_, '_, Provenance, FrameData<'_>> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
impl VisitTags for Frame<'_, '_, Provenance, FrameExtra<'_>> {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Frame {
return_place,
locals,
@ -327,24 +315,8 @@ pub struct ThreadManager<'mir, 'tcx> {
timeout_callbacks: FxHashMap<ThreadId, TimeoutCallbackInfo<'mir, 'tcx>>,
}
impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
fn default() -> Self {
let mut threads = IndexVec::new();
// Create the main thread and add it to the list of threads.
threads.push(Thread::new("main"));
Self {
active_thread: ThreadId::new(0),
threads,
sync: SynchronizationState::default(),
thread_local_alloc_ids: Default::default(),
yield_active_thread: false,
timeout_callbacks: FxHashMap::default(),
}
}
}
impl VisitTags for ThreadManager<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let ThreadManager {
threads,
thread_local_alloc_ids,
@ -367,8 +339,28 @@ impl VisitTags for ThreadManager<'_, '_> {
}
}
impl<'mir, 'tcx> Default for ThreadManager<'mir, 'tcx> {
fn default() -> Self {
let mut threads = IndexVec::new();
// Create the main thread and add it to the list of threads.
threads.push(Thread::new(Some("main"), None));
Self {
active_thread: ThreadId::new(0),
threads,
sync: SynchronizationState::default(),
thread_local_alloc_ids: Default::default(),
yield_active_thread: false,
timeout_callbacks: FxHashMap::default(),
}
}
}
impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
pub(crate) fn init(ecx: &mut MiriInterpCx<'mir, 'tcx>) {
pub(crate) fn init(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
) {
ecx.machine.threads.threads[ThreadId::new(0)].on_stack_empty = Some(on_main_stack_empty);
if ecx.tcx.sess.target.os.as_ref() != "windows" {
// The main thread can *not* be joined on except on windows.
ecx.machine.threads.threads[ThreadId::new(0)].join_status = ThreadJoinStatus::Detached;
@ -393,27 +385,27 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
/// Borrow the stack of the active thread.
pub fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>] {
pub fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>] {
&self.threads[self.active_thread].stack
}
/// Mutably borrow the stack of the active thread.
fn active_thread_stack_mut(
&mut self,
) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> {
) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>> {
&mut self.threads[self.active_thread].stack
}
pub fn all_stacks(
&self,
) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>]> {
) -> impl Iterator<Item = &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>]> {
self.threads.iter().map(|t| &t.stack[..])
}
/// Create a new thread and returns its id.
fn create_thread(&mut self) -> ThreadId {
fn create_thread(&mut self, on_stack_empty: StackEmptyCallback<'mir, 'tcx>) -> ThreadId {
let new_thread_id = ThreadId::new(self.threads.len());
self.threads.push(Default::default());
self.threads.push(Thread::new(None, Some(on_stack_empty)));
new_thread_id
}
@ -458,7 +450,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
/// Get a mutable borrow of the currently active thread.
fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
pub fn active_thread_mut(&mut self) -> &mut Thread<'mir, 'tcx> {
&mut self.threads[self.active_thread]
}
@ -669,18 +661,6 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
fn schedule(&mut self, clock: &Clock) -> InterpResult<'tcx, SchedulingAction> {
// Check whether the thread has **just** terminated (`check_terminated`
// checks whether the thread has popped all its stack and if yes, sets
// the thread state to terminated).
if self.threads[self.active_thread].check_terminated() {
return Ok(SchedulingAction::ExecuteDtors);
}
// If we get here again and the thread is *still* terminated, there are no more dtors to run.
if self.threads[MAIN_THREAD].state == ThreadState::Terminated {
// The main thread terminated; stop the program.
// We do *not* run TLS dtors of remaining threads, which seems to match rustc behavior.
return Ok(SchedulingAction::Stop);
}
// This thread and the program can keep going.
if self.threads[self.active_thread].state == ThreadState::Enabled
&& !self.yield_active_thread
@ -688,18 +668,18 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
// The currently active thread is still enabled, just continue with it.
return Ok(SchedulingAction::ExecuteStep);
}
// The active thread yielded. Let's see if there are any timeouts to take care of. We do
// this *before* running any other thread, to ensure that timeouts "in the past" fire before
// any other thread can take an action. This ensures that for `pthread_cond_timedwait`, "an
// error is returned if [...] the absolute time specified by abstime has already been passed
// at the time of the call".
// The active thread yielded or got terminated. Let's see if there are any timeouts to take
// care of. We do this *before* running any other thread, to ensure that timeouts "in the
// past" fire before any other thread can take an action. This ensures that for
// `pthread_cond_timedwait`, "an error is returned if [...] the absolute time specified by
// abstime has already been passed at the time of the call".
// <https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_cond_timedwait.html>
let potential_sleep_time =
self.timeout_callbacks.values().map(|info| info.call_time.get_wait_time(clock)).min();
if potential_sleep_time == Some(Duration::new(0, 0)) {
return Ok(SchedulingAction::ExecuteTimeoutCallback);
}
// No callbacks scheduled, pick a regular thread to execute.
// No callbacks immediately scheduled, pick a regular thread to execute.
// The active thread blocked or yielded. So we go search for another enabled thread.
// Crucially, we start searching at the current active thread ID, rather than at 0, since we
// want to avoid always scheduling threads 0 and 1 without ever making progress in thread 2.
@ -730,15 +710,58 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
// All threads are currently blocked, but we have unexecuted
// timeout_callbacks, which may unblock some of the threads. Hence,
// sleep until the first callback.
clock.sleep(sleep_time);
Ok(SchedulingAction::ExecuteTimeoutCallback)
Ok(SchedulingAction::Sleep(sleep_time))
} else {
throw_machine_stop!(TerminationInfo::Deadlock);
}
}
}
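
The decision logic above can be summarized outside the interpreter. A simplified sketch of the scheduling policy, with illustrative `State` and `Action` types (the real code also tracks which thread to switch to and derives wait times from its `Clock`):

use std::time::Duration;

#[derive(Clone, Copy, PartialEq)]
enum State { Enabled, Blocked }

#[derive(Debug)]
enum Action { ExecuteStep, ExecuteTimeoutCallback, Sleep(Duration) }

// `next_timeout` is the wait time until the earliest registered callback.
fn schedule(
    threads: &[State],
    active: usize,
    yielded: bool,
    next_timeout: Option<Duration>,
) -> Result<Action, &'static str> {
    // Keep running the active thread as long as it is enabled and did not yield.
    if threads[active] == State::Enabled && !yielded {
        return Ok(Action::ExecuteStep);
    }
    // Timeouts that are already due fire before any other thread may act.
    if next_timeout == Some(Duration::ZERO) {
        return Ok(Action::ExecuteTimeoutCallback);
    }
    // Round-robin, starting *after* the active thread so that low thread ids
    // do not starve the rest.
    let n = threads.len();
    for i in 1..=n {
        let id = (active + i) % n;
        if threads[id] == State::Enabled {
            return Ok(Action::ExecuteStep); // the caller would switch to `id`
        }
    }
    match next_timeout {
        // Everyone is blocked, but a callback will eventually unblock someone.
        Some(wait) => Ok(Action::Sleep(wait)),
        None => Err("deadlock: all threads blocked, no timeouts pending"),
    }
}

fn main() {
    let all_blocked = [State::Blocked, State::Blocked];
    println!("{:?}", schedule(&all_blocked, 0, false, Some(Duration::from_millis(5))));
    let one_runnable = [State::Blocked, State::Enabled];
    println!("{:?}", schedule(&one_runnable, 0, true, None));
}
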
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for MiriInterpCx<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
/// Execute a timeout callback on the callback's thread.
#[inline]
fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let (thread, callback) = if let Some((thread, callback)) =
this.machine.threads.get_ready_callback(&this.machine.clock)
{
(thread, callback)
} else {
// get_ready_callback can return None if the computer's clock
// was shifted after calling the scheduler and before the call
// to get_ready_callback (see issue
// https://github.com/rust-lang/miri/issues/1763). In this case,
// just do nothing, which effectively just returns to the
// scheduler.
return Ok(());
};
// This back-and-forth with `set_active_thread` is here because of two
// design decisions:
// 1. Make the caller and not the callback responsible for changing
// thread.
// 2. Make the scheduler the only place that can change the active
// thread.
let old_thread = this.set_active_thread(thread);
callback.call(this)?;
this.set_active_thread(old_thread);
Ok(())
}
#[inline]
fn run_on_stack_empty(&mut self) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
let mut callback = this
.active_thread_mut()
.on_stack_empty
.take()
.expect("`on_stack_empty` not set up, or already running");
let res = callback(this)?;
this.active_thread_mut().on_stack_empty = Some(callback);
Ok(res)
}
}
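
`run_on_stack_empty` has to call a closure that is stored inside the very machine state it mutates, so the closure is moved out with `take()` and put back afterwards, mirroring the save-and-restore that `run_timeout_callback` does with the active thread. A minimal sketch of the idiom, with an illustrative `Machine` type:

struct Machine {
    counter: u32,
    on_stack_empty: Option<Box<dyn FnMut(&mut Machine)>>,
}

fn run_on_stack_empty(m: &mut Machine) {
    // Move the callback out so calling it does not alias the `&mut Machine`
    // it needs as an argument; `None` doubles as the "already running" marker.
    let mut cb = m.on_stack_empty.take().expect("not set up, or already running");
    cb(m);
    m.on_stack_empty = Some(cb);
}

fn main() {
    let mut m = Machine {
        counter: 0,
        on_stack_empty: Some(Box::new(|m: &mut Machine| m.counter += 1)),
    };
    run_on_stack_empty(&mut m);
    assert_eq!(m.counter, 1);
}
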
// Public interface to thread management.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
@ -773,18 +796,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
}
/// Start a regular (non-main) thread.
#[inline]
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
let id = this.machine.threads.create_thread();
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(&this.machine.threads, id);
}
id
}
#[inline]
fn start_thread(
fn start_regular_thread(
&mut self,
thread: Option<MPlaceTy<'tcx, Provenance>>,
start_routine: Pointer<Option<Provenance>>,
@ -795,7 +809,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let this = self.eval_context_mut();
// Create the new thread
let new_thread_id = this.create_thread();
let new_thread_id = this.machine.threads.create_thread({
let mut state = tls::TlsDtorsState::default();
Box::new(move |m| state.on_stack_empty(m))
});
if let Some(data_race) = &mut this.machine.data_race {
data_race.thread_created(&this.machine.threads, new_thread_id);
}
// Write the current thread-id, switch to the next thread later
// to treat this write operation as occurring on the current thread.
@ -888,12 +908,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.machine.threads.get_total_thread_count()
}
#[inline]
fn has_terminated(&self, thread_id: ThreadId) -> bool {
let this = self.eval_context_ref();
this.machine.threads.has_terminated(thread_id)
}
#[inline]
fn have_all_terminated(&self) -> bool {
let this = self.eval_context_ref();
@ -907,7 +921,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
#[inline]
fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>] {
fn active_thread_stack(&self) -> &[Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>] {
let this = self.eval_context_ref();
this.machine.threads.active_thread_stack()
}
@ -915,7 +929,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
#[inline]
fn active_thread_stack_mut(
&mut self,
) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> {
) -> &mut Vec<Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>> {
let this = self.eval_context_mut();
this.machine.threads.active_thread_stack_mut()
}
@ -943,26 +957,22 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
where
'mir: 'c,
{
let this = self.eval_context_ref();
this.machine.threads.get_thread_name(thread)
self.eval_context_ref().machine.threads.get_thread_name(thread)
}
#[inline]
fn block_thread(&mut self, thread: ThreadId) {
let this = self.eval_context_mut();
this.machine.threads.block_thread(thread);
self.eval_context_mut().machine.threads.block_thread(thread);
}
#[inline]
fn unblock_thread(&mut self, thread: ThreadId) {
let this = self.eval_context_mut();
this.machine.threads.unblock_thread(thread);
self.eval_context_mut().machine.threads.unblock_thread(thread);
}
#[inline]
fn yield_active_thread(&mut self) {
let this = self.eval_context_mut();
this.machine.threads.yield_active_thread();
self.eval_context_mut().machine.threads.yield_active_thread();
}
#[inline]
@ -995,49 +1005,42 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
this.machine.threads.unregister_timeout_callback_if_exists(thread);
}
/// Execute a timeout callback on the callback's thread.
#[inline]
fn run_timeout_callback(&mut self) -> InterpResult<'tcx> {
/// Run the core interpreter loop. Returns only when an interrupt occurs (an error or program
/// termination).
fn run_threads(&mut self) -> InterpResult<'tcx, !> {
let this = self.eval_context_mut();
let (thread, callback) = if let Some((thread, callback)) =
this.machine.threads.get_ready_callback(&this.machine.clock)
{
(thread, callback)
} else {
// get_ready_callback can return None if the computer's clock
// was shifted after calling the scheduler and before the call
// to get_ready_callback (see issue
// https://github.com/rust-lang/miri/issues/1763). In this case,
// just do nothing, which effectively just returns to the
// scheduler.
return Ok(());
};
// This back-and-forth with `set_active_thread` is here because of two
// design decisions:
// 1. Make the caller and not the callback responsible for changing
// thread.
// 2. Make the scheduler the only place that can change the active
// thread.
let old_thread = this.set_active_thread(thread);
callback.call(this)?;
this.set_active_thread(old_thread);
Ok(())
}
/// Decide which action to take next and on which thread.
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
this.machine.threads.schedule(&this.machine.clock)
loop {
match this.machine.threads.schedule(&this.machine.clock)? {
SchedulingAction::ExecuteStep => {
if !this.step()? {
// See if this thread can do something else.
match this.run_on_stack_empty()? {
Poll::Pending => {} // keep going
Poll::Ready(()) => this.terminate_active_thread()?,
}
}
}
SchedulingAction::ExecuteTimeoutCallback => {
this.run_timeout_callback()?;
}
SchedulingAction::Sleep(duration) => {
this.machine.clock.sleep(duration);
}
}
}
}
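
`run_threads` returns `InterpResult<'tcx, !>`: the loop can only be left through the error channel, which includes regular program termination. On stable Rust the same shape can be sketched with `Infallible` standing in for `!`:

use std::convert::Infallible;

// The eval loop only ever leaves through the error channel (which includes
// "the program exited"), so its success type is uninhabited.
fn run_threads() -> Result<Infallible, String> {
    loop {
        // ... schedule and execute steps ...
        return Err("the evaluated program completed with exit code 0".into());
    }
}

fn main() {
    let msg = match run_threads() {
        Err(e) => e,
        Ok(never) => match never {}, // statically unreachable
    };
    println!("{msg}");
}
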
/// Handles thread termination of the active thread: wakes up threads joining on this one,
/// and deallocates thread-local statics.
///
/// This is called from `tls.rs` after handling the TLS dtors.
/// This is called by the eval loop when a thread's on_stack_empty returns `Ready`.
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
fn terminate_active_thread(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let thread = this.active_thread_mut();
assert!(thread.stack.is_empty(), "only threads with an empty stack can be terminated");
thread.state = ThreadState::Terminated;
for ptr in this.machine.threads.thread_terminated(this.machine.data_race.as_mut()) {
this.deallocate_ptr(ptr.into(), None, MiriMemoryKind::Tls.into())?;
}


@ -404,67 +404,49 @@ mod tests {
assert_eq!(
alt_compare,
o.map(Ordering::reverse),
"Invalid alt comparison\n l: {:?}\n r: {:?}",
l,
r
"Invalid alt comparison\n l: {l:?}\n r: {r:?}"
);
// Test operators with faster implementations
assert_eq!(
matches!(compare, Some(Ordering::Less)),
l < r,
"Invalid (<):\n l: {:?}\n r: {:?}",
l,
r
"Invalid (<):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(compare, Some(Ordering::Less) | Some(Ordering::Equal)),
l <= r,
"Invalid (<=):\n l: {:?}\n r: {:?}",
l,
r
"Invalid (<=):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(compare, Some(Ordering::Greater)),
l > r,
"Invalid (>):\n l: {:?}\n r: {:?}",
l,
r
"Invalid (>):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(compare, Some(Ordering::Greater) | Some(Ordering::Equal)),
l >= r,
"Invalid (>=):\n l: {:?}\n r: {:?}",
l,
r
"Invalid (>=):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(alt_compare, Some(Ordering::Less)),
r < l,
"Invalid alt (<):\n l: {:?}\n r: {:?}",
l,
r
"Invalid alt (<):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(alt_compare, Some(Ordering::Less) | Some(Ordering::Equal)),
r <= l,
"Invalid alt (<=):\n l: {:?}\n r: {:?}",
l,
r
"Invalid alt (<=):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(alt_compare, Some(Ordering::Greater)),
r > l,
"Invalid alt (>):\n l: {:?}\n r: {:?}",
l,
r
"Invalid alt (>):\n l: {l:?}\n r: {r:?}"
);
assert_eq!(
matches!(alt_compare, Some(Ordering::Greater) | Some(Ordering::Equal)),
r >= l,
"Invalid alt (>=):\n l: {:?}\n r: {:?}",
l,
r
"Invalid alt (>=):\n l: {l:?}\n r: {r:?}"
);
}
}
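
The assertions here were rewritten to capture identifiers directly in the format string. Both spellings produce identical output, as this small sketch shows (requires Rust 1.58 or later):

fn main() {
    let l = Some(1);
    let r = None::<i32>;
    // Identifiers can be captured directly in the format string, which is all
    // the rewritten assertion messages rely on.
    let old_style = format!("Invalid (<):\n l: {:?}\n r: {:?}", l, r);
    let new_style = format!("Invalid (<):\n l: {l:?}\n r: {r:?}");
    assert_eq!(old_style, new_style);
}
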


@ -93,7 +93,7 @@ use super::{
vector_clock::{VClock, VTimestamp, VectorIdx},
};
pub type AllocExtra = StoreBufferAlloc;
pub type AllocState = StoreBufferAlloc;
// Each store buffer must be bounded otherwise it will grow indefinitely.
// However, bounding the store buffer means restricting the amount of weak
@ -109,7 +109,7 @@ pub struct StoreBufferAlloc {
}
impl VisitTags for StoreBufferAlloc {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Self { store_buffers } = self;
for val in store_buffers
.borrow()


@ -6,12 +6,15 @@ use log::trace;
use rustc_span::{source_map::DUMMY_SP, SpanData, Symbol};
use rustc_target::abi::{Align, Size};
use crate::stacked_borrows::{diagnostics::TagHistory, AccessKind};
use crate::borrow_tracker::stacked_borrows::diagnostics::TagHistory;
use crate::*;
/// Details of premature program termination.
pub enum TerminationInfo {
Exit(i64),
Exit {
code: i64,
leak_check: bool,
},
Abort(String),
UnsupportedInIsolation(String),
StackedBorrowsUb {
@ -38,7 +41,7 @@ impl fmt::Display for TerminationInfo {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use TerminationInfo::*;
match self {
Exit(code) => write!(f, "the evaluated program completed with exit code {code}"),
Exit { code, .. } => write!(f, "the evaluated program completed with exit code {code}"),
Abort(msg) => write!(f, "{msg}"),
UnsupportedInIsolation(msg) => write!(f, "{msg}"),
Int2PtrWithStrictProvenance =>
@ -64,9 +67,8 @@ pub enum NonHaltingDiagnostic {
///
/// new_kind is `None` for base tags.
CreatedPointerTag(NonZeroU64, Option<String>, Option<(AllocId, AllocRange, ProvenanceExtra)>),
/// This `Item` was popped from the borrow stack, either due to an access with the given tag or
/// a deallocation when the second argument is `None`.
PoppedPointerTag(Item, Option<(ProvenanceExtra, AccessKind)>),
/// This `Item` was popped from the borrow stack. The string explains the reason.
PoppedPointerTag(Item, String),
CreatedCallId(CallId),
CreatedAlloc(AllocId, Size, Align, MemoryKind<MiriMemoryKind>),
FreedAlloc(AllocId),
@ -148,11 +150,11 @@ fn prune_stacktrace<'tcx>(
/// Emit a custom diagnostic without going through the miri-engine machinery.
///
/// Returns `Some` if this was regular program termination with a given exit code, `None` otherwise.
/// Returns `Some` if this was regular program termination with a given exit code and a `bool` indicating whether a leak check should happen; `None` otherwise.
pub fn report_error<'tcx, 'mir>(
ecx: &InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>,
e: InterpErrorInfo<'tcx>,
) -> Option<i64> {
) -> Option<(i64, bool)> {
use InterpError::*;
let mut msg = vec![];
@ -161,7 +163,7 @@ pub fn report_error<'tcx, 'mir>(
let info = info.downcast_ref::<TerminationInfo>().expect("invalid MachineStop payload");
use TerminationInfo::*;
let title = match info {
Exit(code) => return Some(*code),
Exit { code, leak_check } => return Some((*code, *leak_check)),
Abort(_) => Some("abnormal termination"),
UnsupportedInIsolation(_) | Int2PtrWithStrictProvenance =>
Some("unsupported operation"),
@ -396,15 +398,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
format!(
"created tag {tag:?} for {kind} at {alloc_id:?}{range:?} derived from {orig_tag:?}"
),
PoppedPointerTag(item, tag) =>
match tag {
None => format!("popped tracked tag for item {item:?} due to deallocation",),
Some((tag, access)) => {
format!(
"popped tracked tag for item {item:?} due to {access:?} access for {tag:?}",
)
}
},
PoppedPointerTag(item, cause) => format!("popped tracked tag for item {item:?}{cause}"),
CreatedCallId(id) => format!("function call with id {id}"),
CreatedAlloc(AllocId(id), size, align, kind) =>
format!(
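
The `Exit` variant now carries a `leak_check` flag next to the exit code, so `report_error` can tell its caller whether leak checking should still run; the `exit`/`ExitProcess` shims request `leak_check: false`, while a normal return from `main` uses `true`. A reduced sketch of that flow, with illustrative stand-ins for the real types:

// Simplified stand-ins; the real `TerminationInfo` has many more variants.
enum TerminationInfo {
    Exit { code: i64, leak_check: bool },
    Abort(String),
}

fn report_error(info: &TerminationInfo) -> Option<(i64, bool)> {
    match info {
        // Regular program termination: hand back the code plus the flag.
        TerminationInfo::Exit { code, leak_check } => Some((*code, *leak_check)),
        // A real error: print it and return no exit code.
        TerminationInfo::Abort(msg) => {
            eprintln!("{msg}");
            None
        }
    }
}

fn main() {
    // `ExitProcess` kills the process immediately, so leak checking is skipped.
    let via_exit_process = TerminationInfo::Exit { code: 0, leak_check: false };
    assert_eq!(report_error(&via_exit_process), Some((0, false)));
    assert_eq!(report_error(&TerminationInfo::Abort("aborted".into())), None);
}
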


@ -4,10 +4,12 @@ use std::ffi::{OsStr, OsString};
use std::iter;
use std::panic::{self, AssertUnwindSafe};
use std::path::PathBuf;
use std::task::Poll;
use std::thread;
use log::info;
use crate::borrow_tracker::RetagFields;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir::def::Namespace;
use rustc_hir::def_id::DefId;
@ -20,8 +22,14 @@ use rustc_target::spec::abi::Abi;
use rustc_session::config::EntryFnType;
use crate::shims::tls;
use crate::*;
/// When the main thread would exit, we will yield to any other thread that is ready to execute.
/// But we must only do that a finite number of times, or a background thread running `loop {}`
/// will hang the program.
const MAIN_THREAD_YIELDS_AT_SHUTDOWN: u32 = 256;
#[derive(Copy, Clone, Debug, PartialEq)]
pub enum AlignmentCheck {
/// Do not check alignment.
@ -80,7 +88,7 @@ pub struct MiriConfig {
/// Determine if validity checking is enabled.
pub validate: bool,
/// Determines if Stacked Borrows is enabled.
pub stacked_borrows: bool,
pub borrow_tracker: Option<BorrowTrackerMethod>,
/// Controls alignment checking.
pub check_alignment: AlignmentCheck,
/// Controls function [ABI](Abi) checking.
@ -96,7 +104,7 @@ pub struct MiriConfig {
/// The seed to use when non-determinism or randomness are required (e.g. ptr-to-int cast, `getrandom()`).
pub seed: Option<u64>,
/// The stacked borrows pointer ids to report about
pub tracked_pointer_tags: FxHashSet<SbTag>,
pub tracked_pointer_tags: FxHashSet<BorTag>,
/// The stacked borrows call IDs to report about
pub tracked_call_ids: FxHashSet<CallId>,
/// The allocation ids to report about.
@ -131,7 +139,7 @@ pub struct MiriConfig {
/// The location of a shared object file to load when calling external functions
/// FIXME! consider allowing users to specify paths to multiple SO files, or to a directory
pub external_so_file: Option<PathBuf>,
/// Run a garbage collector for SbTags every N basic blocks.
/// Run a garbage collector for BorTags every N basic blocks.
pub gc_interval: u32,
/// The number of CPUs to be reported by miri.
pub num_cpus: u32,
@ -142,7 +150,7 @@ impl Default for MiriConfig {
MiriConfig {
env: vec![],
validate: true,
stacked_borrows: true,
borrow_tracker: Some(BorrowTrackerMethod::StackedBorrows),
check_alignment: AlignmentCheck::Int,
check_abi: true,
isolated_op: IsolatedOp::Reject(RejectOpWith::Abort),
@ -172,17 +180,79 @@ impl Default for MiriConfig {
}
}
/// Returns a freshly created `InterpCx`, along with an `MPlaceTy` representing
/// the location where the return value of the `start` function will be
/// written to.
/// The state of the main thread. Implementation detail of `on_main_stack_empty`.
#[derive(Default, Debug)]
enum MainThreadState {
#[default]
Running,
TlsDtors(tls::TlsDtorsState),
Yield {
remaining: u32,
},
Done,
}
impl MainThreadState {
fn on_main_stack_empty<'tcx>(
&mut self,
this: &mut MiriInterpCx<'_, 'tcx>,
) -> InterpResult<'tcx, Poll<()>> {
use MainThreadState::*;
match self {
Running => {
*self = TlsDtors(Default::default());
}
TlsDtors(state) =>
match state.on_stack_empty(this)? {
Poll::Pending => {} // just keep going
Poll::Ready(()) => {
// Give background threads a chance to finish by yielding the main thread a
// couple of times -- but only if we would also preempt threads randomly.
if this.machine.preemption_rate > 0.0 {
// There is a non-zero chance they will yield back to us often enough to
// make Miri terminate eventually.
*self = Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN };
} else {
// The other threads did not get preempted, so no need to yield back to
// them.
*self = Done;
}
}
},
Yield { remaining } =>
match remaining.checked_sub(1) {
None => *self = Done,
Some(new_remaining) => {
*remaining = new_remaining;
this.yield_active_thread();
}
},
Done => {
// Figure out exit code.
let ret_place = MPlaceTy::from_aligned_ptr(
this.machine.main_fn_ret_place.unwrap().ptr,
this.machine.layouts.isize,
);
let exit_code = this.read_scalar(&ret_place.into())?.to_machine_isize(this)?;
// Need to call this ourselves since we are not going to return to the scheduler
// loop, and we want the main thread TLS to not show up as memory leaks.
this.terminate_active_thread()?;
// Stop interpreter loop.
throw_machine_stop!(TerminationInfo::Exit { code: exit_code, leak_check: true });
}
}
Ok(Poll::Pending)
}
}
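
Taken together, `MainThreadState` is a four-state machine driven by the stack-empty callback. A self-contained model of it, with an illustrative `Poll<i64>` result in place of reading the return place and the TLS dtor phase collapsed to a single step:

use std::task::Poll;

const MAIN_THREAD_YIELDS_AT_SHUTDOWN: u32 = 256;

enum MainThreadState {
    Running,
    TlsDtors,
    Yield { remaining: u32 },
    Done,
}

impl MainThreadState {
    fn on_stack_empty(&mut self, preemption_rate: f64) -> Poll<i64> {
        use MainThreadState::*;
        match self {
            Running => *self = TlsDtors,
            TlsDtors => {
                // Pretend the TLS dtor state machine finished in one step.
                *self = if preemption_rate > 0.0 {
                    // Non-zero preemption: give background threads a bounded
                    // chance to finish before shutting down.
                    Yield { remaining: MAIN_THREAD_YIELDS_AT_SHUTDOWN }
                } else {
                    // Without preemption, yielding could never terminate.
                    Done
                };
            }
            Yield { remaining } => match remaining.checked_sub(1) {
                None => *self = Done,
                Some(n) => *remaining = n, // yield_active_thread() would go here
            },
            Done => return Poll::Ready(0), // real code reads the return place
        }
        Poll::Pending
    }
}

fn main() {
    let mut state = MainThreadState::Running;
    let mut calls = 0u32;
    let code = loop {
        calls += 1;
        if let Poll::Ready(code) = state.on_stack_empty(0.01) {
            break code;
        }
    };
    println!("exit code {code} after {calls} callback polls");
}
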
/// Returns a freshly created `InterpCx`.
/// Public because this is also used by `priroda`.
pub fn create_ecx<'mir, 'tcx: 'mir>(
tcx: TyCtxt<'tcx>,
entry_id: DefId,
entry_type: EntryFnType,
config: &MiriConfig,
) -> InterpResult<'tcx, (InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>, MPlaceTy<'tcx, Provenance>)>
{
) -> InterpResult<'tcx, InterpCx<'mir, 'tcx, MiriMachine<'mir, 'tcx>>> {
let param_env = ty::ParamEnv::reveal_all();
let layout_cx = LayoutCx { tcx, param_env };
let mut ecx = InterpCx::new(
@ -193,7 +263,11 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
);
// Some parts of initialization require a full `InterpCx`.
MiriMachine::late_init(&mut ecx, config)?;
MiriMachine::late_init(&mut ecx, config, {
let mut state = MainThreadState::default();
// Cannot capture anything GC-relevant here.
Box::new(move |m| state.on_main_stack_empty(m))
})?;
// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS);
@ -274,6 +348,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
// Return place (in static memory so that it does not count as leak).
let ret_place = ecx.allocate(ecx.machine.layouts.isize, MiriMemoryKind::Machine.into())?;
ecx.machine.main_fn_ret_place = Some(*ret_place);
// Call start function.
match entry_type {
@ -321,7 +396,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
}
}
Ok((ecx, ret_place))
Ok(ecx)
}
/// Evaluates the entry function specified by `entry_id`.
@ -337,7 +412,7 @@ pub fn eval_entry<'tcx>(
// Copy setting before we move `config`.
let ignore_leaks = config.ignore_leaks;
let (mut ecx, ret_place) = match create_ecx(tcx, entry_id, entry_type, &config) {
let mut ecx = match create_ecx(tcx, entry_id, entry_type, &config) {
Ok(v) => v,
Err(err) => {
err.print_backtrace();
@ -346,34 +421,17 @@ pub fn eval_entry<'tcx>(
};
// Perform the main execution.
let res: thread::Result<InterpResult<'_, i64>> = panic::catch_unwind(AssertUnwindSafe(|| {
// Main loop.
loop {
match ecx.schedule()? {
SchedulingAction::ExecuteStep => {
assert!(ecx.step()?, "a terminated thread was scheduled for execution");
}
SchedulingAction::ExecuteTimeoutCallback => {
ecx.run_timeout_callback()?;
}
SchedulingAction::ExecuteDtors => {
// This will either enable the thread again (so we go back
// to `ExecuteStep`), or determine that this thread is done
// for good.
ecx.schedule_next_tls_dtor_for_active_thread()?;
}
SchedulingAction::Stop => {
break;
}
}
}
let return_code = ecx.read_scalar(&ret_place.into())?.to_machine_isize(&ecx)?;
Ok(return_code)
}));
let res: thread::Result<InterpResult<'_, !>> =
panic::catch_unwind(AssertUnwindSafe(|| ecx.run_threads()));
let res = res.unwrap_or_else(|panic_payload| {
ecx.handle_ice();
panic::resume_unwind(panic_payload)
});
let res = match res {
Err(res) => res,
// `Ok` can never happen
Ok(never) => match never {},
};
// Machine cleanup. Only do this if all threads have terminated; threads that are still running
// might cause Stacked Borrows errors (https://github.com/rust-lang/miri/issues/2396).
@ -386,32 +444,26 @@ pub fn eval_entry<'tcx>(
}
// Process the result.
match res {
Ok(return_code) => {
if !ignore_leaks {
// Check for thread leaks.
if !ecx.have_all_terminated() {
tcx.sess.err(
"the main thread terminated without waiting for all remaining threads",
);
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
return None;
}
// Check for memory leaks.
info!("Additonal static roots: {:?}", ecx.machine.static_roots);
let leaks = ecx.leak_report(&ecx.machine.static_roots);
if leaks != 0 {
tcx.sess.err("the evaluated program leaked memory");
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
// Ignore the provided return code - let the reported error
// determine the return code.
return None;
}
}
Some(return_code)
let (return_code, leak_check) = report_error(&ecx, res)?;
if leak_check && !ignore_leaks {
// Check for thread leaks.
if !ecx.have_all_terminated() {
tcx.sess.err("the main thread terminated without waiting for all remaining threads");
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
return None;
}
// Check for memory leaks.
info!("Additonal static roots: {:?}", ecx.machine.static_roots);
let leaks = ecx.leak_report(&ecx.machine.static_roots);
if leaks != 0 {
tcx.sess.err("the evaluated program leaked memory");
tcx.sess.note_without_error("pass `-Zmiri-ignore-leaks` to disable this check");
// Ignore the provided return code - let the reported error
// determine the return code.
return None;
}
Err(e) => report_error(&ecx, e),
}
Some(return_code)
}
/// Turns an array of arguments into a Windows command line string.


@ -554,9 +554,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
assert_eq!(
self.eval_context_ref().tcx.sess.target.os,
target_os,
"`{}` is only available on the `{}` target OS",
name,
target_os,
"`{name}` is only available on the `{target_os}` target OS",
)
}
@ -566,8 +564,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn assert_target_os_is_unix(&self, name: &str) {
assert!(
target_os_is_unix(self.eval_context_ref().tcx.sess.target.os.as_ref()),
"`{}` is only available for supported UNIX family targets",
name,
"`{name}` is only available for supported UNIX family targets",
);
}
@ -988,7 +985,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
self.stack()[frame_idx].current_span()
}
fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameData<'tcx>>] {
fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameExtra<'tcx>>] {
self.threads.active_thread_stack()
}
@ -1019,8 +1016,7 @@ where
pub fn isolation_abort_error<'tcx>(name: &str) -> InterpResult<'tcx> {
throw_machine_stop!(TerminationInfo::UnsupportedInIsolation(format!(
"{} not available when isolation is enabled",
name,
"{name} not available when isolation is enabled",
)))
}


@ -45,7 +45,7 @@ pub struct GlobalStateInner {
}
impl VisitTags for GlobalStateInner {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
// Nothing to visit here.
}
}
@ -105,15 +105,15 @@ impl<'mir, 'tcx> GlobalStateInner {
pub fn expose_ptr(
ecx: &mut MiriInterpCx<'mir, 'tcx>,
alloc_id: AllocId,
sb: SbTag,
tag: BorTag,
) -> InterpResult<'tcx> {
let global_state = ecx.machine.intptrcast.get_mut();
// In strict mode, we don't need this, so we can save some cycles by not tracking it.
if global_state.provenance_mode != ProvenanceMode::Strict {
trace!("Exposing allocation id {alloc_id:?}");
global_state.exposed.insert(alloc_id);
if ecx.machine.stacked_borrows.is_some() {
ecx.expose_tag(alloc_id, sb)?;
if ecx.machine.borrow_tracker.is_some() {
ecx.expose_tag(alloc_id, tag)?;
}
}
Ok(())


@ -53,6 +53,7 @@ extern crate rustc_session;
extern crate rustc_span;
extern crate rustc_target;
mod borrow_tracker;
mod clock;
mod concurrency;
mod diagnostics;
@ -64,7 +65,6 @@ mod mono_hash_map;
mod operator;
mod range_map;
mod shims;
mod stacked_borrows;
mod tag_gc;
// Establish a "crate-wide prelude": we often import `crate::*`.
@ -81,15 +81,21 @@ pub use crate::shims::intrinsics::EvalContextExt as _;
pub use crate::shims::os_str::EvalContextExt as _;
pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
pub use crate::shims::time::EvalContextExt as _;
pub use crate::shims::tls::{EvalContextExt as _, TlsData};
pub use crate::shims::tls::TlsData;
pub use crate::shims::EvalContextExt as _;
pub use crate::borrow_tracker::stacked_borrows::{
EvalContextExt as _, Item, Permission, Stack, Stacks,
};
pub use crate::borrow_tracker::{
BorTag, BorrowTrackerMethod, CallId, EvalContextExt as _, RetagFields,
};
pub use crate::clock::{Clock, Instant};
pub use crate::concurrency::{
data_race::{AtomicFenceOrd, AtomicReadOrd, AtomicRwOrd, AtomicWriteOrd, EvalContextExt as _},
init_once::{EvalContextExt as _, InitOnceId},
sync::{CondvarId, EvalContextExt as _, MutexId, RwLockId, SyncId},
thread::{EvalContextExt as _, SchedulingAction, ThreadId, ThreadManager, ThreadState, Time},
thread::{EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, Time},
};
pub use crate::diagnostics::{
report_error, EvalContextExt as _, NonHaltingDiagnostic, TerminationInfo,
@ -100,15 +106,12 @@ pub use crate::eval::{
pub use crate::helpers::EvalContextExt as _;
pub use crate::intptrcast::ProvenanceMode;
pub use crate::machine::{
AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind,
AllocExtra, FrameExtra, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind,
PrimitiveLayouts, Provenance, ProvenanceExtra, PAGE_SIZE, STACK_ADDR, STACK_SIZE,
};
pub use crate::mono_hash_map::MonoHashMap;
pub use crate::operator::EvalContextExt as _;
pub use crate::range_map::RangeMap;
pub use crate::stacked_borrows::{
CallId, EvalContextExt as _, Item, Permission, RetagFields, SbTag, Stack, Stacks,
};
pub use crate::tag_gc::{EvalContextExt as _, VisitTags};
/// Insert rustc arguments at the beginning of the argument list that Miri wants to be


@ -37,9 +37,9 @@ pub const STACK_ADDR: u64 = 32 * PAGE_SIZE; // not really about the "stack", but
pub const STACK_SIZE: u64 = 16 * PAGE_SIZE; // whatever
/// Extra data stored with each stack frame
pub struct FrameData<'tcx> {
pub struct FrameExtra<'tcx> {
/// Extra data for Stacked Borrows.
pub stacked_borrows: Option<stacked_borrows::FrameExtra>,
pub borrow_tracker: Option<borrow_tracker::FrameState>,
/// If this is Some(), then this is a special "catch unwind" frame (the frame of `try_fn`
/// called by `try`). When this frame is popped during unwinding a panic,
@ -58,23 +58,23 @@ pub struct FrameData<'tcx> {
pub is_user_relevant: bool,
}
impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
impl<'tcx> std::fmt::Debug for FrameExtra<'tcx> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Omitting `timing`, it does not support `Debug`.
let FrameData { stacked_borrows, catch_unwind, timing: _, is_user_relevant: _ } = self;
let FrameExtra { borrow_tracker, catch_unwind, timing: _, is_user_relevant: _ } = self;
f.debug_struct("FrameData")
.field("stacked_borrows", stacked_borrows)
.field("borrow_tracker", borrow_tracker)
.field("catch_unwind", catch_unwind)
.finish()
}
}
impl VisitTags for FrameData<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let FrameData { catch_unwind, stacked_borrows, timing: _, is_user_relevant: _ } = self;
impl VisitTags for FrameExtra<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let FrameExtra { catch_unwind, borrow_tracker, timing: _, is_user_relevant: _ } = self;
catch_unwind.visit_tags(visit);
stacked_borrows.visit_tags(visit);
borrow_tracker.visit_tags(visit);
}
}
@ -147,7 +147,7 @@ pub enum Provenance {
Concrete {
alloc_id: AllocId,
/// Stacked Borrows tag.
sb: SbTag,
tag: BorTag,
},
Wildcard,
}
@ -173,7 +173,7 @@ impl std::hash::Hash for Provenance {
/// The "extra" information a pointer has over a regular AllocId.
#[derive(Copy, Clone, PartialEq)]
pub enum ProvenanceExtra {
Concrete(SbTag),
Concrete(BorTag),
Wildcard,
}
@ -188,7 +188,7 @@ static_assert_size!(Scalar<Provenance>, 32);
impl fmt::Debug for Provenance {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Provenance::Concrete { alloc_id, sb } => {
Provenance::Concrete { alloc_id, tag } => {
// Forward `alternate` flag to `alloc_id` printing.
if f.alternate() {
write!(f, "[{alloc_id:#?}]")?;
@ -196,7 +196,7 @@ impl fmt::Debug for Provenance {
write!(f, "[{alloc_id:?}]")?;
}
// Print Stacked Borrows tag.
write!(f, "{sb:?}")?;
write!(f, "{tag:?}")?;
}
Provenance::Wildcard => {
write!(f, "[wildcard]")?;
@ -221,9 +221,9 @@ impl interpret::Provenance for Provenance {
match (left, right) {
// If both are the *same* concrete tag, that is the result.
(
Some(Provenance::Concrete { alloc_id: left_alloc, sb: left_sb }),
Some(Provenance::Concrete { alloc_id: right_alloc, sb: right_sb }),
) if left_alloc == right_alloc && left_sb == right_sb => left,
Some(Provenance::Concrete { alloc_id: left_alloc, tag: left_tag }),
Some(Provenance::Concrete { alloc_id: right_alloc, tag: right_tag }),
) if left_alloc == right_alloc && left_tag == right_tag => left,
// If one side is a wildcard, the best possible outcome is that it is equal to the other
// one, and we use that.
(Some(Provenance::Wildcard), o) | (o, Some(Provenance::Wildcard)) => o,
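
The `join` arms shown above implement a simple lattice: equal provenances survive, a wildcard defers to the other side, and any remaining mismatch drops provenance entirely. A reduced sketch with an illustrative `Prov` type:

#[derive(Clone, Copy, PartialEq, Debug)]
enum Prov {
    Concrete { alloc_id: u64, tag: u64 },
    Wildcard,
}

fn join(left: Option<Prov>, right: Option<Prov>) -> Option<Prov> {
    match (left, right) {
        // Identical provenance on both sides survives the join.
        (Some(l), Some(r)) if l == r => left,
        // A wildcard is compatible with anything, so keep the other side.
        (Some(Prov::Wildcard), o) | (o, Some(Prov::Wildcard)) => o,
        // Conflicting or missing provenance: the result has none.
        _ => None,
    }
}

fn main() {
    let a = Prov::Concrete { alloc_id: 1, tag: 7 };
    assert_eq!(join(Some(a), Some(a)), Some(a));
    assert_eq!(join(Some(Prov::Wildcard), Some(a)), Some(a));
    assert_eq!(join(Some(a), None), None);
}
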
@ -243,7 +243,7 @@ impl fmt::Debug for ProvenanceExtra {
}
impl ProvenanceExtra {
pub fn and_then<T>(self, f: impl FnOnce(SbTag) -> Option<T>) -> Option<T> {
pub fn and_then<T>(self, f: impl FnOnce(BorTag) -> Option<T>) -> Option<T> {
match self {
ProvenanceExtra::Concrete(pid) => f(pid),
ProvenanceExtra::Wildcard => None,
@ -254,21 +254,21 @@ impl ProvenanceExtra {
/// Extra per-allocation data
#[derive(Debug, Clone)]
pub struct AllocExtra {
/// Stacked Borrows state is only added if it is enabled.
pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
/// Global state of the borrow tracker, if enabled.
pub borrow_tracker: Option<borrow_tracker::AllocState>,
/// Data race detection via the use of a vector-clock,
/// this is only added if it is enabled.
pub data_race: Option<data_race::AllocExtra>,
pub data_race: Option<data_race::AllocState>,
/// Weak memory emulation via the use of store buffers,
/// this is only added if it is enabled.
pub weak_memory: Option<weak_memory::AllocExtra>,
pub weak_memory: Option<weak_memory::AllocState>,
}
impl VisitTags for AllocExtra {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let AllocExtra { stacked_borrows, data_race, weak_memory } = self;
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let AllocExtra { borrow_tracker, data_race, weak_memory } = self;
stacked_borrows.visit_tags(visit);
borrow_tracker.visit_tags(visit);
data_race.visit_tags(visit);
weak_memory.visit_tags(visit);
}
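
Note the pattern used throughout the `visit_tags` implementations: `self` is destructured without a `..` rest pattern, so adding a field to the struct is a compile error until the tag GC is taught about it. A minimal sketch of the idiom, with illustrative types:

#[derive(Clone, Copy, Debug)]
struct BorTag(u64);

trait VisitTags {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag));
}

struct AllocExtra {
    borrow_tracker: Option<BorTag>,
    data_race: Option<BorTag>,
}

impl VisitTags for AllocExtra {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
        // No `..` rest pattern: a new field makes this line fail to compile,
        // so the GC cannot silently miss tags stored in new fields.
        let AllocExtra { borrow_tracker, data_race } = self;
        if let Some(tag) = borrow_tracker {
            visit(*tag);
        }
        if let Some(tag) = data_race {
            visit(*tag);
        }
    }
}

fn main() {
    let extra = AllocExtra { borrow_tracker: Some(BorTag(42)), data_race: None };
    let mut seen = Vec::new();
    extra.visit_tags(&mut |tag| seen.push(tag));
    println!("{seen:?}");
}
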
@ -350,8 +350,8 @@ pub struct MiriMachine<'mir, 'tcx> {
// We carry a copy of the global `TyCtxt` for convenience, so methods taking just `&Evaluator` have `tcx` access.
pub tcx: TyCtxt<'tcx>,
/// Stacked Borrows global data.
pub stacked_borrows: Option<stacked_borrows::GlobalState>,
/// Global data for borrow tracking.
pub borrow_tracker: Option<borrow_tracker::GlobalState>,
/// Data race detector global data.
pub data_race: Option<data_race::GlobalState>,
@ -363,6 +363,9 @@ pub struct MiriMachine<'mir, 'tcx> {
/// Miri does not expose env vars from the host to the emulated program.
pub(crate) env_vars: EnvVars<'tcx>,
/// Return place of the main function.
pub(crate) main_fn_ret_place: Option<MemPlace<Provenance>>,
/// Program arguments (`Option` because we can only initialize them after creating the ecx).
/// These are *pointers* to argc/argv because macOS.
/// We also need the full command line as one string because of Windows.
@ -460,9 +463,9 @@ pub struct MiriMachine<'mir, 'tcx> {
#[cfg(not(target_os = "linux"))]
pub external_so_lib: Option<!>,
/// Run a garbage collector for SbTags every N basic blocks.
/// Run a garbage collector for BorTags every N basic blocks.
pub(crate) gc_interval: u32,
/// The number of blocks that passed since the last SbTag GC pass.
/// The number of blocks that passed since the last BorTag GC pass.
pub(crate) since_gc: u32,
/// The number of CPUs to be reported by miri.
pub(crate) num_cpus: u32,
@ -477,21 +480,16 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
measureme::Profiler::new(out).expect("Couldn't create `measureme` profiler")
});
let rng = StdRng::seed_from_u64(config.seed.unwrap_or(0));
let stacked_borrows = config.stacked_borrows.then(|| {
RefCell::new(stacked_borrows::GlobalStateInner::new(
config.tracked_pointer_tags.clone(),
config.tracked_call_ids.clone(),
config.retag_fields,
))
});
let borrow_tracker = config.borrow_tracker.map(|bt| bt.instanciate_global_state(config));
let data_race = config.data_race_detector.then(|| data_race::GlobalState::new(config));
MiriMachine {
tcx: layout_cx.tcx,
stacked_borrows,
borrow_tracker,
data_race,
intptrcast: RefCell::new(intptrcast::GlobalStateInner::new(config)),
// `env_vars` depends on a full interpreter so we cannot properly initialize it yet.
env_vars: EnvVars::default(),
main_fn_ret_place: None,
argc: None,
argv: None,
cmd_line: None,
@ -556,10 +554,11 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
pub(crate) fn late_init(
this: &mut MiriInterpCx<'mir, 'tcx>,
config: &MiriConfig,
on_main_stack_empty: StackEmptyCallback<'mir, 'tcx>,
) -> InterpResult<'tcx> {
EnvVars::init(this, config)?;
MiriMachine::init_extern_statics(this)?;
ThreadManager::init(this);
ThreadManager::init(this, on_main_stack_empty);
Ok(())
}
@ -651,18 +650,19 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
}
impl VisitTags for MiriMachine<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
#[rustfmt::skip]
let MiriMachine {
threads,
tls,
env_vars,
main_fn_ret_place,
argc,
argv,
cmd_line,
extern_statics,
dir_handler,
stacked_borrows,
borrow_tracker,
data_race,
intptrcast,
file_handler,
@ -700,8 +700,9 @@ impl VisitTags for MiriMachine<'_, '_> {
dir_handler.visit_tags(visit);
file_handler.visit_tags(visit);
data_race.visit_tags(visit);
stacked_borrows.visit_tags(visit);
borrow_tracker.visit_tags(visit);
intptrcast.visit_tags(visit);
main_fn_ret_place.visit_tags(visit);
argc.visit_tags(visit);
argv.visit_tags(visit);
cmd_line.visit_tags(visit);
@ -735,7 +736,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
type MemoryKind = MiriMemoryKind;
type ExtraFnVal = Dlsym;
type FrameExtra = FrameData<'tcx>;
type FrameExtra = FrameExtra<'tcx>;
type AllocExtra = AllocExtra;
type Provenance = Provenance;
@ -900,25 +901,24 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
}
let alloc = alloc.into_owned();
let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| {
Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind, &ecx.machine)
});
let borrow_tracker = ecx
.machine
.borrow_tracker
.as_ref()
.map(|bt| bt.borrow_mut().new_allocation(id, alloc.size(), kind, &ecx.machine));
let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| {
data_race::AllocExtra::new_allocation(
data_race::AllocState::new_allocation(
data_race,
&ecx.machine.threads,
alloc.size(),
kind,
)
});
let buffer_alloc = ecx.machine.weak_memory.then(weak_memory::AllocExtra::new_allocation);
let buffer_alloc = ecx.machine.weak_memory.then(weak_memory::AllocState::new_allocation);
let alloc: Allocation<Provenance, Self::AllocExtra> = alloc.adjust_from_tcx(
&ecx.tcx,
AllocExtra {
stacked_borrows: stacks.map(RefCell::new),
data_race: race_alloc,
weak_memory: buffer_alloc,
},
AllocExtra { borrow_tracker, data_race: race_alloc, weak_memory: buffer_alloc },
|ptr| ecx.global_base_pointer(ptr),
)?;
Ok(Cow::Owned(alloc))
@ -942,14 +942,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
}
}
let absolute_addr = intptrcast::GlobalStateInner::rel_ptr_to_addr(ecx, ptr);
let sb_tag = if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
stacked_borrows.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine)
let tag = if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
borrow_tracker.borrow_mut().base_ptr_tag(ptr.provenance, &ecx.machine)
} else {
// Value does not matter, SB is disabled
SbTag::default()
BorTag::default()
};
Pointer::new(
Provenance::Concrete { alloc_id: ptr.provenance, sb: sb_tag },
Provenance::Concrete { alloc_id: ptr.provenance, tag },
Size::from_bytes(absolute_addr),
)
}
@ -967,8 +967,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
ptr: Pointer<Self::Provenance>,
) -> InterpResult<'tcx> {
match ptr.provenance {
Provenance::Concrete { alloc_id, sb } =>
intptrcast::GlobalStateInner::expose_ptr(ecx, alloc_id, sb),
Provenance::Concrete { alloc_id, tag } =>
intptrcast::GlobalStateInner::expose_ptr(ecx, alloc_id, tag),
Provenance::Wildcard => {
// No need to do anything for wildcard pointers as
// their provenances have already been previously exposed.
@ -986,11 +986,11 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
let rel = intptrcast::GlobalStateInner::abs_ptr_to_rel(ecx, ptr);
rel.map(|(alloc_id, size)| {
let sb = match ptr.provenance {
Provenance::Concrete { sb, .. } => ProvenanceExtra::Concrete(sb),
let tag = match ptr.provenance {
Provenance::Concrete { tag, .. } => ProvenanceExtra::Concrete(tag),
Provenance::Wildcard => ProvenanceExtra::Wildcard,
};
(alloc_id, size, sb)
(alloc_id, size, tag)
})
}
@ -1005,10 +1005,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
if let Some(data_race) = &alloc_extra.data_race {
data_race.read(alloc_id, range, machine)?;
}
if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
stacked_borrows
.borrow_mut()
.before_memory_read(alloc_id, prov_extra, range, machine)?;
if let Some(borrow_tracker) = &alloc_extra.borrow_tracker {
borrow_tracker.before_memory_read(alloc_id, prov_extra, range, machine)?;
}
if let Some(weak_memory) = &alloc_extra.weak_memory {
weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
@ -1027,8 +1025,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
if let Some(data_race) = &mut alloc_extra.data_race {
data_race.write(alloc_id, range, machine)?;
}
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
stacked_borrows.get_mut().before_memory_write(alloc_id, prov_extra, range, machine)?;
if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
borrow_tracker.before_memory_write(alloc_id, prov_extra, range, machine)?;
}
if let Some(weak_memory) = &alloc_extra.weak_memory {
weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
@ -1050,16 +1048,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
if let Some(data_race) = &mut alloc_extra.data_race {
data_race.deallocate(alloc_id, range, machine)?;
}
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
stacked_borrows.get_mut().before_memory_deallocation(
alloc_id,
prove_extra,
range,
machine,
)
} else {
Ok(())
if let Some(borrow_tracker) = &mut alloc_extra.borrow_tracker {
borrow_tracker.before_memory_deallocation(alloc_id, prove_extra, range, machine)?;
}
Ok(())
}
#[inline(always)]
@ -1068,14 +1060,17 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
kind: mir::RetagKind,
place: &PlaceTy<'tcx, Provenance>,
) -> InterpResult<'tcx> {
if ecx.machine.stacked_borrows.is_some() { ecx.retag(kind, place) } else { Ok(()) }
if ecx.machine.borrow_tracker.is_some() {
ecx.retag(kind, place)?;
}
Ok(())
}
#[inline(always)]
fn init_frame_extra(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
frame: Frame<'mir, 'tcx, Provenance>,
) -> InterpResult<'tcx, Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>> {
) -> InterpResult<'tcx, Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>> {
// Start recording our event before doing anything else
let timing = if let Some(profiler) = ecx.machine.profiler.as_ref() {
let fn_name = frame.instance.to_string();
@ -1091,10 +1086,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
None
};
let stacked_borrows = ecx.machine.stacked_borrows.as_ref();
let borrow_tracker = ecx.machine.borrow_tracker.as_ref();
let extra = FrameData {
stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame(&ecx.machine)),
let extra = FrameExtra {
borrow_tracker: borrow_tracker.map(|bt| bt.borrow_mut().new_frame(&ecx.machine)),
catch_unwind: None,
timing,
is_user_relevant: ecx.machine.is_user_relevant(&frame),
@ -1127,7 +1122,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
}
}
// Search for SbTags to find all live pointers, then remove all other tags from borrow
// Search for BorTags to find all live pointers, then remove all other tags from borrow
// stacks.
// When debug assertions are enabled, run the GC as often as possible so that any cases
// where it mistakenly removes an important tag become visible.
@ -1153,14 +1148,16 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
let stack_len = ecx.active_thread_stack().len();
ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
}
if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
if ecx.machine.borrow_tracker.is_some() {
ecx.retag_return_place()?;
}
Ok(())
}
#[inline(always)]
fn after_stack_pop(
ecx: &mut InterpCx<'mir, 'tcx, Self>,
mut frame: Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>,
mut frame: Frame<'mir, 'tcx, Provenance, FrameExtra<'tcx>>,
unwinding: bool,
) -> InterpResult<'tcx, StackPopJump> {
if frame.extra.is_user_relevant {
@ -1171,8 +1168,8 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
ecx.active_thread_mut().recompute_top_user_relevant_frame();
}
let timing = frame.extra.timing.take();
if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
stacked_borrows.borrow_mut().end_call(&frame.extra);
if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
borrow_tracker.borrow_mut().end_call(&frame.extra);
}
let res = ecx.handle_stack_pop_unwind(frame.extra, unwinding);
if let Some(profiler) = ecx.machine.profiler.as_ref() {


@ -37,7 +37,7 @@ pub struct EnvVars<'tcx> {
}
impl VisitTags for EnvVars<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let EnvVars { map, environ } = self;
environ.visit_tags(visit);


@ -286,7 +286,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let [code] = this.check_shim(abi, exp_abi, link_name, args)?;
// it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
let code = this.read_scalar(code)?.to_i32()?;
throw_machine_stop!(TerminationInfo::Exit(code.into()));
throw_machine_stop!(TerminationInfo::Exit { code: code.into(), leak_check: false });
}
"abort" => {
let [] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
@ -299,8 +299,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
return Ok(Some(body));
}
this.handle_unsupported(format!(
"can't call (diverging) foreign function: {}",
link_name
"can't call (diverging) foreign function: {link_name}"
))?;
return Ok(None);
}


@ -36,7 +36,7 @@ pub struct CatchUnwindData<'tcx> {
}
impl VisitTags for CatchUnwindData<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let CatchUnwindData { catch_fn, data, dest, ret: _ } = self;
catch_fn.visit_tags(visit);
data.visit_tags(visit);
@ -125,7 +125,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn handle_stack_pop_unwind(
&mut self,
mut extra: FrameData<'tcx>,
mut extra: FrameExtra<'tcx>,
unwinding: bool,
) -> InterpResult<'tcx, StackPopJump> {
let this = self.eval_context_mut();


@ -278,7 +278,7 @@ struct UnblockCallback {
}
impl VisitTags for UnblockCallback {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {}
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {}
}
impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for UnblockCallback {


@ -1,12 +1,11 @@
//! Implement thread-local storage.
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashMapEntry;
use std::collections::BTreeMap;
use std::task::Poll;
use log::trace;
use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
use rustc_target::abi::{HasDataLayout, Size};
use rustc_target::spec::abi::Abi;
@ -23,12 +22,12 @@ pub struct TlsEntry<'tcx> {
dtor: Option<ty::Instance<'tcx>>,
}
#[derive(Clone, Debug)]
struct RunningDtorsState {
#[derive(Default, Debug)]
struct RunningDtorState {
/// The last TlsKey used to retrieve a TLS destructor. `None` means that we
/// have not tried to retrieve a TLS destructor yet or that we already tried
/// all keys.
last_dtor_key: Option<TlsKey>,
last_key: Option<TlsKey>,
}
#[derive(Debug)]
@ -42,11 +41,6 @@ pub struct TlsData<'tcx> {
/// A single per thread destructor of the thread local storage (that's how
/// things work on macOS) with a data argument.
macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Provenance>)>,
/// State for currently running TLS dtors. If this map contains a key for a
/// specific thread, it means that we are in the "destruct" phase, during
/// which some operations are UB.
dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
}
impl<'tcx> Default for TlsData<'tcx> {
@ -55,7 +49,6 @@ impl<'tcx> Default for TlsData<'tcx> {
next_key: 1, // start with 1 as we must not use 0 on Windows
keys: Default::default(),
macos_thread_dtors: Default::default(),
dtors_running: Default::default(),
}
}
}
@ -143,12 +136,6 @@ impl<'tcx> TlsData<'tcx> {
dtor: ty::Instance<'tcx>,
data: Scalar<Provenance>,
) -> InterpResult<'tcx> {
if self.dtors_running.contains_key(&thread) {
// UB, according to libstd docs.
throw_ub_format!(
"setting thread's local storage destructor while destructors are already running"
);
}
if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
throw_unsup_format!(
"setting more than one thread local storage destructor for the same thread is not supported"
@ -211,21 +198,6 @@ impl<'tcx> TlsData<'tcx> {
None
}
/// Set that dtors are running for `thread`. It is guaranteed not to change
/// the existing values stored in `dtors_running` for this thread. Returns
/// `true` if dtors for `thread` are already running.
fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
match self.dtors_running.entry(thread) {
HashMapEntry::Occupied(_) => true,
HashMapEntry::Vacant(entry) => {
// We cannot just do `self.dtors_running.insert` because that
// would overwrite `last_dtor_key` with `None`.
entry.insert(RunningDtorsState { last_dtor_key: None });
false
}
}
}
/// Delete all TLS entries for the given thread. This function should be
/// called after all TLS destructors have already finished.
fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
@ -236,8 +208,8 @@ impl<'tcx> TlsData<'tcx> {
}
impl VisitTags for TlsData<'_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
let TlsData { keys, macos_thread_dtors, next_key: _, dtors_running: _ } = self;
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let TlsData { keys, macos_thread_dtors, next_key: _ } = self;
for scalar in keys.values().flat_map(|v| v.data.values()) {
scalar.visit_tags(visit);
@ -248,13 +220,77 @@ impl VisitTags for TlsData<'_> {
}
}
#[derive(Debug, Default)]
pub struct TlsDtorsState(TlsDtorsStatePriv);
#[derive(Debug, Default)]
enum TlsDtorsStatePriv {
#[default]
Init,
PthreadDtors(RunningDtorState),
Done,
}
impl TlsDtorsState {
pub fn on_stack_empty<'tcx>(
&mut self,
this: &mut MiriInterpCx<'_, 'tcx>,
) -> InterpResult<'tcx, Poll<()>> {
use TlsDtorsStatePriv::*;
match &mut self.0 {
Init => {
match this.tcx.sess.target.os.as_ref() {
"linux" | "freebsd" | "android" => {
// Run the pthread dtors.
self.0 = PthreadDtors(Default::default());
}
"macos" => {
// The macOS thread wide destructor runs "before any TLS slots get
// freed", so do that first.
this.schedule_macos_tls_dtor()?;
// When the stack is empty again, go on with the pthread dtors.
self.0 = PthreadDtors(Default::default());
}
"windows" => {
// Run the special magic hook.
this.schedule_windows_tls_dtors()?;
// And move to the final state.
self.0 = Done;
}
"wasi" | "none" => {
// No OS, no TLS dtors.
// FIXME: should we do something on wasi?
self.0 = Done;
}
os => {
throw_unsup_format!(
"the TLS machinery does not know how to handle OS `{os}`"
);
}
}
}
PthreadDtors(state) => {
match this.schedule_next_pthread_tls_dtor(state)? {
Poll::Pending => {} // just keep going
Poll::Ready(()) => self.0 = Done,
}
}
Done => {
this.machine.tls.delete_all_thread_tls(this.get_active_thread());
return Ok(Poll::Ready(()));
}
}
Ok(Poll::Pending)
}
}
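The new `TlsDtorsState` drives TLS teardown as a small state machine that the interpreter polls whenever the active thread's stack becomes empty. A minimal self-contained sketch of that polling pattern (illustrative names and types, not Miri's actual API):

```rust
use std::task::Poll;

#[derive(Clone, Copy)]
enum Phase {
    Init,
    Dtors { next: usize },
    Done,
}

struct DtorState {
    phase: Phase,
    dtors: Vec<fn()>,
}

impl DtorState {
    /// Polled each time the simulated thread has nothing left to run;
    /// like `on_stack_empty` above, it advances at most one step per call.
    fn on_stack_empty(&mut self) -> Poll<()> {
        match self.phase {
            Phase::Init => {
                self.phase = Phase::Dtors { next: 0 };
                Poll::Pending
            }
            Phase::Dtors { next } => {
                if let Some(dtor) = self.dtors.get(next) {
                    dtor(); // Miri pushes a stack frame here instead of calling directly.
                    self.phase = Phase::Dtors { next: next + 1 };
                } else {
                    self.phase = Phase::Done;
                }
                Poll::Pending
            }
            Phase::Done => Poll::Ready(()),
        }
    }
}

fn main() {
    let mut state = DtorState {
        phase: Phase::Init,
        dtors: vec![(|| println!("dtor 1")) as fn(), || println!("dtor 2")],
    };
    while state.on_stack_empty().is_pending() {}
}
```

Keeping the state in an explicit enum is what lets the interpreter suspend between destructors and hand control back to the scheduler, which the removed `dtors_running` bookkeeping used to emulate by hand.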
impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Schedule TLS destructors for Windows.
/// On Windows, TLS destructors are managed by std.
fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
@@ -284,16 +320,12 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
None,
StackPopCleanup::Root { cleanup: true },
)?;
this.enable_thread(active_thread);
Ok(())
}
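For background on the "magic linker section" mentioned above: on Windows, a binary can ask the CRT to invoke a callback on thread attach/detach by placing a function pointer in the PE section `.CRT$XLB`, which is the hook std builds its TLS destructor support on. A Windows-only sketch of that registration (illustrative, not Miri code):

```rust
#[cfg(windows)]
mod tls_callback {
    use std::ffi::c_void;

    // Reason code from the Windows headers.
    const DLL_THREAD_DETACH: u32 = 3;

    unsafe extern "system" fn on_thread_event(
        _module: *mut c_void,
        reason: u32,
        _reserved: *mut c_void,
    ) {
        if reason == DLL_THREAD_DETACH {
            // A real runtime would walk its per-thread destructor list here.
        }
    }

    // The CRT runs every function pointer stored in the `.CRT$XL*` sections
    // on process and thread lifecycle events.
    #[link_section = ".CRT$XLB"]
    #[used]
    static CALLBACK: unsafe extern "system" fn(*mut c_void, u32, *mut c_void) = on_thread_event;
}
```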
/// Schedule the macOS thread destructor of the thread local storage to be
/// executed. Returns `true` if scheduled.
///
/// Note: It is safe to call this function on other Unixes as well.
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
/// executed.
fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let thread_id = this.get_active_thread();
if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
@@ -306,35 +338,27 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
None,
StackPopCleanup::Root { cleanup: true },
)?;
// Enable the thread so that it steps through the destructor which
// we just scheduled. Since we deleted the destructor, it is
// guaranteed that we will not schedule it again. The `dtors_running`
// flag will prevent the code from adding the destructor again.
this.enable_thread(thread_id);
Ok(true)
} else {
Ok(false)
}
Ok(())
}
/// Schedule a pthread TLS destructor. Returns `true` if it found
/// a destructor to schedule, and `false` otherwise.
fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
fn schedule_next_pthread_tls_dtor(
&mut self,
state: &mut RunningDtorState,
) -> InterpResult<'tcx, Poll<()>> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
// Fetch next dtor after `key`.
let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key;
let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
let dtor = match this.machine.tls.fetch_tls_dtor(state.last_key, active_thread) {
dtor @ Some(_) => dtor,
// We ran each dtor once, start over from the beginning.
None => this.machine.tls.fetch_tls_dtor(None, active_thread),
};
if let Some((instance, ptr, key)) = dtor {
this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key =
Some(key);
state.last_key = Some(key);
trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
assert!(
ptr.to_machine_usize(this).unwrap() != 0,
@@ -349,64 +373,9 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
StackPopCleanup::Root { cleanup: true },
)?;
this.enable_thread(active_thread);
return Ok(true);
return Ok(Poll::Pending);
}
this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;
Ok(false)
}
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Schedule the active thread's TLS destructors to run on the active thread.
/// Note that this function does not run the destructors itself; it just
/// schedules them one by one each time it is called, and reenables the
/// thread so that the scheduled destructor runs normally in the main execution loop.
///
/// Note: we consistently run TLS destructors for all threads, including the
/// main thread. However, it is not clear that we should run the TLS
/// destructors for the main thread. See issue:
/// <https://github.com/rust-lang/rust/issues/28129>.
fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let active_thread = this.get_active_thread();
trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread);
if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
// This is the first time we got asked to schedule a destructor. The
// Windows schedule destructor function must be called exactly once,
// this is why it is in this block.
if this.tcx.sess.target.os == "windows" {
// On Windows, we signal that the thread quit by starting the
// relevant function, reenabling the thread, and going back to
// the scheduler.
this.schedule_windows_tls_dtors()?;
return Ok(());
}
}
// The remaining dtors make some progress each time around the scheduler loop,
// until they return `false` to indicate that they are done.
// The macOS thread wide destructor runs "before any TLS slots get
// freed", so do that first.
if this.schedule_macos_tls_dtor()? {
// We have scheduled a MacOS dtor to run on the thread. Execute it
// to completion and come back here. Scheduling a destructor
// destroys it, so we will not enter this branch again.
return Ok(());
}
if this.schedule_next_pthread_tls_dtor()? {
// We have scheduled a pthread destructor and removed it from the
// destructors list. Run it to completion and come back here.
return Ok(());
}
// All dtors done!
this.machine.tls.delete_all_thread_tls(active_thread);
this.thread_terminated()?;
Ok(())
Ok(Poll::Ready(()))
}
}
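The pthread machinery that `schedule_next_pthread_tls_dtor` replays can be seen from the program side. A short Unix-only example of the underlying API (assumes the `libc` crate):

```rust
use std::ffi::c_void;

// Destructors registered this way run at thread exit, which is exactly
// the point where the `on_stack_empty` machinery above takes over.
unsafe extern "C" fn dtor(data: *mut c_void) {
    println!("TLS dtor ran with value {}", data as usize);
}

fn main() {
    std::thread::spawn(|| unsafe {
        let mut key: libc::pthread_key_t = 0;
        assert_eq!(libc::pthread_key_create(&mut key, Some(dtor)), 0);
        // A destructor only fires if its slot holds a non-NULL value at
        // thread exit; compare the non-null assertion in the code above.
        assert_eq!(libc::pthread_setspecific(key, 42 as *const c_void), 0);
    })
    .join()
    .unwrap();
}
```

POSIX also allows a destructor to store a fresh value into its slot, which is why the interpreter starts over with `fetch_tls_dtor(None, ...)` after it has run each destructor once.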
@@ -278,7 +278,7 @@ pub struct FileHandler {
}
impl VisitTags for FileHandler {
fn visit_tags(&self, _visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, _visit: &mut dyn FnMut(BorTag)) {
// None of our `FileDescriptor`s have any tags.
}
}
@@ -490,7 +490,7 @@ impl Default for DirHandler {
}
impl VisitTags for DirHandler {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let DirHandler { streams, next_id: _ } = self;
for dir in streams.values() {
@@ -183,7 +183,7 @@ pub fn futex<'tcx>(
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Callback { thread: _, addr_usize: _, dest } = self;
dest.visit_tags(visit);
}
@@ -747,7 +747,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Callback { active_thread: _, mutex_id: _, id: _, dest } = self;
dest.visit_tags(visit);
}
@@ -19,7 +19,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let func_arg = this.read_immediate(arg)?;
this.start_thread(
this.start_regular_thread(
Some(thread_info_place),
start_routine,
Abi::C { unwind: false },
@@ -182,7 +182,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Callback { init_once_id: _, pending_place } = self;
pending_place.visit_tags(visit);
}
@@ -315,7 +315,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Callback { thread: _, addr: _, dest } = self;
dest.visit_tags(visit);
}
@@ -419,7 +419,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}
impl<'tcx> VisitTags for Callback<'tcx> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let Callback { thread: _, condvar_id: _, lock_id: _, mode: _, dest } = self;
dest.visit_tags(visit);
}
@@ -46,7 +46,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
throw_unsup_format!("non-null `lpThreadAttributes` in `CreateThread`")
}
this.start_thread(
this.start_regular_thread(
thread,
start_routine,
Abi::System { unwind: false },
@@ -3,11 +3,11 @@ use rustc_data_structures::fx::FxHashSet;
use crate::*;
pub trait VisitTags {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag));
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag));
}
impl<T: VisitTags> VisitTags for Option<T> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
if let Some(x) = self {
x.visit_tags(visit);
}
@@ -15,41 +15,41 @@ impl<T: VisitTags> VisitTags for Option<T> {
}
impl<T: VisitTags> VisitTags for std::cell::RefCell<T> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
self.borrow().visit_tags(visit)
}
}
impl VisitTags for SbTag {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
impl VisitTags for BorTag {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
visit(*self)
}
}
impl VisitTags for Provenance {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
if let Provenance::Concrete { sb, .. } = self {
visit(*sb);
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
if let Provenance::Concrete { tag, .. } = self {
visit(*tag);
}
}
}
impl VisitTags for Pointer<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let (prov, _offset) = self.into_parts();
prov.visit_tags(visit);
}
}
impl VisitTags for Pointer<Option<Provenance>> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let (prov, _offset) = self.into_parts();
prov.visit_tags(visit);
}
}
impl VisitTags for Scalar<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
match self {
Scalar::Ptr(ptr, _) => ptr.visit_tags(visit),
Scalar::Int(_) => (),
@@ -58,7 +58,7 @@ impl VisitTags for Scalar<Provenance> {
}
impl VisitTags for Immediate<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
match self {
Immediate::Scalar(s) => {
s.visit_tags(visit);
@@ -73,7 +73,7 @@ impl VisitTags for Immediate<Provenance> {
}
impl VisitTags for MemPlaceMeta<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
match self {
MemPlaceMeta::Meta(m) => m.visit_tags(visit),
MemPlaceMeta::None => {}
@@ -82,7 +82,7 @@ impl VisitTags for MemPlaceMeta<Provenance> {
}
impl VisitTags for MemPlace<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
let MemPlace { ptr, meta } = self;
ptr.visit_tags(visit);
meta.visit_tags(visit);
@@ -90,13 +90,13 @@ impl VisitTags for MemPlace<Provenance> {
}
impl VisitTags for MPlaceTy<'_, Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
(**self).visit_tags(visit)
}
}
impl VisitTags for Place<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
match self {
Place::Ptr(p) => p.visit_tags(visit),
Place::Local { .. } => {
@@ -107,13 +107,13 @@ impl VisitTags for Place<Provenance> {
}
impl VisitTags for PlaceTy<'_, Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
(**self).visit_tags(visit)
}
}
impl VisitTags for Operand<Provenance> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
match self {
Operand::Immediate(imm) => {
imm.visit_tags(visit);
@@ -126,7 +126,7 @@ impl VisitTags for Operand<Provenance> {
}
impl VisitTags for Allocation<Provenance, AllocExtra> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
for prov in self.provenance().provenances() {
prov.visit_tags(visit);
}
@@ -136,7 +136,7 @@ impl VisitTags for Allocation<Provenance, AllocExtra> {
}
impl VisitTags for crate::MiriInterpCx<'_, '_> {
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
// Memory.
self.memory.alloc_map().iter(|it| {
for (_id, (_kind, alloc)) in it {
@ -154,7 +154,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
fn garbage_collect_tags(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
// No reason to do anything at all if stacked borrows is off.
if this.machine.stacked_borrows.is_none() {
if this.machine.borrow_tracker.is_none() {
return Ok(());
}
@@ -167,17 +167,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
Ok(())
}
fn remove_unreachable_tags(&mut self, tags: FxHashSet<SbTag>) {
fn remove_unreachable_tags(&mut self, tags: FxHashSet<BorTag>) {
let this = self.eval_context_mut();
this.memory.alloc_map().iter(|it| {
for (_id, (_kind, alloc)) in it {
alloc
.extra
.stacked_borrows
.as_ref()
.unwrap()
.borrow_mut()
.remove_unreachable_tags(&tags);
if let Some(bt) = &alloc.extra.borrow_tracker {
bt.remove_unreachable_tags(&tags);
}
}
});
}
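All of these `VisitTags` impls exist so the tag garbage collector can enumerate every borrow tag still reachable from machine state and drop the rest. A toy version of that mark-and-sweep pass (deliberately simplified types, not Miri's real ones):

```rust
use std::collections::HashSet;

type BorTag = u64;

trait VisitTags {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag));
}

struct Machine {
    /// Tags reachable from live machine state (the GC roots).
    roots: Vec<BorTag>,
    /// Per-allocation borrow stacks that may hold stale tags.
    stacks: Vec<Vec<BorTag>>,
}

impl VisitTags for Machine {
    fn visit_tags(&self, visit: &mut dyn FnMut(BorTag)) {
        for &tag in &self.roots {
            visit(tag);
        }
    }
}

impl Machine {
    /// Mark: collect every reachable tag. Sweep: retain only those.
    fn garbage_collect_tags(&mut self) {
        let mut live = HashSet::new();
        self.visit_tags(&mut |tag| {
            live.insert(tag);
        });
        for stack in &mut self.stacks {
            stack.retain(|tag| live.contains(tag));
        }
    }
}

fn main() {
    let mut m = Machine { roots: vec![1, 3], stacks: vec![vec![1, 2, 3, 4]] };
    m.garbage_collect_tags();
    assert_eq!(m.stacks[0], vec![1, 3]);
}
```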
@@ -12,7 +12,7 @@ fn main() {
#[cfg(fn_ptr)]
unsafe {
std::mem::transmute::<unsafe fn(), unsafe extern "C" fn()>(foo)();
//[fn_ptr]~^ ERROR: calling a function with calling convention Rust using calling convention C
//~[fn_ptr]^ ERROR: calling a function with calling convention Rust using calling convention C
}
// `Instance` caching should not suppress ABI check.
@@ -28,8 +28,8 @@ fn main() {
}
unsafe {
foo();
//[no_cache]~^ ERROR: calling a function with calling convention Rust using calling convention C
//[cache]~| ERROR: calling a function with calling convention Rust using calling convention C
//~[no_cache]^ ERROR: calling a function with calling convention Rust using calling convention C
//~[cache]| ERROR: calling a function with calling convention Rust using calling convention C
}
}
}
@@ -4,8 +4,8 @@
#[cfg_attr(any(definition, both), rustc_nounwind)]
#[no_mangle]
extern "C-unwind" fn nounwind() {
//[definition]~^ ERROR: abnormal termination: the program aborted execution
//[both]~^^ ERROR: abnormal termination: the program aborted execution
//~[definition]^ ERROR: abnormal termination: the program aborted execution
//~[both]^^ ERROR: abnormal termination: the program aborted execution
panic!();
}
@@ -15,5 +15,5 @@ fn main() {
fn nounwind();
}
unsafe { nounwind() }
//[extern_block]~^ ERROR: unwinding past a stack frame that does not allow unwinding
//~[extern_block]^ ERROR: unwinding past a stack frame that does not allow unwinding
}
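Both annotation diffs above follow the grammar change in ui_test 0.5: the revision filter now comes after the `~` instead of before it (inferred from these tests, not from ui_test's own documentation):

```rust
// ui_test 0.4 style:  //[revision]~^ ERROR: message
// ui_test 0.5 style:  //~[revision]^ ERROR: message
// In both forms, each `^` moves the expected-error line one line up
// from the comment itself.
```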
@@ -0,0 +1,8 @@
//! Regression test for <https://github.com/rust-lang/miri/issues/2629>
use std::thread;
fn main() {
thread::scope(|s| {
s.spawn(|| {});
});
}
@@ -0,0 +1,24 @@
use std::thread;
fn main() {
let mut a = vec![1, 2, 3];
let mut x = 0;
thread::scope(|s| {
s.spawn(|| {
// We can borrow `a` here.
let _s = format!("hello from the first scoped thread: {a:?}");
});
s.spawn(|| {
let _s = format!("hello from the second scoped thread");
// We can even mutably borrow `x` here,
// because no other threads are using it.
x += a[0] + a[2];
});
let _s = format!("hello from the main thread");
});
// After the scope, we can modify and access our variables again:
a.push(4);
assert_eq!(x, a.len());
}