Auto merge of #82300 - andersk:libtest-id, r=Amanieu

libtest: Index tests by a unique TestId

This more robustly avoids problems with duplicate `TestDesc`. See #81852 and #82274.

Cc `@Mark-Simulacrum`.
bors 2021-04-12 13:30:30 +00:00
commit 5e73bd1040
5 changed files with 75 additions and 41 deletions
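
For context, the failure mode described above comes from keying the runner's bookkeeping maps by `TestDesc`, whose `Hash`/`Eq` derives let two tests with identical descriptions collide. The following is a minimal, self-contained sketch (illustration only, not code from this patch; the structs are simplified stand-ins) of that collision and of how a unique, `enumerate()`-assigned `TestId` key avoids it:

```rust
use std::collections::HashMap;

// Simplified, hypothetical stand-in for libtest's TestDesc; the real struct
// has more fields, but only Eq + Hash matter for this illustration.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct TestDesc {
    name: String,
}

// Mirrors the key type introduced by this PR: one unique integer per test.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct TestId(usize);

fn main() {
    // Two distinct tests that happen to share an identical description.
    let tests = vec![
        TestDesc { name: "dup".to_string() },
        TestDesc { name: "dup".to_string() },
    ];

    // Keyed by TestDesc, the second insert overwrites the first, so the
    // runner's bookkeeping loses track of one of the running tests.
    let mut by_desc: HashMap<TestDesc, &'static str> = HashMap::new();
    for desc in &tests {
        by_desc.insert(desc.clone(), "running");
    }
    assert_eq!(by_desc.len(), 1);

    // Keyed by a TestId assigned with enumerate(), as the PR does for the
    // real test map, duplicates can no longer collide.
    let mut by_id: HashMap<TestId, &TestDesc> = HashMap::new();
    for (i, desc) in tests.iter().enumerate() {
        by_id.insert(TestId(i), desc);
    }
    assert_eq!(by_id.len(), 2);
}
```

The actual change below applies the same idea: tests are paired with `TestId(i)` while being filtered, and `TestMap`, `TimeoutEntry`, and `CompletedTest` are keyed or tagged by that id instead of by `TestDesc`.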

View file

@@ -2,7 +2,11 @@
 pub use std::hint::black_box;
 
 use super::{
-    event::CompletedTest, options::BenchMode, test_result::TestResult, types::TestDesc, Sender,
+    event::CompletedTest,
+    options::BenchMode,
+    test_result::TestResult,
+    types::{TestDesc, TestId},
+    Sender,
 };
 
 use crate::stats;
@@ -177,8 +181,13 @@ where
     }
 }
 
-pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<CompletedTest>, nocapture: bool, f: F)
-where
+pub fn benchmark<F>(
+    id: TestId,
+    desc: TestDesc,
+    monitor_ch: Sender<CompletedTest>,
+    nocapture: bool,
+    f: F,
+) where
     F: FnMut(&mut Bencher),
 {
     let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 };
@@ -213,7 +222,7 @@ where
     };
 
     let stdout = data.lock().unwrap().to_vec();
-    let message = CompletedTest::new(desc, test_result, None, stdout);
+    let message = CompletedTest::new(id, desc, test_result, None, stdout);
     monitor_ch.send(message).unwrap();
 }

View file

@@ -3,10 +3,11 @@
 use super::test_result::TestResult;
 use super::time::TestExecTime;
-use super::types::TestDesc;
+use super::types::{TestDesc, TestId};
 
 #[derive(Debug, Clone)]
 pub struct CompletedTest {
+    pub id: TestId,
     pub desc: TestDesc,
     pub result: TestResult,
     pub exec_time: Option<TestExecTime>,
@@ -15,12 +16,13 @@ pub struct CompletedTest {
 impl CompletedTest {
     pub fn new(
+        id: TestId,
         desc: TestDesc,
         result: TestResult,
         exec_time: Option<TestExecTime>,
         stdout: Vec<u8>,
     ) -> Self {
-        Self { desc, result, exec_time, stdout }
+        Self { id, desc, result, exec_time, stdout }
     }
 }

View file

@@ -54,7 +54,7 @@ pub mod test {
         time::{TestExecTime, TestTimeOptions},
         types::{
             DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
-            TestDescAndFn, TestName, TestType,
+            TestDescAndFn, TestId, TestName, TestType,
         },
     };
 }
@@ -215,9 +215,10 @@ where
 // Use a deterministic hasher
 type TestMap =
-    HashMap<TestDesc, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
+    HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
 
 struct TimeoutEntry {
+    id: TestId,
     desc: TestDesc,
     timeout: Instant,
 }
@@ -249,7 +250,9 @@ where
     let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
         .into_iter()
-        .partition(|e| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
+        .enumerate()
+        .map(|(i, e)| (TestId(i), e))
+        .partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
 
     let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
@@ -278,7 +281,7 @@ where
                 break;
             }
             let timeout_entry = timeout_queue.pop_front().unwrap();
-            if running_tests.contains_key(&timeout_entry.desc) {
+            if running_tests.contains_key(&timeout_entry.id) {
                 timed_out.push(timeout_entry.desc);
             }
         }
@@ -294,11 +297,11 @@ where
     if concurrency == 1 {
         while !remaining.is_empty() {
-            let test = remaining.pop().unwrap();
+            let (id, test) = remaining.pop().unwrap();
             let event = TestEvent::TeWait(test.desc.clone());
             notify_about_test_event(event)?;
             let join_handle =
-                run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No);
+                run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
             assert!(join_handle.is_none());
             let completed_test = rx.recv().unwrap();
@@ -308,7 +311,7 @@ where
     } else {
         while pending > 0 || !remaining.is_empty() {
             while pending < concurrency && !remaining.is_empty() {
-                let test = remaining.pop().unwrap();
+                let (id, test) = remaining.pop().unwrap();
                 let timeout = time::get_default_test_timeout();
                 let desc = test.desc.clone();
@@ -317,13 +320,14 @@ where
                 let join_handle = run_test(
                     opts,
                     !opts.run_tests,
+                    id,
                     test,
                     run_strategy,
                     tx.clone(),
                     Concurrent::Yes,
                 );
-                running_tests.insert(desc.clone(), RunningTest { join_handle });
-                timeout_queue.push_back(TimeoutEntry { desc, timeout });
+                running_tests.insert(id, RunningTest { join_handle });
+                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                 pending += 1;
             }
@@ -352,7 +356,7 @@ where
             }
 
             let mut completed_test = res.unwrap();
-            if let Some(running_test) = running_tests.remove(&completed_test.desc) {
+            let running_test = running_tests.remove(&completed_test.id).unwrap();
             if let Some(join_handle) = running_test.join_handle {
                 if let Err(_) = join_handle.join() {
                     if let TrOk = completed_test.result {
@@ -361,7 +365,6 @@ where
                     }
                 }
             }
-            }
 
             let event = TestEvent::TeResult(completed_test);
             notify_about_test_event(event)?;
@@ -371,10 +374,10 @@ where
     if opts.bench_benchmarks {
         // All benchmarks run at the end, in serial.
-        for b in filtered_benchs {
+        for (id, b) in filtered_benchs {
             let event = TestEvent::TeWait(b.desc.clone());
             notify_about_test_event(event)?;
-            run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
+            run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
             let completed_test = rx.recv().unwrap();
             let event = TestEvent::TeResult(completed_test);
@@ -448,6 +451,7 @@ pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAnd
 pub fn run_test(
     opts: &TestOpts,
     force_ignore: bool,
+    id: TestId,
     test: TestDescAndFn,
     strategy: RunStrategy,
     monitor_ch: Sender<CompletedTest>,
@@ -461,7 +465,7 @@ pub fn run_test(
         && !cfg!(target_os = "emscripten");
 
     if force_ignore || desc.ignore || ignore_because_no_process_support {
-        let message = CompletedTest::new(desc, TrIgnored, None, Vec::new());
+        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
         monitor_ch.send(message).unwrap();
         return None;
     }
@@ -474,6 +478,7 @@ pub fn run_test(
     }
 
     fn run_test_inner(
+        id: TestId,
         desc: TestDesc,
         monitor_ch: Sender<CompletedTest>,
         testfn: Box<dyn FnOnce() + Send>,
@@ -484,6 +489,7 @@ pub fn run_test(
         let runtest = move || match opts.strategy {
             RunStrategy::InProcess => run_test_in_process(
+                id,
                 desc,
                 opts.nocapture,
                 opts.time.is_some(),
@@ -492,6 +498,7 @@ pub fn run_test(
                 opts.time,
             ),
             RunStrategy::SpawnPrimary => spawn_test_subprocess(
+                id,
                 desc,
                 opts.nocapture,
                 opts.time.is_some(),
@@ -530,14 +537,14 @@ pub fn run_test(
     match testfn {
         DynBenchFn(bencher) => {
             // Benchmarks aren't expected to panic, so we run them all in-process.
-            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
+            crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, |harness| {
                 bencher.run(harness)
             });
             None
         }
         StaticBenchFn(benchfn) => {
             // Benchmarks aren't expected to panic, so we run them all in-process.
-            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn);
+            crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
             None
         }
         DynTestFn(f) => {
@@ -546,6 +553,7 @@ pub fn run_test(
                 _ => panic!("Cannot run dynamic test fn out-of-process"),
             };
             run_test_inner(
+                id,
                 desc,
                 monitor_ch,
                 Box::new(move || __rust_begin_short_backtrace(f)),
@@ -553,6 +561,7 @@ pub fn run_test(
             )
         }
         StaticTestFn(f) => run_test_inner(
+            id,
             desc,
             monitor_ch,
             Box::new(move || __rust_begin_short_backtrace(f)),
@@ -571,6 +580,7 @@ fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
 }
 
 fn run_test_in_process(
+    id: TestId,
     desc: TestDesc,
     nocapture: bool,
     report_time: bool,
@@ -599,11 +609,12 @@ fn run_test_in_process(
         Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
     };
     let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
-    let message = CompletedTest::new(desc, test_result, exec_time, stdout);
+    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
     monitor_ch.send(message).unwrap();
 }
 
 fn spawn_test_subprocess(
+    id: TestId,
     desc: TestDesc,
     nocapture: bool,
     report_time: bool,
@@ -653,7 +664,7 @@ fn spawn_test_subprocess(
         (result, test_output, exec_time)
     })();
 
-    let message = CompletedTest::new(desc, result, exec_time, test_output);
+    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
     monitor_ch.send(message).unwrap();
 }

View file

@@ -94,7 +94,7 @@ pub fn do_not_run_ignored_tests() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_ne!(result, TrOk);
 }
@@ -113,7 +113,7 @@ pub fn ignored_tests_result_in_ignored() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrIgnored);
 }
@@ -136,7 +136,7 @@ fn test_should_panic() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
    let result = rx.recv().unwrap().result;
    assert_eq!(result, TrOk);
 }
@@ -159,7 +159,7 @@ fn test_should_panic_good_message() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrOk);
 }
@@ -187,7 +187,7 @@ fn test_should_panic_bad_message() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrFailedMsg(failed_msg.to_string()));
 }
@@ -219,7 +219,7 @@ fn test_should_panic_non_string_message_type() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&TestOpts::new(), false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
     assert_eq!(result, TrFailedMsg(failed_msg));
 }
@@ -243,7 +243,15 @@ fn test_should_panic_but_succeeds() {
         testfn: DynTestFn(Box::new(f)),
     };
     let (tx, rx) = channel();
-    run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(
+        &TestOpts::new(),
+        false,
+        TestId(0),
+        desc,
+        RunStrategy::InProcess,
+        tx,
+        Concurrent::No,
+    );
     let result = rx.recv().unwrap().result;
     assert_eq!(
         result,
@@ -270,7 +278,7 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
     let test_opts = TestOpts { time_options, ..TestOpts::new() };
     let (tx, rx) = channel();
-    run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let exec_time = rx.recv().unwrap().exec_time;
     exec_time
 }
@@ -305,7 +313,7 @@ fn time_test_failure_template(test_type: TestType) -> TestResult {
     let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() };
     let (tx, rx) = channel();
-    run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No);
+    run_test(&test_opts, false, TestId(0), desc, RunStrategy::InProcess, tx, Concurrent::No);
     let result = rx.recv().unwrap().result;
 
     result
@@ -637,7 +645,7 @@ pub fn test_bench_no_iter() {
         test_type: TestType::Unknown,
     };
 
-    crate::bench::benchmark(desc, tx, true, f);
+    crate::bench::benchmark(TestId(0), desc, tx, true, f);
     rx.recv().unwrap();
 }
@@ -657,7 +665,7 @@ pub fn test_bench_iter() {
         test_type: TestType::Unknown,
     };
 
-    crate::bench::benchmark(desc, tx, true, f);
+    crate::bench::benchmark(TestId(0), desc, tx, true, f);
     rx.recv().unwrap();
 }

View file

@@ -112,9 +112,13 @@ impl fmt::Debug for TestFn {
     }
 }
 
+// A unique integer associated with each test.
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
+pub struct TestId(pub usize);
+
 // The definition of a single test. A test runner will run a list of
 // these.
-#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Debug)]
 pub struct TestDesc {
     pub name: TestName,
     pub ignore: bool,