Enhance report-time option

parent 06c68947ad
commit 75a297573f

8 changed files with 547 additions and 96 deletions

@@ -702,6 +702,7 @@ impl Tester for Collector {
                 // compiler failures are test failures
                 should_panic: testing::ShouldPanic::No,
                 allow_fail: config.allow_fail,
+                test_type: testing::TestType::DocTest,
             },
             testfn: testing::DynTestFn(box move || {
                 let res = run_test(

@@ -106,6 +106,11 @@ pub fn expand_test_or_bench(
         cx.path(sp, vec![test_id, cx.ident_of("ShouldPanic", sp), cx.ident_of(name, sp)])
     };

+    // creates test::TestType::$name
+    let test_type_path = |name| {
+        cx.path(sp, vec![test_id, cx.ident_of("TestType", sp), cx.ident_of(name, sp)])
+    };
+
     // creates $name: $expr
     let field = |name, expr| cx.field_imm(sp, cx.ident_of(name, sp), expr);

@@ -181,6 +186,17 @@ pub fn expand_test_or_bench(
                     cx.expr_path(should_panic_path("YesWithMessage")),
                     vec![cx.expr_str(sp, sym)]),
             }),
+            // test_type: ...
+            field("test_type", match test_type(cx) {
+                // test::TestType::UnitTest
+                TestType::UnitTest => cx.expr_path(test_type_path("UnitTest")),
+                // test::TestType::IntegrationTest
+                TestType::IntegrationTest => cx.expr_path(
+                    test_type_path("IntegrationTest")
+                ),
+                // test::TestType::Unknown
+                TestType::Unknown => cx.expr_path(test_type_path("Unknown")),
+            }),
             // },
         ])),
         // testfn: test::StaticTestFn(...) | test::StaticBenchFn(...)
@@ -261,6 +277,34 @@ fn should_panic(cx: &ExtCtxt<'_>, i: &ast::Item) -> ShouldPanic {
     }
 }

+enum TestType {
+    UnitTest,
+    IntegrationTest,
+    Unknown,
+}
+
+/// Attempts to determine the type of test.
+/// Since doctests are created without macro expansion, the only possible variants here
+/// are `UnitTest`, `IntegrationTest` or `Unknown`.
+fn test_type(cx: &ExtCtxt<'_>) -> TestType {
+    // The root path from the context contains the topmost sources directory of the crate.
+    // I.e., for `project` with sources in the `src` folder and tests in the `tests` folder
+    // (no matter how many nested folders lie inside),
+    // there will be two different root paths: `/project/src` and `/project/tests`.
+    let crate_path = cx.root_path.as_path();
+
+    if crate_path.ends_with("src") {
+        // The `/src` folder contains unit-tests.
+        TestType::UnitTest
+    } else if crate_path.ends_with("tests") {
+        // The `/tests` folder contains integration tests.
+        TestType::IntegrationTest
+    } else {
+        // The crate layout doesn't match the expected one, so the test type is unknown.
+        TestType::Unknown
+    }
+}
+
 fn has_test_signature(cx: &ExtCtxt<'_>, i: &ast::Item) -> bool {
     let has_should_panic_attr = attr::contains_name(&i.attrs, sym::should_panic);
     let ref sd = cx.parse_sess.span_diagnostic;

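The layout detection above hinges on `std::path::Path::ends_with`, which compares whole trailing path components rather than raw string suffixes. A minimal standalone sketch of that behaviour (not part of the diff):

    use std::path::Path;

    fn main() {
        // `ends_with` matches complete components, which is what the `src`/`tests`
        // detection in `test_type` relies on.
        assert!(Path::new("/project/src").ends_with("src"));
        assert!(Path::new("/project/tests").ends_with("tests"));
        // A directory that merely ends in the letters "src" is not matched.
        assert!(!Path::new("/project/mysrc").ends_with("src"));
    }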
@@ -94,6 +94,15 @@ impl<T: Write> OutputFormatter for JsonFormatter<T> {
                 self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None)
             }

+            TrTimedFail => self.write_event(
+                "test",
+                desc.name.as_slice(),
+                "failed",
+                exec_time,
+                stdout,
+                Some(r#""reason": "time limit exceeded""#),
+            ),
+
             TrFailedMsg(ref m) => self.write_event(
                 "test",
                 desc.name.as_slice(),

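The extra payload above is built with a raw string literal so the embedded quotes need no escaping. A small standalone sketch of what that fragment contains and how such a fragment could be spliced into an event object (the JSON shape shown is only illustrative; the real layout is produced by `write_event`):

    fn main() {
        // Same raw string literal as in the diff; `r#"..."#` keeps the inner quotes verbatim.
        let extra = r#""reason": "time limit exceeded""#;
        assert_eq!(extra, "\"reason\": \"time limit exceeded\"");

        // Hypothetical event object with the fragment appended, just to show the intent.
        let event = format!(r#"{{ "type": "test", "event": "failed", {} }}"#, extra);
        println!("{}", event);
    }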
@@ -3,6 +3,7 @@ use super::*;
 pub(crate) struct PrettyFormatter<T> {
     out: OutputLocation<T>,
     use_color: bool,
+    time_options: Option<TestTimeOptions>,

     /// Number of columns to fill when aligning names
     max_name_len: usize,
@@ -16,12 +17,14 @@ impl<T: Write> PrettyFormatter<T> {
         use_color: bool,
         max_name_len: usize,
         is_multithreaded: bool,
+        time_options: Option<TestTimeOptions>,
     ) -> Self {
         PrettyFormatter {
             out,
             use_color,
             max_name_len,
             is_multithreaded,
+            time_options
         }
     }

@@ -30,20 +33,24 @@ impl<T: Write> PrettyFormatter<T> {
         &self.out
     }

-    pub fn write_ok(&mut self, exec_time: Option<&TestExecTime>) -> io::Result<()> {
-        self.write_short_result("ok", term::color::GREEN, exec_time)
+    pub fn write_ok(&mut self) -> io::Result<()> {
+        self.write_short_result("ok", term::color::GREEN)
     }

-    pub fn write_failed(&mut self, exec_time: Option<&TestExecTime>) -> io::Result<()> {
-        self.write_short_result("FAILED", term::color::RED, exec_time)
+    pub fn write_failed(&mut self) -> io::Result<()> {
+        self.write_short_result("FAILED", term::color::RED)
     }

-    pub fn write_ignored(&mut self, exec_time: Option<&TestExecTime>) -> io::Result<()> {
-        self.write_short_result("ignored", term::color::YELLOW, exec_time)
+    pub fn write_ignored(&mut self) -> io::Result<()> {
+        self.write_short_result("ignored", term::color::YELLOW)
     }

-    pub fn write_allowed_fail(&mut self, exec_time: Option<&TestExecTime>) -> io::Result<()> {
-        self.write_short_result("FAILED (allowed)", term::color::YELLOW, exec_time)
+    pub fn write_allowed_fail(&mut self) -> io::Result<()> {
+        self.write_short_result("FAILED (allowed)", term::color::YELLOW)
     }

+    pub fn write_time_failed(&mut self) -> io::Result<()> {
+        self.write_short_result("FAILED (time limit exceeded)", term::color::RED)
+    }
+
     pub fn write_bench(&mut self) -> io::Result<()> {
@@ -54,13 +61,8 @@ impl<T: Write> PrettyFormatter<T> {
         &mut self,
         result: &str,
         color: term::color::Color,
-        exec_time: Option<&TestExecTime>,
     ) -> io::Result<()> {
-        self.write_pretty(result, color)?;
-        if let Some(exec_time) = exec_time {
-            self.write_plain(format!(" {}", exec_time))?;
-        }
-        self.write_plain("\n")
+        self.write_pretty(result, color)
     }

     pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {

@@ -88,12 +90,48 @@ impl<T: Write> PrettyFormatter<T> {
         self.out.flush()
     }

-    pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> {
-        self.write_plain("\nsuccesses:\n")?;
-        let mut successes = Vec::new();
+    fn write_time(
+        &mut self,
+        desc: &TestDesc,
+        exec_time: Option<&TestExecTime>
+    ) -> io::Result<()> {
+        if let (Some(opts), Some(time)) = (self.time_options, exec_time) {
+            let time_str = format!(" <{}>", time);
+
+            let color = if opts.colored {
+                if opts.is_critical(desc, time) {
+                    Some(term::color::RED)
+                } else if opts.is_warn(desc, time) {
+                    Some(term::color::YELLOW)
+                } else {
+                    None
+                }
+            } else {
+                None
+            };
+
+            match color {
+                Some(color) => self.write_pretty(&time_str, color)?,
+                None => self.write_plain(&time_str)?
+            }
+        }
+
+        Ok(())
+    }
+
+    fn write_results(
+        &mut self,
+        inputs: &Vec<(TestDesc, Vec<u8>)>,
+        results_type: &str
+    ) -> io::Result<()> {
+        let results_out_str = format!("\n{}:\n", results_type);
+
+        self.write_plain(&results_out_str)?;
+
+        let mut results = Vec::new();
         let mut stdouts = String::new();
-        for &(ref f, ref stdout) in &state.not_failures {
-            successes.push(f.name.to_string());
+        for &(ref f, ref stdout) in inputs {
+            results.push(f.name.to_string());
             if !stdout.is_empty() {
                 stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
                 let output = String::from_utf8_lossy(stdout);
@@ -106,38 +144,24 @@ impl<T: Write> PrettyFormatter<T> {
             self.write_plain(&stdouts)?;
         }

-        self.write_plain("\nsuccesses:\n")?;
-        successes.sort();
-        for name in &successes {
+        self.write_plain(&results_out_str)?;
+        results.sort();
+        for name in &results {
             self.write_plain(&format!(" {}\n", name))?;
         }
         Ok(())
     }

-    pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
-        self.write_plain("\nfailures:\n")?;
-        let mut failures = Vec::new();
-        let mut fail_out = String::new();
-        for &(ref f, ref stdout) in &state.failures {
-            failures.push(f.name.to_string());
-            if !stdout.is_empty() {
-                fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
-                let output = String::from_utf8_lossy(stdout);
-                fail_out.push_str(&output);
-                fail_out.push_str("\n");
-            }
-        }
-        if !fail_out.is_empty() {
-            self.write_plain("\n")?;
-            self.write_plain(&fail_out)?;
-        }
+    pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_results(&state.not_failures, "successes")
     }

-        self.write_plain("\nfailures:\n")?;
-        failures.sort();
-        for name in &failures {
-            self.write_plain(&format!(" {}\n", name))?;
-        }
-        Ok(())
+    pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_results(&state.failures, "failures")
     }

+    pub fn write_time_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
+        self.write_results(&state.time_failures, "failures (time limit exceeded)")
+    }
+
     fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
@@ -179,15 +203,19 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
         }

         match *result {
-            TrOk => self.write_ok(exec_time),
-            TrFailed | TrFailedMsg(_) => self.write_failed(exec_time),
-            TrIgnored => self.write_ignored(exec_time),
-            TrAllowedFail => self.write_allowed_fail(exec_time),
+            TrOk => self.write_ok()?,
+            TrFailed | TrFailedMsg(_) => self.write_failed()?,
+            TrIgnored => self.write_ignored()?,
+            TrAllowedFail => self.write_allowed_fail()?,
             TrBench(ref bs) => {
                 self.write_bench()?;
-                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
+                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))?;
             }
+            TrTimedFail => self.write_time_failed()?,
         }
+
+        self.write_time(desc, exec_time)?;
+        self.write_plain("\n")
     }

     fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
@@ -207,7 +235,13 @@ impl<T: Write> OutputFormatter for PrettyFormatter<T> {
         }
         let success = state.failed == 0;
         if !success {
-            self.write_failures(state)?;
+            if !state.failures.is_empty() {
+                self.write_failures(state)?;
+            }
+
+            if !state.time_failures.is_empty() {
+                self.write_time_failures(state)?;
+            }
         }

         self.write_plain("\ntest result: ")?;

@@ -180,7 +180,7 @@ impl<T: Write> OutputFormatter for TerseFormatter<T> {
     ) -> io::Result<()> {
         match *result {
             TrOk => self.write_ok(),
-            TrFailed | TrFailedMsg(_) => self.write_failed(),
+            TrFailed | TrFailedMsg(_) | TrTimedFail => self.write_failed(),
             TrIgnored => self.write_ignored(),
             TrAllowedFail => self.write_allowed_fail(),
             TrBench(ref bs) => {

@@ -56,6 +56,7 @@ use std::panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo};
 use std::path::PathBuf;
 use std::process;
 use std::process::{ExitStatus, Command, Termination};
+use std::str::FromStr;
 use std::sync::mpsc::{channel, Sender};
 use std::sync::{Arc, Mutex};
 use std::thread;
@@ -75,13 +76,54 @@ const SECONDARY_TEST_INVOKER_VAR: &'static str = "__RUST_TEST_INVOKE";
 const TR_OK: i32 = 50;
 const TR_FAILED: i32 = 51;

+/// This small module contains constants used by the `report-time` option.
+/// Those constant values will be used if the corresponding environment variables are not set.
+///
+/// To override values for unit-tests, use the `RUST_TEST_TIME_UNIT` environment variable;
+/// to override values for integration tests, use `RUST_TEST_TIME_INTEGRATION`;
+/// to override values for doctests, use `RUST_TEST_TIME_DOCTEST`.
+///
+/// An example of the expected format is `RUST_TEST_TIME_xxx=100,200`, where 100 means
+/// warn time and 200 means critical time (both in milliseconds).
+pub mod time_constants {
+    use std::time::Duration;
+    use super::TEST_WARN_TIMEOUT_S;
+
+    /// Environment variable for overriding the default threshold for unit-tests.
+    pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT";
+
+    // Unit tests are supposed to be really quick.
+    pub const UNIT_WARN: Duration = Duration::from_millis(50);
+    pub const UNIT_CRITICAL: Duration = Duration::from_millis(100);
+
+    /// Environment variable for overriding the default threshold for integration tests.
+    pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION";
+
+    // Integration tests may do a lot of work, so they can take longer to execute.
+    pub const INTEGRATION_WARN: Duration = Duration::from_millis(500);
+    pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000);
+
+    /// Environment variable for overriding the default threshold for doctests.
+    pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST";
+
+    // Doctests are similar to integration tests, because they can include a lot of
+    // initialization code.
+    pub const DOCTEST_WARN: Duration = INTEGRATION_WARN;
+    pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL;
+
+    // Do not assume anything about unknown tests; base the limits on the
+    // `TEST_WARN_TIMEOUT_S` constant.
+    pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S);
+    pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2);
+}
+
 // to be used by rustc to compile tests in libtest
 pub mod test {
     pub use crate::{
         assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
         Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, RunStrategy,
         ShouldPanic, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
-        TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
+        TestOpts, TestTimeOptions, TestType, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
     };
 }

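The `RUST_TEST_TIME_xxx=100,200` format documented above is two millisecond values separated by a comma: the warn time first, then the critical time. A standalone sketch of parsing that format, using a hypothetical `parse_thresholds` helper rather than the `TimeThreshold::from_env_var` constructor added later in this diff:

    use std::time::Duration;

    // Parse a "warn,critical" pair given in milliseconds, e.g. the value of
    // RUST_TEST_TIME_UNIT=100,200. Returns None on any malformed input.
    fn parse_thresholds(value: &str) -> Option<(Duration, Duration)> {
        let mut parts = value.splitn(2, ',');
        let warn: u64 = parts.next()?.parse().ok()?;
        let critical: u64 = parts.next()?.parse().ok()?;
        Some((Duration::from_millis(warn), Duration::from_millis(critical)))
    }

    fn main() {
        let (warn, critical) = parse_thresholds("100,200").unwrap();
        assert_eq!(warn, Duration::from_millis(100));
        assert_eq!(critical, Duration::from_millis(200));
    }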
@@ -97,6 +139,21 @@ pub enum Concurrent {
     No,
 }

+/// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html)
+/// conventions.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum TestType {
+    /// Unit-tests are expected to be in the `src` folder of the crate.
+    UnitTest,
+    /// Integration-style tests are expected to be in the `tests` folder of the crate.
+    IntegrationTest,
+    /// Doctests are created manually by `librustdoc`, so they are a different type of test.
+    DocTest,
+    /// Tests for the sources that don't follow the project layout convention
+    /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly).
+    Unknown,
+}
+
 // The name of a test. By convention this follows the rules for rust
 // paths; i.e., it should be a series of identifiers separated by double
 // colons. This way if some test runner wants to arrange the tests
@@ -232,6 +289,7 @@ pub struct TestDesc {
     pub ignore: bool,
     pub should_panic: ShouldPanic,
     pub allow_fail: bool,
+    pub test_type: TestType,
 }

 #[derive(Debug)]

@@ -403,6 +461,133 @@ pub enum RunIgnored {
     Only,
 }

+/// Structure denoting time limits for test execution.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TimeThreshold {
+    pub warn: Duration,
+    pub critical: Duration,
+}
+
+impl TimeThreshold {
+    /// Creates a new `TimeThreshold` instance with the provided durations.
+    pub fn new(warn: Duration, critical: Duration) -> Self {
+        Self {
+            warn,
+            critical,
+        }
+    }
+
+    /// Attempts to create a `TimeThreshold` instance with values obtained
+    /// from the environment variable, and returns `None` if the variable
+    /// is not set.
+    /// The environment variable format is expected to match `\d+,\d+`.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the variable with the provided name is set but contains an
+    /// inappropriate value.
+    pub fn from_env_var(env_var_name: &str) -> Option<Self> {
+        let durations_str = env::var(env_var_name).ok()?;
+
+        // Split the string into 2 substrings by the comma and try to parse numbers.
+        let durations: Vec<u64> = durations_str
+            .splitn(2, ',')
+            .map(|v| {
+                u64::from_str(v).unwrap_or_else(|_| {
+                    panic!(
+                        "Duration value in variable {} is expected to be a number, but got {}",
+                        env_var_name, v
+                    )
+                })
+            })
+            .collect();
+
+        // Check that we have exactly 2 numbers.
+        if durations.len() != 2 {
+            panic!(
+                "Duration variable {} expected to have 2 numbers separated by comma, but got {}",
+                env_var_name, durations.len()
+            );
+        }
+
+        let (warn, critical) = (durations[0], durations[1]);
+
+        Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical)))
+    }
+}
+
+/// Structure with parameters for calculating test execution time.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
+pub struct TestTimeOptions {
+    pub error_on_excess: bool,
+    pub colored: bool,
+    pub unit_threshold: TimeThreshold,
+    pub integration_threshold: TimeThreshold,
+    pub doctest_threshold: TimeThreshold,
+}
+
+impl TestTimeOptions {
+    pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self {
+        let unit_threshold =
+            TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME)
+                .unwrap_or_else(Self::default_unit);
+
+        let integration_threshold =
+            TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME)
+                .unwrap_or_else(Self::default_integration);
+
+        let doctest_threshold =
+            TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME)
+                .unwrap_or_else(Self::default_doctest);
+
+        Self {
+            error_on_excess,
+            colored,
+            unit_threshold,
+            integration_threshold,
+            doctest_threshold,
+        }
+    }
+
+    pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.warn_time(test)
+    }
+
+    pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool {
+        exec_time.0 >= self.critical_time(test)
+    }
+
+    fn warn_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.warn,
+            TestType::IntegrationTest => self.integration_threshold.warn,
+            TestType::DocTest => self.doctest_threshold.warn,
+            TestType::Unknown => time_constants::UNKNOWN_WARN,
+        }
+    }
+
+    fn critical_time(&self, test: &TestDesc) -> Duration {
+        match test.test_type {
+            TestType::UnitTest => self.unit_threshold.critical,
+            TestType::IntegrationTest => self.integration_threshold.critical,
+            TestType::DocTest => self.doctest_threshold.critical,
+            TestType::Unknown => time_constants::UNKNOWN_CRITICAL,
+        }
+    }
+
+    fn default_unit() -> TimeThreshold {
+        TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL)
+    }
+
+    fn default_integration() -> TimeThreshold {
+        TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL)
+    }
+
+    fn default_doctest() -> TimeThreshold {
+        TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL)
+    }
+}
+
 #[derive(Debug)]
 pub struct TestOpts {
     pub list: bool,

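A standalone sketch of the comparison rule that `is_warn` and `is_critical` implement: a test gets flagged once its measured time reaches the warn or critical bound for its test type (the 50 ms / 100 ms values below are the default unit-test thresholds from `time_constants`):

    use std::time::Duration;

    #[derive(Debug, PartialEq)]
    enum TimeVerdict { Ok, Warn, Critical }

    // The same ">=" rule used by is_warn/is_critical above, written standalone.
    fn classify(exec_time: Duration, warn: Duration, critical: Duration) -> TimeVerdict {
        if exec_time >= critical {
            TimeVerdict::Critical
        } else if exec_time >= warn {
            TimeVerdict::Warn
        } else {
            TimeVerdict::Ok
        }
    }

    fn main() {
        let (warn, critical) = (Duration::from_millis(50), Duration::from_millis(100));
        assert_eq!(classify(Duration::from_millis(10), warn, critical), TimeVerdict::Ok);
        assert_eq!(classify(Duration::from_millis(50), warn, critical), TimeVerdict::Warn);
        assert_eq!(classify(Duration::from_millis(150), warn, critical), TimeVerdict::Critical);
    }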
@@ -418,7 +603,7 @@ pub struct TestOpts {
     pub format: OutputFormat,
     pub test_threads: Option<usize>,
     pub skip: Vec<String>,
-    pub report_time: bool,
+    pub time_options: Option<TestTimeOptions>,
     pub options: Options,
 }

@@ -502,10 +687,28 @@ fn optgroups() -> getopts::Options {
             unstable-options = Allow use of experimental features",
         "unstable-options",
     )
-    .optflag(
+    .optflagopt(
         "",
         "report-time",
-        "Show execution time of each test. Not available for --format=terse"
+        "Show execution time of each test. Available values:
+        plain = do not colorize the execution time (default);
+        colored = colorize output according to the `color` parameter value;
+
+        Threshold values for colorized output can be configured via
+        `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
+        `RUST_TEST_TIME_DOCTEST` environment variables.
+
+        Not available for --format=terse",
+        "plain|colored"
+    )
+    .optflag(
+        "",
+        "ensure-test-time",
+        "Treat excess of the test execution time limit as an error.
+
+        Threshold values for this option can be configured via
+        `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and
+        `RUST_TEST_TIME_DOCTEST` environment variables."
     );
     return opts;
 }

@@ -554,6 +757,21 @@ fn is_nightly() -> bool {
     bootstrap || !disable_unstable_features
 }

+// Gets the option value and checks if unstable features are enabled.
+macro_rules! unstable_optflag {
+    ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{
+        let opt = $matches.opt_present($option_name);
+        if !$allow_unstable && opt {
+            return Some(Err(format!(
+                "The \"{}\" flag is only accepted on the nightly compiler",
+                $option_name
+            )));
+        }
+
+        opt
+    }};
+}
+
 // Parses command line arguments into test options
 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
     let mut allow_unstable = false;
@@ -592,26 +810,9 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         None
     };

-    let exclude_should_panic = matches.opt_present("exclude-should-panic");
-    if !allow_unstable && exclude_should_panic {
-        return Some(Err(
-            "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
-        ));
-    }
+    let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic");

-    let include_ignored = matches.opt_present("include-ignored");
-    if !allow_unstable && include_ignored {
-        return Some(Err(
-            "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
-        ));
-    }
-
-    let report_time = matches.opt_present("report-time");
-    if !allow_unstable && report_time {
-        return Some(Err(
-            "The \"report-time\" flag is only accepted on the nightly compiler".into(),
-        ));
-    }
+    let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored");

     let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
         (true, true) => {

@@ -641,6 +842,19 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         };
     }

+    let report_time = unstable_optflag!(matches, allow_unstable, "report-time");
+    let colored_opt_str = matches.opt_str("report-time");
+    let report_time_colored = report_time && colored_opt_str == Some("colored".into());
+    let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-test-time");
+
+    // If the `ensure-test-time` option is provided, time output is enforced,
+    // so the user won't be confused if any of the tests fail silently.
+    let time_options = if report_time || ensure_test_time {
+        Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored))
+    } else {
+        None
+    };
+
     let test_threads = match matches.opt_str("test-threads") {
         Some(n_str) => match n_str.parse::<usize>() {
             Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),

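The parsing above distinguishes a bare `--report-time` from `--report-time=colored`: `opt_present` reports whether the flag was given at all, and `opt_str` returns its optional value. A minimal standalone sketch of that behaviour with the `getopts` crate (assumed as a dependency; this is not the libtest code path itself):

    use getopts::Options;

    fn main() {
        let mut opts = Options::new();
        opts.optflagopt("", "report-time", "Show execution time of each test", "plain|colored");

        let matches = opts.parse(&["--report-time=colored".to_string()]).unwrap();
        let report_time = matches.opt_present("report-time");
        let report_time_colored =
            report_time && matches.opt_str("report-time") == Some("colored".into());

        assert!(report_time);
        assert!(report_time_colored);
    }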
@@ -706,20 +920,20 @@ pub fn parse_opts(args: &[String]) -> Option<OptRes> {
         format,
         test_threads,
         skip: matches.opt_strs("skip"),
-        report_time,
+        time_options,
         options: Options::new().display_output(matches.opt_present("show-output")),
     };

     Some(Ok(test_opts))
 }

-#[derive(Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct BenchSamples {
     ns_iter_summ: stats::Summary,
     mb_s: usize,
 }

-#[derive(Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 pub enum TestResult {
     TrOk,
     TrFailed,
@@ -727,6 +941,7 @@ pub enum TestResult {
     TrIgnored,
     TrAllowedFail,
     TrBench(BenchSamples),
+    TrTimedFail,
 }

 unsafe impl Send for TestResult {}
@@ -774,6 +989,7 @@ struct ConsoleTestState {
     metrics: MetricMap,
     failures: Vec<(TestDesc, Vec<u8>)>,
     not_failures: Vec<(TestDesc, Vec<u8>)>,
+    time_failures: Vec<(TestDesc, Vec<u8>)>,
     options: Options,
 }

@@ -796,6 +1012,7 @@ impl ConsoleTestState {
             metrics: MetricMap::new(),
             failures: Vec::new(),
             not_failures: Vec::new(),
+            time_failures: Vec::new(),
             options: opts.options,
         })
     }
@@ -831,11 +1048,12 @@ impl ConsoleTestState {
                 TrIgnored => "ignored".to_owned(),
                 TrAllowedFail => "failed (allowed)".to_owned(),
                 TrBench(ref bs) => fmt_bench_samples(bs),
+                TrTimedFail => "failed (time limit exceeded)".to_owned(),
             },
             test.name,
         ))?;
         if let Some(exec_time) = exec_time {
-            self.write_log(|| format!(" {}", exec_time))?;
+            self.write_log(|| format!(" <{}>", exec_time))?;
         }
         self.write_log(|| "\n")
     }
@@ -993,6 +1211,10 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
                 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
                 st.failures.push((test, stdout));
             }
+            TrTimedFail => {
+                st.failed += 1;
+                st.time_failures.push((test, stdout));
+            }
         }
         Ok(())
     }
@@ -1018,6 +1240,7 @@ pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Resu
             use_color(opts),
             max_name_len,
             is_multithreaded,
+            opts.time_options,
         )),
         OutputFormat::Terse => Box::new(TerseFormatter::new(
             output,

@@ -1487,15 +1710,20 @@ pub fn run_test(
         return;
     }

+    struct TestRunOpts {
+        pub nocapture: bool,
+        pub concurrency: Concurrent,
+        pub time: Option<TestTimeOptions>,
+    }
+
     fn run_test_inner(
         desc: TestDesc,
-        nocapture: bool,
-        report_time: bool,
         strategy: RunStrategy,
         monitor_ch: Sender<MonitorMsg>,
         testfn: Box<dyn FnOnce() + Send>,
-        concurrency: Concurrent,
+        opts: TestRunOpts,
     ) {
+        let concurrency = opts.concurrency;
         let name = desc.name.clone();

         let runtest = move || {
@@ -1518,6 +1746,12 @@ pub fn run_test(
         }
     }

+    let test_run_opts = TestRunOpts {
+        nocapture: opts.nocapture,
+        concurrency,
+        time: opts.time_options
+    };
+
     match testfn {
         DynBenchFn(bencher) => {
             // Benchmarks aren't expected to panic, so we run them all in-process.
@@ -1538,8 +1772,6 @@ pub fn run_test(
             };
             run_test_inner(
                 desc,
-                opts.nocapture,
-                opts.report_time,
                 strategy,
                 monitor_ch,
                 Box::new(move || __rust_begin_short_backtrace(f)),
@@ -1548,12 +1780,10 @@ pub fn run_test(
         }
         StaticTestFn(f) => run_test_inner(
             desc,
-            opts.nocapture,
-            opts.report_time,
             strategy,
             monitor_ch,
             Box::new(move || __rust_begin_short_backtrace(f)),
-            concurrency,
+            test_run_opts,
         ),
     }
 }
@@ -1564,10 +1794,13 @@ fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
     f()
 }

-fn calc_result<'a>(desc: &TestDesc,
-                   task_result: Result<(), &'a (dyn Any + 'static + Send)>)
+fn calc_result<'a>(
+    desc: &TestDesc,
+    task_result: Result<(), &'a (dyn Any + 'static + Send)>,
+    time_opts: &Option<TestTimeOptions>,
+    exec_time: &Option<TestExecTime>)
     -> TestResult {
-    match (&desc.should_panic, task_result) {
+    let result = match (&desc.should_panic, task_result) {
         (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
         (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
             if err
@@ -1589,7 +1822,22 @@ fn calc_result<'a>(
         (&ShouldPanic::Yes, Ok(())) => TrFailedMsg("test did not panic as expected".to_string()),
         _ if desc.allow_fail => TrAllowedFail,
         _ => TrFailed,
-    }
+    };
+
+    // If the test already failed (or is allowed to fail), do not change the result.
+    if result != TrOk {
+        return result;
+    }
+
+    // Check if the test failed due to a timeout.
+    if let (Some(opts), Some(time)) = (time_opts, exec_time) {
+        if opts.error_on_excess && opts.is_critical(desc, time) {
+            return TrTimedFail;
+        }
+    }
+
+    result
 }

 fn get_result_from_exit_code(desc: &TestDesc, code: i32) -> TestResult {

@@ -115,7 +115,7 @@ pub trait Stats {
 }

 /// Extracted collection of all the summary statistics of a sample set.
-#[derive(Clone, PartialEq, Copy)]
+#[derive(Debug, Clone, PartialEq, Copy)]
 #[allow(missing_docs)]
 pub struct Summary {
     pub sum: f64,

@@ -2,10 +2,11 @@ use super::*;

 use crate::test::{
     filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored, RunStrategy,
-    ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailedMsg,
-    TrIgnored, TrOk,
+    ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions,
+    TestType, TrFailedMsg, TrIgnored, TrOk,
 };
 use std::sync::mpsc::channel;
+use std::time::Duration;

 impl TestOpts {
     fn new() -> TestOpts {
@@ -23,7 +24,7 @@ impl TestOpts {
             format: OutputFormat::Pretty,
             test_threads: None,
             skip: vec![],
-            report_time: false,
+            time_options: None,
             options: Options::new(),
         }
     }
@@ -37,6 +38,7 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
                 ignore: true,
                 should_panic: ShouldPanic::No,
                 allow_fail: false,
+                test_type: TestType::Unknown,
             },
             testfn: DynTestFn(Box::new(move || {})),
         },
@@ -46,6 +48,7 @@ fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
                 ignore: false,
                 should_panic: ShouldPanic::No,
                 allow_fail: false,
+                test_type: TestType::Unknown,
             },
             testfn: DynTestFn(Box::new(move || {})),
         },
@@ -63,6 +66,7 @@ pub fn do_not_run_ignored_tests() {
             ignore: true,
             should_panic: ShouldPanic::No,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
@@ -81,6 +85,7 @@ pub fn ignored_tests_result_in_ignored() {
             ignore: true,
             should_panic: ShouldPanic::No,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
@@ -101,6 +106,7 @@ fn test_should_panic() {
             ignore: false,
             should_panic: ShouldPanic::Yes,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
@@ -121,6 +127,7 @@ fn test_should_panic_good_message() {
             ignore: false,
             should_panic: ShouldPanic::YesWithMessage("error message"),
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
@@ -143,6 +150,7 @@ fn test_should_panic_bad_message() {
             ignore: false,
             should_panic: ShouldPanic::YesWithMessage(expected),
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
@@ -161,6 +169,7 @@ fn test_should_panic_but_succeeds() {
             ignore: false,
             should_panic: ShouldPanic::Yes,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
@@ -178,11 +187,18 @@ fn report_time_test_template(report_time: bool) -> Option<TestExecTime> {
             ignore: false,
             should_panic: ShouldPanic::No,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(f)),
     };
+    let time_options = if report_time {
+        Some(TestTimeOptions::default())
+    } else {
+        None
+    };
+
     let test_opts = TestOpts {
-        report_time,
+        time_options,
         ..TestOpts::new()
     };
     let (tx, rx) = channel();

@@ -203,6 +219,97 @@ fn test_should_report_time() {
     assert!(exec_time.is_some());
 }

+fn time_test_failure_template(test_type: TestType) -> TestResult {
+    fn f() {}
+    let desc = TestDescAndFn {
+        desc: TestDesc {
+            name: StaticTestName("whatever"),
+            ignore: false,
+            should_panic: ShouldPanic::No,
+            allow_fail: false,
+            test_type
+        },
+        testfn: DynTestFn(Box::new(f)),
+    };
+    // `Default` will initialize all the thresholds to 0 milliseconds.
+    let mut time_options = TestTimeOptions::default();
+    time_options.error_on_excess = true;
+
+    let test_opts = TestOpts {
+        time_options: Some(time_options),
+        ..TestOpts::new()
+    };
+    let (tx, rx) = channel();
+    run_test(&test_opts, false, desc, tx, Concurrent::No);
+    let (_, result, _, _) = rx.recv().unwrap();
+
+    result
+}
+
+#[test]
+fn test_error_on_exceed() {
+    let types = [TestType::UnitTest, TestType::IntegrationTest, TestType::DocTest];
+
+    for test_type in types.into_iter() {
+        let result = time_test_failure_template(*test_type);
+
+        assert_eq!(result, TestResult::TrTimedFail);
+    }
+
+    // Check that for unknown tests thresholds aren't applied.
+    let result = time_test_failure_template(TestType::Unknown);
+    assert_eq!(result, TestResult::TrOk);
+}
+
+fn typed_test_desc(test_type: TestType) -> TestDesc {
+    TestDesc {
+        name: StaticTestName("whatever"),
+        ignore: false,
+        should_panic: ShouldPanic::No,
+        allow_fail: false,
+        test_type
+    }
+}
+
+fn test_exec_time(millis: u64) -> TestExecTime {
+    TestExecTime(Duration::from_millis(millis))
+}
+
+#[test]
+fn test_time_options_threshold() {
+    let unit = TimeThreshold::new(Duration::from_millis(50), Duration::from_millis(100));
+    let integration = TimeThreshold::new(Duration::from_millis(500), Duration::from_millis(1000));
+    let doc = TimeThreshold::new(Duration::from_millis(5000), Duration::from_millis(10000));
+
+    let options = TestTimeOptions {
+        error_on_excess: false,
+        colored: false,
+        unit_threshold: unit.clone(),
+        integration_threshold: integration.clone(),
+        doctest_threshold: doc.clone(),
+    };
+
+    let test_vector = [
+        (TestType::UnitTest, unit.warn.as_millis() - 1, false, false),
+        (TestType::UnitTest, unit.warn.as_millis(), true, false),
+        (TestType::UnitTest, unit.critical.as_millis(), true, true),
+        (TestType::IntegrationTest, integration.warn.as_millis() - 1, false, false),
+        (TestType::IntegrationTest, integration.warn.as_millis(), true, false),
+        (TestType::IntegrationTest, integration.critical.as_millis(), true, true),
+        (TestType::DocTest, doc.warn.as_millis() - 1, false, false),
+        (TestType::DocTest, doc.warn.as_millis(), true, false),
+        (TestType::DocTest, doc.critical.as_millis(), true, true),
+    ];
+
+    for (test_type, time, expected_warn, expected_critical) in test_vector.into_iter() {
+        let test_desc = typed_test_desc(*test_type);
+        let exec_time = test_exec_time(*time as u64);
+
+        assert_eq!(options.is_warn(&test_desc, &exec_time), *expected_warn);
+        assert_eq!(options.is_critical(&test_desc, &exec_time), *expected_critical);
+    }
+}
+
 #[test]
 fn parse_ignored_flag() {
     let args = vec![

@@ -284,6 +391,7 @@ pub fn exclude_should_panic_option() {
             ignore: false,
             should_panic: ShouldPanic::Yes,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(move || {})),
     });
@@ -305,6 +413,7 @@ pub fn exact_filter_match() {
             ignore: false,
             should_panic: ShouldPanic::No,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(move || {})),
     })
@@ -416,6 +525,7 @@ pub fn sort_tests() {
             ignore: false,
             should_panic: ShouldPanic::No,
             allow_fail: false,
+            test_type: TestType::Unknown,
         },
         testfn: DynTestFn(Box::new(testfn)),
     };
@@ -492,6 +602,7 @@ pub fn test_bench_no_iter() {
         ignore: false,
         should_panic: ShouldPanic::No,
         allow_fail: false,
+        test_type: TestType::Unknown,
     };

     crate::bench::benchmark(desc, tx, true, f);
@@ -511,6 +622,7 @@ pub fn test_bench_iter() {
         ignore: false,
         should_panic: ShouldPanic::No,
        allow_fail: false,
+        test_type: TestType::Unknown,
     };

     crate::bench::benchmark(desc, tx, true, f);
@@ -524,6 +636,7 @@ fn should_sort_failures_before_printing_them() {
         ignore: false,
         should_panic: ShouldPanic::No,
         allow_fail: false,
+        test_type: TestType::Unknown,
     };

     let test_b = TestDesc {
@@ -531,9 +644,10 @@ fn should_sort_failures_before_printing_them() {
         ignore: false,
         should_panic: ShouldPanic::No,
         allow_fail: false,
+        test_type: TestType::Unknown,
     };

-    let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
+    let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false, None);

     let st = ConsoleTestState {
         log_out: None,
@@ -548,6 +662,7 @@ fn should_sort_failures_before_printing_them() {
         failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
         options: Options::new(),
         not_failures: Vec::new(),
+        time_failures: Vec::new(),
     };

     out.write_failures(&st).unwrap();