From 49a571a6d81b39dcaedc8d3f22aa7674dae94b37 Mon Sep 17 00:00:00 2001 From: Trevor Gross Date: Fri, 20 Dec 2024 02:35:15 +0000 Subject: [PATCH 1/3] tests: Print the actual output when lines do not match --- tests/common/mod.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/common/mod.rs b/tests/common/mod.rs index c2c16a2..0b0b615 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -68,7 +68,9 @@ pub fn assert_reordered_log(actual: &str, num: u64, expected_lines: &[&str], tai for expected in expected_lines.iter().map(|l| l.trim()).filter(|l| !l.is_empty()) { match actual_lines.get_mut(expected) { - None | Some(0) => panic!("expected line \"{expected}\" not in log"), + None | Some(0) => { + panic!("expected line \"{expected}\" not in log. actual:\n```\n{actual}\n```") + } Some(num) => *num -= 1, } } From 53859657e0527bf2ff37fae34ee5135de98b09b6 Mon Sep 17 00:00:00 2001 From: Trevor Gross Date: Thu, 14 Mar 2024 14:16:57 -0700 Subject: [PATCH 2/3] Add a "skippable" runner This runner will inspect Ok values to determine if the test should be ignored ("skipped" for our purposes means runtime ignore). This can be seen in terse or json output directly. Co-authored-by: Ben Widawsky Fixes: https://github.com/LukasKalbertodt/libtest-mimic/issues/19 (Most of this is from Ben, I just updated the types) --- src/lib.rs | 38 +++++++++++++++++++++++++++++++++++++- src/printer.rs | 19 ++++++++++++++++--- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 177d37c..81576d2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -106,6 +106,14 @@ pub struct Trial { info: TestInfo, } +/// A representation of whether a test ran to completion or was skipped during its runtime. +pub enum Completion { + /// Test completed successfully. + Completed, + /// Test was ignored with a reason. + Ignored { reason: String }, +} + impl Trial { /// Creates a (non-benchmark) test with the given name and runner. 
/// @@ -129,6 +137,30 @@ impl Trial { } } + /// Creates a (non-benchmark) test with the given name and runner. + /// + /// Like other tests, returning an `Err` is a test failure. The `Ok` variant for this test must + /// return a [`Completion`] to indicate whether the test successfully ran to completion, or if + /// it was skipped at some point during testing. If it was skipped, a reason may be provided. + pub fn skippable_test<R>(name: impl Into<String>, runner: R) -> Self + where + R: FnOnce() -> Result<Completion, Failed> + Send + 'static, + { + Self { + runner: Box::new(|_test_mode| match runner() { + Ok(Completion::Completed) => Outcome::Passed, + Ok(Completion::Ignored { reason }) => Outcome::RuntimeIgnored { reason }, + Err(e) => Outcome::Failed(e), + }), + info: TestInfo { + name: name.into(), + kind: String::new(), + is_ignored: false, + is_bench: false, + }, + } + } + /// Creates a benchmark with the given name and runner. /// /// If the runner's parameter `test_mode` is `true`, the runner function @@ -310,6 +342,9 @@ enum Outcome { /// The test or benchmark was ignored. Ignored, + /// The test or benchmark was ignored. + RuntimeIgnored { reason: String }, + /// The benchmark was successfully run. Measured(Measurement), } @@ -480,9 +515,10 @@ pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion { Outcome::Failed(failed) => { failed_tests.push((test, failed.msg)); conclusion.num_failed += 1; - }, + } Outcome::Ignored => conclusion.num_ignored += 1, Outcome::Measured(_) => conclusion.num_measured += 1, + Outcome::RuntimeIgnored { .. } => conclusion.num_ignored += 1, } }; diff --git a/src/printer.rs b/src/printer.rs index 7c93edd..d0bcd9f 100644 --- a/src/printer.rs +++ b/src/printer.rs @@ -159,6 +159,7 @@ impl Printer { writeln!(self.out).unwrap(); return; } + Outcome::RuntimeIgnored { .. 
} => 'S', }; let style = color_of_outcome(outcome); @@ -184,6 +185,7 @@ impl Printer { Outcome::Failed(_) => "failed", Outcome::Ignored => "ignored", Outcome::Measured(_) => unreachable!(), + Outcome::RuntimeIgnored { .. } => "skipped", }, match outcome { Outcome::Failed(Failed { msg: Some(msg) }) => { @@ -312,15 +314,26 @@ impl Printer { /// Prints a colored 'ok'/'FAILED'/'ignored'/'bench'. fn print_outcome_pretty(&mut self, outcome: &Outcome) { + let style = color_of_outcome(outcome); + let mut r = None; let s = match outcome { Outcome::Passed => "ok", Outcome::Failed { .. } => "FAILED", Outcome::Ignored => "ignored", Outcome::Measured { .. } => "bench", + Outcome::RuntimeIgnored { reason } => { + if !reason.is_empty() { + r = Some(reason); + } + "ignored" + }, }; - let style = color_of_outcome(outcome); - write!(self.out, "{style}{}{style:#}", s).unwrap(); + write!(self.out, "{style}{s}").unwrap(); + if let Some(reason) = r { + write!(self.out, ", {reason}").unwrap(); + } + write!(self.out, "{style:#}").unwrap(); if let Outcome::Measured(Measurement { avg, variance }) = outcome { write!( @@ -350,7 +363,7 @@ fn color_of_outcome(outcome: &Outcome) -> Style { let color = match outcome { Outcome::Passed => AnsiColor::Green, Outcome::Failed { .. } => AnsiColor::Red, - Outcome::Ignored => AnsiColor::Yellow, + Outcome::Ignored | Outcome::RuntimeIgnored { .. }=> AnsiColor::Yellow, Outcome::Measured { .. 
} => AnsiColor::Cyan, }; Style::new().fg_color(Some(Color::Ansi(color))) From 4c8413b493e1b499bb941d2ced1f4c3d8462f53c Mon Sep 17 00:00:00 2001 From: Trevor Gross Date: Thu, 14 Mar 2024 21:29:05 -0700 Subject: [PATCH 3/3] Add tests for runtime-ignored trials Co-authored-by: Ben Widawsky (most of this is from Ben) --- tests/all_passing.rs | 147 +++++++++++++++++++++++++++++++------------ 1 file changed, 108 insertions(+), 39 deletions(-) diff --git a/tests/all_passing.rs b/tests/all_passing.rs index b5c5552..d60458b 100644 --- a/tests/all_passing.rs +++ b/tests/all_passing.rs @@ -1,5 +1,5 @@ use common::{args, check}; -use libtest_mimic::{Trial, Conclusion}; +use libtest_mimic::{Completion, Conclusion, Trial}; use pretty_assertions::assert_eq; use crate::common::do_run; @@ -7,38 +7,56 @@ use crate::common::do_run; #[macro_use] mod common; - fn tests() -> Vec<Trial> { vec![ Trial::test("foo", || Ok(())), Trial::test("bar", || Ok(())), Trial::test("barro", || Ok(())), + // Passed + Trial::skippable_test("baz", || Ok(Completion::Completed)), + // Ignored with a reason + Trial::skippable_test("qux", || { + Ok(Completion::Ignored { + reason: "very valid reason".into(), + }) + }), + // Ignored with no reason + Trial::skippable_test("quux", || Ok(Completion::Ignored { reason: "".into() })), ] } #[test] fn normal() { - check(args([]), tests, 3, + check( + args([]), + tests, + 6, Conclusion { num_filtered_out: 0, - num_passed: 3, + num_passed: 4, num_failed: 0, - num_ignored: 0, + num_ignored: 2, num_measured: 0, }, " test foo ... ok test bar ... ok test barro ... ok - " + test baz ... ok + test qux ... ignored, very valid reason + test quux ... 
ignored + ", ); } #[test] fn filter_one() { - check(args(["foo"]), tests, 1, + check( + args(["foo"]), + tests, + 1, Conclusion { - num_filtered_out: 2, + num_filtered_out: 5, num_passed: 1, num_failed: 0, num_ignored: 0, @@ -50,9 +68,12 @@ fn filter_one() { #[test] fn filter_two() { - check(args(["bar"]), tests, 2, + check( + args(["bar"]), + tests, + 2, Conclusion { - num_filtered_out: 1, + num_filtered_out: 4, num_passed: 2, num_failed: 0, num_ignored: 0, @@ -65,12 +86,14 @@ fn filter_two() { ); } - #[test] fn filter_exact() { - check(args(["bar", "--exact"]), tests, 1, + check( + args(["bar", "--exact"]), + tests, + 1, Conclusion { - num_filtered_out: 2, + num_filtered_out: 5, num_passed: 1, num_failed: 0, num_ignored: 0, @@ -82,9 +105,12 @@ fn filter_exact() { #[test] fn filter_two_and_skip() { - check(args(["--skip", "barro", "bar"]), tests, 1, + check( + args(["--skip", "barro", "bar"]), + tests, + 1, Conclusion { - num_filtered_out: 2, + num_filtered_out: 5, num_passed: 1, num_failed: 0, num_ignored: 0, @@ -94,69 +120,112 @@ fn filter_two_and_skip() { ); } +#[test] +fn filter_runtime_ignored() { + check( + args(["qux", "--exact"]), + tests, + 1, + Conclusion { + num_filtered_out: 5, + num_passed: 0, + num_failed: 0, + num_ignored: 1, + num_measured: 0, + }, + "test qux ... ignored, very valid reason", + ); +} + #[test] fn skip_nothing() { - check(args(["--skip", "peter"]), tests, 3, + check( + args(["--skip", "peter"]), + tests, + 6, Conclusion { num_filtered_out: 0, - num_passed: 3, + num_passed: 4, num_failed: 0, - num_ignored: 0, + num_ignored: 2, num_measured: 0, }, " test foo ... ok test bar ... ok test barro ... ok - " + test baz ... ok + test qux ... ignored, very valid reason + test quux ... 
ignored + ", ); } #[test] fn skip_two() { - check(args(["--skip", "bar"]), tests, 1, + check( + args(["--skip", "bar"]), + tests, + 4, Conclusion { num_filtered_out: 2, - num_passed: 1, + num_passed: 2, num_failed: 0, - num_ignored: 0, + num_ignored: 2, num_measured: 0, }, - "test foo ... ok" + " + test foo ... ok + test baz ... ok + test qux ... ignored, very valid reason + test quux ... ignored + ", ); } #[test] fn skip_exact() { - check(args(["--exact", "--skip", "bar"]), tests, 2, + check( + args(["--exact", "--skip", "bar"]), + tests, + 5, Conclusion { num_filtered_out: 1, - num_passed: 2, + num_passed: 3, num_failed: 0, - num_ignored: 0, + num_ignored: 2, num_measured: 0, }, " test foo ... ok test barro ... ok - " + test baz ... ok + test qux ... ignored, very valid reason + test quux ... ignored + ", ); } #[test] fn terse_output() { let (c, out) = do_run(args(["--format", "terse"]), tests()); - assert_eq!(c, Conclusion { - num_filtered_out: 0, - num_passed: 3, - num_failed: 0, - num_ignored: 0, - num_measured: 0, - }); - assert_log!(out, " - running 3 tests - ... - test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; \ + assert_eq!( + c, + Conclusion { + num_filtered_out: 0, + num_passed: 4, + num_failed: 0, + num_ignored: 2, + num_measured: 0, + } + ); + assert_log!( + out, + " + running 6 tests + ....SS + test result: ok. 4 passed; 0 failed; 2 ignored; 0 measured; 0 filtered out; \ finished in 0.00s - "); + " + ); }