//! Module providing an interface for running tests in the console.

use std::fs::File;
use std::io;
use std::io::prelude::Write;
use std::time::Instant;

use super::bench::fmt_bench_samples;
use super::cli::TestOpts;
use super::event::{CompletedTest, TestEvent};
use super::formatters::{
    JsonFormatter, JunitFormatter, OutputFormatter, PrettyFormatter, TerseFormatter,
};
use super::helpers::concurrency::get_concurrency;
use super::helpers::metrics::MetricMap;
use super::options::{Options, OutputFormat};
use super::test_result::TestResult;
use super::time::{TestExecTime, TestSuiteExecTime};
use super::types::{NamePadding, TestDesc, TestDescAndFn};
use super::{filter_tests, run_tests, term};

/// Generic wrapper over stdout.
pub(crate) enum OutputLocation<T> {
    Pretty(Box<term::StdoutTerminal>),
    Raw(T),
}

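// Forward `Write` calls to whichever sink is in use: the terminal handle or the raw stream.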
impl<T: Write> Write for OutputLocation<T> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        match *self {
            OutputLocation::Pretty(ref mut term) => term.write(buf),
            OutputLocation::Raw(ref mut stdout) => stdout.write(buf),
        }
    }

    fn flush(&mut self) -> io::Result<()> {
        match *self {
            OutputLocation::Pretty(ref mut term) => term.flush(),
            OutputLocation::Raw(ref mut stdout) => stdout.flush(),
        }
    }
}

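/// State accumulated while listing (discovering) tests: an optional logfile
/// plus counters for tests, benchmarks, and ignored items.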
pub(crate) struct ConsoleTestDiscoveryState {
    pub log_out: Option<File>,
    pub tests: usize,
    pub benchmarks: usize,
    pub ignored: usize,
}

impl ConsoleTestDiscoveryState {
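    /// Creates a fresh discovery state, opening the logfile named in `opts` if one was requested.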
    pub(crate) fn new(opts: &TestOpts) -> io::Result<ConsoleTestDiscoveryState> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
            None => None,
        };

        Ok(ConsoleTestDiscoveryState { log_out, tests: 0, benchmarks: 0, ignored: 0 })
    }

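    /// Writes `msg` to the logfile, if one is configured; the closure is only
    /// invoked when a logfile is actually open.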
    pub(crate) fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
    where
        S: AsRef<str>,
        F: FnOnce() -> S,
    {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let msg = msg();
                let msg = msg.as_ref();
                o.write_all(msg.as_bytes())
            }
        }
    }
}

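/// Aggregated state for a console test run: an optional logfile, result
/// counters, captured output for reporting, collected metrics, and the
/// run options.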
pub(crate) struct ConsoleTestState {
    pub log_out: Option<File>,
    pub total: usize,
    pub passed: usize,
    pub failed: usize,
    pub ignored: usize,
    pub filtered_out: usize,
    pub measured: usize,
    pub exec_time: Option<TestSuiteExecTime>,
    pub metrics: MetricMap,
    pub failures: Vec<(TestDesc, Vec<u8>)>,
    pub not_failures: Vec<(TestDesc, Vec<u8>)>,
    pub ignores: Vec<(TestDesc, Vec<u8>)>,
    pub time_failures: Vec<(TestDesc, Vec<u8>)>,
    pub options: Options,
}

impl ConsoleTestState {
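    /// Creates a zeroed run state, opening the logfile named in `opts` if one was requested.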
    pub(crate) fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
            None => None,
        };

        Ok(ConsoleTestState {
            log_out,
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            filtered_out: 0,
            measured: 0,
            exec_time: None,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            not_failures: Vec::new(),
            ignores: Vec::new(),
            time_failures: Vec::new(),
            options: opts.options,
        })
    }

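    /// Writes `msg` to the logfile, if one is configured; the closure is only
    /// invoked when a logfile is actually open.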
    pub(crate) fn write_log<F, S>(&mut self, msg: F) -> io::Result<()>
    where
        S: AsRef<str>,
        F: FnOnce() -> S,
    {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let msg = msg();
                let msg = msg.as_ref();
                o.write_all(msg.as_bytes())
            }
        }
    }

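    /// Appends one line to the logfile for a finished test: the result, the
    /// test name, and (when available) the execution time.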
    pub(crate) fn write_log_result(
        &mut self,
        test: &TestDesc,
        result: &TestResult,
        exec_time: Option<&TestExecTime>,
    ) -> io::Result<()> {
        self.write_log(|| {
            let TestDesc { name, ignore_message, .. } = test;
            format!(
                "{} {}",
                match *result {
                    TestResult::TrOk => "ok".to_owned(),
                    TestResult::TrFailed => "failed".to_owned(),
                    TestResult::TrFailedMsg(ref msg) => format!("failed: {msg}"),
                    TestResult::TrIgnored => {
                        if let Some(msg) = ignore_message {
                            format!("ignored: {msg}")
                        } else {
                            "ignored".to_owned()
                        }
                    }
                    TestResult::TrBench(ref bs) => fmt_bench_samples(bs),
                    TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(),
                },
                name,
            )
        })?;
        if let Some(exec_time) = exec_time {
            self.write_log(|| format!(" <{exec_time}>"))?;
        }
        self.write_log(|| "\n")
    }

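    /// Number of tests that have produced a result so far.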
    fn current_test_count(&self) -> usize {
        self.passed + self.failed + self.ignored + self.measured
    }
}

// Lists the tests to the console, and optionally to the logfile. Filters are honored.
pub(crate) fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
    let output = match term::stdout() {
        None => OutputLocation::Raw(io::stdout().lock()),
        Some(t) => OutputLocation::Pretty(t),
    };

    let mut out: Box<dyn OutputFormatter> = match opts.format {
        OutputFormat::Pretty | OutputFormat::Junit => {
            Box::new(PrettyFormatter::new(output, false, 0, false, None))
        }
        OutputFormat::Terse => Box::new(TerseFormatter::new(output, false, 0, false)),
        OutputFormat::Json => Box::new(JsonFormatter::new(output)),
    };
    let mut st = ConsoleTestDiscoveryState::new(opts)?;

    out.write_discovery_start()?;
    for test in filter_tests(opts, tests).into_iter() {
        use crate::TestFn::*;

        let TestDescAndFn { desc, testfn } = test;

        let fntype = match testfn {
            StaticTestFn(..) | DynTestFn(..) | StaticBenchAsTestFn(..) | DynBenchAsTestFn(..) => {
                st.tests += 1;
                "test"
            }
            StaticBenchFn(..) | DynBenchFn(..) => {
                st.benchmarks += 1;
                "benchmark"
            }
        };

        st.ignored += if desc.ignore { 1 } else { 0 };

        out.write_test_discovered(&desc, fntype)?;
        st.write_log(|| format!("{fntype} {}\n", desc.name))?;
    }

    out.write_discovery_finish(&st)
}

// Updates `ConsoleTestState` depending on the result of the test execution.
fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) {
    let test = completed_test.desc;
    let stdout = completed_test.stdout;
    match completed_test.result {
        TestResult::TrOk => {
            st.passed += 1;
            st.not_failures.push((test, stdout));
        }
        TestResult::TrIgnored => {
            st.ignored += 1;
            st.ignores.push((test, stdout));
        }
        TestResult::TrBench(bs) => {
            st.metrics.insert_metric(
                test.name.as_slice(),
                bs.ns_iter_summ.median,
                bs.ns_iter_summ.max - bs.ns_iter_summ.min,
            );
            st.measured += 1;
        }
        TestResult::TrFailed => {
            st.failed += 1;
            st.failures.push((test, stdout));
        }
        TestResult::TrFailedMsg(msg) => {
            st.failed += 1;
            let mut stdout = stdout;
            stdout.extend_from_slice(format!("note: {msg}").as_bytes());
            st.failures.push((test, stdout));
        }
        TestResult::TrTimedFail => {
            st.failed += 1;
            st.time_failures.push((test, stdout));
        }
    }
}

// Handler for events that occur during test execution.
// It is provided as a callback to the `run_tests` function.
fn on_test_event(
    event: &TestEvent,
    st: &mut ConsoleTestState,
    out: &mut dyn OutputFormatter,
) -> io::Result<()> {
    match (*event).clone() {
        TestEvent::TeFiltered(filtered_tests, shuffle_seed) => {
            st.total = filtered_tests;
            out.write_run_start(filtered_tests, shuffle_seed)?;
        }
        TestEvent::TeFilteredOut(filtered_out) => {
            st.filtered_out = filtered_out;
        }
        TestEvent::TeWait(ref test) => out.write_test_start(test)?,
        TestEvent::TeTimeout(ref test) => out.write_timeout(test)?,
        TestEvent::TeResult(completed_test) => {
            let test = &completed_test.desc;
            let result = &completed_test.result;
            let exec_time = &completed_test.exec_time;
            let stdout = &completed_test.stdout;

            st.write_log_result(test, result, exec_time.as_ref())?;
            out.write_result(test, result, exec_time.as_ref(), stdout, st)?;
            handle_test_result(st, completed_test);
        }
    }

    Ok(())
}

/// A simple console test runner.
/// Runs the provided tests, reporting progress and results to stdout.
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
    let output = match term::stdout() {
        None => OutputLocation::Raw(io::stdout()),
        Some(t) => OutputLocation::Pretty(t),
    };

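    // Width of the longest test name among tests whose output is padded;
    // formatters use it to align result columns.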
    let max_name_len = tests
        .iter()
        .max_by_key(|t| len_if_padded(t))
        .map(|t| t.desc.name.as_slice().len())
        .unwrap_or(0);

    let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;

    let mut out: Box<dyn OutputFormatter> = match opts.format {
        OutputFormat::Pretty => Box::new(PrettyFormatter::new(
            output,
            opts.use_color(),
            max_name_len,
            is_multithreaded,
            opts.time_options,
        )),
        OutputFormat::Terse => {
            Box::new(TerseFormatter::new(output, opts.use_color(), max_name_len, is_multithreaded))
        }
        OutputFormat::Json => Box::new(JsonFormatter::new(output)),
        OutputFormat::Junit => Box::new(JunitFormatter::new(output)),
    };
    let mut st = ConsoleTestState::new(opts)?;

    // Prevent the usage of `Instant` in some cases:
    // - It's currently not supported for wasm targets without Emscripten or WASI.
    // - It's currently not supported for zkvm targets.
    let is_instant_unsupported =
        (cfg!(target_family = "wasm") && cfg!(target_os = "unknown")) || cfg!(target_os = "zkvm");

    let start_time = (!is_instant_unsupported).then(Instant::now);
    run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?;
    st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed()));

    assert!(opts.fail_fast || st.current_test_count() == st.total);

    out.write_run_finish(&st)
}

// Returns the test name length if the test's output is padded on the right, or 0 otherwise.
fn len_if_padded(t: &TestDescAndFn) -> usize {
    match t.testfn.padding() {
        NamePadding::PadNone => 0,
        NamePadding::PadOnRight => t.desc.name.as_slice().len(),
    }
}