//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
//! details.
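//!
//! A minimal sketch of that user-facing surface (illustrative only; the
//! `#[bench]` attribute additionally requires the unstable `test` feature
//! in the consuming crate):
//!
//! ```ignore
//! use test::{Bencher, black_box};
//!
//! #[test]
//! fn adds_correctly() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_sum(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from discarding the measured work.
//!     b.iter(|| black_box((0..1_000u64).sum::<u64>()));
//! }
//! ```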

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![feature(file_buffered)]
#![feature(internal_output_capture)]
#![feature(io_const_error)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
#![feature(panic_can_unwind)]
#![feature(test)]
#![feature(thread_spawn_hook)]
#![allow(internal_features)]
#![warn(rustdoc::unescaped_backticks)]
#![warn(unreachable_pub)]

pub use cli::TestOpts;

pub use self::ColorConfig::*;
pub use self::bench::{Bencher, black_box};
pub use self::console::run_tests_console;
pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
pub use self::types::TestName::*;
pub use self::types::*;

// Module to be used by rustc to compile tests in libtest
pub mod test {
    pub use crate::bench::Bencher;
    pub use crate::cli::{TestOpts, parse_opts};
    pub use crate::helpers::metrics::{Metric, MetricMap};
    pub use crate::options::{Options, RunIgnored, RunStrategy, ShouldPanic};
    pub use crate::test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
    pub use crate::time::{TestExecTime, TestTimeOptions};
    pub use crate::types::{
        DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
        TestDescAndFn, TestId, TestName, TestType,
    };
    pub use crate::{assert_test_result, filter_tests, run_test, test_main, test_main_static};
}

use std::collections::VecDeque;
use std::io::prelude::Write;
use std::mem::ManuallyDrop;
use std::panic::{self, AssertUnwindSafe, PanicHookInfo, catch_unwind};
use std::process::{self, Command, Termination};
use std::sync::mpsc::{Sender, channel};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use std::{env, io, thread};

pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;

#[cfg(test)]
mod tests;

use core::any::Any;

use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
use options::RunStrategy;
use test_result::*;
use time::TestExecTime;

/// Process exit code to be used to indicate test failures.
pub const ERROR_EXIT_CODE: i32 = 101;

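// Environment variables used to communicate with a re-spawned copy of the test
// binary when tests run in their own subprocesses (panic=abort mode): the
// first carries the name of the single test the child should run, the second
// tells the child to keep benchmarks as benchmarks instead of converting them
// to tests.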
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
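//
// An illustrative sketch of direct use by a custom harness (the
// `collect_tests` helper here is hypothetical, not part of this crate):
//
//     let args: Vec<String> = std::env::args().collect();
//     let tests: Vec<TestDescAndFn> = collect_tests();
//     test_main(&args, tests, None);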
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    test_main_with_exit_callback(args, tests, options, || {})
}

pub fn test_main_with_exit_callback<F: FnOnce()>(
    args: &[String],
    tests: Vec<TestDescAndFn>,
    options: Option<Options>,
    exit_callback: F,
) {
    let mut opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {msg}");
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    if let Some(options) = options {
        opts.options = options;
    }
    if opts.list {
        if let Err(e) = console::list_tests_console(&opts, tests) {
            eprintln!("error: io error when listing tests: {e:?}");
            process::exit(ERROR_EXIT_CODE);
        }
    } else {
        if !opts.nocapture {
            // If we encounter a non-unwinding panic, flush any captured output from the current test,
            // and stop capturing output to ensure that the non-unwinding panic message is visible.
            // We also acquire the locks for both output streams to prevent output from other threads
            // from interleaving with the panic message or appearing after it.
            let builtin_panic_hook = panic::take_hook();
            let hook = Box::new({
                move |info: &'_ PanicHookInfo<'_>| {
                    if !info.can_unwind() {
                        std::mem::forget(std::io::stderr().lock());
                        let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
                        if let Some(captured) = io::set_output_capture(None) {
                            if let Ok(data) = captured.lock() {
                                let _ = stdout.write_all(&data);
                                let _ = stdout.flush();
                            }
                        }
                    }
                    builtin_panic_hook(info);
                }
            });
            panic::set_hook(hook);
            // Use a thread spawning hook to make new threads inherit output capturing.
            std::thread::add_spawn_hook(|_| {
                // Get and clone the output capture of the current thread.
                let output_capture = io::set_output_capture(None);
                io::set_output_capture(output_capture.clone());
                // Set the output capture of the new thread.
                || {
                    io::set_output_capture(output_capture);
                }
            });
        }
        let res = console::run_tests_console(&opts, tests);
        // Prevent Valgrind from reporting reachable blocks in users' unit tests.
        drop(panic::take_hook());
        exit_callback();
        match res {
            Ok(true) => {}
            Ok(false) => process::exit(ERROR_EXIT_CODE),
            Err(e) => {
                eprintln!("error: io error when running tests: {e:?}");
                process::exit(ERROR_EXIT_CODE);
            }
        }
    }
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
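///
/// An illustrative sketch of what that generated entry point looks like (the
/// test item names here are made up):
///
/// ```ignore
/// fn main() {
///     test::test_main_static(&[&TEST_FOO, &TEST_BAR]);
/// }
/// ```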
pub fn test_main_static(tests: &[&TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, None)
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here;
    // run_test_in_spawned_subprocess will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        unsafe {
            env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        }

        // Convert benchmarks to tests if we're not benchmarking.
        let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>();
        if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() {
            unsafe {
                env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR);
            }
        } else {
            tests = convert_benchmarks_to_tests(tests);
        };

        let test = tests
            .into_iter()
            .find(|test| test.desc.name.as_slice() == name)
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
        let TestDescAndFn { desc, testfn } = test;
        match testfn.into_runnable() {
            Runnable::Test(runnable_test) => {
                if runnable_test.is_dynamic() {
                    panic!("only static tests are supported");
                }
                run_test_in_spawned_subprocess(desc, runnable_test);
            }
            Runnable::Bench(_) => {
                panic!("benchmarks should not be executed in child processes")
            }
        }
    }

    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}

/// Clones static values for putting into a dynamic vector, which test_main()
/// needs to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
    match test.testfn {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static"),
    }
}

/// Public API used by rustdoc to display the `total` and `compilation` times in the expected
/// format.
pub fn print_merged_doctests_times(args: &[String], total_time: f64, compilation_time: f64) {
    let opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {msg}");
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    let mut formatter = console::get_formatter(&opts, 0);
    formatter.write_merged_doctests_times(total_time, compilation_time).unwrap();
}

/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
/// considered a failure. By default, invokes `report()` and checks for a `0`
/// result.
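///
/// An illustrative sketch of the expected behavior (not run as a doc test):
///
/// ```ignore
/// // `()` reports a zero exit code, so it is accepted.
/// assert!(assert_test_result(()).is_ok());
/// // A non-zero `ExitCode` is reported as a failure.
/// assert!(assert_test_result(std::process::ExitCode::from(1)).is_err());
/// ```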
pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
    let code = result.report().to_i32();
    if code == 0 {
        Ok(())
    } else {
        Err(format!(
            "the test returned a termination value with a non-zero status code \
             ({code}) which indicates a failure"
        ))
    }
}

struct FilteredTests {
    tests: Vec<(TestId, TestDescAndFn)>,
    benches: Vec<(TestId, TestDescAndFn)>,
    next_id: usize,
}

impl FilteredTests {
    fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.benches.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.tests.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn total_len(&self) -> usize {
        self.tests.len() + self.benches.len()
    }
}

pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?; //here no pad
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}

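/// Filters `tests` according to `opts`: name filters (exact or substring
/// match, depending on `filter_exact`), skip filters, optional exclusion of
/// `#[should_panic]` tests, and the `RunIgnored` handling.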
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        match opts.filter_exact {
            true => test_name == filter,
            false => test_name.contains(filter),
        }
    };

    // Remove tests that don't match the test filter
    if !opts.filters.is_empty() {
        filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
    }

    // Skip tests that match any of the skip filters
    if !opts.skip.is_empty() {
        filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
    }

    // Excludes #[should_panic] tests
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // maybe unignore tests
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            filtered.retain(|test| test.desc.ignore);
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    filtered
}

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                DynBenchFn(benchfn) => DynBenchAsTestFn(benchfn),
                StaticBenchFn(benchfn) => StaticBenchAsTestFn(benchfn),
                f => f,
            };
            TestDescAndFn { desc: x.desc, testfn }
        })
        .collect()
}

pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    id: TestId,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
) -> Option<thread::JoinHandle<()>> {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return None;
    }

    match testfn.into_runnable() {
        Runnable::Test(runnable_test) => {
            if runnable_test.is_dynamic() {
                match strategy {
                    RunStrategy::InProcess => (),
                    _ => panic!("Cannot run dynamic test fn out-of-process"),
                };
            }

            let name = desc.name.clone();
            let nocapture = opts.nocapture;
            let time_options = opts.time_options;
            let bench_benchmarks = opts.bench_benchmarks;

            let runtest = move || match strategy {
                RunStrategy::InProcess => run_test_in_process(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    runnable_test,
                    monitor_ch,
                    time_options,
                ),
                RunStrategy::SpawnPrimary => spawn_test_subprocess(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    monitor_ch,
                    time_options,
                    bench_benchmarks,
                ),
            };

            // If the platform is single-threaded we're just going to run
            // the test synchronously, regardless of the concurrency
            // level.
            let supports_threads = !cfg!(target_os = "emscripten")
                && !cfg!(target_family = "wasm")
                && !cfg!(target_os = "zkvm");
            if supports_threads {
                let cfg = thread::Builder::new().name(name.as_slice().to_owned());
                let mut runtest = Arc::new(Mutex::new(Some(runtest)));
                let runtest2 = runtest.clone();
                match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
                    Ok(handle) => Some(handle),
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // `ErrorKind::WouldBlock` means hitting the thread limit on some
                        // platforms, so run the test synchronously here instead.
                        Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
                        None
                    }
                    Err(e) => panic!("failed to spawn thread to run test: {e}"),
                }
            } else {
                runtest();
                None
            }
        }
        Runnable::Bench(runnable_bench) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture);
            None
        }
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    let result = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}

fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    io::set_output_capture(None);

    // Determine whether the test passed or failed, by comparing its panic
    // payload (if any) with its `ShouldPanic` value, and by checking for
    // fatal timeout.
    let test_result =
        calc_result(&desc, result.err().as_deref(), time_opts.as_ref(), exec_time.as_ref());
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}

fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        Ok(Err(e)) => Err(Box::new(e)),
        Ok(Ok(v)) => Ok(v),
        Err(e) => Err(e),
    }
}

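// Runs a single test in a child process (used for panic=abort): re-invokes the
// current executable with `SECONDARY_TEST_INVOKER_VAR` set to the test's name,
// captures the child's stdout/stderr, and converts its exit status into a
// test result sent over the monitor channel.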
fn spawn_test_subprocess(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
    bench_benchmarks: bool,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if bench_benchmarks {
            command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1");
        }
        if nocapture {
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result =
            get_result_from_exit_code(&desc, status, time_opts.as_ref(), exec_time.as_ref());
        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}

fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! {
    let builtin_panic_hook = panic::take_hook();
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicHookInfo<'_>>| {
        let test_result = calc_result(&desc, panic_info.map(|info| info.payload()), None, None);

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{msg}");
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::abort();
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(info))));
    if let Err(message) = runnable_test.run() {
        panic!("{}", message);
    }
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}