1//! Support code for rustc's built in unit-test and micro-benchmarking
2//! framework.
3//!
4//! Almost all user code will only be interested in `Bencher` and
5//! `black_box`. All other interactions (such as writing tests and
6//! benchmarks themselves) should be done via the `#[test]` and
7//! `#[bench]` attributes.
8//!
9//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
10//! details.
11
12// Currently, not much of this is meant for users. It is intended to
13// support the simplest interface possible for representing and
14// running tests while providing a base that other test frameworks may
15// build off of.
16
17#![unstable(feature = "test", issue = "50297")]
18#![doc(test(attr(deny(warnings))))]
19#![doc(rust_logo)]
20#![feature(rustdoc_internals)]
21#![feature(internal_output_capture)]
22#![feature(staged_api)]
23#![feature(process_exitcode_internals)]
24#![feature(panic_can_unwind)]
25#![feature(test)]
26#![allow(internal_features)]
27
28// Public reexports
29pub use self::bench::{black_box, Bencher};
30pub use self::console::run_tests_console;
31pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
32pub use self::types::TestName::*;
33pub use self::types::*;
34pub use self::ColorConfig::*;
35pub use cli::TestOpts;
36
// Module to be used by rustc to compile tests in libtest
pub mod test {
    // Flat re-exports of every item the compiler-generated test harness
    // references through `test::...` paths.
    pub use crate::{
        assert_test_result,
        bench::Bencher,
        cli::{parse_opts, TestOpts},
        filter_tests,
        helpers::metrics::{Metric, MetricMap},
        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
        run_test, test_main, test_main_static,
        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
        time::{TestExecTime, TestTimeOptions},
        types::{
            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
            TestDescAndFn, TestId, TestName, TestType,
        },
    };
}
55
56use std::{
57 collections::VecDeque,
58 env, io,
59 io::prelude::Write,
60 mem::ManuallyDrop,
61 panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
62 process::{self, Command, Termination},
63 sync::mpsc::{channel, Sender},
64 sync::{Arc, Mutex},
65 thread,
66 time::{Duration, Instant},
67};
68
69pub mod bench;
70mod cli;
71mod console;
72mod event;
73mod formatters;
74mod helpers;
75mod options;
76pub mod stats;
77mod term;
78mod test_result;
79mod time;
80mod types;
81
82#[cfg(test)]
83mod tests;
84
85use core::any::Any;
86use event::{CompletedTest, TestEvent};
87use helpers::concurrency::get_concurrency;
88use helpers::exit_code::get_exit_code;
89use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
90use options::RunStrategy;
91use test_result::*;
92use time::TestExecTime;
93
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

// Env var set on the spawned subprocess (panic=abort mode) naming the single
// test it should run; read and removed in `test_main_static_abort`.
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// Env var telling the spawned subprocess to keep benchmarks as benchmarks
// instead of converting them to tests; set in `spawn_test_subprocess`.
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";
99
100// The default console test runner. It accepts the command line
101// arguments and a vector of test_descs.
102pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
103 let mut opts = match cli::parse_opts(args) {
104 Some(Ok(o)) => o,
105 Some(Err(msg)) => {
106 eprintln!("error: {msg}");
107 process::exit(ERROR_EXIT_CODE);
108 }
109 None => return,
110 };
111 if let Some(options) = options {
112 opts.options = options;
113 }
114 if opts.list {
115 if let Err(e) = console::list_tests_console(&opts, tests) {
116 eprintln!("error: io error when listing tests: {e:?}");
117 process::exit(ERROR_EXIT_CODE);
118 }
119 } else {
120 if !opts.nocapture {
121 // If we encounter a non-unwinding panic, flush any captured output from the current test,
122 // and stop capturing output to ensure that the non-unwinding panic message is visible.
123 // We also acquire the locks for both output streams to prevent output from other threads
124 // from interleaving with the panic message or appearing after it.
125 let builtin_panic_hook = panic::take_hook();
126 let hook = Box::new({
127 move |info: &'_ PanicInfo<'_>| {
128 if !info.can_unwind() {
129 std::mem::forget(std::io::stderr().lock());
130 let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
131 if let Some(captured) = io::set_output_capture(None) {
132 if let Ok(data) = captured.lock() {
133 let _ = stdout.write_all(&data);
134 let _ = stdout.flush();
135 }
136 }
137 }
138 builtin_panic_hook(info);
139 }
140 });
141 panic::set_hook(hook);
142 }
143 match console::run_tests_console(&opts, tests) {
144 Ok(true) => {}
145 Ok(false) => process::exit(ERROR_EXIT_CODE),
146 Err(e) => {
147 eprintln!("error: io error when listing tests: {e:?}");
148 process::exit(ERROR_EXIT_CODE);
149 }
150 }
151 }
152}
153
154/// A variant optimized for invocation with a static test vector.
155/// This will panic (intentionally) when fed any dynamic tests.
156///
157/// This is the entry point for the main function generated by `rustc --test`
158/// when panic=unwind.
159pub fn test_main_static(tests: &[&TestDescAndFn]) {
160 let args: [{unknown}] = env::args().collect::<Vec<_>>();
161 let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
162 test_main(&args, owned_tests, options:None)
163}
164
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here. run_test
    // will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        // Remove the marker so any process this test spawns doesn't mistake
        // itself for a secondary test runner.
        env::remove_var(SECONDARY_TEST_INVOKER_VAR);

        // Convert benchmarks to tests if we're not benchmarking.
        let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>();
        if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() {
            env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR);
        } else {
            tests = convert_benchmarks_to_tests(tests);
        };

        // Locate the single test the primary process asked us to run.
        let test = tests
            .into_iter()
            .find(|test| test.desc.name.as_slice() == name)
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
        let TestDescAndFn { desc, testfn } = test;
        match testfn.into_runnable() {
            Runnable::Test(runnable_test) => {
                if runnable_test.is_dynamic() {
                    panic!("only static tests are supported");
                }
                // Diverges: exits the process with the test's result code.
                run_test_in_spawned_subprocess(desc, runnable_test);
            }
            Runnable::Bench(_) => {
                panic!("benchmarks should not be executed into child processes")
            }
        }
    }

    // Primary-process path: run the full suite in panic=abort mode.
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
209
210/// Clones static values for putting into a dynamic vector, which test_main()
211/// needs to hand out ownership of tests to parallel test runners.
212///
213/// This will panic when fed any dynamic tests, because they cannot be cloned.
214fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
215 match test.testfn {
216 StaticTestFn(f: fn() -> {unknown}) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
217 StaticBenchFn(f: fn(&mut Bencher) -> {unknown}) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
218 _ => panic!("non-static tests passed to test::test_main_static"),
219 }
220}
221
222/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
223/// considered a failure. By default, invokes `report()` and checks for a `0`
224/// result.
225pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
226 let code = result.report().to_i32();
227 if code == 0 {
228 Ok(())
229 } else {
230 Err(format!(
231 "the test returned a termination value with a non-zero status code \
232 ({code}) which indicates a failure"
233 ))
234 }
235}
236
237struct FilteredTests {
238 tests: Vec<(TestId, TestDescAndFn)>,
239 benches: Vec<(TestId, TestDescAndFn)>,
240 next_id: usize,
241}
242
243impl FilteredTests {
244 fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
245 let test: TestDescAndFn = TestDescAndFn { desc, testfn };
246 self.benches.push((TestId(self.next_id), test));
247 self.next_id += 1;
248 }
249 fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
250 let test: TestDescAndFn = TestDescAndFn { desc, testfn };
251 self.tests.push((TestId(self.next_id), test));
252 self.next_id += 1;
253 }
254 fn total_len(&self) -> usize {
255 self.tests.len() + self.benches.len()
256 }
257}
258
/// Runs the filtered tests, reporting progress (filter counts, waits,
/// timeouts, results) through the `notify_about_test_event` callback.
///
/// Tests run on up to the configured number of worker threads; benchmarks
/// (when `opts.bench_benchmarks` is set) always run serially at the end.
/// Returns `Err` only when the event callback itself returns an I/O error.
pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        // Joins the worker thread (if any); if the thread panicked *after*
        // already reporting TrOk, downgrade the result to a failure.
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    // One entry per started test; `timeout` is the deadline after which the
    // test is reported as timed out (it keeps running regardless).
    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    // Partition into tests vs. benchmarks, padding names for aligned output.
    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    // Pops every queue entry whose deadline has passed. Entries whose test has
    // already completed (no longer in `running_tests`) are silently dropped.
    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    // Time remaining until the earliest queued deadline, saturating at zero;
    // `None` when no test is being timed.
    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        // Single-threaded path: run each test to completion before the next.
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        // Concurrent path: keep up to `concurrency` tests in flight.
        while pending > 0 || !remaining.is_empty() {
            // Top up the pool of running tests.
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?; //here no pad
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            // Wait for one result, emitting timeout events as deadlines pass.
            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}
474
475pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
476 let mut filtered = tests;
477 let matches_filter = |test: &TestDescAndFn, filter: &str| {
478 let test_name = test.desc.name.as_slice();
479
480 match opts.filter_exact {
481 true => test_name == filter,
482 false => test_name.contains(filter),
483 }
484 };
485
486 // Remove tests that don't match the test filter
487 if !opts.filters.is_empty() {
488 filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
489 }
490
491 // Skip tests that match any of the skip filters
492 if !opts.skip.is_empty() {
493 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
494 }
495
496 // Excludes #[should_panic] tests
497 if opts.exclude_should_panic {
498 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
499 }
500
501 // maybe unignore tests
502 match opts.run_ignored {
503 RunIgnored::Yes => {
504 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
505 }
506 RunIgnored::Only => {
507 filtered.retain(|test| test.desc.ignore);
508 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
509 }
510 RunIgnored::No => {}
511 }
512
513 filtered
514}
515
516pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
517 // convert benchmarks to tests, if we're not benchmarking them
518 tests
519 .into_iter()
520 .map(|x| {
521 let testfn: TestFn = match x.testfn {
522 DynBenchFn(benchfn) => DynBenchAsTestFn(benchfn),
523 StaticBenchFn(benchfn: fn(&mut Bencher) -> {unknown}) => StaticBenchAsTestFn(benchfn),
524 f: TestFn => f,
525 };
526 TestDescAndFn { desc: x.desc, testfn }
527 })
528 .collect()
529}
530
/// Runs a single test or benchmark according to `strategy`, sending its
/// `CompletedTest` over `monitor_ch` when it finishes.
///
/// Returns the `JoinHandle` of the spawned worker thread, or `None` when the
/// test was ignored, ran synchronously, or was a benchmark.
pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    id: TestId,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
) -> Option<thread::JoinHandle<()>> {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
        && !cfg!(target_os = "emscripten");

    // Ignored tests are reported immediately without running anything.
    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return None;
    }

    match testfn.into_runnable() {
        Runnable::Test(runnable_test) => {
            if runnable_test.is_dynamic() {
                // Dynamic test fns cannot be re-invoked across a process boundary.
                match strategy {
                    RunStrategy::InProcess => (),
                    _ => panic!("Cannot run dynamic test fn out-of-process"),
                };
            }

            let name = desc.name.clone();
            let nocapture = opts.nocapture;
            let time_options = opts.time_options;
            let bench_benchmarks = opts.bench_benchmarks;

            // Closure that actually executes the test under the chosen strategy;
            // it owns everything it needs so it can be moved to another thread.
            let runtest = move || match strategy {
                RunStrategy::InProcess => run_test_in_process(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    runnable_test,
                    monitor_ch,
                    time_options,
                ),
                RunStrategy::SpawnPrimary => spawn_test_subprocess(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    monitor_ch,
                    time_options,
                    bench_benchmarks,
                ),
            };

            // If the platform is single-threaded we're just going to run
            // the test synchronously, regardless of the concurrency
            // level.
            let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_family = "wasm");
            if supports_threads {
                let cfg = thread::Builder::new().name(name.as_slice().to_owned());
                // Stash the closure in Arc<Mutex<Option<..>>> so it can be
                // reclaimed and run on this thread if spawning fails below.
                let mut runtest = Arc::new(Mutex::new(Some(runtest)));
                let runtest2 = runtest.clone();
                match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
                    Ok(handle) => Some(handle),
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // `ErrorKind::WouldBlock` means hitting the thread limit on some
                        // platforms, so run the test synchronously here instead.
                        Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
                        None
                    }
                    Err(e) => panic!("failed to spawn thread to run test: {e}"),
                }
            } else {
                runtest();
                None
            }
        }
        Runnable::Bench(runnable_bench) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture);
            None
        }
    }
}
617
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    // Fixed: the call contained leaked IDE parameter-name-hint text
    // (`black_box(dummy:result)`), which is not valid Rust.
    let result = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}
626
/// Runs `runnable_test` on the current thread, capturing its standard output
/// (unless `nocapture`) and timing it (when `report_time`), then sends a
/// `CompletedTest` message over `monitor_ch`.
fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    // Catch unwinds so a panicking test is reported as a failure, not a crash.
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    // Always restore normal output, even if the test panicked.
    io::set_output_capture(None);

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
        Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
    };
    // Recover captured output even when a panicking test poisoned the lock.
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}
660
/// Flattens a `catch_unwind` result: a closure that ran but returned `Err(e)`
/// is folded into the same channel as a closure that panicked, by boxing `e`
/// as a panic-style payload.
fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        // The closure completed; box any error it returned as a payload.
        Ok(inner) => inner.map_err(|err| Box::new(err) as Box<dyn Any + Send>),
        // The closure panicked; forward the payload unchanged.
        Err(payload) => Err(payload),
    }
}
673
/// Re-invokes the current executable as a subprocess that runs exactly one
/// test (selected via `SECONDARY_TEST_INVOKER_VAR`), then derives the result
/// from the child's exit code and combined output and reports it over
/// `monitor_ch`.
fn spawn_test_subprocess(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
    bench_benchmarks: bool,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        // NOTE(review): assumes argv[0] is the test binary itself — the usual
        // case for harnesses generated by `rustc --test`.
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if bench_benchmarks {
            command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1");
        }
        if nocapture {
            // Let the child write directly to our stdout/stderr.
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        // Concatenate the child's stdout and stderr, separated by a delimiter.
        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        // Map the exit status back to a TestResult; a status we cannot decode
        // is reported as a failure with the error appended to the output.
        let result = match (|| -> Result<TestResult, String> {
            let exit_code = get_exit_code(status)?;
            Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time))
        })() {
            Ok(r) => r,
            Err(e) => {
                write!(&mut test_output, "Unexpected error: {e}").unwrap();
                TrFailed
            }
        };

        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}
732
/// Entry point for the child process spawned by `spawn_test_subprocess`: runs
/// the single test and exits the process with `TR_OK` or `TR_FAILED`. A panic
/// hook converts panics into the appropriate exit code, since in panic=abort
/// mode an unhandled panic would otherwise abort without reporting.
fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! {
    let builtin_panic_hook = panic::take_hook();
    // Shared closure that computes the test result (from a panic payload or a
    // clean return) and exits the process accordingly.
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
            None => calc_result(&desc, Ok(()), &None, &None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{msg}");
        }

        // Keep the default hook's output (backtrace etc.) for real panics.
        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::exit(test_result::TR_FAILED);
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(info))));
    // A test returning Err is turned into a panic so the hook above records
    // it as a failure.
    if let Err(message) = runnable_test.run() {
        panic!("{}", message);
    }
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}
765