1//! Support code for rustc's built in unit-test and micro-benchmarking
2//! framework.
3//!
4//! Almost all user code will only be interested in `Bencher` and
5//! `black_box`. All other interactions (such as writing tests and
6//! benchmarks themselves) should be done via the `#[test]` and
7//! `#[bench]` attributes.
8//!
9//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
10//! details.
11
12// Currently, not much of this is meant for users. It is intended to
13// support the simplest interface possible for representing and
14// running tests while providing a base that other test frameworks may
15// build off of.
16
17#![unstable(feature = "test", issue = "50297")]
18#![doc(test(attr(deny(warnings))))]
19#![doc(rust_logo)]
20#![feature(rustdoc_internals)]
21#![feature(internal_output_capture)]
22#![feature(staged_api)]
23#![feature(process_exitcode_internals)]
24#![feature(panic_can_unwind)]
25#![feature(test)]
26#![allow(internal_features)]
27
28// Public reexports
29pub use self::bench::{black_box, Bencher};
30pub use self::console::run_tests_console;
31pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
32pub use self::types::TestName::*;
33pub use self::types::*;
34pub use self::ColorConfig::*;
35pub use cli::TestOpts;
36
// Module to be used by rustc to compile tests in libtest
pub mod test {
    // Flat re-exports of every item the compiler-generated test harness
    // refers to via `test::...` paths.
    pub use crate::{
        assert_test_result,
        bench::Bencher,
        cli::{parse_opts, TestOpts},
        filter_tests,
        helpers::metrics::{Metric, MetricMap},
        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
        run_test, test_main, test_main_static,
        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
        time::{TestExecTime, TestTimeOptions},
        types::{
            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
            TestDescAndFn, TestId, TestName, TestType,
        },
    };
}
55
56use std::{
57 collections::VecDeque,
58 env, io,
59 io::prelude::Write,
60 mem::ManuallyDrop,
61 panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
62 process::{self, Command, Termination},
63 sync::mpsc::{channel, Sender},
64 sync::{Arc, Mutex},
65 thread,
66 time::{Duration, Instant},
67};
68
69pub mod bench;
70mod cli;
71mod console;
72mod event;
73mod formatters;
74mod helpers;
75mod options;
76pub mod stats;
77mod term;
78mod test_result;
79mod time;
80mod types;
81
82#[cfg(test)]
83mod tests;
84
85use core::any::Any;
86use event::{CompletedTest, TestEvent};
87use helpers::concurrency::get_concurrency;
88use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
89use options::RunStrategy;
90use test_result::*;
91use time::TestExecTime;
92
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

// Env var used to tell a re-executed child process (panic=abort mode) which
// single test to run; set by `spawn_test_subprocess`, consumed by
// `test_main_static_abort`.
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// Env var flag telling the child process that benchmarks should be run as
// benchmarks rather than converted to tests.
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";
98
99// The default console test runner. It accepts the command line
100// arguments and a vector of test_descs.
101pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
102 let mut opts = match cli::parse_opts(args) {
103 Some(Ok(o)) => o,
104 Some(Err(msg)) => {
105 eprintln!("error: {msg}");
106 process::exit(ERROR_EXIT_CODE);
107 }
108 None => return,
109 };
110 if let Some(options) = options {
111 opts.options = options;
112 }
113 if opts.list {
114 if let Err(e) = console::list_tests_console(&opts, tests) {
115 eprintln!("error: io error when listing tests: {e:?}");
116 process::exit(ERROR_EXIT_CODE);
117 }
118 } else {
119 if !opts.nocapture {
120 // If we encounter a non-unwinding panic, flush any captured output from the current test,
121 // and stop capturing output to ensure that the non-unwinding panic message is visible.
122 // We also acquire the locks for both output streams to prevent output from other threads
123 // from interleaving with the panic message or appearing after it.
124 let builtin_panic_hook = panic::take_hook();
125 let hook = Box::new({
126 move |info: &'_ PanicInfo<'_>| {
127 if !info.can_unwind() {
128 std::mem::forget(std::io::stderr().lock());
129 let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
130 if let Some(captured) = io::set_output_capture(None) {
131 if let Ok(data) = captured.lock() {
132 let _ = stdout.write_all(&data);
133 let _ = stdout.flush();
134 }
135 }
136 }
137 builtin_panic_hook(info);
138 }
139 });
140 panic::set_hook(hook);
141 }
142 let res = console::run_tests_console(&opts, tests);
143 // Prevent Valgrind from reporting reachable blocks in users' unit tests.
144 drop(panic::take_hook());
145 match res {
146 Ok(true) => {}
147 Ok(false) => process::exit(ERROR_EXIT_CODE),
148 Err(e) => {
149 eprintln!("error: io error when listing tests: {e:?}");
150 process::exit(ERROR_EXIT_CODE);
151 }
152 }
153 }
154}
155
156/// A variant optimized for invocation with a static test vector.
157/// This will panic (intentionally) when fed any dynamic tests.
158///
159/// This is the entry point for the main function generated by `rustc --test`
160/// when panic=unwind.
161pub fn test_main_static(tests: &[&TestDescAndFn]) {
162 let args: [{unknown}] = env::args().collect::<Vec<_>>();
163 let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
164 test_main(&args, owned_tests, options:None)
165}
166
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here. run_test
    // will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        // Remove the marker so the test body itself doesn't observe it.
        env::remove_var(SECONDARY_TEST_INVOKER_VAR);

        // Convert benchmarks to tests if we're not benchmarking.
        let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>();
        if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() {
            env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR);
        } else {
            tests = convert_benchmarks_to_tests(tests);
        };

        // Locate the single test the parent process asked us to run, by name.
        let test = tests
            .into_iter()
            .find(|test| test.desc.name.as_slice() == name)
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
        let TestDescAndFn { desc, testfn } = test;
        match testfn.into_runnable() {
            Runnable::Test(runnable_test) => {
                if runnable_test.is_dynamic() {
                    panic!("only static tests are supported");
                }
                // Diverges: exits (or aborts) the process with the outcome.
                run_test_in_spawned_subprocess(desc, runnable_test);
            }
            Runnable::Bench(_) => {
                panic!("benchmarks should not be executed into child processes")
            }
        }
    }

    // Primary process: run the whole suite with panic=abort options enabled.
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
211
212/// Clones static values for putting into a dynamic vector, which test_main()
213/// needs to hand out ownership of tests to parallel test runners.
214///
215/// This will panic when fed any dynamic tests, because they cannot be cloned.
216fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
217 match test.testfn {
218 StaticTestFn(f: fn() -> {unknown}) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
219 StaticBenchFn(f: fn(&mut Bencher) -> {unknown}) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
220 _ => panic!("non-static tests passed to test::test_main_static"),
221 }
222}
223
224/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
225/// considered a failure. By default, invokes `report()` and checks for a `0`
226/// result.
227pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
228 let code = result.report().to_i32();
229 if code == 0 {
230 Ok(())
231 } else {
232 Err(format!(
233 "the test returned a termination value with a non-zero status code \
234 ({code}) which indicates a failure"
235 ))
236 }
237}
238
// The surviving tests and benchmarks after filtering, with `TestId`s assigned
// from a single shared counter so ids are unique across both collections.
struct FilteredTests {
    // Non-benchmark tests paired with their assigned ids.
    tests: Vec<(TestId, TestDescAndFn)>,
    // Benchmarks paired with their assigned ids.
    benches: Vec<(TestId, TestDescAndFn)>,
    // Next id to hand out.
    next_id: usize,
}
244
245impl FilteredTests {
246 fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
247 let test: TestDescAndFn = TestDescAndFn { desc, testfn };
248 self.benches.push((TestId(self.next_id), test));
249 self.next_id += 1;
250 }
251 fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
252 let test: TestDescAndFn = TestDescAndFn { desc, testfn };
253 self.tests.push((TestId(self.next_id), test));
254 self.next_id += 1;
255 }
256 fn total_len(&self) -> usize {
257 self.tests.len() + self.benches.len()
258 }
259}
260
/// Runs the filtered tests (and then benchmarks, if `opts.bench_benchmarks`),
/// reporting progress through the `notify_about_test_event` callback.
///
/// Tests run serially when the effective concurrency is 1, otherwise up to
/// `concurrency` tests run on worker threads at once. Returns `Err` only if
/// the callback itself returns an I/O error.
pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    // Handle to a test's worker thread; `None` when the test ran in-place.
    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        // Joins the worker thread. If the thread panicked after the test had
        // already reported success, downgrade the result to a failure.
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    // Deadline bookkeeping for one in-flight test.
    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    // Split the survivors into tests vs. benchmarks, assigning ids and
    // applying name padding derived from the test kind.
    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    // Pops expired entries off the front of the timeout queue and returns the
    // descriptors of tests that are still running past their deadline.
    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            // Entries for tests that already completed are stale; skip them.
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    // Time remaining until the earliest queued deadline, if any
    // (zero if that deadline has already passed).
    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        // Serial path: run each test to completion before starting the next.
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        // Concurrent path: keep up to `concurrency` tests in flight at once.
        while pending > 0 || !remaining.is_empty() {
            // Top up the pool of running tests.
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?; //here no pad
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            // Wait for one result, emitting timeout events for any running
            // test whose deadline expires while we wait.
            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    // No deadlines queued: block until the next result.
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}
476
477pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
478 let mut filtered = tests;
479 let matches_filter = |test: &TestDescAndFn, filter: &str| {
480 let test_name = test.desc.name.as_slice();
481
482 match opts.filter_exact {
483 true => test_name == filter,
484 false => test_name.contains(filter),
485 }
486 };
487
488 // Remove tests that don't match the test filter
489 if !opts.filters.is_empty() {
490 filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
491 }
492
493 // Skip tests that match any of the skip filters
494 if !opts.skip.is_empty() {
495 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
496 }
497
498 // Excludes #[should_panic] tests
499 if opts.exclude_should_panic {
500 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
501 }
502
503 // maybe unignore tests
504 match opts.run_ignored {
505 RunIgnored::Yes => {
506 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
507 }
508 RunIgnored::Only => {
509 filtered.retain(|test| test.desc.ignore);
510 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
511 }
512 RunIgnored::No => {}
513 }
514
515 filtered
516}
517
518pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
519 // convert benchmarks to tests, if we're not benchmarking them
520 tests
521 .into_iter()
522 .map(|x| {
523 let testfn: TestFn = match x.testfn {
524 DynBenchFn(benchfn) => DynBenchAsTestFn(benchfn),
525 StaticBenchFn(benchfn: fn(&mut Bencher) -> {unknown}) => StaticBenchAsTestFn(benchfn),
526 f: TestFn => f,
527 };
528 TestDescAndFn { desc: x.desc, testfn }
529 })
530 .collect()
531}
532
/// Starts running a single test or benchmark, delivering its outcome as a
/// `CompletedTest` message on `monitor_ch`.
///
/// Returns a `JoinHandle` when the test was spawned onto its own thread, or
/// `None` when it was ignored, ran synchronously on this thread, or was a
/// benchmark (benchmarks always run in-process, below).
pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    id: TestId,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
) -> Option<thread::JoinHandle<()>> {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        // Report the test as ignored without running it.
        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return None;
    }

    match testfn.into_runnable() {
        Runnable::Test(runnable_test) => {
            if runnable_test.is_dynamic() {
                // Dynamic test closures cannot be re-created in a subprocess.
                match strategy {
                    RunStrategy::InProcess => (),
                    _ => panic!("Cannot run dynamic test fn out-of-process"),
                };
            }

            // Copy out everything the closure below needs, so it can be
            // moved onto another thread.
            let name = desc.name.clone();
            let nocapture = opts.nocapture;
            let time_options = opts.time_options;
            let bench_benchmarks = opts.bench_benchmarks;

            let runtest = move || match strategy {
                RunStrategy::InProcess => run_test_in_process(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    runnable_test,
                    monitor_ch,
                    time_options,
                ),
                RunStrategy::SpawnPrimary => spawn_test_subprocess(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    monitor_ch,
                    time_options,
                    bench_benchmarks,
                ),
            };

            // If the platform is single-threaded we're just going to run
            // the test synchronously, regardless of the concurrency
            // level.
            let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_family = "wasm");
            if supports_threads {
                let cfg = thread::Builder::new().name(name.as_slice().to_owned());
                // `runtest` is wrapped in Arc<Mutex<Option<_>>> so the
                // fallback path below can reclaim it when spawning fails.
                let mut runtest = Arc::new(Mutex::new(Some(runtest)));
                let runtest2 = runtest.clone();
                match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
                    Ok(handle) => Some(handle),
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // `ErrorKind::WouldBlock` means hitting the thread limit on some
                        // platforms, so run the test synchronously here instead.
                        Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
                        None
                    }
                    Err(e) => panic!("failed to spawn thread to run test: {e}"),
                }
            } else {
                runtest();
                None
            }
        }
        Runnable::Bench(runnable_bench) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture);
            None
        }
    }
}
619
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
///
/// Calls `f` and returns its result; the `#[inline(never)]` plus `black_box`
/// keep this frame present in backtraces so the printer can truncate at it.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    // Fixed: the call contained decompiler residue (`black_box(dummy:result)`,
    // a labeled argument) that is not valid Rust.
    let result: T = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}
628
/// Runs `runnable_test` on the current thread, capturing its output and any
/// panic, and sends the resulting `CompletedTest` over `monitor_ch`.
fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    // Only measure execution time when the caller asked for it.
    let start = report_time.then(Instant::now);
    // Catch panics so a failing test doesn't take down the runner; fold the
    // test's own Err return into the same error channel as a panic payload.
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    io::set_output_capture(None);

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
        Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
    };
    // Recover the captured output even if a panicking writer poisoned the lock.
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}
662
/// Flattens a `catch_unwind`-style nested result: a panic payload passes
/// through unchanged, while an inner `Err(e)` is boxed into the same
/// `Box<dyn Any + Send>` error channel as a panic payload would be.
fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        // The outer Err is already a panic payload; forward it as-is.
        Err(payload) => Err(payload),
        // Box the inner error so both failure paths share one error type.
        Ok(inner) => inner.map_err(|e| Box::new(e) as Box<dyn Any + Send>),
    }
}
675
/// Runs one test by re-executing the current binary as a child process,
/// steering the child to the right test via `SECONDARY_TEST_INVOKER_VAR`,
/// then reports the child's outcome over `monitor_ch`.
fn spawn_test_subprocess(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
    bench_benchmarks: bool,
) {
    let (result, test_output, exec_time) = (|| {
        // NOTE(review): assumes argv[0] is a path that can be re-executed —
        // confirm this holds on all supported platforms.
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if bench_benchmarks {
            command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1");
        }
        if nocapture {
            // Let the child write straight to our stdio instead of piping.
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                // Spawn failure counts as a test failure, with the error
                // text as the test's output.
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        // Combine the child's stdout and stderr into one buffer, separated
        // by a delimiter line.
        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        // The child communicates pass/fail through its exit status.
        let result = get_result_from_exit_code(&desc, status, &time_opts, &exec_time);
        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}
724
/// Entry point used inside a spawned test subprocess: runs the single test
/// and terminates the process — `exit(TR_OK)` on success, `abort()` on
/// failure — so the parent can read the outcome from the exit status.
///
/// A panic hook routes panics through `record_result`, ensuring even a
/// panicking test reports its outcome before the process dies; this function
/// therefore never returns.
fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! {
    let builtin_panic_hook = panic::take_hook();
    // Shared closure used both from the panic hook (Some(info)) and from the
    // normal-completion path below (None).
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
            None => calc_result(&desc, Ok(()), &None, &None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{msg}");
        }

        // Still run the default hook so the panic message is printed.
        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::abort();
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(info))));
    // A test that returns Err is converted into a panic so it flows through
    // the hook above.
    if let Err(message) = runnable_test.run() {
        panic!("{}", message);
    }
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}
757