//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
//! details.
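//!
//! For illustration, a typical test and benchmark as a user would write them
//! (a minimal sketch; `#[bench]` and `Bencher` require the unstable `test`
//! feature on a nightly compiler):
//!
//! ```ignore (requires the unstable test feature)
//! #![feature(test)]
//! extern crate test;
//!
//! use test::{Bencher, black_box};
//!
//! #[test]
//! fn it_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_add_two(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from constant-folding the addition away.
//!     b.iter(|| black_box(2) + black_box(2));
//! }
//! ```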

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

#![unstable(feature = "test", issue = "50297")]
#![doc(test(attr(deny(warnings))))]
#![doc(rust_logo)]
#![feature(rustdoc_internals)]
#![feature(file_buffered)]
#![feature(internal_output_capture)]
#![feature(io_const_error)]
#![feature(staged_api)]
#![feature(process_exitcode_internals)]
#![feature(panic_can_unwind)]
#![feature(test)]
#![feature(thread_spawn_hook)]
#![allow(internal_features)]
#![warn(rustdoc::unescaped_backticks)]
#![warn(unreachable_pub)]

pub use cli::TestOpts;

pub use self::ColorConfig::*;
pub use self::bench::{Bencher, black_box};
pub use self::console::run_tests_console;
pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
pub use self::types::TestName::*;
pub use self::types::*;

// Module to be used by rustc to compile tests in libtest
pub mod test {
    pub use crate::bench::Bencher;
    pub use crate::cli::{TestOpts, parse_opts};
    pub use crate::helpers::metrics::{Metric, MetricMap};
    pub use crate::options::{Options, RunIgnored, RunStrategy, ShouldPanic};
    pub use crate::test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
    pub use crate::time::{TestExecTime, TestTimeOptions};
    pub use crate::types::{
        DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
        TestDescAndFn, TestId, TestName, TestType,
    };
    pub use crate::{assert_test_result, filter_tests, run_test, test_main, test_main_static};
}

use std::collections::VecDeque;
use std::io::prelude::Write;
use std::mem::ManuallyDrop;
use std::panic::{self, AssertUnwindSafe, PanicHookInfo, catch_unwind};
use std::process::{self, Command, Termination};
use std::sync::mpsc::{Sender, channel};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
use std::{env, io, thread};

pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;

#[cfg(test)]
mod tests;

use core::any::Any;

use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::shuffle::{get_shuffle_seed, shuffle_tests};
use options::RunStrategy;
use test_result::*;
use time::TestExecTime;

// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

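// Environment variables used to pass state to a secondary test process spawned
// in panic=abort mode (see `test_main_static_abort` and `spawn_test_subprocess`).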
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";

/// The default console test runner. It accepts the command line
/// arguments and a vector of test_descs.
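///
/// For illustration, a hand-rolled harness might call it like this (a minimal
/// sketch; the empty test vector is hypothetical):
///
/// ```ignore (requires the unstable test feature)
/// fn main() {
///     let args: Vec<String> = std::env::args().collect();
///     test::test_main(&args, Vec::new(), None);
/// }
/// ```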
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    let mut opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {msg}");
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    if let Some(options) = options {
        opts.options = options;
    }
    if opts.list {
        if let Err(e) = console::list_tests_console(&opts, tests) {
            eprintln!("error: io error when listing tests: {e:?}");
            process::exit(ERROR_EXIT_CODE);
        }
    } else {
        if !opts.nocapture {
            // If we encounter a non-unwinding panic, flush any captured output from the current test,
            // and stop capturing output to ensure that the non-unwinding panic message is visible.
            // We also acquire the locks for both output streams to prevent output from other threads
            // from interleaving with the panic message or appearing after it.
            let builtin_panic_hook = panic::take_hook();
            let hook = Box::new({
                move |info: &'_ PanicHookInfo<'_>| {
                    if !info.can_unwind() {
                        std::mem::forget(std::io::stderr().lock());
                        let mut stdout = ManuallyDrop::new(std::io::stdout().lock());
                        if let Some(captured) = io::set_output_capture(None) {
                            if let Ok(data) = captured.lock() {
                                let _ = stdout.write_all(&data);
                                let _ = stdout.flush();
                            }
                        }
                    }
                    builtin_panic_hook(info);
                }
            });
            panic::set_hook(hook);
            // Use a thread spawning hook to make new threads inherit output capturing.
            std::thread::add_spawn_hook(|_| {
                // Get and clone the output capture of the current thread.
                let output_capture = io::set_output_capture(None);
                io::set_output_capture(output_capture.clone());
                // Set the output capture of the new thread.
                || {
                    io::set_output_capture(output_capture);
                }
            });
        }
        let res = console::run_tests_console(&opts, tests);
        // Prevent Valgrind from reporting reachable blocks in users' unit tests.
        drop(panic::take_hook());
        match res {
            Ok(true) => {}
            Ok(false) => process::exit(ERROR_EXIT_CODE),
            Err(e) => {
                eprintln!("error: io error when writing test results: {e:?}");
                process::exit(ERROR_EXIT_CODE);
            }
        }
    }
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, None)
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here. run_test
    // will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        unsafe {
            env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        }

        // Convert benchmarks to tests if we're not benchmarking.
        let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>();
        if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() {
            unsafe {
                env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR);
            }
        } else {
            tests = convert_benchmarks_to_tests(tests);
        };

        let test = tests
            .into_iter()
            .find(|test| test.desc.name.as_slice() == name)
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name}'"));
        let TestDescAndFn { desc, testfn } = test;
        match testfn.into_runnable() {
            Runnable::Test(runnable_test) => {
                if runnable_test.is_dynamic() {
                    panic!("only static tests are supported");
                }
                run_test_in_spawned_subprocess(desc, runnable_test);
            }
            Runnable::Bench(_) => {
                panic!("benchmarks should not be executed in child processes")
            }
        }
    }

    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}

/// Clones static values for putting into a dynamic vector, which `test_main()`
/// needs in order to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
    match test.testfn {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static"),
    }
}

/// Invoked when unit tests terminate. Returns `Result::Err` if the test is
/// considered a failure. By default, invokes `report()` and checks for a `0`
/// result.
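///
/// For illustration (a minimal sketch; `()` and `ExitCode` are two of the
/// standard `Termination` implementors):
///
/// ```ignore (requires the unstable test feature)
/// use std::process::ExitCode;
///
/// // `()` reports success, so the test passes.
/// assert!(test::assert_test_result(()).is_ok());
/// // A non-zero exit code is treated as a failure.
/// assert!(test::assert_test_result(ExitCode::FAILURE).is_err());
/// ```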
pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> {
    let code = result.report().to_i32();
    if code == 0 {
        Ok(())
    } else {
        Err(format!(
            "the test returned a termination value with a non-zero status code \
             ({code}) which indicates a failure"
        ))
    }
}

struct FilteredTests {
    tests: Vec<(TestId, TestDescAndFn)>,
    benches: Vec<(TestId, TestDescAndFn)>,
    next_id: usize,
}

impl FilteredTests {
    fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.benches.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.tests.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    fn total_len(&self) -> usize {
        self.tests.len() + self.benches.len()
    }
}

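/// Runs the given tests, reporting progress through the
/// `notify_about_test_event` callback: filtering and shuffling happen first,
/// then the tests themselves run (concurrently, up to the requested number of
/// test threads), and finally, when benchmarking is enabled, the benchmarks
/// run in serial. Timed-out tests are reported via `TeTimeout` events but are
/// left running until they finish.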
pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?;
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}

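/// Applies the filtering options in `opts` to `tests`: name filters (substring
/// matches, or whole-name matches with `--exact`), skip filters, optional
/// exclusion of `#[should_panic]` tests, and the `--ignored`/`--include-ignored`
/// policy.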
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        match opts.filter_exact {
            true => test_name == filter,
            false => test_name.contains(filter),
        }
    };

    // Remove tests that don't match the test filter
    if !opts.filters.is_empty() {
        filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
    }

    // Skip tests that match any of the skip filters
    if !opts.skip.is_empty() {
        filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
    }

    // Exclude `#[should_panic]` tests
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // Maybe unignore tests
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            filtered.retain(|test| test.desc.ignore);
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    filtered
}

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // Convert benchmarks to tests, if we're not benchmarking them.
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                DynBenchFn(benchfn) => DynBenchAsTestFn(benchfn),
                StaticBenchFn(benchfn) => StaticBenchAsTestFn(benchfn),
                f => f,
            };
            TestDescAndFn { desc: x.desc, testfn }
        })
        .collect()
}

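/// Runs a single test or benchmark, sending the outcome over `monitor_ch` as a
/// `CompletedTest`. Ignored tests report `TrIgnored` without running. Tests
/// normally run on a newly spawned thread (returned as a `JoinHandle`), falling
/// back to running synchronously on targets without thread support or when the
/// thread limit is hit; benchmarks always run in-process.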
pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    id: TestId,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
) -> Option<thread::JoinHandle<()>> {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm"))
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return None;
    }

    match testfn.into_runnable() {
        Runnable::Test(runnable_test) => {
            if runnable_test.is_dynamic() {
                match strategy {
                    RunStrategy::InProcess => (),
                    _ => panic!("Cannot run dynamic test fn out-of-process"),
                };
            }

            let name = desc.name.clone();
            let nocapture = opts.nocapture;
            let time_options = opts.time_options;
            let bench_benchmarks = opts.bench_benchmarks;

            let runtest = move || match strategy {
                RunStrategy::InProcess => run_test_in_process(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    runnable_test,
                    monitor_ch,
                    time_options,
                ),
                RunStrategy::SpawnPrimary => spawn_test_subprocess(
                    id,
                    desc,
                    nocapture,
                    time_options.is_some(),
                    monitor_ch,
                    time_options,
                    bench_benchmarks,
                ),
            };

            // If the platform is single-threaded we're just going to run
            // the test synchronously, regardless of the concurrency
            // level.
            let supports_threads = !cfg!(target_os = "emscripten")
                && !cfg!(target_family = "wasm")
                && !cfg!(target_os = "zkvm");
            if supports_threads {
                let cfg = thread::Builder::new().name(name.as_slice().to_owned());
                let mut runtest = Arc::new(Mutex::new(Some(runtest)));
                let runtest2 = runtest.clone();
                match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
                    Ok(handle) => Some(handle),
                    Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                        // `ErrorKind::WouldBlock` means hitting the thread limit on some
                        // platforms, so run the test synchronously here instead.
                        Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
                        None
                    }
                    Err(e) => panic!("failed to spawn thread to run test: {e}"),
                }
            } else {
                runtest();
                None
            }
        }
        Runnable::Bench(runnable_bench) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture);
            None
        }
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    let result = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}

fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    io::set_output_capture(None);

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), time_opts.as_ref(), exec_time.as_ref()),
        Err(e) => calc_result(&desc, Err(e.as_ref()), time_opts.as_ref(), exec_time.as_ref()),
    };
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}

fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        Ok(Err(e)) => Err(Box::new(e)),
        Ok(Ok(v)) => Ok(v),
        Err(e) => Err(e),
    }
}

fn spawn_test_subprocess(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
    bench_benchmarks: bool,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if bench_benchmarks {
            command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1");
        }
        if nocapture {
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result =
            get_result_from_exit_code(&desc, status, time_opts.as_ref(), exec_time.as_ref());
        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(id, desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}

fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! {
    let builtin_panic_hook = panic::take_hook();
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicHookInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), None, None),
            None => calc_result(&desc, Ok(()), None, None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{msg}");
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::abort();
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(info))));
    if let Err(message) = runnable_test.run() {
        panic!("{}", message);
    }
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}