//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more
//! details.

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.
16 | |
17 | #![unstable(feature = "test", issue = "50297")] |
18 | #![doc(test(attr(deny(warnings))))] |
19 | #![doc(rust_logo)] |
20 | #![feature(rustdoc_internals)] |
21 | #![feature(file_buffered)] |
22 | #![feature(internal_output_capture)] |
23 | #![feature(io_const_error)] |
24 | #![feature(staged_api)] |
25 | #![feature(process_exitcode_internals)] |
26 | #![feature(panic_can_unwind)] |
27 | #![feature(test)] |
28 | #![feature(thread_spawn_hook)] |
29 | #![allow(internal_features)] |
30 | #![warn(rustdoc::unescaped_backticks)] |
31 | #![warn(unreachable_pub)] |
32 | |
33 | pub use cli::TestOpts; |
34 | |
35 | pub use self::ColorConfig::*; |
36 | pub use self::bench::{Bencher, black_box}; |
37 | pub use self::console::run_tests_console; |
38 | pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic}; |
39 | pub use self::types::TestName::*; |
40 | pub use self::types::*; |
41 | |
42 | // Module to be used by rustc to compile tests in libtest |
// Module to be used by rustc to compile tests in libtest.
// Re-exports the crate's public surface under the `test` path that
// `rustc --test`-generated code refers to.
pub mod test {
    pub use crate::bench::Bencher;
    pub use crate::cli::{TestOpts, parse_opts};
    pub use crate::helpers::metrics::{Metric, MetricMap};
    pub use crate::options::{Options, RunIgnored, RunStrategy, ShouldPanic};
    pub use crate::test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
    pub use crate::time::{TestExecTime, TestTimeOptions};
    pub use crate::types::{
        DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
        TestDescAndFn, TestId, TestName, TestType,
    };
    pub use crate::{assert_test_result, filter_tests, run_test, test_main, test_main_static};
}
56 | |
57 | use std::collections::VecDeque; |
58 | use std::io::prelude::Write; |
59 | use std::mem::ManuallyDrop; |
60 | use std::panic::{self, AssertUnwindSafe, PanicHookInfo, catch_unwind}; |
61 | use std::process::{self, Command, Termination}; |
62 | use std::sync::mpsc::{Sender, channel}; |
63 | use std::sync::{Arc, Mutex}; |
64 | use std::time::{Duration, Instant}; |
65 | use std::{env, io, thread}; |
66 | |
67 | pub mod bench; |
68 | mod cli; |
69 | mod console; |
70 | mod event; |
71 | mod formatters; |
72 | mod helpers; |
73 | mod options; |
74 | pub mod stats; |
75 | mod term; |
76 | mod test_result; |
77 | mod time; |
78 | mod types; |
79 | |
80 | #[cfg(test)] |
81 | mod tests; |
82 | |
83 | use core::any::Any; |
84 | |
85 | use event::{CompletedTest, TestEvent}; |
86 | use helpers::concurrency::get_concurrency; |
87 | use helpers::shuffle::{get_shuffle_seed, shuffle_tests}; |
88 | use options::RunStrategy; |
89 | use test_result::*; |
90 | use time::TestExecTime; |
91 | |
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

// Environment variables used to communicate between the primary test process
// and the secondary process it spawns in panic=abort mode: the first names the
// single test the child must run, the second signals that benchmarks are being
// benchmarked rather than run as tests.
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
const SECONDARY_TEST_BENCH_BENCHMARKS_VAR: &str = "__RUST_TEST_BENCH_BENCHMARKS";
97 | |
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    // Delegate to the callback-taking variant with a no-op exit callback.
    test_main_with_exit_callback(args, tests, options, || {})
}
103 | |
104 | pub fn test_main_with_exit_callback<F: FnOnce()>( |
105 | args: &[String], |
106 | tests: Vec<TestDescAndFn>, |
107 | options: Option<Options>, |
108 | exit_callback: F, |
109 | ) { |
110 | let mut opts = match cli::parse_opts(args) { |
111 | Some(Ok(o)) => o, |
112 | Some(Err(msg)) => { |
113 | eprintln!("error:{msg} "); |
114 | process::exit(ERROR_EXIT_CODE); |
115 | } |
116 | None => return, |
117 | }; |
118 | if let Some(options) = options { |
119 | opts.options = options; |
120 | } |
121 | if opts.list { |
122 | if let Err(e) = console::list_tests_console(&opts, tests) { |
123 | eprintln!("error: io error when listing tests:{e:?} "); |
124 | process::exit(ERROR_EXIT_CODE); |
125 | } |
126 | } else { |
127 | if !opts.nocapture { |
128 | // If we encounter a non-unwinding panic, flush any captured output from the current test, |
129 | // and stop capturing output to ensure that the non-unwinding panic message is visible. |
130 | // We also acquire the locks for both output streams to prevent output from other threads |
131 | // from interleaving with the panic message or appearing after it. |
132 | let builtin_panic_hook = panic::take_hook(); |
133 | let hook = Box::new({ |
134 | move |info: &'_ PanicHookInfo<'_>| { |
135 | if !info.can_unwind() { |
136 | std::mem::forget(std::io::stderr().lock()); |
137 | let mut stdout = ManuallyDrop::new(std::io::stdout().lock()); |
138 | if let Some(captured) = io::set_output_capture(None) { |
139 | if let Ok(data) = captured.lock() { |
140 | let _ = stdout.write_all(&data); |
141 | let _ = stdout.flush(); |
142 | } |
143 | } |
144 | } |
145 | builtin_panic_hook(info); |
146 | } |
147 | }); |
148 | panic::set_hook(hook); |
149 | // Use a thread spawning hook to make new threads inherit output capturing. |
150 | std::thread::add_spawn_hook(|_| { |
151 | // Get and clone the output capture of the current thread. |
152 | let output_capture = io::set_output_capture(None); |
153 | io::set_output_capture(output_capture.clone()); |
154 | // Set the output capture of the new thread. |
155 | || { |
156 | io::set_output_capture(output_capture); |
157 | } |
158 | }); |
159 | } |
160 | let res = console::run_tests_console(&opts, tests); |
161 | // Prevent Valgrind from reporting reachable blocks in users' unit tests. |
162 | drop(panic::take_hook()); |
163 | exit_callback(); |
164 | match res { |
165 | Ok(true) => {} |
166 | Ok(false) => process::exit(ERROR_EXIT_CODE), |
167 | Err(e) => { |
168 | eprintln!("error: io error when listing tests:{e:?} "); |
169 | process::exit(ERROR_EXIT_CODE); |
170 | } |
171 | } |
172 | } |
173 | } |
174 | |
175 | /// A variant optimized for invocation with a static test vector. |
176 | /// This will panic (intentionally) when fed any dynamic tests. |
177 | /// |
178 | /// This is the entry point for the main function generated by `rustc --test` |
179 | /// when panic=unwind. |
180 | pub fn test_main_static(tests: &[&TestDescAndFn]) { |
181 | let args: Vec |
182 | let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect(); |
183 | test_main(&args, owned_tests, options:None) |
184 | } |
185 | |
186 | /// A variant optimized for invocation with a static test vector. |
187 | /// This will panic (intentionally) when fed any dynamic tests. |
188 | /// |
189 | /// Runs tests in panic=abort mode, which involves spawning subprocesses for |
190 | /// tests. |
191 | /// |
192 | /// This is the entry point for the main function generated by `rustc --test` |
193 | /// when panic=abort. |
194 | pub fn test_main_static_abort(tests: &[&TestDescAndFn]) { |
195 | // If we're being run in SpawnedSecondary mode, run the test here. run_test |
196 | // will then exit the process. |
197 | if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) { |
198 | unsafe { |
199 | env::remove_var(SECONDARY_TEST_INVOKER_VAR); |
200 | } |
201 | |
202 | // Convert benchmarks to tests if we're not benchmarking. |
203 | let mut tests = tests.iter().map(make_owned_test).collect::<Vec<_>>(); |
204 | if env::var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR).is_ok() { |
205 | unsafe { |
206 | env::remove_var(SECONDARY_TEST_BENCH_BENCHMARKS_VAR); |
207 | } |
208 | } else { |
209 | tests = convert_benchmarks_to_tests(tests); |
210 | }; |
211 | |
212 | let test = tests |
213 | .into_iter() |
214 | .find(|test| test.desc.name.as_slice() == name) |
215 | .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{name} '")); |
216 | let TestDescAndFn { desc, testfn } = test; |
217 | match testfn.into_runnable() { |
218 | Runnable::Test(runnable_test) => { |
219 | if runnable_test.is_dynamic() { |
220 | panic!("only static tests are supported"); |
221 | } |
222 | run_test_in_spawned_subprocess(desc, runnable_test); |
223 | } |
224 | Runnable::Bench(_) => { |
225 | panic!("benchmarks should not be executed into child processes") |
226 | } |
227 | } |
228 | } |
229 | |
230 | let args = env::args().collect::<Vec<_>>(); |
231 | let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect(); |
232 | test_main(&args, owned_tests, Some(Options::new().panic_abort(true))) |
233 | } |
234 | |
235 | /// Clones static values for putting into a dynamic vector, which test_main() |
236 | /// needs to hand out ownership of tests to parallel test runners. |
237 | /// |
238 | /// This will panic when fed any dynamic tests, because they cannot be cloned. |
239 | fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn { |
240 | match test.testfn { |
241 | StaticTestFn(f: fn() -> Result<(), String>) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() }, |
242 | StaticBenchFn(f: fn(&mut Bencher) -> Result<(), …>) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() }, |
243 | _ => panic!("non-static tests passed to test::test_main_static"), |
244 | } |
245 | } |
246 | |
247 | /// Invoked when unit tests terminate. Returns `Result::Err` if the test is |
248 | /// considered a failure. By default, invokes `report()` and checks for a `0` |
249 | /// result. |
250 | pub fn assert_test_result<T: Termination>(result: T) -> Result<(), String> { |
251 | let code: i32 = result.report().to_i32(); |
252 | if code == 0 { |
253 | Ok(()) |
254 | } else { |
255 | Err(format!( |
256 | "the test returned a termination value with a non-zero status code \ |
257 | ({code} ) which indicates a failure" |
258 | )) |
259 | } |
260 | } |
261 | |
/// Partition of the filtered test set: ordinary tests (run first, possibly in
/// parallel) and benchmarks (run at the end, in serial — see `run_tests`).
/// `next_id` hands out a `TestId` unique across both lists.
struct FilteredTests {
    tests: Vec<(TestId, TestDescAndFn)>,
    benches: Vec<(TestId, TestDescAndFn)>,
    next_id: usize,
}

impl FilteredTests {
    /// Appends a benchmark, assigning it the next sequential id.
    fn add_bench(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.benches.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    /// Appends an ordinary test, assigning it the next sequential id.
    fn add_test(&mut self, desc: TestDesc, testfn: TestFn) {
        let test = TestDescAndFn { desc, testfn };
        self.tests.push((TestId(self.next_id), test));
        self.next_id += 1;
    }
    /// Total number of tests plus benchmarks.
    fn total_len(&self) -> usize {
        self.tests.len() + self.benches.len()
    }
}
283 | |
/// Runs the filtered tests — concurrently up to `opts.test_threads` workers —
/// reporting progress through `notify_about_test_event` (filter counts, wait,
/// timeout, and result events). Benchmarks, if enabled, run serially at the end.
pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, DefaultHasher};
    use std::sync::mpsc::RecvTimeoutError;

    // Handle to a test's worker thread (None when the test ran synchronously).
    struct RunningTest {
        join_handle: Option<thread::JoinHandle<()>>,
    }

    impl RunningTest {
        // Joins the worker thread (if any); a panic after the test already
        // reported success is upgraded to a failure.
        fn join(self, completed_test: &mut CompletedTest) {
            if let Some(join_handle) = self.join_handle {
                if let Err(_) = join_handle.join() {
                    if let TrOk = completed_test.result {
                        completed_test.result =
                            TrFailedMsg("panicked after reporting success".to_string());
                    }
                }
            }
        }
    }

    // Use a deterministic hasher
    type TestMap = HashMap<TestId, RunningTest, BuildHasherDefault<DefaultHasher>>;

    // Deadline entry for the warn-on-slow-test queue, ordered by insertion
    // (and therefore by deadline, since all tests share the default timeout).
    struct TimeoutEntry {
        id: TestId,
        desc: TestDesc,
        timeout: Instant,
    }

    let tests_len = tests.len();

    let mut filtered = FilteredTests { tests: Vec::new(), benches: Vec::new(), next_id: 0 };

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    // Split the filtered set into tests vs. benchmarks, padding names for
    // aligned console output.
    for test in filtered_tests {
        let mut desc = test.desc;
        desc.name = desc.name.with_padding(test.testfn.padding());

        match test.testfn {
            DynBenchFn(_) | StaticBenchFn(_) => {
                filtered.add_bench(desc, test.testfn);
            }
            testfn => {
                filtered.add_test(desc, testfn);
            }
        };
    }

    let filtered_out = tests_len - filtered.total_len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let shuffle_seed = get_shuffle_seed(opts);

    let event = TestEvent::TeFiltered(filtered.total_len(), shuffle_seed);
    notify_about_test_event(event)?;

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered.tests;
    if let Some(shuffle_seed) = shuffle_seed {
        shuffle_tests(shuffle_seed, &mut remaining);
    }
    // Store the tests in a VecDeque so we can efficiently remove the first element to run the
    // tests in the order they were passed (unless shuffled).
    let mut remaining = VecDeque::from(remaining);
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();
    let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();

    // Pops every entry whose deadline has passed; only entries whose test is
    // still running are reported (finished tests were removed from the map).
    fn get_timed_out_tests(
        running_tests: &TestMap,
        timeout_queue: &mut VecDeque<TimeoutEntry>,
    ) -> Vec<TestDesc> {
        let now = Instant::now();
        let mut timed_out = Vec::new();
        while let Some(timeout_entry) = timeout_queue.front() {
            if now < timeout_entry.timeout {
                break;
            }
            let timeout_entry = timeout_queue.pop_front().unwrap();
            if running_tests.contains_key(&timeout_entry.id) {
                timed_out.push(timeout_entry.desc);
            }
        }
        timed_out
    }

    // Time left until the earliest queued deadline; zero if already past.
    fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
        timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout, .. }| {
            let now = Instant::now();
            if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        // Sequential path: run one test at a time, no timeout tracking needed.
        while !remaining.is_empty() {
            let (id, test) = remaining.pop_front().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;

            if fail_fast {
                return Ok(());
            }
        }
    } else {
        // Concurrent path: keep up to `concurrency` tests in flight.
        while pending > 0 || !remaining.is_empty() {
            // Top up the pool of running tests.
            while pending < concurrency && !remaining.is_empty() {
                let (id, test) = remaining.pop_front().unwrap();
                let timeout = time::get_default_test_timeout();
                let desc = test.desc.clone();

                let event = TestEvent::TeWait(desc.clone());
                notify_about_test_event(event)?;
                let join_handle =
                    run_test(opts, !opts.run_tests, id, test, run_strategy, tx.clone());
                running_tests.insert(id, RunningTest { join_handle });
                timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
                pending += 1;
            }

            // Wait for one completion, emitting timeout events for any test
            // that blows past its deadline while we wait.
            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&timeout_queue) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let mut completed_test = res.unwrap();
            let running_test = running_tests.remove(&completed_test.id).unwrap();
            running_test.join(&mut completed_test);

            let fail_fast = match completed_test.result {
                TrIgnored | TrOk | TrBench(_) => false,
                TrFailed | TrFailedMsg(_) | TrTimedFail => opts.fail_fast,
            };

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;

            if fail_fast {
                // Prevent remaining test threads from panicking
                std::mem::forget(rx);
                return Ok(());
            }
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for (id, b) in filtered.benches {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            let join_handle = run_test(opts, false, id, b, run_strategy, tx.clone());
            // Wait for the test to complete.
            let mut completed_test = rx.recv().unwrap();
            RunningTest { join_handle }.join(&mut completed_test);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}
499 | |
500 | pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> { |
501 | let mut filtered = tests; |
502 | let matches_filter = |test: &TestDescAndFn, filter: &str| { |
503 | let test_name = test.desc.name.as_slice(); |
504 | |
505 | match opts.filter_exact { |
506 | true => test_name == filter, |
507 | false => test_name.contains(filter), |
508 | } |
509 | }; |
510 | |
511 | // Remove tests that don't match the test filter |
512 | if !opts.filters.is_empty() { |
513 | filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter))); |
514 | } |
515 | |
516 | // Skip tests that match any of the skip filters |
517 | if !opts.skip.is_empty() { |
518 | filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf))); |
519 | } |
520 | |
521 | // Excludes #[should_panic] tests |
522 | if opts.exclude_should_panic { |
523 | filtered.retain(|test| test.desc.should_panic == ShouldPanic::No); |
524 | } |
525 | |
526 | // maybe unignore tests |
527 | match opts.run_ignored { |
528 | RunIgnored::Yes => { |
529 | filtered.iter_mut().for_each(|test| test.desc.ignore = false); |
530 | } |
531 | RunIgnored::Only => { |
532 | filtered.retain(|test| test.desc.ignore); |
533 | filtered.iter_mut().for_each(|test| test.desc.ignore = false); |
534 | } |
535 | RunIgnored::No => {} |
536 | } |
537 | |
538 | filtered |
539 | } |
540 | |
541 | pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> { |
542 | // convert benchmarks to tests, if we're not benchmarking them |
543 | testsimpl Iterator |
544 | .into_iter() |
545 | .map(|x: TestDescAndFn| { |
546 | let testfn: TestFn = match x.testfn { |
547 | DynBenchFn(benchfn: Box |
548 | StaticBenchFn(benchfn: fn(&mut Bencher) -> Result<(), …>) => StaticBenchAsTestFn(benchfn), |
549 | f: TestFn => f, |
550 | }; |
551 | TestDescAndFn { desc: x.desc, testfn } |
552 | }) |
553 | .collect() |
554 | } |
555 | |
556 | pub fn run_test( |
557 | opts: &TestOpts, |
558 | force_ignore: bool, |
559 | id: TestId, |
560 | test: TestDescAndFn, |
561 | strategy: RunStrategy, |
562 | monitor_ch: Sender<CompletedTest>, |
563 | ) -> Option<thread::JoinHandle<()>> { |
564 | let TestDescAndFn { desc, testfn } = test; |
565 | |
566 | // Emscripten can catch panics but other wasm targets cannot |
567 | let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No |
568 | && (cfg!(target_family = "wasm") || cfg!(target_os = "zkvm")) |
569 | && !cfg!(target_os = "emscripten"); |
570 | |
571 | if force_ignore || desc.ignore || ignore_because_no_process_support { |
572 | let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new()); |
573 | monitor_ch.send(message).unwrap(); |
574 | return None; |
575 | } |
576 | |
577 | match testfn.into_runnable() { |
578 | Runnable::Test(runnable_test) => { |
579 | if runnable_test.is_dynamic() { |
580 | match strategy { |
581 | RunStrategy::InProcess => (), |
582 | _ => panic!("Cannot run dynamic test fn out-of-process"), |
583 | }; |
584 | } |
585 | |
586 | let name = desc.name.clone(); |
587 | let nocapture = opts.nocapture; |
588 | let time_options = opts.time_options; |
589 | let bench_benchmarks = opts.bench_benchmarks; |
590 | |
591 | let runtest = move || match strategy { |
592 | RunStrategy::InProcess => run_test_in_process( |
593 | id, |
594 | desc, |
595 | nocapture, |
596 | time_options.is_some(), |
597 | runnable_test, |
598 | monitor_ch, |
599 | time_options, |
600 | ), |
601 | RunStrategy::SpawnPrimary => spawn_test_subprocess( |
602 | id, |
603 | desc, |
604 | nocapture, |
605 | time_options.is_some(), |
606 | monitor_ch, |
607 | time_options, |
608 | bench_benchmarks, |
609 | ), |
610 | }; |
611 | |
612 | // If the platform is single-threaded we're just going to run |
613 | // the test synchronously, regardless of the concurrency |
614 | // level. |
615 | let supports_threads = !cfg!(target_os = "emscripten") |
616 | && !cfg!(target_family = "wasm") |
617 | && !cfg!(target_os = "zkvm"); |
618 | if supports_threads { |
619 | let cfg = thread::Builder::new().name(name.as_slice().to_owned()); |
620 | let mut runtest = Arc::new(Mutex::new(Some(runtest))); |
621 | let runtest2 = runtest.clone(); |
622 | match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) { |
623 | Ok(handle) => Some(handle), |
624 | Err(e) if e.kind() == io::ErrorKind::WouldBlock => { |
625 | // `ErrorKind::WouldBlock` means hitting the thread limit on some |
626 | // platforms, so run the test synchronously here instead. |
627 | Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()(); |
628 | None |
629 | } |
630 | Err(e) => panic!("failed to spawn thread to run test:{e} "), |
631 | } |
632 | } else { |
633 | runtest(); |
634 | None |
635 | } |
636 | } |
637 | Runnable::Bench(runnable_bench) => { |
638 | // Benchmarks aren't expected to panic, so we run them all in-process. |
639 | runnable_bench.run(id, &desc, &monitor_ch, opts.nocapture); |
640 | None |
641 | } |
642 | } |
643 | } |
644 | |
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
///
/// Runs `f` and returns its result; the frame itself only exists so the
/// backtrace printer has a stable marker to trim at.
#[inline(never)]
fn __rust_begin_short_backtrace<T, F: FnOnce() -> T>(f: F) -> T {
    let result = f();

    // prevent this frame from being tail-call optimised away
    black_box(result)
}
653 | |
/// Runs `runnable_test` on the current thread: captures stdio (unless
/// `nocapture`), times the run when `report_time` is set, converts the
/// outcome via `calc_result`, and reports the `CompletedTest` over
/// `monitor_ch`.
fn run_test_in_process(
    id: TestId,
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    runnable_test: RunnableTest,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    // Catch the panic so an Err carries the panic payload for calc_result.
    let result = fold_err(catch_unwind(AssertUnwindSafe(|| runnable_test.run())));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    // Stop capturing before reporting, so subsequent output flows normally.
    io::set_output_capture(None);

    // Determine whether the test passed or failed, by comparing its panic
    // payload (if any) with its `ShouldPanic` value, and by checking for
    // fatal timeout.
    let test_result =
        calc_result(&desc, result.err().as_deref(), time_opts.as_ref(), exec_time.as_ref());
    // Recover the captured output even if a panicking test poisoned the lock.
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(id, desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}
688 | |
/// Flattens a `catch_unwind`-style nested result: an inner `Err(E)` is boxed
/// into the same `Box<dyn Any + Send>` error channel used for panic payloads,
/// so callers handle "returned Err" and "panicked" uniformly.
fn fold_err<T, E>(
    result: Result<Result<T, E>, Box<dyn Any + Send>>,
) -> Result<T, Box<dyn Any + Send>>
where
    E: Send + 'static,
{
    match result {
        Ok(Err(e)) => Err(Box::new(e)),
        Ok(Ok(v)) => Ok(v),
        Err(e) => Err(e),
    }
}
701 | |
702 | fn spawn_test_subprocess( |
703 | id: TestId, |
704 | desc: TestDesc, |
705 | nocapture: bool, |
706 | report_time: bool, |
707 | monitor_ch: Sender<CompletedTest>, |
708 | time_opts: Option<time::TestTimeOptions>, |
709 | bench_benchmarks: bool, |
710 | ) { |
711 | let (result, test_output, exec_time) = (|| { |
712 | let args = env::args().collect::<Vec<_>>(); |
713 | let current_exe = &args[0]; |
714 | |
715 | let mut command = Command::new(current_exe); |
716 | command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice()); |
717 | if bench_benchmarks { |
718 | command.env(SECONDARY_TEST_BENCH_BENCHMARKS_VAR, "1"); |
719 | } |
720 | if nocapture { |
721 | command.stdout(process::Stdio::inherit()); |
722 | command.stderr(process::Stdio::inherit()); |
723 | } |
724 | |
725 | let start = report_time.then(Instant::now); |
726 | let output = match command.output() { |
727 | Ok(out) => out, |
728 | Err(e) => { |
729 | let err = format!("Failed to spawn{} as child for test:{:?} ", args[0], e); |
730 | return (TrFailed, err.into_bytes(), None); |
731 | } |
732 | }; |
733 | let exec_time = start.map(|start| { |
734 | let duration = start.elapsed(); |
735 | TestExecTime(duration) |
736 | }); |
737 | |
738 | let std::process::Output { stdout, stderr, status } = output; |
739 | let mut test_output = stdout; |
740 | formatters::write_stderr_delimiter(&mut test_output, &desc.name); |
741 | test_output.extend_from_slice(&stderr); |
742 | |
743 | let result = |
744 | get_result_from_exit_code(&desc, status, time_opts.as_ref(), exec_time.as_ref()); |
745 | (result, test_output, exec_time) |
746 | })(); |
747 | |
748 | let message = CompletedTest::new(id, desc, result, exec_time, test_output); |
749 | monitor_ch.send(message).unwrap(); |
750 | } |
751 | |
752 | fn run_test_in_spawned_subprocess(desc: TestDesc, runnable_test: RunnableTest) -> ! { |
753 | let builtin_panic_hook = panic::take_hook(); |
754 | let record_result = Arc::new(move |panic_info: Option<&'_ PanicHookInfo<'_>>| { |
755 | let test_result = calc_result(&desc, panic_info.map(|info| info.payload()), None, None); |
756 | |
757 | // We don't support serializing TrFailedMsg, so just |
758 | // print the message out to stderr. |
759 | if let TrFailedMsg(msg) = &test_result { |
760 | eprintln!("{msg} "); |
761 | } |
762 | |
763 | if let Some(info) = panic_info { |
764 | builtin_panic_hook(info); |
765 | } |
766 | |
767 | if let TrOk = test_result { |
768 | process::exit(test_result::TR_OK); |
769 | } else { |
770 | process::abort(); |
771 | } |
772 | }); |
773 | let record_result2 = record_result.clone(); |
774 | panic::set_hook(Box::new(move |info| record_result2(Some(info)))); |
775 | if let Err(message) = runnable_test.run() { |
776 | panic!("{}", message); |
777 | } |
778 | record_result(None); |
779 | unreachable!("panic=abort callback should have exited the process") |
780 | } |
781 |