use std::{io, io::prelude::Write};

use super::OutputFormatter;
use crate::{
    bench::fmt_bench_samples,
    console::{ConsoleTestDiscoveryState, ConsoleTestState, OutputLocation},
    term,
    test_result::TestResult,
    time,
    types::NamePadding,
    types::TestDesc,
};

// We insert a '\n' when the output hits 100 columns in quiet mode. 88 test
// result chars leave 12 chars for a progress count like " 11704/12853".
const QUIET_MODE_MAX_COLUMN: usize = 88;

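/// Terse output formatter (quiet mode / `--format terse`): reports each test
/// as a single character (`.` for ok, `F` for failed, `i` for ignored) rather
/// than one line per test, emitting a `current/total` progress count at
/// regular intervals.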
pub(crate) struct TerseFormatter<T> {
    out: OutputLocation<T>,
    use_color: bool,
    is_multithreaded: bool,
    /// Number of columns to fill when aligning names
    max_name_len: usize,

    test_count: usize,
    total_test_count: usize,
}

impl<T: Write> TerseFormatter<T> {
    pub fn new(
        out: OutputLocation<T>,
        use_color: bool,
        max_name_len: usize,
        is_multithreaded: bool,
    ) -> Self {
        TerseFormatter {
            out,
            use_color,
            max_name_len,
            is_multithreaded,
            test_count: 0,
            total_test_count: 0, // initialized later, when write_run_start is called
        }
    }

    pub fn write_ok(&mut self) -> io::Result<()> {
        self.write_short_result(".", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::Result<()> {
        self.write_short_result("F", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::Result<()> {
        self.write_short_result("i", term::color::YELLOW)
    }

    pub fn write_bench(&mut self) -> io::Result<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

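    /// Writes a single result character via `write_pretty`, and every
    /// `QUIET_MODE_MAX_COLUMN` results appends a ` current/total` progress
    /// count plus a newline so line-buffered consumers flush regularly.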
    pub fn write_short_result(
        &mut self,
        result: &str,
        color: term::color::Color,
    ) -> io::Result<()> {
        self.write_pretty(result, color)?;
        if self.test_count % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 {
            // We insert a new line regularly in order to flush the
            // screen when dealing with line-buffered output (e.g., piping to
            // `stamp` in the rust CI).
            let out = format!(" {}/{}\n", self.test_count + 1, self.total_test_count);
            self.write_plain(out)?;
        }

        self.test_count += 1;
        Ok(())
    }

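    /// Writes `word`, colored when writing to a terminal with color enabled;
    /// raw (non-terminal) output gets the plain bytes.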
    pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
        match self.out {
            OutputLocation::Pretty(ref mut term) => {
                if self.use_color {
                    term.fg(color)?;
                }
                term.write_all(word.as_bytes())?;
                if self.use_color {
                    term.reset()?;
                }
                term.flush()
            }
            OutputLocation::Raw(ref mut stdout) => {
                stdout.write_all(word.as_bytes())?;
                stdout.flush()
            }
        }
    }

    pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
        let s = s.as_ref();
        self.out.write_all(s.as_bytes())?;
        self.out.flush()
    }

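    /// Lists passing tests and their captured stdout; called from
    /// `write_run_finish` when `display_output` is enabled.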
    pub fn write_outputs(&mut self, state: &ConsoleTestState) -> io::Result<()> {
        self.write_plain("\nsuccesses:\n")?;
        let mut successes = Vec::new();
        let mut stdouts = String::new();
        for (f, stdout) in &state.not_failures {
            successes.push(f.name.to_string());
            if !stdout.is_empty() {
                stdouts.push_str(&format!("---- {} stdout ----\n", f.name));
                let output = String::from_utf8_lossy(stdout);
                stdouts.push_str(&output);
                stdouts.push('\n');
            }
        }
        if !stdouts.is_empty() {
            self.write_plain("\n")?;
            self.write_plain(&stdouts)?;
        }

        self.write_plain("\nsuccesses:\n")?;
        successes.sort();
        for name in &successes {
            self.write_plain(&format!("    {name}\n"))?;
        }
        Ok(())
    }

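    /// Lists failed tests and their captured stdout.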
    pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> {
        self.write_plain("\nfailures:\n")?;
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for (f, stdout) in &state.failures {
            failures.push(f.name.to_string());
            if !stdout.is_empty() {
                fail_out.push_str(&format!("---- {} stdout ----\n", f.name));
                let output = String::from_utf8_lossy(stdout);
                fail_out.push_str(&output);
                fail_out.push('\n');
            }
        }
        if !fail_out.is_empty() {
            self.write_plain("\n")?;
            self.write_plain(&fail_out)?;
        }

        self.write_plain("\nfailures:\n")?;
        failures.sort();
        for name in &failures {
            self.write_plain(&format!("    {name}\n"))?;
        }
        Ok(())
    }

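    /// Writes `test <name> ... `, padding the name to `max_name_len`. Used for
    /// benchmarks (right-padded names), which keep a per-test line even in
    /// terse mode.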
    fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> {
        let name = desc.padded_name(self.max_name_len, desc.name.padding());
        if let Some(test_mode) = desc.test_mode() {
            self.write_plain(format!("test {name} - {test_mode} ... "))?;
        } else {
            self.write_plain(format!("test {name} ... "))?;
        }

        Ok(())
    }
}

impl<T: Write> OutputFormatter for TerseFormatter<T> {
    fn write_discovery_start(&mut self) -> io::Result<()> {
        Ok(())
    }

    fn write_test_discovered(&mut self, desc: &TestDesc, test_type: &str) -> io::Result<()> {
        self.write_plain(format!("{}: {test_type}\n", desc.name))
    }

    fn write_discovery_finish(&mut self, _state: &ConsoleTestDiscoveryState) -> io::Result<()> {
        Ok(())
    }

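    /// Records the total test count (used by the progress counter) and prints
    /// the `running N tests` header.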
    fn write_run_start(&mut self, test_count: usize, shuffle_seed: Option<u64>) -> io::Result<()> {
        self.total_test_count = test_count;
        let noun = if test_count != 1 { "tests" } else { "test" };
        let shuffle_seed_msg = if let Some(shuffle_seed) = shuffle_seed {
            format!(" (shuffle seed: {shuffle_seed})")
        } else {
            String::new()
        };
        self.write_plain(format!("\nrunning {test_count} {noun}{shuffle_seed_msg}\n"))
    }

    fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> {
        // Remnants from old libtest code that used the padding value
        // in order to indicate benchmarks.
        // When running benchmarks, terse mode should still print their names,
        // as the Pretty formatter does.
        if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight {
            self.write_test_name(desc)?;
        }

        Ok(())
    }

    fn write_result(
        &mut self,
        desc: &TestDesc,
        result: &TestResult,
        _: Option<&time::TestExecTime>,
        _: &[u8],
        _: &ConsoleTestState,
    ) -> io::Result<()> {
        match *result {
            TestResult::TrOk => self.write_ok(),
            TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => {
                self.write_failed()
            }
            TestResult::TrIgnored => self.write_ignored(),
            TestResult::TrBench(ref bs) => {
                if self.is_multithreaded {
                    self.write_test_name(desc)?;
                }
                self.write_bench()?;
                self.write_plain(format!(": {}\n", fmt_bench_samples(bs)))
            }
        }
    }

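    /// Warns that a test has been running for longer than `TEST_WARN_TIMEOUT_S`.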
    fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
        self.write_plain(format!(
            "test {} has been running for over {} seconds\n",
            desc.name,
            time::TEST_WARN_TIMEOUT_S
        ))
    }

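    /// Prints captured output (if requested), failure details, and the final
    /// summary line; returns whether the run had no failures.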
    fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result<bool> {
        if state.options.display_output {
            self.write_outputs(state)?;
        }
        let success = state.failed == 0;
        if !success {
            self.write_failures(state)?;
        }

        self.write_plain("\ntest result: ")?;

        if success {
            // There's no parallelism at this point so it's safe to use color
            self.write_pretty("ok", term::color::GREEN)?;
        } else {
            self.write_pretty("FAILED", term::color::RED)?;
        }

        let s = format!(
            ". {} passed; {} failed; {} ignored; {} measured; {} filtered out",
            state.passed, state.failed, state.ignored, state.measured, state.filtered_out
        );

        self.write_plain(s)?;

        if let Some(ref exec_time) = state.exec_time {
            let time_str = format!("; finished in {exec_time}");
            self.write_plain(time_str)?;
        }

        self.write_plain("\n\n")?;

        // Custom handling of the case where there is only one test to execute and that test was ignored.
        // We want to show more detailed information (why the test was ignored) for investigation purposes.
        if self.total_test_count == 1 && state.ignores.len() == 1 {
            let test_desc = &state.ignores[0].0;
            if let Some(im) = test_desc.ignore_message {
                self.write_plain(format!("test: {}, ignore_message: {}\n\n", test_desc.name, im))?;
            }
        }

        Ok(success)
    }
}