#![cfg(exhaustive)]

use std::str;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

#[test]
fn test_exhaustive() {
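    // Round-trip every finite f32: each bit pattern is formatted with ryu and
    // parsed back, asserting the exact value is recovered. The u32 range is
    // split into batches that worker threads claim from a shared counter.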
    const BATCH_SIZE: u32 = 1_000_000;
    let counter = Arc::new(AtomicUsize::new(0));
    let finished = Arc::new(AtomicUsize::new(0));

    let mut workers = Vec::new();
    for _ in 0..num_cpus::get() {
        let counter = counter.clone();
        let finished = finished.clone();
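        // Each worker claims batch indices from the shared counter until the
        // whole u32 range has been handed out.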
        workers.push(thread::spawn(move || loop {
            let batch = counter.fetch_add(1, Ordering::Relaxed) as u32;
            if batch > u32::MAX / BATCH_SIZE {
                return;
            }

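            // Inclusive range of bit patterns for this batch; the final batch
            // is clamped so it ends exactly at u32::MAX.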
            let min = batch * BATCH_SIZE;
            let max = if batch == u32::MAX / BATCH_SIZE {
                u32::MAX
            } else {
                min + BATCH_SIZE - 1
            };

            let mut bytes = [0u8; 24];
            let mut buffer = ryu::Buffer::new();
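            // Check both the raw API and the safe Buffer API for every finite
            // value in the batch.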
            for u in min..=max {
                let f = f32::from_bits(u);
                if !f.is_finite() {
                    continue;
                }
                let n = unsafe { ryu::raw::format32(f, &mut bytes[0]) };
                assert_eq!(Ok(Ok(f)), str::from_utf8(&bytes[..n]).map(str::parse));
                assert_eq!(Ok(f), buffer.format_finite(f).parse());
            }

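            // Print cumulative progress so long runs are visibly advancing.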
            let increment = (max - min + 1) as usize;
            let update = finished.fetch_add(increment, Ordering::Relaxed);
            println!("{}", update + increment);
        }));
    }

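    // A failed assertion panics its worker thread; join() then returns Err
    // and unwrap() fails the test.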
    for w in workers {
        w.join().unwrap();
    }
}