use super::Adler32Imp;

/// Resolves the update implementation if the CPU supports SSE2 instructions.
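///
/// A usage sketch, assuming `Adler32Imp` is the `fn(u16, u16, &[u8]) -> (u16, u16)`
/// signature implemented by `imp::update` below:
///
/// ```ignore
/// if let Some(update) = get_imp() {
///   let (a, b) = update(1, 0, b"Wikipedia");
///   let checksum = u32::from(b) << 16 | u32::from(a);
/// }
/// ```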
pub fn get_imp() -> Option<Adler32Imp> {
  get_imp_inner()
}

#[inline]
#[cfg(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))]
fn get_imp_inner() -> Option<Adler32Imp> {
  if std::is_x86_feature_detected!("sse2") {
    Some(imp::update)
  } else {
    None
  }
}

#[inline]
#[cfg(all(
  target_feature = "sse2",
  not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
  Some(imp::update)
}

#[inline]
#[cfg(all(
  not(target_feature = "sse2"),
  not(all(feature = "std", any(target_arch = "x86", target_arch = "x86_64")))
))]
fn get_imp_inner() -> Option<Adler32Imp> {
  None
}

#[cfg(all(
  any(target_arch = "x86", target_arch = "x86_64"),
  any(feature = "std", target_feature = "sse2")
))]
mod imp {
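  // MOD is the largest prime below 2^16, as required by Adler-32. NMAX is the
  // standard bound on how many bytes can be summed before the 32-bit
  // accumulators must be reduced modulo MOD, and CHUNK_SIZE rounds it down to
  // a whole number of 32-byte SIMD blocks.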
  const MOD: u32 = 65521;
  const NMAX: usize = 5552;
  const BLOCK_SIZE: usize = 32;
  const CHUNK_SIZE: usize = NMAX / BLOCK_SIZE * BLOCK_SIZE;

  #[cfg(target_arch = "x86")]
  use core::arch::x86::*;
  #[cfg(target_arch = "x86_64")]
  use core::arch::x86_64::*;

  pub fn update(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
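    // Safety: `get_imp` only hands out this function when SSE2 is known to be
    // available (detected at runtime with `std`, or guaranteed by the target
    // features), so calling the `#[target_feature]`-gated implementation is
    // sound here.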
    unsafe { update_imp(a, b, data) }
  }

  #[inline]
  #[target_feature(enable = "sse2")]
  unsafe fn update_imp(a: u16, b: u16, data: &[u8]) -> (u16, u16) {
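    // Widen the running sums to u32 and process the input in CHUNK_SIZE
    // pieces, so the modulo reduction only has to run once per chunk; the
    // shorter tail is finished by `update_block`.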
    let mut a = a as u32;
    let mut b = b as u32;

    let chunks = data.chunks_exact(CHUNK_SIZE);
    let remainder = chunks.remainder();
    for chunk in chunks {
      update_chunk_block(&mut a, &mut b, chunk);
    }

    update_block(&mut a, &mut b, remainder);

    (a as u16, b as u16)
  }

  unsafe fn update_chunk_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
    debug_assert_eq!(
      chunk.len(),
      CHUNK_SIZE,
      "Unexpected chunk size (expected {}, got {})",
      CHUNK_SIZE,
      chunk.len()
    );

    reduce_add_blocks(a, b, chunk);

    *a %= MOD;
    *b %= MOD;
  }

  unsafe fn update_block(a: &mut u32, b: &mut u32, chunk: &[u8]) {
    debug_assert!(
      chunk.len() <= CHUNK_SIZE,
      "Unexpected chunk size (expected <= {}, got {})",
      CHUNK_SIZE,
      chunk.len()
    );

    for byte in reduce_add_blocks(a, b, chunk) {
      *a += *byte as u32;
      *b += *a;
    }

    *a %= MOD;
    *b %= MOD;
  }

  #[inline(always)]
  unsafe fn reduce_add_blocks<'a>(a: &mut u32, b: &mut u32, chunk: &'a [u8]) -> &'a [u8] {
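    // Consumes as many whole 32-byte blocks of `chunk` as possible with SSE2,
    // accumulating into `a` and `b`, and returns the unprocessed tail (fewer
    // than BLOCK_SIZE bytes) for the caller's scalar loop.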
    if chunk.len() < BLOCK_SIZE {
      return chunk;
    }

    let blocks = chunk.chunks_exact(BLOCK_SIZE);
    let blocks_remainder = blocks.remainder();

    let zero_v = _mm_setzero_si128();
    let weight_hi_v = get_weight_hi();
    let weight_lo_v = get_weight_lo();

    let mut p_v = _mm_set_epi32(0, 0, 0, (*a * blocks.len() as u32) as _);
    let mut a_v = _mm_setzero_si128();
    let mut b_v = _mm_set_epi32(0, 0, 0, *b as _);
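    // a_v accumulates plain byte sums and b_v accumulates position-weighted
    // sums (seeded with the incoming `b`). p_v collects a_v as it stood at the
    // start of each block (seeded once per block with the incoming `a`); after
    // the loop it is multiplied by BLOCK_SIZE and folded into b_v, which
    // accounts for the running `a` from before each block being added into `b`
    // once per byte of that block.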

    for block in blocks {
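      // Each 32-byte block is handled as two 16-byte halves: _mm_sad_epu8
      // against zero yields the plain byte sums for a_v, while the emulated
      // maddubs multiplies each byte by its descending weight (32..=17 for the
      // first half, 16..=1 for the second) and adds the products into b_v.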
      let block_ptr = block.as_ptr() as *const _;
      let left_v = _mm_loadu_si128(block_ptr);
      let right_v = _mm_loadu_si128(block_ptr.add(1));

      p_v = _mm_add_epi32(p_v, a_v);

      a_v = _mm_add_epi32(a_v, _mm_sad_epu8(left_v, zero_v));
      let mad = maddubs(left_v, weight_hi_v);
      b_v = _mm_add_epi32(b_v, mad);

      a_v = _mm_add_epi32(a_v, _mm_sad_epu8(right_v, zero_v));
      let mad = maddubs(right_v, weight_lo_v);
      b_v = _mm_add_epi32(b_v, mad);
    }

    b_v = _mm_add_epi32(b_v, _mm_slli_epi32(p_v, 5));

    *a += reduce_add(a_v);
    *b = reduce_add(b_v);

    blocks_remainder
  }

  #[inline(always)]
  unsafe fn maddubs(a: __m128i, b: __m128i) -> __m128i {
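    // SSE2 has no _mm_maddubs_epi16 (that arrived with SSSE3), so emulate it:
    // zero-extend both operands from u8 to 16-bit lanes, multiply-add adjacent
    // pairs with _mm_madd_epi16, and combine the low and high halves.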
    let a_lo = _mm_unpacklo_epi8(a, _mm_setzero_si128());
    let a_hi = _mm_unpackhi_epi8(a, _mm_setzero_si128());

    let b_lo = _mm_unpacklo_epi8(b, _mm_setzero_si128());
    let b_hi = _mm_unpackhi_epi8(b, _mm_setzero_si128());

    let lo = _mm_madd_epi16(a_lo, b_lo);
    let hi = _mm_madd_epi16(a_hi, b_hi);

    _mm_add_epi32(lo, hi)
  }

  #[inline(always)]
  unsafe fn reduce_add(v: __m128i) -> u32 {
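    // Horizontal sum of the four u32 lanes: fold the high 64 bits onto the low
    // 64 bits, then swap the two remaining lanes and add again, leaving the
    // total in lane 0.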
    let hi = _mm_unpackhi_epi64(v, v);
    let sum = _mm_add_epi32(hi, v);
    let hi = _mm_shuffle_epi32(sum, crate::imp::_MM_SHUFFLE(2, 3, 0, 1));

    let sum = _mm_add_epi32(sum, hi);

    _mm_cvtsi128_si32(sum) as _
  }

  #[inline(always)]
  unsafe fn get_weight_lo() -> __m128i {
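    // _mm_set_epi8 lists its arguments from the most significant byte down to
    // byte 0, so this constant pairs byte 16 of a block with weight 16 and
    // byte 31 with weight 1, while get_weight_hi pairs byte 0 with 32 and
    // byte 15 with 17: exactly how many more times each byte position gets
    // added into `b` within a 32-byte block.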
    _mm_set_epi8(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
  }

  #[inline(always)]
  unsafe fn get_weight_hi() -> __m128i {
    _mm_set_epi8(
      17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
    )
  }
}

#[cfg(test)]
mod tests {
  use rand::Rng;

  #[test]
  fn zeroes() {
    assert_sum_eq(&[]);
    assert_sum_eq(&[0]);
    assert_sum_eq(&[0, 0]);
    assert_sum_eq(&[0; 100]);
    assert_sum_eq(&[0; 1024]);
    assert_sum_eq(&[0; 1024 * 1024]);
  }

  #[test]
  fn ones() {
    assert_sum_eq(&[]);
    assert_sum_eq(&[1]);
    assert_sum_eq(&[1, 1]);
    assert_sum_eq(&[1; 100]);
    assert_sum_eq(&[1; 1024]);
    assert_sum_eq(&[1; 1024 * 1024]);
  }

  #[test]
  fn random() {
    let mut random = [0; 1024 * 1024];
    rand::thread_rng().fill(&mut random[..]);

    assert_sum_eq(&random[..1]);
    assert_sum_eq(&random[..100]);
    assert_sum_eq(&random[..1024]);
    assert_sum_eq(&random[..1024 * 1024]);
  }

  /// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
  #[test]
  fn wiki() {
    assert_sum_eq(b"Wikipedia");
  }

  fn assert_sum_eq(data: &[u8]) {
    if let Some(update) = super::get_imp() {
      let (a, b) = update(1, 0, data);
      let left = u32::from(b) << 16 | u32::from(a);
      let right = adler::adler32_slice(data);

      assert_eq!(left, right, "len({})", data.len());
    }
  }
}