//! This is a copy of `core::hash::sip` adapted to provide 128-bit hashes.
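//!
//! A minimal usage sketch (assumes `SipHasher128`, `SipHasher128Hash`, and
//! `ExtendedHasher` are in scope; not compiled as a doctest because the
//! public re-export paths are not fixed here):
//!
//! ```ignore
//! use std::hash::Hasher as _;
//!
//! let mut hasher = SipHasher128::new_with_keys(0, 0);
//! hasher.write(b"some bytes");
//! hasher.write_u32(42);
//! // `ExtendedHasher::finish` consumes the hasher and returns both 64-bit
//! // halves; the fully qualified call disambiguates it from `Hasher::finish`.
//! let SipHasher128Hash([lo, hi]) = ExtendedHasher::finish(hasher);
//! ```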
2 | |
3 | // This code is very hot and uses lots of arithmetic, avoid overflow checks for performance. |
4 | // See https://github.com/rust-lang/rust/pull/119440#issuecomment-1874255727 |
5 | use crate::int_overflow::{DebugStrictAdd, DebugStrictSub}; |
6 | use crate::ExtendedHasher; |
7 | |
8 | use std::hash::Hasher; |
9 | use std::mem::{self, MaybeUninit}; |
10 | use std::ptr; |
11 | |
#[cfg(test)]
13 | mod tests; |
14 | |
15 | // The SipHash algorithm operates on 8-byte chunks. |
16 | const ELEM_SIZE: usize = mem::size_of::<u64>(); |
17 | |
18 | // Size of the buffer in number of elements, not including the spill. |
19 | // |
20 | // The selection of this size was guided by rustc-perf benchmark comparisons of |
21 | // different buffer sizes. It should be periodically reevaluated as the compiler |
22 | // implementation and input characteristics change. |
23 | // |
24 | // Using the same-sized buffer for everything we hash is a performance versus |
25 | // complexity tradeoff. The ideal buffer size, and whether buffering should even |
26 | // be used, depends on what is being hashed. It may be worth it to size the |
27 | // buffer appropriately (perhaps by making SipHasher128 generic over the buffer |
28 | // size) or disable buffering depending on what is being hashed. But at this |
29 | // time, we use the same buffer size for everything. |
30 | const BUFFER_CAPACITY: usize = 8; |
31 | |
32 | // Size of the buffer in bytes, not including the spill. |
33 | const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE; |
34 | |
35 | // Size of the buffer in number of elements, including the spill. |
36 | const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1; |
37 | |
38 | // Size of the buffer in bytes, including the spill. |
39 | const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE; |
40 | |
41 | // Index of the spill element in the buffer. |
42 | const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1; |
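//
// With `ELEM_SIZE` = 8 bytes, this works out to a 64-byte hashing window
// followed by an 8-byte spill element at index 8 (72 bytes in total).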
43 | |
/// The 128-bit hash result produced by [`SipHasher128`], as two `u64` halves.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
46 | pub struct SipHasher128Hash(pub [u64; 2]); |
47 | |
#[derive(Debug, Clone)]
#[repr(C)]
50 | pub struct SipHasher128 { |
51 | // The access pattern during hashing consists of accesses to `nbuf` and |
52 | // `buf` until the buffer is full, followed by accesses to `state` and |
53 | // `processed`, and then repetition of that pattern until hashing is done. |
54 | // This is the basis for the ordering of fields below. However, in practice |
55 | // the cache miss-rate for data access is extremely low regardless of order. |
56 | nbuf: usize, // how many bytes in buf are valid |
buf: [MaybeUninit<u64>; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes, read as little-endian u64 elements
58 | state: State, // hash State |
59 | processed: usize, // how many bytes we've processed |
60 | } |
61 | |
#[derive(Debug, Clone, Copy)]
#[repr(C)]
64 | struct State { |
// v0, v2 and v1, v3 show up in pairs in the algorithm, and SIMD
// implementations of SipHash use vectors of (v0, v2) and (v1, v3). By placing
// the fields in this order, the compiler can pick up a few SIMD optimizations
// by itself.
69 | v0: u64, |
70 | v2: u64, |
71 | v1: u64, |
72 | v3: u64, |
73 | } |
74 | |
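// A single SipRound: the add-rotate-xor permutation from the SipHash paper.
// The same round function is used both for compression (`c_rounds`) and for
// finalization (`d_rounds`) by `Sip13Rounds` below.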
75 | macro_rules! compress { |
76 | ($state:expr) => {{ |
77 | compress!($state.v0, $state.v1, $state.v2, $state.v3) |
78 | }}; |
79 | ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{ |
80 | $v0 = $v0.wrapping_add($v1); |
81 | $v2 = $v2.wrapping_add($v3); |
82 | $v1 = $v1.rotate_left(13); |
83 | $v1 ^= $v0; |
84 | $v3 = $v3.rotate_left(16); |
85 | $v3 ^= $v2; |
86 | $v0 = $v0.rotate_left(32); |
87 | |
88 | $v2 = $v2.wrapping_add($v1); |
89 | $v0 = $v0.wrapping_add($v3); |
90 | $v1 = $v1.rotate_left(17); |
91 | $v1 ^= $v2; |
92 | $v3 = $v3.rotate_left(21); |
93 | $v3 ^= $v0; |
94 | $v2 = $v2.rotate_left(32); |
95 | }}; |
96 | } |
97 | |
98 | // Copies up to 8 bytes from source to destination. This performs better than |
99 | // `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real |
100 | // workloads since all of the copies have fixed sizes and avoid calling memcpy. |
101 | // |
102 | // This is specifically designed for copies of up to 8 bytes, because that's the |
// maximum number of bytes needed to fill an 8-byte-sized element on which
104 | // SipHash operates. Note that for variable-sized copies which are known to be |
105 | // less than 8 bytes, this function will perform more work than necessary unless |
106 | // the compiler is able to optimize the extra work away. |
#[inline]
108 | unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) { |
109 | debug_assert!(count <= 8); |
110 | |
111 | unsafe { |
112 | if count == 8 { |
113 | ptr::copy_nonoverlapping(src, dst, 8); |
114 | return; |
115 | } |
116 | |
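// Decompose the remaining 1..=7 bytes into at most one 4-byte, one 2-byte,
// and one 1-byte copy, so that every copy below has a statically known size.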
117 | let mut i = 0; |
118 | if i.debug_strict_add(3) < count { |
119 | ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4); |
120 | i = i.debug_strict_add(4); |
121 | } |
122 | |
123 | if i.debug_strict_add(1) < count { |
124 | ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2); |
125 | i = i.debug_strict_add(2) |
126 | } |
127 | |
128 | if i < count { |
129 | *dst.add(i) = *src.add(i); |
130 | i = i.debug_strict_add(1); |
131 | } |
132 | |
133 | debug_assert_eq!(i, count); |
134 | } |
135 | } |
136 | |
137 | // # Implementation |
138 | // |
139 | // This implementation uses buffering to reduce the hashing cost for inputs |
140 | // consisting of many small integers. Buffering simplifies the integration of |
141 | // integer input--the integer write function typically just appends to the |
142 | // buffer with a statically sized write, updates metadata, and returns. |
143 | // |
144 | // Buffering also prevents alternating between writes that do and do not trigger |
145 | // the hashing process. Only when the entire buffer is full do we transition |
146 | // into hashing. This allows us to keep the hash state in registers for longer, |
147 | // instead of loading and storing it before and after processing each element. |
148 | // |
149 | // When a write fills the buffer, a buffer processing function is invoked to |
150 | // hash all of the buffered input. The buffer processing functions are marked |
151 | // `#[inline(never)]` so that they aren't inlined into the append functions, |
152 | // which ensures the more frequently called append functions remain inlineable |
153 | // and don't include register pushing/popping that would only be made necessary |
154 | // by inclusion of the complex buffer processing path which uses those |
155 | // registers. |
156 | // |
157 | // The buffer includes a "spill"--an extra element at the end--which simplifies |
158 | // the integer write buffer processing path. The value that fills the buffer can |
159 | // be written with a statically sized write that may spill over into the spill. |
160 | // After the buffer is processed, the part of the value that spilled over can be |
161 | // written from the spill to the beginning of the buffer with another statically |
162 | // sized write. This write may copy more bytes than actually spilled over, but |
163 | // we maintain the metadata such that any extra copied bytes will be ignored by |
164 | // subsequent processing. Due to the static sizes, this scheme performs better |
165 | // than copying the exact number of bytes needed into the end and beginning of |
166 | // the buffer. |
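//
// As a worked example: if `nbuf` is 60 and an 8-byte integer is written, the
// first 4 bytes of the value complete the buffer and the remaining 4 bytes
// land in the spill. After the full buffer is hashed, `LEN - 1 = 7` bytes are
// copied from the spill back to the start of the buffer, and `nbuf` becomes
// `60 + 8 - 64 = 4`, so only the 4 bytes that actually spilled remain valid.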
167 | // |
168 | // The buffer is uninitialized, which improves performance, but may preclude |
169 | // efficient implementation of alternative approaches. The improvement is not so |
170 | // large that an alternative approach should be disregarded because it cannot be |
171 | // efficiently implemented with an uninitialized buffer. On the other hand, an |
172 | // uninitialized buffer may become more important should a larger one be used. |
173 | // |
174 | // # Platform Dependence |
175 | // |
176 | // The SipHash algorithm operates on byte sequences. It parses the input stream |
177 | // as 8-byte little-endian integers. Therefore, given the same byte sequence, it |
178 | // produces the same result on big- and little-endian hardware. |
179 | // |
180 | // However, the Hasher trait has methods which operate on multi-byte integers. |
181 | // How they are converted into byte sequences can be endian-dependent (by using |
182 | // native byte order) or independent (by consistently using either LE or BE byte |
183 | // order). It can also be `isize` and `usize` size dependent (by using the |
184 | // native size), or independent (by converting to a common size), supposing the |
185 | // values can be represented in 32 bits. |
186 | // |
187 | // In order to make `SipHasher128` consistent with `SipHasher` in libstd, we |
188 | // choose to do the integer to byte sequence conversion in the platform- |
189 | // dependent way. Clients can achieve platform-independent hashing by widening |
190 | // `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping |
191 | // integers on big-endian systems before passing them to the writing functions. |
192 | // This causes the input byte sequence to look identical on big- and little- |
193 | // endian systems (supposing `isize` and `usize` values can be represented in 32 |
194 | // bits), which ensures platform-independent results. |
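//
// For example, a caller could hash a `usize` in a platform-independent way
// with something along these lines (`hasher` and `value` are hypothetical
// client-side bindings; this is sketched only for illustration):
//
//     hasher.write_u64((value as u64).to_le());
//
// `u64::to_le` is a no-op on little-endian targets and a byte swap on
// big-endian ones, so `write_u64` sees the same byte sequence on both.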
195 | impl SipHasher128 { |
#[inline]
197 | pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 { |
198 | let mut hasher = SipHasher128 { |
199 | nbuf: 0, |
// HACK: manual `MaybeUninit::uninit_array`; replace with an inline const once the MSRV reaches Rust 1.79
201 | buf: unsafe { |
202 | MaybeUninit::<[MaybeUninit<_>; BUFFER_WITH_SPILL_CAPACITY]>::uninit().assume_init() |
203 | }, |
204 | state: State { |
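// The magic numbers are the standard SipHash initialization constants
// (the ASCII bytes of "somepseudorandomlygeneratedbytes") XORed with the
// two key halves.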
205 | v0: key0 ^ 0x736f6d6570736575, |
// The XOR with 0xee is only done in the 128-bit version of the algorithm.
207 | v1: key1 ^ (0x646f72616e646f6d ^ 0xee), |
208 | v2: key0 ^ 0x6c7967656e657261, |
209 | v3: key1 ^ 0x7465646279746573, |
210 | }, |
211 | processed: 0, |
212 | }; |
213 | |
214 | unsafe { |
215 | // Initialize spill because we read from it in `short_write_process_buffer`. |
216 | *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed(); |
217 | } |
218 | |
219 | hasher |
220 | } |
221 | |
222 | // A specialized write function for values with size <= 8 that should only |
223 | // be called when the write would cause the buffer to fill. |
224 | // |
225 | // SAFETY: the write of `x` into `self.buf` starting at byte offset |
226 | // `self.nbuf` must cause `self.buf` to become fully initialized (and not |
227 | // overflow) if it wasn't already. |
#[inline(never)]
229 | unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) { |
230 | unsafe { |
231 | let nbuf = self.nbuf; |
232 | debug_assert!(LEN <= 8); |
233 | debug_assert!(nbuf < BUFFER_SIZE); |
234 | debug_assert!(nbuf + LEN >= BUFFER_SIZE); |
235 | debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE); |
236 | |
237 | // Copy first part of input into end of buffer, possibly into spill |
238 | // element. The memcpy call is optimized away because the size is known. |
239 | let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); |
240 | ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN); |
241 | |
242 | // Process buffer. |
243 | for i in 0..BUFFER_CAPACITY { |
244 | let elem = self.buf.get_unchecked(i).assume_init().to_le(); |
245 | self.state.v3 ^= elem; |
246 | Sip13Rounds::c_rounds(&mut self.state); |
247 | self.state.v0 ^= elem; |
248 | } |
249 | |
250 | // Copy remaining input into start of buffer by copying LEN - 1 |
// bytes from the spill (at most LEN - 1 bytes could have overflowed
252 | // into the spill). The memcpy call is optimized away because the size |
253 | // is known. And the whole copy is optimized away for LEN == 1. |
254 | let dst = self.buf.as_mut_ptr() as *mut u8; |
255 | let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8; |
256 | ptr::copy_nonoverlapping(src, dst, LEN - 1); |
257 | |
258 | // This function should only be called when the write fills the buffer. |
259 | // Therefore, when LEN == 1, the new `self.nbuf` must be zero. |
260 | // LEN is statically known, so the branch is optimized away. |
261 | self.nbuf = if LEN == 1 { |
262 | 0 |
263 | } else { |
264 | nbuf.debug_strict_add(LEN).debug_strict_sub(BUFFER_SIZE) |
265 | }; |
266 | self.processed = self.processed.debug_strict_add(BUFFER_SIZE); |
267 | } |
268 | } |
269 | |
270 | // A write function for byte slices. |
#[inline]
272 | fn slice_write(&mut self, msg: &[u8]) { |
273 | let length = msg.len(); |
274 | let nbuf = self.nbuf; |
275 | debug_assert!(nbuf < BUFFER_SIZE); |
276 | |
277 | if nbuf.debug_strict_add(length) < BUFFER_SIZE { |
278 | unsafe { |
279 | let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); |
280 | |
281 | if length <= 8 { |
282 | copy_nonoverlapping_small(msg.as_ptr(), dst, length); |
283 | } else { |
284 | // This memcpy is *not* optimized away. |
285 | ptr::copy_nonoverlapping(msg.as_ptr(), dst, length); |
286 | } |
287 | } |
288 | |
289 | self.nbuf = nbuf.debug_strict_add(length); |
290 | |
291 | return; |
292 | } |
293 | |
294 | unsafe { self.slice_write_process_buffer(msg) } |
295 | } |
296 | |
297 | // A write function for byte slices that should only be called when the |
298 | // write would cause the buffer to fill. |
299 | // |
300 | // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`, |
301 | // and `msg` must contain enough bytes to initialize the rest of the element |
302 | // containing the byte offset `self.nbuf`. |
#[inline(never)]
304 | unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) { |
305 | unsafe { |
306 | let length = msg.len(); |
307 | let nbuf = self.nbuf; |
308 | debug_assert!(nbuf < BUFFER_SIZE); |
309 | debug_assert!(nbuf + length >= BUFFER_SIZE); |
310 | |
311 | // Always copy first part of input into current element of buffer. |
312 | // This function should only be called when the write fills the buffer, |
313 | // so we know that there is enough input to fill the current element. |
314 | let valid_in_elem = nbuf % ELEM_SIZE; |
315 | let needed_in_elem = ELEM_SIZE.debug_strict_sub(valid_in_elem); |
316 | |
317 | let src = msg.as_ptr(); |
318 | let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); |
319 | copy_nonoverlapping_small(src, dst, needed_in_elem); |
320 | |
321 | // Process buffer. |
322 | |
323 | // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) / |
324 | // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0. |
// We know that is true because the previous step ensured we have a full
326 | // element in the buffer. |
327 | let last = (nbuf / ELEM_SIZE).debug_strict_add(1); |
328 | |
329 | for i in 0..last { |
330 | let elem = self.buf.get_unchecked(i).assume_init().to_le(); |
331 | self.state.v3 ^= elem; |
332 | Sip13Rounds::c_rounds(&mut self.state); |
333 | self.state.v0 ^= elem; |
334 | } |
335 | |
336 | // Process the remaining element-sized chunks of input. |
337 | let mut processed = needed_in_elem; |
338 | let input_left = length.debug_strict_sub(processed); |
339 | let elems_left = input_left / ELEM_SIZE; |
340 | let extra_bytes_left = input_left % ELEM_SIZE; |
341 | |
342 | for _ in 0..elems_left { |
343 | let elem = (msg.as_ptr().add(processed) as *const u64) |
344 | .read_unaligned() |
345 | .to_le(); |
346 | self.state.v3 ^= elem; |
347 | Sip13Rounds::c_rounds(&mut self.state); |
348 | self.state.v0 ^= elem; |
349 | processed = processed.debug_strict_add(ELEM_SIZE); |
350 | } |
351 | |
352 | // Copy remaining input into start of buffer. |
353 | let src = msg.as_ptr().add(processed); |
354 | let dst = self.buf.as_mut_ptr() as *mut u8; |
355 | copy_nonoverlapping_small(src, dst, extra_bytes_left); |
356 | |
357 | self.nbuf = extra_bytes_left; |
358 | self.processed = self |
359 | .processed |
360 | .debug_strict_add(nbuf.debug_strict_add(processed)); |
361 | } |
362 | } |
363 | |
// Finishes the hashing and produces both 64-bit halves of the 128-bit result.
365 | // |
366 | // SAFETY: `buf` must be initialized up to the byte offset `nbuf`. |
#[inline]
368 | unsafe fn finish128_inner( |
369 | nbuf: usize, |
370 | buf: &mut [MaybeUninit<u64>; BUFFER_WITH_SPILL_CAPACITY], |
371 | mut state: State, |
372 | processed: usize, |
373 | ) -> [u64; 2] { |
374 | debug_assert!(nbuf < BUFFER_SIZE); |
375 | |
376 | // Process full elements in buffer. |
377 | let last = nbuf / ELEM_SIZE; |
378 | |
379 | for i in 0..last { |
380 | let elem = unsafe { buf.get_unchecked(i).assume_init().to_le() }; |
381 | state.v3 ^= elem; |
382 | Sip13Rounds::c_rounds(&mut state); |
383 | state.v0 ^= elem; |
384 | } |
385 | |
386 | // Get remaining partial element. |
387 | let elem = if nbuf % ELEM_SIZE != 0 { |
388 | unsafe { |
389 | // Ensure element is initialized by writing zero bytes. At most |
// `ELEM_SIZE - 1` bytes are required given the above check. It's safe
391 | // to write this many because we have the spill and we maintain |
392 | // `self.nbuf` such that this write will start before the spill. |
393 | let dst = (buf.as_mut_ptr() as *mut u8).add(nbuf); |
394 | ptr::write_bytes(dst, 0, ELEM_SIZE - 1); |
395 | buf.get_unchecked(last).assume_init().to_le() |
396 | } |
397 | } else { |
398 | 0 |
399 | }; |
400 | |
401 | // Finalize the hash. |
402 | let length = processed.debug_strict_add(nbuf); |
403 | let b: u64 = ((length as u64 & 0xff) << 56) | elem; |
404 | |
405 | state.v3 ^= b; |
406 | Sip13Rounds::c_rounds(&mut state); |
407 | state.v0 ^= b; |
408 | |
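// The two output words follow the reference 128-bit SipHash finalization:
// XOR 0xee into v2 before extracting the first word, then XOR 0xdd into v1
// before extracting the second.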
409 | state.v2 ^= 0xee; |
410 | Sip13Rounds::d_rounds(&mut state); |
411 | let l = state.v0 ^ state.v1 ^ state.v2 ^ state.v3; |
412 | |
413 | state.v1 ^= 0xdd; |
414 | Sip13Rounds::d_rounds(&mut state); |
415 | let h = state.v0 ^ state.v1 ^ state.v2 ^ state.v3; |
416 | |
417 | [l, h] |
418 | } |
419 | } |
420 | |
421 | impl Default for SipHasher128 { |
422 | fn default() -> SipHasher128 { |
SipHasher128::new_with_keys(0, 0)
424 | } |
425 | } |
426 | |
427 | impl ExtendedHasher for SipHasher128 { |
428 | type Hash = SipHasher128Hash; |
429 | |
#[inline]
431 | fn short_write<const LEN: usize>(&mut self, bytes: [u8; LEN]) { |
432 | let nbuf = self.nbuf; |
433 | debug_assert!(LEN <= 8); |
434 | debug_assert!(nbuf < BUFFER_SIZE); |
435 | debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE); |
436 | |
437 | if nbuf.debug_strict_add(LEN) < BUFFER_SIZE { |
438 | unsafe { |
439 | // The memcpy call is optimized away because the size is known. |
440 | let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf); |
441 | ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN); |
442 | } |
443 | |
444 | self.nbuf = nbuf.debug_strict_add(LEN); |
445 | |
446 | return; |
447 | } |
448 | |
449 | unsafe { self.short_write_process_buffer(bytes) } |
450 | } |
451 | |
#[inline(always)]
453 | fn finish(mut self) -> SipHasher128Hash { |
454 | SipHasher128Hash(unsafe { |
455 | SipHasher128::finish128_inner(self.nbuf, &mut self.buf, self.state, self.processed) |
456 | }) |
457 | } |
458 | } |
459 | |
460 | impl Hasher for SipHasher128 { |
#[inline]
462 | fn write_u8(&mut self, i: u8) { |
463 | self.short_write(i.to_ne_bytes()); |
464 | } |
465 | |
#[inline]
467 | fn write_u16(&mut self, i: u16) { |
468 | self.short_write(i.to_ne_bytes()); |
469 | } |
470 | |
#[inline]
472 | fn write_u32(&mut self, i: u32) { |
473 | self.short_write(i.to_ne_bytes()); |
474 | } |
475 | |
#[inline]
477 | fn write_u64(&mut self, i: u64) { |
478 | self.short_write(i.to_ne_bytes()); |
479 | } |
480 | |
#[inline]
482 | fn write_usize(&mut self, i: usize) { |
483 | self.short_write(i.to_ne_bytes()); |
484 | } |
485 | |
#[inline]
487 | fn write_i8(&mut self, i: i8) { |
488 | self.short_write((i as u8).to_ne_bytes()); |
489 | } |
490 | |
#[inline]
492 | fn write_i16(&mut self, i: i16) { |
493 | self.short_write((i as u16).to_ne_bytes()); |
494 | } |
495 | |
#[inline]
497 | fn write_i32(&mut self, i: i32) { |
498 | self.short_write((i as u32).to_ne_bytes()); |
499 | } |
500 | |
#[inline]
502 | fn write_i64(&mut self, i: i64) { |
503 | self.short_write((i as u64).to_ne_bytes()); |
504 | } |
505 | |
#[inline]
507 | fn write_isize(&mut self, i: isize) { |
508 | self.short_write((i as usize).to_ne_bytes()); |
509 | } |
510 | |
#[inline]
512 | fn write(&mut self, msg: &[u8]) { |
513 | self.slice_write(msg); |
514 | } |
515 | |
#[cfg(feature = "nightly")]
#[inline]
518 | fn write_str(&mut self, s: &str) { |
519 | // This hasher works byte-wise, and `0xFF` cannot show up in a `str`, |
520 | // so just hashing the one extra byte is enough to be prefix-free. |
521 | self.write(s.as_bytes()); |
522 | self.write_u8(0xFF); |
523 | } |
524 | |
525 | fn finish(&self) -> u64 { |
526 | let mut buf = self.buf; |
527 | let [a, b] = unsafe { |
528 | SipHasher128::finish128_inner(self.nbuf, &mut buf, self.state, self.processed) |
529 | }; |
530 | |
531 | // Combining the two halves makes sure we get a good quality hash. |
532 | a.wrapping_mul(3).wrapping_add(b).to_le() |
533 | } |
534 | } |
535 | |
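// Implements the SipHash-1-3 round schedule: one compression round per 8-byte
// element (`c_rounds`) and three finalization rounds (`d_rounds`), a
// reduced-round variant of the reference SipHash-2-4.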
#[derive(Debug, Clone, Default)]
537 | struct Sip13Rounds; |
538 | |
539 | impl Sip13Rounds { |
#[inline]
541 | fn c_rounds(state: &mut State) { |
542 | compress!(state); |
543 | } |
544 | |
#[inline]
546 | fn d_rounds(state: &mut State) { |
547 | compress!(state); |
548 | compress!(state); |
549 | compress!(state); |
550 | } |
551 | } |
552 | |