1 | pub use super::bit_reader::GetBitsError; |
2 | use byteorder::ByteOrder; |
3 | use byteorder::LittleEndian; |
4 | |
/// A bit reader that consumes its source back-to-front: the last byte of
/// `source` is read first, and bits within the buffered window are returned
/// highest-first.
pub struct BitReaderReversed<'s> {
    idx: isize, // bits of `source` not yet moved into the container; goes negative once reads past the start of the stream are emulated
    source: &'s [u8],

    // Buffer of up to 64 bits; the valid bits are the low `bits_in_container`
    // bits and are handed out highest-first. Bits above that are stale.
    bit_container: u64,
    bits_in_container: u8,
}
12 | |
impl<'s> BitReaderReversed<'s> {
    /// Total number of bits still available: bits not yet pulled from `source`
    /// plus bits already buffered in the container. Becomes negative once
    /// reads past the end of the stream have been emulated.
    pub fn bits_remaining(&self) -> isize {
        self.idx + self.bits_in_container as isize
    }

    /// Create a reader that starts consuming `source` at its last byte.
    pub fn new(source: &'s [u8]) -> BitReaderReversed<'_> {
        BitReaderReversed {
            idx: source.len() as isize * 8,
            source,
            bit_container: 0,
            bits_in_container: 0,
        }
    }

    /// We refill the container in full bytes, shifting the still unread portion to the left, and filling the lower bits with new data
    #[inline (always)]
    fn refill_container(&mut self) {
        let byte_idx = self.byte_idx() as usize;

        // Bytes needed to hold the still-buffered bits, rounded up.
        let retain_bytes = (self.bits_in_container + 7) / 8;
        let want_to_read_bits = 64 - (retain_bytes * 8);

        // if there are >= 8 byte left to read we go a fast path:
        // The slice is looking something like this |U..UCCCCCCCCR..R| Where U are some unread bytes, C are the bytes in the container, and R are already read bytes
        // What we do is, we shift the container by a few bytes to the left by just reading a u64 from the correct position, rereading the portion we did not yet return from the container.
        // Technically this would still work for positions lower than 8 but this guarantees that enough bytes are in the source and generally makes for less edge cases
        if byte_idx >= 8 {
            self.refill_fast(byte_idx, retain_bytes, want_to_read_bits)
        } else {
            // In the slow path we just read however many bytes we can
            self.refill_slow(byte_idx, want_to_read_bits)
        }
    }

    /// Fast refill: a single unaligned little-endian u64 load whose window
    /// ends just past the highest retained byte, so it re-reads the retained
    /// bytes (which land in the high part of the container) and pulls
    /// `want_to_read_bits` fresh bits in below them. Any already-consumed bits
    /// of the topmost retained byte end up above `bits_in_container` and are
    /// masked off later by `get_bits_unchecked`. Requires `byte_idx >= 8` so
    /// the 8-byte load stays in bounds.
    #[inline (always)]
    fn refill_fast(&mut self, byte_idx: usize, retain_bytes: u8, want_to_read_bits: u8) {
        let load_from_byte_idx = byte_idx - 7 + retain_bytes as usize;
        let refill = LittleEndian::read_u64(&self.source[load_from_byte_idx..]);
        self.bit_container = refill;
        self.bits_in_container += want_to_read_bits;
        self.idx -= want_to_read_bits as isize;
    }

    /// Slow refill used near the start of the buffer: shift the retained bits
    /// left and append as many whole unread bytes as are still available.
    #[cold ]
    fn refill_slow(&mut self, byte_idx: usize, want_to_read_bits: u8) {
        let can_read_bits = isize::min(want_to_read_bits as isize, self.idx);
        let can_read_bytes = can_read_bits / 8;

        match can_read_bytes {
            8 => {
                // Container is empty; read a full 8 bytes at once.
                self.bit_container = LittleEndian::read_u64(&self.source[byte_idx - 7..]);
                self.bits_in_container += 64;
                self.idx -= 64;
            }
            6..=7 => {
                self.bit_container <<= 48;
                self.bits_in_container += 48;
                self.bit_container |= LittleEndian::read_u48(&self.source[byte_idx - 5..]);
                self.idx -= 48;
            }
            4..=5 => {
                self.bit_container <<= 32;
                self.bits_in_container += 32;
                self.bit_container |=
                    u64::from(LittleEndian::read_u32(&self.source[byte_idx - 3..]));
                self.idx -= 32;
            }
            2..=3 => {
                self.bit_container <<= 16;
                self.bits_in_container += 16;
                self.bit_container |=
                    u64::from(LittleEndian::read_u16(&self.source[byte_idx - 1..]));
                self.idx -= 16;
            }
            1 => {
                self.bit_container <<= 8;
                self.bits_in_container += 8;
                self.bit_container |= u64::from(self.source[byte_idx]);
                self.idx -= 8;
            }
            _ => {
                // 0 is unreachable here: while `idx > 0` it is a multiple of 8
                // (refills only ever consume whole bytes), and callers refill
                // only while `bits_in_container < 56`, so
                // `want_to_read_bits >= 8` and at least one byte can be read.
                panic!("This cannot be reached" );
            }
        }
    }

    /// Next byte that should be read into the container
    /// Negative values mean that the source buffer has been read into the container completely.
    fn byte_idx(&self) -> isize {
        (self.idx - 1) / 8
    }

    /// Read the next `n` bits from the stream.
    ///
    /// `n` must be at most 56; larger requests fail with
    /// `GetBitsError::TooManyBits`. Reads at or past the end of the source are
    /// emulated and yield zero bits (see `get_bits_cold`).
    #[inline (always)]
    pub fn get_bits(&mut self, n: u8) -> Result<u64, GetBitsError> {
        if n == 0 {
            return Ok(0);
        }
        // Hot path: enough bits are already buffered in the container.
        if self.bits_in_container >= n {
            return Ok(self.get_bits_unchecked(n));
        }

        self.get_bits_cold(n)
    }

    /// Out-of-line path for `get_bits`: validates `n`, emulates reads at or
    /// past the end of the stream, and refills the container as needed.
    #[cold ]
    fn get_bits_cold(&mut self, n: u8) -> Result<u64, GetBitsError> {
        if n > 56 {
            return Err(GetBitsError::TooManyBits {
                num_requested_bits: usize::from(n),
                limit: 56,
            });
        }

        let signed_n = n as isize;

        // Stream fully consumed: return zeros but still account for the
        // requested bits in `idx`, so `bits_remaining` keeps going negative.
        if self.bits_remaining() <= 0 {
            self.idx -= signed_n;
            return Ok(0);
        }

        // Fewer bits left than requested: return what is left, shifted up so
        // the missing low bits read as zeros, and push `idx` negative by the
        // emulated amount.
        if self.bits_remaining() < signed_n {
            let emulated_read_shift = signed_n - self.bits_remaining();
            let v = self.get_bits(self.bits_remaining() as u8)?;
            debug_assert!(self.idx == 0);
            let value = v << emulated_read_shift;
            self.idx -= emulated_read_shift;
            return Ok(value);
        }

        // Enough real bits exist; refill until the container can serve them.
        while (self.bits_in_container < n) && self.idx > 0 {
            self.refill_container();
        }

        debug_assert!(self.bits_in_container >= n);

        //if we reach this point there are enough bits in the container

        Ok(self.get_bits_unchecked(n))
    }

    /// Read three bit-widths in one call. When `n1 + n2 + n3 <= 56` the
    /// container is checked/refilled only once for all three reads; otherwise
    /// this falls back to three separate `get_bits` calls.
    #[inline (always)]
    pub fn get_bits_triple(
        &mut self,
        n1: u8,
        n2: u8,
        n3: u8,
    ) -> Result<(u64, u64, u64), GetBitsError> {
        let sum = n1 as usize + n2 as usize + n3 as usize;
        if sum == 0 {
            return Ok((0, 0, 0));
        }
        if sum > 56 {
            // try and get the values separately
            return Ok((self.get_bits(n1)?, self.get_bits(n2)?, self.get_bits(n3)?));
        }
        let sum = sum as u8;

        // Hot path: all three reads can be served from the buffered bits.
        if self.bits_in_container >= sum {
            let v1 = if n1 == 0 {
                0
            } else {
                self.get_bits_unchecked(n1)
            };
            let v2 = if n2 == 0 {
                0
            } else {
                self.get_bits_unchecked(n2)
            };
            let v3 = if n3 == 0 {
                0
            } else {
                self.get_bits_unchecked(n3)
            };

            return Ok((v1, v2, v3));
        }

        self.get_bits_triple_cold(n1, n2, n3, sum)
    }

    /// Out-of-line path for `get_bits_triple`: handles end-of-stream
    /// emulation and refills the container so all three reads can be served
    /// unchecked. `sum` is `n1 + n2 + n3`, guaranteed `<= 56` by the caller.
    #[cold ]
    fn get_bits_triple_cold(
        &mut self,
        n1: u8,
        n2: u8,
        n3: u8,
        sum: u8,
    ) -> Result<(u64, u64, u64), GetBitsError> {
        let sum_signed = sum as isize;

        // Stream fully consumed: zeros for all three, bookkeeping in `idx`.
        if self.bits_remaining() <= 0 {
            self.idx -= sum_signed;
            return Ok((0, 0, 0));
        }

        // Not enough bits for all three; let `get_bits` handle the partial /
        // emulated reads individually.
        if self.bits_remaining() < sum_signed {
            return Ok((self.get_bits(n1)?, self.get_bits(n2)?, self.get_bits(n3)?));
        }

        while (self.bits_in_container < sum) && self.idx > 0 {
            self.refill_container();
        }

        debug_assert!(self.bits_in_container >= sum);

        //if we reach this point there are enough bits in the container

        let v1 = if n1 == 0 {
            0
        } else {
            self.get_bits_unchecked(n1)
        };
        let v2 = if n2 == 0 {
            0
        } else {
            self.get_bits_unchecked(n2)
        };
        let v3 = if n3 == 0 {
            0
        } else {
            self.get_bits_unchecked(n3)
        };

        Ok((v1, v2, v3))
    }

    /// Take the top `n` of the `bits_in_container` valid buffered bits.
    /// Callers must ensure `bits_in_container >= n` and `n <= 56` (so
    /// `1u64 << n` cannot overflow). The mask also discards any stale bits
    /// sitting above the valid region (left there by `refill_fast`).
    #[inline (always)]
    fn get_bits_unchecked(&mut self, n: u8) -> u64 {
        let shift_by = self.bits_in_container - n;
        let mask = (1u64 << n) - 1u64;

        let value = self.bit_container >> shift_by;
        self.bits_in_container -= n;
        let value_masked = value & mask;
        debug_assert!(value_masked < (1 << n));

        value_masked
    }

    /// Reuse this reader for a new source buffer, discarding all state.
    pub fn reset(&mut self, new_source: &'s [u8]) {
        self.idx = new_source.len() as isize * 8;
        self.source = new_source;
        self.bit_container = 0;
        self.bits_in_container = 0;
    }
}
259 | |