//! Streaming decompression functionality.

use super::*;
use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER};
use ::core::cell::Cell;

use ::core::convert::TryInto;
use ::core::{cmp, slice};

use self::output_buffer::OutputBuffer;

pub const TINFL_LZ_DICT_SIZE: usize = 32_768;

/// A struct containing huffman code lengths and the huffman code tree used by the decompressor.
struct HuffmanTable {
    /// Length of the code at each index.
    pub code_size: [u8; MAX_HUFF_SYMBOLS_0],
    /// Fast lookup table for shorter huffman codes.
    ///
    /// See `HuffmanTable::fast_lookup`.
    pub look_up: [i16; FAST_LOOKUP_SIZE as usize],
    /// Full huffman tree.
    ///
    /// Positive values are edge nodes/symbols, negative values are
    /// parent nodes/references to other nodes.
    pub tree: [i16; MAX_HUFF_TREE_SIZE],
}

impl HuffmanTable {
    const fn new() -> HuffmanTable {
        HuffmanTable {
            code_size: [0; MAX_HUFF_SYMBOLS_0],
            look_up: [0; FAST_LOOKUP_SIZE as usize],
            tree: [0; MAX_HUFF_TREE_SIZE],
        }
    }

    /// Look for a symbol in the fast lookup table.
    /// The symbol is stored in the lower 9 bits, the length in the next 6.
    /// If the returned value is negative, the code wasn't found in the
    /// fast lookup table and the full tree has to be traversed to find the code.
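    ///
    /// For example, a table entry with value `(3 << 9) | 42` decodes as symbol
    /// `42` (`entry & 511`) with a code length of `3` bits (`entry >> 9`).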
    #[inline]
    fn fast_lookup(&self, bit_buf: BitBuffer) -> i16 {
        self.look_up[(bit_buf & BitBuffer::from(FAST_LOOKUP_SIZE - 1)) as usize]
    }

    /// Get the symbol and the code length from the huffman tree.
    #[inline]
    fn tree_lookup(&self, fast_symbol: i32, bit_buf: BitBuffer, mut code_len: u32) -> (i32, u32) {
        let mut symbol = fast_symbol;
        // We step through the tree until we encounter a positive value, which indicates a
        // symbol.
        loop {
            // symbol here indicates the position of the left (0) node, if the next bit is 1
            // we add 1 to the lookup position to get the right node.
            let tree_index = (!symbol + ((bit_buf >> code_len) & 1) as i32) as usize;
            debug_assert!(tree_index < self.tree.len());
            if tree_index >= self.tree.len() {
                break;
            }
            symbol = i32::from(self.tree[tree_index]);
            code_len += 1;
            if symbol >= 0 {
                break;
            }
        }
        (symbol, code_len)
    }

    #[inline]
    /// Look up a symbol and code length from the bits in the provided bit buffer.
    ///
    /// Returns Some(symbol, length) on success,
    /// None if the length is 0.
    ///
    /// It's possible we could avoid checking for 0 if we can guarantee a sane table.
    /// TODO: Check if a smaller type for code_len helps performance.
    fn lookup(&self, bit_buf: BitBuffer) -> Option<(i32, u32)> {
        let symbol = self.fast_lookup(bit_buf).into();
        if symbol >= 0 {
            if (symbol >> 9) as u32 != 0 {
                Some((symbol, (symbol >> 9) as u32))
            } else {
                // Zero-length code.
                None
            }
        } else {
            // We didn't get a symbol from the fast lookup table, so check the tree instead.
            Some(self.tree_lookup(symbol, bit_buf, FAST_LOOKUP_BITS.into()))
        }
    }
}

/// The number of huffman tables used.
const MAX_HUFF_TABLES: usize = 3;
/// The length of the first (literal/length) huffman table.
const MAX_HUFF_SYMBOLS_0: usize = 288;
/// The length of the second (distance) huffman table.
const MAX_HUFF_SYMBOLS_1: usize = 32;
/// The length of the last (huffman code length) huffman table.
const _MAX_HUFF_SYMBOLS_2: usize = 19;
/// The maximum length of a code that can be looked up in the fast lookup table.
const FAST_LOOKUP_BITS: u8 = 10;
/// The size of the fast lookup table.
const FAST_LOOKUP_SIZE: u32 = 1 << FAST_LOOKUP_BITS;
const MAX_HUFF_TREE_SIZE: usize = MAX_HUFF_SYMBOLS_0 * 2;
const LITLEN_TABLE: usize = 0;
const DIST_TABLE: usize = 1;
const HUFFLEN_TABLE: usize = 2;

/// Flags to [`decompress()`] to control how inflation works.
///
/// These define bits for a bitmask argument.
pub mod inflate_flags {
    /// Should we try to parse a zlib header?
    ///
    /// If unset, the function will expect an RFC1951 deflate stream. If set, it will expect a
    /// RFC1950 zlib wrapper around the deflate stream.
    pub const TINFL_FLAG_PARSE_ZLIB_HEADER: u32 = 1;

    /// There will be more input that hasn't been given to the decompressor yet.
    ///
    /// This is useful when you want to decompress what you have so far,
    /// even if you know there is probably more input that hasn't gotten here yet (_e.g._, over a
    /// network connection). When [`decompress()`][super::decompress] reaches the end of the input
    /// without finding the end of the compressed stream, it will return
    /// [`TINFLStatus::NeedsMoreInput`][super::TINFLStatus::NeedsMoreInput] if this is set,
    /// indicating that you should get more data before calling again. If not set, it will return
    /// [`TINFLStatus::FailedCannotMakeProgress`][super::TINFLStatus::FailedCannotMakeProgress]
    /// suggesting the stream is corrupt, since you claimed it was all there.
    pub const TINFL_FLAG_HAS_MORE_INPUT: u32 = 2;

    /// The output buffer should not wrap around.
    pub const TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: u32 = 4;

    /// Calculate the adler32 checksum of the output data even if we're not inflating a zlib stream.
    ///
    /// If [`TINFL_FLAG_IGNORE_ADLER32`] is specified, it will override this.
    ///
    /// NOTE: Enabling/disabling this between calls to decompress will result in an incorrect
    /// checksum.
    pub const TINFL_FLAG_COMPUTE_ADLER32: u32 = 8;

    /// Ignore adler32 checksum even if we are inflating a zlib stream.
    ///
    /// Overrides [`TINFL_FLAG_COMPUTE_ADLER32`] if both are enabled.
    ///
    /// NOTE: This flag does not exist in miniz, as miniz does not support this; it is a
    /// custom addition for miniz_oxide.
    ///
    /// NOTE: Should not be changed from enabled to disabled after decompression has started,
    /// as this will result in checksum failure (outside the unlikely event where the checksum
    /// happens to match anyway).
    pub const TINFL_FLAG_IGNORE_ADLER32: u32 = 64;
}

use self::inflate_flags::*;

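/// The base number of entries in each of the three tables (litlen, dist, hufflen);
/// the 5-, 5- and 4-bit counts read from a dynamic block header are added to these.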
const MIN_TABLE_SIZES: [u16; 3] = [257, 1, 4];

#[cfg(target_pointer_width = "64")]
type BitBuffer = u64;

#[cfg(not(target_pointer_width = "64"))]
type BitBuffer = u32;

/// Main decompression struct.
pub struct DecompressorOxide {
    /// Current state of the decompressor.
    state: core::State,
    /// Number of bits in the bit buffer.
    num_bits: u32,
    /// Zlib CMF
    z_header0: u32,
    /// Zlib FLG
    z_header1: u32,
    /// Adler32 checksum from the zlib header.
    z_adler32: u32,
    /// 1 if the current block is the last block, 0 otherwise.
    finish: u32,
    /// The type of the current block.
    block_type: u32,
    /// 1 if the adler32 value should be checked.
    check_adler32: u32,
    /// Last match distance.
    dist: u32,
    /// Variable used for match length, symbols, and a number of other things.
    counter: u32,
    /// Number of extra bits for the last length or distance code.
    num_extra: u32,
    /// Number of entries in each huffman table.
    table_sizes: [u32; MAX_HUFF_TABLES],
    /// Buffer of input data.
    bit_buf: BitBuffer,
    /// Huffman tables.
    tables: [HuffmanTable; MAX_HUFF_TABLES],
    /// Raw block header.
    raw_header: [u8; 4],
    /// Huffman length codes.
    len_codes: [u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137],
}

impl DecompressorOxide {
    /// Create a new tinfl_decompressor with all fields set to 0.
    pub fn new() -> DecompressorOxide {
        DecompressorOxide::default()
    }

    /// Set the current state to `Start`.
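    ///
    /// This allows reusing the decompressor for a new stream without
    /// constructing a new one; the remaining fields are reset or overwritten
    /// as decompression proceeds.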
    #[inline]
    pub fn init(&mut self) {
        // The rest of the data is reset or overwritten when used.
        self.state = core::State::Start;
    }

    /// Returns the adler32 checksum of the currently decompressed data.
    /// Note: Will return Some(1) if decompressing zlib but ignoring adler32.
    #[inline]
    pub fn adler32(&self) -> Option<u32> {
        if self.state != State::Start && !self.state.is_failure() && self.z_header0 != 0 {
            Some(self.check_adler32)
        } else {
            None
        }
    }

    /// Returns the adler32 that was read from the zlib header if it exists.
    #[inline]
    pub fn adler32_header(&self) -> Option<u32> {
        if self.state != State::Start && self.state != State::BadZlibHeader && self.z_header0 != 0 {
            Some(self.z_adler32)
        } else {
            None
        }
    }
}

impl Default for DecompressorOxide {
    /// Create a new tinfl_decompressor with all fields set to 0.
    #[inline(always)]
    fn default() -> Self {
        DecompressorOxide {
            state: core::State::Start,
            num_bits: 0,
            z_header0: 0,
            z_header1: 0,
            z_adler32: 0,
            finish: 0,
            block_type: 0,
            check_adler32: 0,
            dist: 0,
            counter: 0,
            num_extra: 0,
            table_sizes: [0; MAX_HUFF_TABLES],
            bit_buf: 0,
            // TODO:(oyvindln) Check that copies here are optimized out in release mode.
            tables: [
                HuffmanTable::new(),
                HuffmanTable::new(),
                HuffmanTable::new(),
            ],
            raw_header: [0; 4],
            len_codes: [0; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1 + 137],
        }
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
#[non_exhaustive]
enum State {
    Start = 0,
    ReadZlibCmf,
    ReadZlibFlg,
    ReadBlockHeader,
    BlockTypeNoCompression,
    RawHeader,
    RawMemcpy1,
    RawMemcpy2,
    ReadTableSizes,
    ReadHufflenTableCodeSize,
    ReadLitlenDistTablesCodeSize,
    ReadExtraBitsCodeSize,
    DecodeLitlen,
    WriteSymbol,
    ReadExtraBitsLitlen,
    DecodeDistance,
    ReadExtraBitsDistance,
    RawReadFirstByte,
    RawStoreFirstByte,
    WriteLenBytesToEnd,
    BlockDone,
    HuffDecodeOuterLoop1,
    HuffDecodeOuterLoop2,
    ReadAdler32,

    DoneForever,

    // Failure states.
    BlockTypeUnexpected,
    BadCodeSizeSum,
    BadDistOrLiteralTableLength,
    BadTotalSymbols,
    BadZlibHeader,
    DistanceOutOfBounds,
    BadRawLength,
    BadCodeSizeDistPrevLookup,
    InvalidLitlen,
    InvalidDist,
    InvalidCodeLen,
}

impl State {
    fn is_failure(self) -> bool {
        matches!(
            self,
            BlockTypeUnexpected
                | BadCodeSizeSum
                | BadDistOrLiteralTableLength
                | BadTotalSymbols
                | BadZlibHeader
                | DistanceOutOfBounds
                | BadRawLength
                | BadCodeSizeDistPrevLookup
                | InvalidLitlen
                | InvalidDist
        )
    }

    #[inline]
    fn begin(&mut self, new_state: State) {
        *self = new_state;
    }
}

use self::State::*;

// Not sure why miniz uses 32-bit values for these, maybe alignment/cache again?
// # Optimization
// We add an extra value at the end and make the tables 32 elements long
// so we can use a mask to avoid bounds checks.
// The invalid values are set to something high enough to avoid underflowing
// the match length.
/// Base length for each length code.
///
/// The base is used together with the value of the extra bits to decode the actual
/// length/distance values in a match.
#[rustfmt::skip]
const LENGTH_BASE: [u16; 32] = [
    3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
    35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 512, 512, 512
];

/// Number of extra bits for each length code.
#[rustfmt::skip]
const LENGTH_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
    3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0, 0
];

/// Base length for each distance code.
#[rustfmt::skip]
const DIST_BASE: [u16; 32] = [
    1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33,
    49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537,
    2049, 3073, 4097, 6145, 8193, 12_289, 16_385, 24_577, 32_768, 32_768
];

/// Number of extra bits for each distance code.
#[rustfmt::skip]
const DIST_EXTRA: [u8; 32] = [
    0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
    7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 13
];

/// The mask used when indexing the base/extra arrays.
const BASE_EXTRA_MASK: usize = 32 - 1;

/// Sets the value of all the elements of the slice to `val`.
#[inline]
fn memset<T: Copy>(slice: &mut [T], val: T) {
    for x in slice {
        *x = val
    }
}

/// Read an le u16 value from the slice iterator.
///
/// # Panics
/// Panics if there are less than two bytes left.
#[inline]
fn read_u16_le(iter: &mut slice::Iter<u8>) -> u16 {
    let ret: u16 = {
        let two_bytes: [u8; 2] = iter.as_ref()[..2].try_into().unwrap();
        u16::from_le_bytes(two_bytes)
    };
    iter.nth(1);
    ret
}

/// Read an le u32 value from the slice iterator.
///
/// # Panics
/// Panics if there are less than four bytes left.
#[inline(always)]
#[cfg(target_pointer_width = "64")]
fn read_u32_le(iter: &mut slice::Iter<u8>) -> u32 {
    let ret: u32 = {
        let four_bytes: [u8; 4] = iter.as_ref()[..4].try_into().unwrap();
        u32::from_le_bytes(four_bytes)
    };
    iter.nth(3);
    ret
}

/// Ensure that there is data in the bit buffer.
///
/// On 64-bit platforms, we use a 64-bit value so this will
/// result in there being at least 32 bits in the bit buffer.
/// This function assumes that there are at least 4 bytes left in the input buffer.
#[inline(always)]
#[cfg(target_pointer_width = "64")]
fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>) {
    // Read four bytes into the buffer at once.
    if l.num_bits < 30 {
        l.bit_buf |= BitBuffer::from(read_u32_le(in_iter)) << l.num_bits;
        l.num_bits += 32;
    }
}

/// Same as previous, but for non-64-bit platforms.
/// Ensures at least 16 bits are present, requires at least 2 bytes in the in buffer.
#[inline(always)]
#[cfg(not(target_pointer_width = "64"))]
fn fill_bit_buffer(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>) {
    // If the buffer is 32-bit wide, read 2 bytes instead.
    if l.num_bits < 15 {
        l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
        l.num_bits += 16;
    }
}

/// Check that the zlib header is correct and that there is enough space in the buffer
/// for the window size specified in the header.
///
/// See <https://tools.ietf.org/html/rfc1950>
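///
/// For example, the common header bytes `0x78 0x9C` pass the check-digit test
/// ((0x78 * 256 + 0x9C) % 31 == 0), specify compression method 8 (DEFLATE) in
/// the low bits of CMF, and request the maximum window size of
/// `1 << ((0x78 >> 4) + 8) == 32_768`.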
#[inline]
fn validate_zlib_header(cmf: u32, flg: u32, flags: u32, mask: usize) -> Action {
    let mut failed =
    // cmf + flg should be divisible by 31.
        (((cmf * 256) + flg) % 31 != 0) ||
    // If this flag is set, a dictionary was used for this zlib compressed data.
    // This is currently not supported by miniz or miniz-oxide
        ((flg & 0b0010_0000) != 0) ||
    // Compression method. Only 8(DEFLATE) is defined by the standard.
        ((cmf & 15) != 8);

    let window_size = 1 << ((cmf >> 4) + 8);
    if (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) == 0 {
        // Bail if the buffer is wrapping and the window size is larger than the buffer.
        failed |= (mask + 1) < window_size;
    }

    // Zlib doesn't allow window sizes above 32 * 1024.
    failed |= window_size > 32_768;

    if failed {
        Action::Jump(BadZlibHeader)
    } else {
        Action::Jump(ReadBlockHeader)
    }
}

enum Action {
    None,
    Jump(State),
    End(TINFLStatus),
}

/// Try to decode the next huffman code, and place it in the counter field of the decompressor
/// if successful.
///
/// # Returns
/// The specified action returned from `f` on success,
/// `Action::End` if there is not enough data left to decode a symbol.
fn decode_huffman_code<F>(
    r: &mut DecompressorOxide,
    l: &mut LocalVars,
    table: usize,
    flags: u32,
    in_iter: &mut slice::Iter<u8>,
    f: F,
) -> Action
where
    F: FnOnce(&mut DecompressorOxide, &mut LocalVars, i32) -> Action,
{
    // As the huffman codes can be up to 15 bits long we need at least 15 bits
    // ready in the bit buffer to start decoding the next huffman code.
    if l.num_bits < 15 {
        // First, make sure there is enough data in the bit buffer to decode a huffman code.
        if in_iter.len() < 2 {
            // If there are less than 2 bytes left in the input buffer, we try to look up
            // the huffman code with what's available, and return if that doesn't succeed.
            // Original explanation in miniz:
            // /* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes
            //  * remaining in the input buffer falls below 2. */
            // /* It reads just enough bytes from the input stream that are needed to decode
            //  * the next Huffman code (and absolutely no more). It works by trying to fully
            //  * decode a */
            // /* Huffman code by using whatever bits are currently present in the bit buffer.
            //  * If this fails, it reads another byte, and tries again until it succeeds or
            //  * until the */
            // /* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
            loop {
                let mut temp = i32::from(r.tables[table].fast_lookup(l.bit_buf));

                if temp >= 0 {
                    let code_len = (temp >> 9) as u32;
                    if (code_len != 0) && (l.num_bits >= code_len) {
                        break;
                    }
                } else if l.num_bits > FAST_LOOKUP_BITS.into() {
                    let mut code_len = u32::from(FAST_LOOKUP_BITS);
                    loop {
                        temp = i32::from(
                            r.tables[table].tree
                                [(!temp + ((l.bit_buf >> code_len) & 1) as i32) as usize],
                        );
                        code_len += 1;
                        if temp >= 0 || l.num_bits < code_len + 1 {
                            break;
                        }
                    }
                    if temp >= 0 {
                        break;
                    }
                }

                // TODO: miniz jumps straight to here after getting here again after failing to read
                // a byte.
                // Doing that lets miniz avoid re-doing the lookup that was done in the
                // previous call.
                let mut byte = 0;
                if let a @ Action::End(_) = read_byte(in_iter, flags, |b| {
                    byte = b;
                    Action::None
                }) {
                    return a;
                };

                // Do this outside closure for now to avoid borrowing r.
                l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
                l.num_bits += 8;

                if l.num_bits >= 15 {
                    break;
                }
            }
        } else {
            // There is enough data in the input buffer, so read the next two bytes
            // and add them to the bit buffer.
            // Unwrapping here is fine since we just checked that there are at least two
            // bytes left.
            l.bit_buf |= BitBuffer::from(read_u16_le(in_iter)) << l.num_bits;
            l.num_bits += 16;
        }
    }

    // We now have at least 15 bits in the input buffer.
    let mut symbol = i32::from(r.tables[table].fast_lookup(l.bit_buf));
    let code_len;
    // If the symbol was found in the fast lookup table.
    if symbol >= 0 {
        // Get the length value from the top bits.
        // As we shift down the sign bit, converting to an unsigned value
        // shouldn't overflow.
        code_len = (symbol >> 9) as u32;
        // Mask out the length value.
        symbol &= 511;
    } else {
        let res = r.tables[table].tree_lookup(symbol, l.bit_buf, u32::from(FAST_LOOKUP_BITS));
        symbol = res.0;
        code_len = res.1;
    };

    if code_len == 0 {
        return Action::Jump(InvalidCodeLen);
    }

    l.bit_buf >>= code_len;
    l.num_bits -= code_len;
    f(r, l, symbol)
}

/// Try to read one byte from `in_iter` and call `f` with the read byte as an argument,
/// returning the result.
/// If reading fails, `Action::End` is returned.
#[inline]
fn read_byte<F>(in_iter: &mut slice::Iter<u8>, flags: u32, f: F) -> Action
where
    F: FnOnce(u8) -> Action,
{
    match in_iter.next() {
        None => end_of_input(flags),
        Some(&byte) => f(byte),
    }
}

// TODO: `l: &mut LocalVars` may be slow similar to decompress_fast (even with inline(always))
/// Try to read `amount` number of bits from `in_iter` and call the function `f` with the bits as
/// an argument after reading, returning the result of that function, or `Action::End` if there are
/// not enough bytes left.
#[inline]
#[allow(clippy::while_immutable_condition)]
fn read_bits<F>(
    l: &mut LocalVars,
    amount: u32,
    in_iter: &mut slice::Iter<u8>,
    flags: u32,
    f: F,
) -> Action
where
    F: FnOnce(&mut LocalVars, BitBuffer) -> Action,
{
    // Clippy gives a false positive warning here due to the closure.
    // Read enough bytes from the input iterator to cover the number of bits we want.
    while l.num_bits < amount {
        match read_byte(in_iter, flags, |byte| {
            l.bit_buf |= BitBuffer::from(byte) << l.num_bits;
            l.num_bits += 8;
            Action::None
        }) {
            Action::None => (),
            // If there are not enough bytes in the input iterator, return and signal that we need
            // more.
            action => return action,
        }
    }

    let bits = l.bit_buf & ((1 << amount) - 1);
    l.bit_buf >>= amount;
    l.num_bits -= amount;
    f(l, bits)
}

#[inline]
fn pad_to_bytes<F>(l: &mut LocalVars, in_iter: &mut slice::Iter<u8>, flags: u32, f: F) -> Action
where
    F: FnOnce(&mut LocalVars) -> Action,
{
    let num_bits = l.num_bits & 7;
    read_bits(l, num_bits, in_iter, flags, |l, _| f(l))
}

#[inline]
fn end_of_input(flags: u32) -> Action {
    Action::End(if flags & TINFL_FLAG_HAS_MORE_INPUT != 0 {
        TINFLStatus::NeedsMoreInput
    } else {
        TINFLStatus::FailedCannotMakeProgress
    })
}

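/// Undo reading whole bytes (up to `max`) from the bit buffer, returning how many
/// bytes were undone so the input iterator can be rewound by the same amount.
/// For example, with 20 bits buffered and `max == 4`, two bytes (16 bits) are
/// undone, leaving 4 bits in the buffer.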
#[inline]
fn undo_bytes(l: &mut LocalVars, max: u32) -> u32 {
    let res = cmp::min(l.num_bits >> 3, max);
    l.num_bits -= res << 3;
    res
}

fn start_static_table(r: &mut DecompressorOxide) {
    r.table_sizes[LITLEN_TABLE] = 288;
    r.table_sizes[DIST_TABLE] = 32;
    memset(&mut r.tables[LITLEN_TABLE].code_size[0..144], 8);
    memset(&mut r.tables[LITLEN_TABLE].code_size[144..256], 9);
    memset(&mut r.tables[LITLEN_TABLE].code_size[256..280], 7);
    memset(&mut r.tables[LITLEN_TABLE].code_size[280..288], 8);
    memset(&mut r.tables[DIST_TABLE].code_size[0..32], 5);
}

static REVERSED_BITS_LOOKUP: [u32; 1024] = {
    let mut table: [u32; 1024] = [0; 1024];

    let mut i: usize = 0;
    while i < 1024 {
        table[i] = (i as u32).reverse_bits();
        i += 1;
    }

    table
};

fn init_tree(r: &mut DecompressorOxide, l: &mut LocalVars) -> Option<Action> {
    loop {
        let bt = r.block_type as usize;
        if bt >= r.tables.len() {
            return None;
        }
        let table = &mut r.tables[bt];
        let table_size = r.table_sizes[bt] as usize;
        if table_size > table.code_size.len() {
            return None;
        }
        let mut total_symbols = [0u32; 16];
        let mut next_code = [0u32; 17];
        memset(&mut table.look_up[..], 0);
        memset(&mut table.tree[..], 0);

        for &code_size in &table.code_size[..table_size] {
            let cs = code_size as usize;
            if cs >= total_symbols.len() {
                return None;
            }
            total_symbols[cs] += 1;
        }

        let mut used_symbols = 0;
        let mut total = 0;
        for (ts, next) in total_symbols
            .iter()
            .copied()
            .zip(next_code.iter_mut().skip(1))
            .skip(1)
        {
            used_symbols += ts;
            total += ts;
            total <<= 1;
            *next = total;
        }

        if total != 65_536 && used_symbols > 1 {
            return Some(Action::Jump(BadTotalSymbols));
        }

        let mut tree_next = -1;
        for symbol_index in 0..table_size {
            let mut rev_code = 0;
            let code_size = table.code_size[symbol_index];
            if code_size == 0 || usize::from(code_size) >= next_code.len() {
                continue;
            }

            let mut cur_code = next_code[code_size as usize];
            next_code[code_size as usize] += 1;

            let n = cur_code & (u32::MAX >> (32 - code_size));

            let mut rev_code = if n < 1024 {
                REVERSED_BITS_LOOKUP[n as usize] >> (32 - code_size)
            } else {
                for _ in 0..code_size {
                    rev_code = (rev_code << 1) | (cur_code & 1);
                    cur_code >>= 1;
                }
                rev_code
            };

            if code_size <= FAST_LOOKUP_BITS {
                let k = (i16::from(code_size) << 9) | symbol_index as i16;
                while rev_code < FAST_LOOKUP_SIZE {
                    table.look_up[rev_code as usize] = k;
                    rev_code += 1 << code_size;
                }
                continue;
            }

            let mut tree_cur = table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize];
            if tree_cur == 0 {
                table.look_up[(rev_code & (FAST_LOOKUP_SIZE - 1)) as usize] = tree_next;
                tree_cur = tree_next;
                tree_next -= 2;
            }

            rev_code >>= FAST_LOOKUP_BITS - 1;
            for _ in FAST_LOOKUP_BITS + 1..code_size {
                rev_code >>= 1;
                tree_cur -= (rev_code & 1) as i16;
                let tree_index = (-tree_cur - 1) as usize;
                if tree_index >= table.tree.len() {
                    return None;
                }
                if table.tree[tree_index] == 0 {
                    table.tree[tree_index] = tree_next;
                    tree_cur = tree_next;
                    tree_next -= 2;
                } else {
                    tree_cur = table.tree[tree_index];
                }
            }

            rev_code >>= 1;
            tree_cur -= (rev_code & 1) as i16;
            let tree_index = (-tree_cur - 1) as usize;
            if tree_index >= table.tree.len() {
                return None;
            }
            table.tree[tree_index] = symbol_index as i16;
        }

        if r.block_type == 2 {
            l.counter = 0;
            return Some(Action::Jump(ReadLitlenDistTablesCodeSize));
        }

        if r.block_type == 0 {
            break;
        }
        r.block_type -= 1;
    }

    l.counter = 0;
    Some(Action::Jump(DecodeLitlen))
}

// A helper macro for generating the state machine.
//
// As Rust doesn't have fallthrough on matches, we have to return to the match statement
// and jump for each state change. (Which would ideally be optimized away, but often isn't.)
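//
// For example, a state arm written as
// `Start => generate_state!(state, 'state_machine, { /* produce an Action */ })`
// expands to a loop that keeps evaluating the body on `Action::None`, re-enters
// the outer match via `continue 'state_machine` on `Action::Jump(new_state)`,
// and breaks out of the state machine with the final status on `Action::End`.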
macro_rules! generate_state {
    ($state: ident, $state_machine: tt, $f: expr) => {
        loop {
            match $f {
                Action::None => continue,
                Action::Jump(new_state) => {
                    $state = new_state;
                    continue $state_machine;
                },
                Action::End(result) => break $state_machine result,
            }
        }
    };
}

#[derive(Copy, Clone)]
struct LocalVars {
    pub bit_buf: BitBuffer,
    pub num_bits: u32,
    pub dist: u32,
    pub counter: u32,
    pub num_extra: u32,
}

#[inline]
fn transfer(
    out_slice: &mut [u8],
    mut source_pos: usize,
    mut out_pos: usize,
    match_len: usize,
    out_buf_size_mask: usize,
) {
    // special case that comes up surprisingly often. in the case that `source_pos`
    // is 1 less than `out_pos`, we can say that the entire range will be the same
    // value and optimize this to be a simple `memset`
    let source_diff = if source_pos > out_pos {
        source_pos - out_pos
    } else {
        out_pos - source_pos
    };
    if out_buf_size_mask == usize::MAX && source_diff == 1 && out_pos > source_pos {
        let init = out_slice[out_pos - 1];
        let end = (match_len >> 2) * 4 + out_pos;

        out_slice[out_pos..end].fill(init);
        out_pos = end;
        source_pos = end - 1;
    // if the difference between `source_pos` and `out_pos` is greater than 3, we
    // can do slightly better than the naive case by copying everything at once
    } else if out_buf_size_mask == usize::MAX && source_diff >= 4 && out_pos > source_pos {
        for _ in 0..match_len >> 2 {
            out_slice.copy_within(source_pos..=source_pos + 3, out_pos);
            source_pos += 4;
            out_pos += 4;
        }
    } else {
        for _ in 0..match_len >> 2 {
            out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask];
            out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
            out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask];
            out_slice[out_pos + 3] = out_slice[(source_pos + 3) & out_buf_size_mask];
            source_pos += 4;
            out_pos += 4;
        }
    }

    match match_len & 3 {
        0 => (),
        1 => out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask],
        2 => {
            out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask];
            out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
        }
        3 => {
            out_slice[out_pos] = out_slice[source_pos & out_buf_size_mask];
            out_slice[out_pos + 1] = out_slice[(source_pos + 1) & out_buf_size_mask];
            out_slice[out_pos + 2] = out_slice[(source_pos + 2) & out_buf_size_mask];
        }
        _ => unreachable!(),
    }
}

/// Presumes that there are at least `match_len` bytes left in the output.
#[inline]
fn apply_match(
    out_slice: &mut [u8],
    out_pos: usize,
    dist: usize,
    match_len: usize,
    out_buf_size_mask: usize,
) {
    debug_assert!(out_pos.checked_add(match_len).unwrap() <= out_slice.len());

    let source_pos = out_pos.wrapping_sub(dist) & out_buf_size_mask;

    if match_len == 3 {
        let out_slice = Cell::from_mut(out_slice).as_slice_of_cells();
        if let Some(dst) = out_slice.get(out_pos..out_pos + 3) {
            // Moving bounds checks before any memory mutation allows the optimizer
            // to combine them together.
            let src = out_slice
                .get(source_pos)
                .zip(out_slice.get((source_pos + 1) & out_buf_size_mask))
                .zip(out_slice.get((source_pos + 2) & out_buf_size_mask));
            if let Some(((a, b), c)) = src {
                // For correctness, the memory reads and writes have to be interleaved.
                // Cells make it possible for read and write references to overlap.
                dst[0].set(a.get());
                dst[1].set(b.get());
                dst[2].set(c.get());
            }
        }
        return;
    }

    if cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) {
        // We are not on x86 so copy manually.
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
        return;
    }

    if source_pos >= out_pos && (source_pos - out_pos) < match_len {
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
    } else if match_len <= dist && source_pos + match_len < out_slice.len() {
        // The destination and source segments do not intersect and the source does not wrap.
        if source_pos < out_pos {
            let (from_slice, to_slice) = out_slice.split_at_mut(out_pos);
            to_slice[..match_len].copy_from_slice(&from_slice[source_pos..source_pos + match_len]);
        } else {
            let (to_slice, from_slice) = out_slice.split_at_mut(source_pos);
            to_slice[out_pos..out_pos + match_len].copy_from_slice(&from_slice[..match_len]);
        }
    } else {
        transfer(out_slice, source_pos, out_pos, match_len, out_buf_size_mask);
    }
}

/// Fast inner decompression loop which is run while there are at least
/// 259 bytes left in the output buffer, and at least 14 bytes left in the input buffer
/// (the maximum amount one iteration of the loop can consume; see the comment
/// inside the loop for the breakdown).
///
/// This was inspired by a similar optimization in zlib, which uses this info to do
/// faster unchecked copies of multiple bytes at a time.
/// Currently we don't do this here, but this function does avoid having to jump through the
/// big match loop on each state change (as Rust does not have fallthrough or gotos at the moment),
/// and already improves decompression speed a fair bit.
fn decompress_fast(
    r: &mut DecompressorOxide,
    in_iter: &mut slice::Iter<u8>,
    out_buf: &mut OutputBuffer,
    flags: u32,
    local_vars: &mut LocalVars,
    out_buf_size_mask: usize,
) -> (TINFLStatus, State) {
    // Make a local copy of the most used variables, to avoid having to update and read from values
    // in a random memory location and to encourage more register use.
    let mut l = *local_vars;
    let mut state;

    let status: TINFLStatus = 'o: loop {
        state = State::DecodeLitlen;
        loop {
            // This function assumes that there are at least 259 bytes left in the output buffer,
            // and that there are at least 14 bytes left in the input buffer. 14 input bytes:
            // 15 (prev lit) + 15 (length) + 5 (length extra) + 15 (dist)
            // + 29 + 32 (left in bit buf, including last 13 dist extra) = 111 bits < 14 bytes
            // We need the one extra byte as we may write one length and one full match
            // before checking again.
            if out_buf.bytes_left() < 259 || in_iter.len() < 14 {
                state = State::DecodeLitlen;
                break 'o TINFLStatus::Done;
            }

            fill_bit_buffer(&mut l, in_iter);

            if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {
                l.counter = symbol as u32;
                l.bit_buf >>= code_len;
                l.num_bits -= code_len;

                if (l.counter & 256) != 0 {
                    // The symbol is not a literal.
                    break;
                } else {
                    // If we have a 32-bit buffer we need to read another two bytes now
                    // to have enough bits to keep going.
                    if cfg!(not(target_pointer_width = "64")) {
                        fill_bit_buffer(&mut l, in_iter);
                    }

                    if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {
                        l.bit_buf >>= code_len;
                        l.num_bits -= code_len;
                        // The previous symbol was a literal, so write it directly and check
                        // the next one.
                        out_buf.write_byte(l.counter as u8);
                        if (symbol & 256) != 0 {
                            l.counter = symbol as u32;
                            // The symbol is a length value.
                            break;
                        } else {
                            // The symbol is a literal, so write it directly and continue.
                            out_buf.write_byte(symbol as u8);
                        }
                    } else {
                        state.begin(InvalidCodeLen);
                        break 'o TINFLStatus::Failed;
                    }
                }
            } else {
                state.begin(InvalidCodeLen);
                break 'o TINFLStatus::Failed;
            }
        }

        // Mask the top bits since they may contain length info.
        l.counter &= 511;
        if l.counter == 256 {
            // We hit the end of block symbol.
            state.begin(BlockDone);
            break 'o TINFLStatus::Done;
        } else if l.counter > 285 {
            // Invalid code.
            // We already verified earlier that the code is > 256.
            state.begin(InvalidLitlen);
            break 'o TINFLStatus::Failed;
        } else {
            // The symbol was a length code.
            // # Optimization
            // Mask the value to avoid bounds checks.
            // We could use get_unchecked later if we can statically verify that
            // this will never go out of bounds.
            l.num_extra = u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
            l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
            // Length and distance codes have a number of extra bits depending on
            // the base, which together with the base gives us the exact value.

            fill_bit_buffer(&mut l, in_iter);
            if l.num_extra != 0 {
                let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1);
                l.bit_buf >>= l.num_extra;
                l.num_bits -= l.num_extra;
                l.counter += extra_bits as u32;
            }

            // We found a length code, so a distance code should follow.

            if cfg!(not(target_pointer_width = "64")) {
                fill_bit_buffer(&mut l, in_iter);
            }

            if let Some((mut symbol, code_len)) = r.tables[DIST_TABLE].lookup(l.bit_buf) {
                symbol &= 511;
                l.bit_buf >>= code_len;
                l.num_bits -= code_len;
                if symbol > 29 {
                    state.begin(InvalidDist);
                    break 'o TINFLStatus::Failed;
                }

                l.num_extra = u32::from(DIST_EXTRA[symbol as usize]);
                l.dist = u32::from(DIST_BASE[symbol as usize]);
            } else {
                state.begin(InvalidCodeLen);
                break 'o TINFLStatus::Failed;
            }

            if l.num_extra != 0 {
                fill_bit_buffer(&mut l, in_iter);
                let extra_bits = l.bit_buf & ((1 << l.num_extra) - 1);
                l.bit_buf >>= l.num_extra;
                l.num_bits -= l.num_extra;
                l.dist += extra_bits as u32;
            }

            let position = out_buf.position();
            if l.dist as usize > out_buf.position()
                && (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0)
            {
                // We encountered a distance that refers to a position before
                // the start of the decoded data, so we can't continue.
                state.begin(DistanceOutOfBounds);
                break TINFLStatus::Failed;
            }

            apply_match(
                out_buf.get_mut(),
                position,
                l.dist as usize,
                l.counter as usize,
                out_buf_size_mask,
            );

            out_buf.set_position(position + l.counter as usize);
        }
    };

    *local_vars = l;
    (status, state)
}

/// Main decompression function. Keeps decompressing data from `in_buf` until the `in_buf` is
/// empty, `out` is full, the end of the deflate stream is hit, or there is an error in the
/// deflate stream.
///
/// # Arguments
///
/// `r` is a [`DecompressorOxide`] struct with the state of this stream.
///
/// `in_buf` is a reference to the compressed data that is to be decompressed. The decompressor will
/// start at the first byte of this buffer.
///
/// `out` is a reference to the buffer that will store the decompressed data, and that
/// stores previously decompressed data if any.
///
/// * The offset given by `out_pos` indicates where in the output buffer slice writing should start.
/// * If [`TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF`] is not set, the output buffer is used in a
///   wrapping manner, and its size is required to be a power of 2.
/// * The decompression function normally needs access to 32KiB of the previously decompressed data
///   (or to the beginning of the decompressed data if less than 32KiB has been decompressed.)
///     - If this data is not available, decompression may fail.
///     - Some deflate compressors allow specifying a window size which limits match distances to
///       less than this, or alternatively an RLE mode where matches will only refer to the previous byte
///       and thus allows a smaller output buffer. The window size can be specified in the zlib
///       header structure, however, the header data should not be relied on to be correct.
///
/// `flags` indicates settings and status to the decompression function.
/// * The [`TINFL_FLAG_HAS_MORE_INPUT`] has to be specified if more compressed data is to be provided
///   in a subsequent call to this function.
/// * See the [`inflate_flags`] module for details on other flags.
///
/// # Returns
///
/// Returns a tuple containing the status of the compressor, the number of input bytes read, and the
/// number of bytes output to `out`.
///
/// This function shouldn't panic pending any bugs.
pub fn decompress(
    r: &mut DecompressorOxide,
    in_buf: &[u8],
    out: &mut [u8],
    out_pos: usize,
    flags: u32,
) -> (TINFLStatus, usize, usize) {
    let out_buf_size_mask = if flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0 {
        usize::MAX
    } else {
        // In the case of zero len, any attempt to write would produce HasMoreOutput,
        // so to gracefully process the case of there really being no output,
        // set the mask to all zeros.
        out.len().saturating_sub(1)
    };

    // Ensure the output buffer's size is a power of 2, unless the output buffer
    // is large enough to hold the entire output file (in which case it doesn't
    // matter).
    // Also make sure that the output buffer position is not past the end of the output buffer.
    if (out_buf_size_mask.wrapping_add(1) & out_buf_size_mask) != 0 || out_pos > out.len() {
        return (TINFLStatus::BadParam, 0, 0);
    }

    let mut in_iter = in_buf.iter();

    let mut state = r.state;

    let mut out_buf = OutputBuffer::from_slice_and_pos(out, out_pos);

    // Make a local copy of the important variables here so we can work with them on the stack.
    let mut l = LocalVars {
        bit_buf: r.bit_buf,
        num_bits: r.num_bits,
        dist: r.dist,
        counter: r.counter,
        num_extra: r.num_extra,
    };

    let mut status = 'state_machine: loop {
        match state {
            Start => generate_state!(state, 'state_machine, {
                l.bit_buf = 0;
                l.num_bits = 0;
                l.dist = 0;
                l.counter = 0;
                l.num_extra = 0;
                r.z_header0 = 0;
                r.z_header1 = 0;
                r.z_adler32 = 1;
                r.check_adler32 = 1;
                if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 {
                    Action::Jump(State::ReadZlibCmf)
                } else {
                    Action::Jump(State::ReadBlockHeader)
                }
            }),

            ReadZlibCmf => generate_state!(state, 'state_machine, {
                read_byte(&mut in_iter, flags, |cmf| {
                    r.z_header0 = u32::from(cmf);
                    Action::Jump(State::ReadZlibFlg)
                })
            }),

            ReadZlibFlg => generate_state!(state, 'state_machine, {
                read_byte(&mut in_iter, flags, |flg| {
                    r.z_header1 = u32::from(flg);
                    validate_zlib_header(r.z_header0, r.z_header1, flags, out_buf_size_mask)
                })
            }),

            // Read the block header and jump to the relevant section depending on the block type.
            ReadBlockHeader => generate_state!(state, 'state_machine, {
                read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| {
                    r.finish = (bits & 1) as u32;
                    r.block_type = (bits >> 1) as u32 & 3;
                    match r.block_type {
                        0 => Action::Jump(BlockTypeNoCompression),
                        1 => {
                            start_static_table(r);
                            init_tree(r, l).unwrap_or(Action::End(TINFLStatus::Failed))
                        },
                        2 => {
                            l.counter = 0;
                            Action::Jump(ReadTableSizes)
                        },
                        3 => Action::Jump(BlockTypeUnexpected),
                        _ => unreachable!()
                    }
                })
            }),

            // Raw/Stored/uncompressed block.
            BlockTypeNoCompression => generate_state!(state, 'state_machine, {
                pad_to_bytes(&mut l, &mut in_iter, flags, |l| {
                    l.counter = 0;
                    Action::Jump(RawHeader)
                })
            }),

            // Check that the raw block header is correct.
            RawHeader => generate_state!(state, 'state_machine, {
                if l.counter < 4 {
                    // Read block length and block length check.
                    if l.num_bits != 0 {
                        read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                            r.raw_header[l.counter as usize] = bits as u8;
                            l.counter += 1;
                            Action::None
                        })
                    } else {
                        read_byte(&mut in_iter, flags, |byte| {
                            r.raw_header[l.counter as usize] = byte;
                            l.counter += 1;
                            Action::None
                        })
                    }
                } else {
                    // Check if the length value of a raw block is correct.
                    // The 2 first (2-byte) words in a raw header are the length and the
                    // ones complement of the length.
                    let length = u16::from(r.raw_header[0]) | (u16::from(r.raw_header[1]) << 8);
                    let check = u16::from(r.raw_header[2]) | (u16::from(r.raw_header[3]) << 8);
                    let valid = length == !check;
                    l.counter = length.into();

                    if !valid {
                        Action::Jump(BadRawLength)
                    } else if l.counter == 0 {
                        // Empty raw block. Sometimes used for synchronization.
                        Action::Jump(BlockDone)
                    } else if l.num_bits != 0 {
                        // There is some data in the bit buffer, so we need to write that first.
                        Action::Jump(RawReadFirstByte)
                    } else {
                        // The bit buffer is empty, so memcpy the rest of the uncompressed data from
                        // the block.
                        Action::Jump(RawMemcpy1)
                    }
                }
            }),

            // Read the byte from the bit buffer.
            RawReadFirstByte => generate_state!(state, 'state_machine, {
                read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                    l.dist = bits as u32;
                    Action::Jump(RawStoreFirstByte)
                })
            }),

            // Write the byte we just read to the output buffer.
            RawStoreFirstByte => generate_state!(state, 'state_machine, {
                if out_buf.bytes_left() == 0 {
                    Action::End(TINFLStatus::HasMoreOutput)
                } else {
                    out_buf.write_byte(l.dist as u8);
                    l.counter -= 1;
                    if l.counter == 0 || l.num_bits == 0 {
                        Action::Jump(RawMemcpy1)
                    } else {
                        // There is still some data left in the bit buffer that needs to be output.
                        // TODO: Changed this to jump to `RawReadFirstByte` rather than
                        // `RawStoreFirstByte` as that seemed to be the correct path, but this
                        // needs testing.
                        Action::Jump(RawReadFirstByte)
                    }
                }
            }),

            RawMemcpy1 => generate_state!(state, 'state_machine, {
                if l.counter == 0 {
                    Action::Jump(BlockDone)
                } else if out_buf.bytes_left() == 0 {
                    Action::End(TINFLStatus::HasMoreOutput)
                } else {
                    Action::Jump(RawMemcpy2)
                }
            }),

            RawMemcpy2 => generate_state!(state, 'state_machine, {
                if in_iter.len() > 0 {
                    // Copy as many raw bytes as possible from the input to the output using memcpy.
                    // Raw block lengths are limited to 64 * 1024, so casting through usize and u32
                    // is not an issue.
                    let space_left = out_buf.bytes_left();
                    let bytes_to_copy = cmp::min(cmp::min(
                        space_left,
                        in_iter.len()),
                        l.counter as usize
                    );

                    out_buf.write_slice(&in_iter.as_slice()[..bytes_to_copy]);

                    in_iter.nth(bytes_to_copy - 1);
                    l.counter -= bytes_to_copy as u32;
                    Action::Jump(RawMemcpy1)
                } else {
                    end_of_input(flags)
                }
            }),

            // Read how many huffman codes/symbols are used for each table.
            ReadTableSizes => generate_state!(state, 'state_machine, {
                if l.counter < 3 {
                    let num_bits = [5, 5, 4][l.counter as usize];
                    read_bits(&mut l, num_bits, &mut in_iter, flags, |l, bits| {
                        r.table_sizes[l.counter as usize] =
                            bits as u32 + u32::from(MIN_TABLE_SIZES[l.counter as usize]);
                        l.counter += 1;
                        Action::None
                    })
                } else {
                    memset(&mut r.tables[HUFFLEN_TABLE].code_size[..], 0);
                    l.counter = 0;
                    // Check that the litlen and distance table sizes are within spec.
                    // The litlen table size should be <= 286 according to the RFC, and
                    // additionally zlib rejects dist table sizes larger than 30.
                    // NOTE: These are the final sizes after adding back the predefined
                    // values, not the raw values in the data.
                    // See miniz_oxide issue #130 and https://github.com/madler/zlib/issues/82.
                    if r.table_sizes[LITLEN_TABLE] <= 286 && r.table_sizes[DIST_TABLE] <= 30 {
                        Action::Jump(ReadHufflenTableCodeSize)
                    }
                    else {
                        Action::Jump(BadDistOrLiteralTableLength)
                    }
                }
            }),

            // Read the 3-bit lengths of the huffman codes describing the huffman code lengths used
            // to decode the lengths of the main tables.
            ReadHufflenTableCodeSize => generate_state!(state, 'state_machine, {
                if l.counter < r.table_sizes[HUFFLEN_TABLE] {
                    read_bits(&mut l, 3, &mut in_iter, flags, |l, bits| {
                        // These lengths are not stored in a normal ascending order, but rather one
                        // specified by the deflate specification intended to put the most used
                        // values at the front as trailing zero lengths do not have to be stored.
                        r.tables[HUFFLEN_TABLE]
                            .code_size[HUFFMAN_LENGTH_ORDER[l.counter as usize] as usize] =
                                bits as u8;
                        l.counter += 1;
                        Action::None
                    })
                } else {
                    r.table_sizes[HUFFLEN_TABLE] = 19;
                    init_tree(r, &mut l).unwrap_or(Action::End(TINFLStatus::Failed))
                }
            }),

            ReadLitlenDistTablesCodeSize => generate_state!(state, 'state_machine, {
                if l.counter < r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] {
                    decode_huffman_code(
                        r, &mut l, HUFFLEN_TABLE,
                        flags, &mut in_iter, |r, l, symbol| {
                            l.dist = symbol as u32;
                            if l.dist < 16 {
                                r.len_codes[l.counter as usize] = l.dist as u8;
                                l.counter += 1;
                                Action::None
                            } else if l.dist == 16 && l.counter == 0 {
                                Action::Jump(BadCodeSizeDistPrevLookup)
                            } else {
                                l.num_extra = [2, 3, 7][l.dist as usize - 16];
                                Action::Jump(ReadExtraBitsCodeSize)
                            }
                        }
                    )
                } else if l.counter != r.table_sizes[LITLEN_TABLE] + r.table_sizes[DIST_TABLE] {
                    Action::Jump(BadCodeSizeSum)
                } else {
                    r.tables[LITLEN_TABLE].code_size[..r.table_sizes[LITLEN_TABLE] as usize]
                        .copy_from_slice(&r.len_codes[..r.table_sizes[LITLEN_TABLE] as usize]);

                    let dist_table_start = r.table_sizes[LITLEN_TABLE] as usize;
                    let dist_table_end = (r.table_sizes[LITLEN_TABLE] +
                        r.table_sizes[DIST_TABLE]) as usize;
                    r.tables[DIST_TABLE].code_size[..r.table_sizes[DIST_TABLE] as usize]
                        .copy_from_slice(&r.len_codes[dist_table_start..dist_table_end]);

                    r.block_type -= 1;
                    init_tree(r, &mut l).unwrap_or(Action::End(TINFLStatus::Failed))
                }
            }),

            ReadExtraBitsCodeSize => generate_state!(state, 'state_machine, {
                let num_extra = l.num_extra;
                read_bits(&mut l, num_extra, &mut in_iter, flags, |l, mut extra_bits| {
                    // Mask to avoid a bounds check.
                    extra_bits += [3, 3, 11][(l.dist as usize - 16) & 3];
                    let val = if l.dist == 16 {
                        r.len_codes[l.counter as usize - 1]
                    } else {
                        0
                    };

                    memset(
                        &mut r.len_codes[
                            l.counter as usize..l.counter as usize + extra_bits as usize
                        ],
                        val,
                    );
                    l.counter += extra_bits as u32;
                    Action::Jump(ReadLitlenDistTablesCodeSize)
                })
            }),

            DecodeLitlen => generate_state!(state, 'state_machine, {
                if in_iter.len() < 4 || out_buf.bytes_left() < 2 {
                    // See if we can decode a literal with the data we have left.
                    // Jumps to next state (WriteSymbol) if successful.
                    decode_huffman_code(
                        r,
                        &mut l,
                        LITLEN_TABLE,
                        flags,
                        &mut in_iter,
                        |_r, l, symbol| {
                            l.counter = symbol as u32;
                            Action::Jump(WriteSymbol)
                        },
                    )
                } else if
                    // If there is enough space, use the fast inner decompression
                    // function.
                    out_buf.bytes_left() >= 259 &&
                    in_iter.len() >= 14
                {
                    let (status, new_state) = decompress_fast(
                        r,
                        &mut in_iter,
                        &mut out_buf,
                        flags,
                        &mut l,
                        out_buf_size_mask,
                    );

                    state = new_state;
                    if status == TINFLStatus::Done {
                        Action::Jump(new_state)
                    } else {
                        Action::End(status)
                    }
                } else {
                    fill_bit_buffer(&mut l, &mut in_iter);

                    if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {

                        l.counter = symbol as u32;
                        l.bit_buf >>= code_len;
                        l.num_bits -= code_len;

                        if (l.counter & 256) != 0 {
                            // The symbol is not a literal.
                            Action::Jump(HuffDecodeOuterLoop1)
                        } else {
                            // If we have a 32-bit buffer we need to read another two bytes now
                            // to have enough bits to keep going.
                            if cfg!(not(target_pointer_width = "64")) {
                                fill_bit_buffer(&mut l, &mut in_iter);
                            }

                            if let Some((symbol, code_len)) = r.tables[LITLEN_TABLE].lookup(l.bit_buf) {

                                l.bit_buf >>= code_len;
                                l.num_bits -= code_len;
                                // The previous symbol was a literal, so write it directly and check
                                // the next one.
                                out_buf.write_byte(l.counter as u8);
                                if (symbol & 256) != 0 {
                                    l.counter = symbol as u32;
                                    // The symbol is a length value.
                                    Action::Jump(HuffDecodeOuterLoop1)
                                } else {
                                    // The symbol is a literal, so write it directly and continue.
                                    out_buf.write_byte(symbol as u8);
                                    Action::None
                                }
                            } else {
                                Action::Jump(InvalidCodeLen)
                            }
                        }
                    } else {
                        Action::Jump(InvalidCodeLen)
                    }
                }
            }),

            WriteSymbol => generate_state!(state, 'state_machine, {
                if l.counter >= 256 {
                    Action::Jump(HuffDecodeOuterLoop1)
                } else if out_buf.bytes_left() > 0 {
                    out_buf.write_byte(l.counter as u8);
                    Action::Jump(DecodeLitlen)
                } else {
                    Action::End(TINFLStatus::HasMoreOutput)
                }
            }),

            HuffDecodeOuterLoop1 => generate_state!(state, 'state_machine, {
                // Mask the top bits since they may contain length info.
                l.counter &= 511;

                if l.counter == 256 {
                    // We hit the end of block symbol.
                    Action::Jump(BlockDone)
                } else if l.counter > 285 {
                    // Invalid code.
                    // We already verified earlier that the code is > 256.
                    Action::Jump(InvalidLitlen)
                } else {
                    // # Optimization
                    // Mask the value to avoid bounds checks.
                    // We could use get_unchecked later if we can statically verify that
                    // this will never go out of bounds.
                    l.num_extra =
                        u32::from(LENGTH_EXTRA[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
                    l.counter = u32::from(LENGTH_BASE[(l.counter - 257) as usize & BASE_EXTRA_MASK]);
                    // Length and distance codes have a number of extra bits depending on
                    // the base, which together with the base gives us the exact value.
                    if l.num_extra != 0 {
                        Action::Jump(ReadExtraBitsLitlen)
                    } else {
                        Action::Jump(DecodeDistance)
                    }
                }
            }),

            ReadExtraBitsLitlen => generate_state!(state, 'state_machine, {
                let num_extra = l.num_extra;
                read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| {
                    l.counter += extra_bits as u32;
                    Action::Jump(DecodeDistance)
                })
            }),

            DecodeDistance => generate_state!(state, 'state_machine, {
                // Try to read a huffman code from the input buffer and look up what
                // length code the decoded symbol refers to.
                decode_huffman_code(r, &mut l, DIST_TABLE, flags, &mut in_iter, |_r, l, symbol| {
                    if symbol > 29 {
                        // Invalid distance code.
                        return Action::Jump(InvalidDist)
                    }
                    // # Optimization
                    // Mask the value to avoid bounds checks.
                    // We could use get_unchecked later if we can statically verify that
                    // this will never go out of bounds.
                    l.num_extra = u32::from(DIST_EXTRA[symbol as usize & BASE_EXTRA_MASK]);
                    l.dist = u32::from(DIST_BASE[symbol as usize & BASE_EXTRA_MASK]);
                    if l.num_extra != 0 {
                        // ReadExtraBitsDistance
                        Action::Jump(ReadExtraBitsDistance)
                    } else {
                        Action::Jump(HuffDecodeOuterLoop2)
                    }
                })
            }),
1615
1616 ReadExtraBitsDistance => generate_state!(state, 'state_machine, {
1617 let num_extra = l.num_extra;
1618 read_bits(&mut l, num_extra, &mut in_iter, flags, |l, extra_bits| {
1619 l.dist += extra_bits as u32;
1620 Action::Jump(HuffDecodeOuterLoop2)
1621 })
1622 }),
1623
1624 HuffDecodeOuterLoop2 => generate_state!(state, 'state_machine, {
1625 if l.dist as usize > out_buf.position() &&
1626 (flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF != 0)
1627 {
1628 // We encountered a distance that refers a position before
1629 // the start of the decoded data, so we can't continue.
1630 Action::Jump(DistanceOutOfBounds)
1631 } else {
1632 let out_pos = out_buf.position();
1633 let source_pos = out_buf.position()
1634 .wrapping_sub(l.dist as usize) & out_buf_size_mask;
1635
1636 let out_len = out_buf.get_ref().len();
1637 let match_end_pos = out_buf.position() + l.counter as usize;
1638
1639 if match_end_pos > out_len ||
1640 // miniz doesn't do this check here. Not sure how it makes sure
1641 // that this case doesn't happen.
1642 (source_pos >= out_pos && (source_pos - out_pos) < l.counter as usize)
1643 {
1644 // Not enough space for all of the data in the output buffer,
1645 // so copy what we have space for.
1646 if l.counter == 0 {
1647 Action::Jump(DecodeLitlen)
1648 } else {
1649 Action::Jump(WriteLenBytesToEnd)
1650 }
1651 } else {
1652 apply_match(
1653 out_buf.get_mut(),
1654 out_pos,
1655 l.dist as usize,
1656 l.counter as usize,
1657 out_buf_size_mask
1658 );
1659 out_buf.set_position(out_pos + l.counter as usize);
1660 Action::Jump(DecodeLitlen)
1661 }
1662 }
1663 }),
1664
1665 WriteLenBytesToEnd => generate_state!(state, 'state_machine, {
1666 if out_buf.bytes_left() > 0 {
1667 let out_pos = out_buf.position();
1668 let source_pos = out_buf.position()
1669 .wrapping_sub(l.dist as usize) & out_buf_size_mask;
1670
1671
1672 let len = cmp::min(out_buf.bytes_left(), l.counter as usize);
1673
1674 transfer(out_buf.get_mut(), source_pos, out_pos, len, out_buf_size_mask);
1675
1676 out_buf.set_position(out_pos + len);
1677 l.counter -= len as u32;
1678 if l.counter == 0 {
1679 Action::Jump(DecodeLitlen)
1680 } else {
1681 Action::None
1682 }
1683 } else {
1684 Action::End(TINFLStatus::HasMoreOutput)
1685 }
1686 }),
1687
1688 BlockDone => generate_state!(state, 'state_machine, {
1689 // End once we've read the last block.
1690 if r.finish != 0 {
1691 pad_to_bytes(&mut l, &mut in_iter, flags, |_| Action::None);
1692
1693 let in_consumed = in_buf.len() - in_iter.len();
1694 let undo = undo_bytes(&mut l, in_consumed as u32) as usize;
1695 in_iter = in_buf[in_consumed - undo..].iter();
1696
1697 l.bit_buf &= ((1 as BitBuffer) << l.num_bits) - 1;
1698 debug_assert_eq!(l.num_bits, 0);
1699
1700 if flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0 {
1701 l.counter = 0;
1702 Action::Jump(ReadAdler32)
1703 } else {
1704 Action::Jump(DoneForever)
1705 }
1706 } else {
1707 Action::Jump(ReadBlockHeader)
1708 }
1709 }),
1710
            ReadAdler32 => generate_state!(state, 'state_machine, {
                if l.counter < 4 {
                    if l.num_bits != 0 {
                        read_bits(&mut l, 8, &mut in_iter, flags, |l, bits| {
                            r.z_adler32 <<= 8;
                            r.z_adler32 |= bits as u32;
                            l.counter += 1;
                            Action::None
                        })
                    } else {
                        read_byte(&mut in_iter, flags, |byte| {
                            r.z_adler32 <<= 8;
                            r.z_adler32 |= u32::from(byte);
                            l.counter += 1;
                            Action::None
                        })
                    }
                } else {
                    Action::Jump(DoneForever)
                }
            }),

            // We are done.
            DoneForever => break TINFLStatus::Done,

            // Anything else indicates failure.
            // BadZlibHeader | BadRawLength | BadDistOrLiteralTableLength | BlockTypeUnexpected |
            // DistanceOutOfBounds |
            // BadTotalSymbols | BadCodeSizeDistPrevLookup | BadCodeSizeSum | InvalidLitlen |
            // InvalidDist | InvalidCodeLen
            _ => break TINFLStatus::Failed,
        };
    };

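    // Unless we paused for more input (or couldn't make progress), report any whole
    // bytes still sitting unused in the bit buffer as unconsumed so the caller can
    // re-feed them; when pausing, those bits stay in the saved state instead and
    // count as consumed.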
    let in_undo = if status != TINFLStatus::NeedsMoreInput
        && status != TINFLStatus::FailedCannotMakeProgress
    {
        undo_bytes(&mut l, (in_buf.len() - in_iter.len()) as u32) as usize
    } else {
        0
    };

    // Make sure HasMoreOutput overrides NeedsMoreInput if the output buffer is full.
    // (Unless the missing input is the adler32 value, in which case we don't need to write anything.)
    // TODO: See if this can be done in a cleaner way.
    if status == TINFLStatus::NeedsMoreInput
        && out_buf.bytes_left() == 0
        && state != State::ReadAdler32
    {
        status = TINFLStatus::HasMoreOutput
    }

    r.state = state;
    r.bit_buf = l.bit_buf;
    r.num_bits = l.num_bits;
    r.dist = l.dist;
    r.counter = l.counter;
    r.num_extra = l.num_extra;

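    // Mask out any stale bits above num_bits so the saved bit buffer holds only
    // valid data.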
    r.bit_buf &= ((1 as BitBuffer) << r.num_bits) - 1;

    // If this is a zlib stream, update the adler32 checksum with the decompressed
    // bytes if requested.
    let need_adler = if (flags & TINFL_FLAG_IGNORE_ADLER32) == 0 {
        flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32) != 0
    } else {
        // If TINFL_FLAG_IGNORE_ADLER32 is enabled, ignore the checksum.
        false
    };
    if need_adler && status as i32 >= 0 {
        let out_buf_pos = out_buf.position();
        r.check_adler32 = update_adler32(r.check_adler32, &out_buf.get_ref()[out_pos..out_buf_pos]);

        // Disabled when fuzzing so that random input from the fuzzer isn't rejected
        // early, before it has a chance to reach interesting parts of the code.
        if !cfg!(fuzzing) {
            // Once we are done, check if the checksum matches the one provided in the zlib header.
            if status == TINFLStatus::Done
                && flags & TINFL_FLAG_PARSE_ZLIB_HEADER != 0
                && r.check_adler32 != r.z_adler32
            {
                status = TINFLStatus::Adler32Mismatch;
            }
        }
    }

    (
        status,
        in_buf.len() - in_iter.len() - in_undo,
        out_buf.position() - out_pos,
    )
}


#[cfg(test)]
mod test {
    use super::*;

    // TODO: Fix these.

    fn tinfl_decompress_oxide<'i>(
        r: &mut DecompressorOxide,
        input_buffer: &'i [u8],
        output_buffer: &mut [u8],
        flags: u32,
    ) -> (TINFLStatus, &'i [u8], usize) {
        let (status, in_pos, out_pos) = decompress(r, input_buffer, output_buffer, 0, flags);
        (status, &input_buffer[in_pos..], out_pos)
    }

    #[test]
    fn decompress_zlib() {
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32 | TINFL_FLAG_PARSE_ZLIB_HEADER;

        let mut b = DecompressorOxide::new();
        const LEN: usize = 32;
        let mut b_buf = [0; LEN];

        // This should fail since the output buffer is too small.
        let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], &mut b_buf, flags);

        assert_eq!(b_status.0, TINFLStatus::Failed);

        let flags = flags | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        b = DecompressorOxide::new();

        // With TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF set this should no longer fail.
        let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], &mut b_buf, flags);

        assert_eq!(b_buf[..b_status.2], b"Hello, zlib!"[..]);
        assert_eq!(b_status.0, TINFLStatus::Done);
    }
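
    // Not part of the original test suite: a minimal sketch of resuming `decompress`
    // across split input, reusing the `encoded` stream from `decompress_zlib` above.
    // The split point (10 bytes) is arbitrary; any mid-stream split should pause with
    // NeedsMoreInput and resume cleanly from the saved state.
    #[test]
    fn decompress_zlib_split_input() {
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32
            | TINFL_FLAG_PARSE_ZLIB_HEADER
            | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        let mut r = DecompressorOxide::new();
        let mut out = [0; 32];

        // Feed the first half with HAS_MORE_INPUT set so the decompressor pauses
        // instead of reporting failure when the input runs out.
        let (status, in_consumed, out_first) = decompress(
            &mut r,
            &encoded[..10],
            &mut out,
            0,
            flags | TINFL_FLAG_HAS_MORE_INPUT,
        );
        assert_eq!(status, TINFLStatus::NeedsMoreInput);

        // Resume from the reported input position, continuing at the current output
        // position so back-references can still reach the earlier bytes.
        let (status, _, out_second) =
            decompress(&mut r, &encoded[in_consumed..], &mut out, out_first, flags);
        assert_eq!(status, TINFLStatus::Done);
        assert_eq!(out[..out_first + out_second], b"Hello, zlib!"[..]);
    }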

    #[cfg(feature = "with-alloc")]
    #[test]
    fn raw_block() {
        const LEN: usize = 64;

        let text = b"Hello, zlib!";
        let encoded = {
            let len = text.len();
            let notlen = !len;
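            // A stored (BTYPE = 00) block: the first byte sets BFINAL = 1 in bit 0
            // and BTYPE = 00 in bits 1-2, followed by LEN and NLEN (= !LEN) as
            // little-endian u16 values.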
            let mut encoded = vec![
                1,
                len as u8,
                (len >> 8) as u8,
                notlen as u8,
                (notlen >> 8) as u8,
            ];
            encoded.extend_from_slice(&text[..]);
            encoded
        };

        let flags = TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;

        let mut b = DecompressorOxide::new();

        let mut b_buf = [0; LEN];

        let b_status = tinfl_decompress_oxide(&mut b, &encoded[..], &mut b_buf, flags);
        assert_eq!(b_buf[..b_status.2], text[..]);
        assert_eq!(b_status.0, TINFLStatus::Done);
    }

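    // The fast lookup table packs the code length above bit 9 of the returned
    // symbol, so mask the result down to the low 9 bits before comparing.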
    fn masked_lookup(table: &HuffmanTable, bit_buf: BitBuffer) -> (i32, u32) {
        let ret = table.lookup(bit_buf).unwrap();
        (ret.0 & 511, ret.1)
    }

    #[test]
    fn fixed_table_lookup() {
        let mut d = DecompressorOxide::new();
        d.block_type = 1;
        start_static_table(&mut d);
        let mut l = LocalVars {
            bit_buf: d.bit_buf,
            num_bits: d.num_bits,
            dist: d.dist,
            counter: d.counter,
            num_extra: d.num_extra,
        };
        init_tree(&mut d, &mut l).unwrap();
        let llt = &d.tables[LITLEN_TABLE];
        let dt = &d.tables[DIST_TABLE];
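        // Per RFC 1951, the fixed literal/length codes use 8 bits for symbols 0-143,
        // 9 bits for 144-255, 7 bits for 256-279 and 8 bits for 280-287; all fixed
        // distance codes are 5 bits.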
        assert_eq!(masked_lookup(llt, 0b00001100), (0, 8));
        assert_eq!(masked_lookup(llt, 0b00011110), (72, 8));
        assert_eq!(masked_lookup(llt, 0b01011110), (74, 8));
        assert_eq!(masked_lookup(llt, 0b11111101), (143, 8));
        assert_eq!(masked_lookup(llt, 0b000010011), (144, 9));
        assert_eq!(masked_lookup(llt, 0b111111111), (255, 9));
        assert_eq!(masked_lookup(llt, 0b00000000), (256, 7));
        assert_eq!(masked_lookup(llt, 0b1110100), (279, 7));
        assert_eq!(masked_lookup(llt, 0b00000011), (280, 8));
        assert_eq!(masked_lookup(llt, 0b11100011), (287, 8));

        assert_eq!(masked_lookup(dt, 0), (0, 5));
        assert_eq!(masked_lookup(dt, 20), (5, 5));
    }

    // Only compile this helper with alloc enabled, as it uses a larger buffer.
    #[cfg(feature = "with-alloc")]
    fn check_result(input: &[u8], expected_status: TINFLStatus, expected_state: State, zlib: bool) {
        let mut r = DecompressorOxide::default();
        let mut output_buf = vec![0; 1024 * 32];
        let flags = if zlib {
            inflate_flags::TINFL_FLAG_PARSE_ZLIB_HEADER
        } else {
            0
        } | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF
            | TINFL_FLAG_HAS_MORE_INPUT;
        let (d_status, _in_bytes, _out_bytes) =
            decompress(&mut r, input, &mut output_buf, 0, flags);
        assert_eq!(expected_status, d_status);
        assert_eq!(expected_state, r.state);
    }

    #[cfg(feature = "with-alloc")]
    #[test]
    fn bogus_input() {
        use self::check_result as cr;
        const F: TINFLStatus = TINFLStatus::Failed;
        const OK: TINFLStatus = TINFLStatus::Done;
        // Bad CM.
        cr(&[0x77, 0x85], F, State::BadZlibHeader, true);
        // Bad window size (but check is correct).
        cr(&[0x88, 0x98], F, State::BadZlibHeader, true);
        // Bad check bits.
        cr(&[0x78, 0x98], F, State::BadZlibHeader, true);

        // Too many code lengths. (From inflate library issues.)
        cr(
            b"M\xff\xffM*\xad\xad\xad\xad\xad\xad\xad\xcd\xcd\xcdM",
            F,
            State::BadDistOrLiteralTableLength,
            false,
        );

        // Bad CLEN. (Also from inflate library issues.)
        cr(
            b"\xdd\xff\xff*M\x94ffffffffff",
            F,
            State::BadDistOrLiteralTableLength,
            false,
        );

        // Port of the inflate coverage tests from zlib-ng:
        // https://github.com/Dead2/zlib-ng/blob/develop/test/infcover.c
        let c = |a, b, c| cr(a, b, c, false);

        // Invalid uncompressed/raw block length.
        c(&[0, 0, 0, 0, 0], F, State::BadRawLength);
        // Ok empty uncompressed block.
        c(&[3, 0], OK, State::DoneForever);
        // Invalid block type.
        c(&[6], F, State::BlockTypeUnexpected);
        // Ok uncompressed block.
        c(&[1, 1, 0, 0xfe, 0xff, 0], OK, State::DoneForever);
        // Too many litlens; we handle this later than zlib, so this test won't
        // give the same result.
        // c(&[0xfc, 0, 0], F, State::BadTotalSymbols);
        // Invalid set of code lengths - TODO: Check if this is the correct error for this.
        c(&[4, 0, 0xfe, 0xff], F, State::BadTotalSymbols);
        // Invalid repeat in list of code lengths.
        // (Try to repeat a non-existent code.)
        c(&[4, 0, 0x24, 0x49, 0], F, State::BadCodeSizeDistPrevLookup);
        // Missing end-of-block code (should we have a separate error for this?) - fails on further input.
        // c(&[4, 0, 0x24, 0xe9, 0xff, 0x6d], F, State::BadTotalSymbols);
        // Invalid set of literals/lengths.
        c(
            &[
                4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x71, 0xff, 0xff, 0x93, 0x11, 0,
            ],
            F,
            State::BadTotalSymbols,
        );
        // Invalid set of distances - returns NeedsMoreInput rather than failing, so disabled.
        // c(&[4, 0x80, 0x49, 0x92, 0x24, 0x49, 0x92, 0x24, 0x0f, 0xb4, 0xff, 0xff, 0xc3, 0x84], F, State::BadTotalSymbols);
        // Invalid distance code.
        c(&[2, 0x7e, 0xff, 0xff], F, State::InvalidDist);

        // Distance refers to a position before the start of the output.
        c(
            &[0x0c, 0xc0, 0x81, 0, 0, 0, 0, 0, 0x90, 0xff, 0x6b, 0x4, 0],
            F,
            State::DistanceOutOfBounds,
        );

        // Trailer tests: the gzip header is not handled by miniz_oxide, so these are disabled.
        // Bad gzip trailer checksum:
        // cr(&[0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0, 0, 0x03, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
        // Bad gzip trailer length:
        // cr(&[0x1f, 0x8b, 0x08, 0, 0, 0, 0, 0, 0, 0, 0x03, 0, 0, 0, 0, 0, 0, 0, 0, 0x01], F, State::BadCRC, false)
    }

    #[test]
    fn empty_output_buffer_non_wrapping() {
        let encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32
            | TINFL_FLAG_PARSE_ZLIB_HEADER
            | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        let mut r = DecompressorOxide::new();
        let mut output_buf: [u8; 0] = [];
        // Check that we handle an empty output buffer without panicking.
        // https://github.com/Frommi/miniz_oxide/issues/23
        let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags);
        assert_eq!(res, (TINFLStatus::HasMoreOutput, 4, 0));
    }

    #[test]
    fn empty_output_buffer_wrapping() {
        let encoded = [
            0x73, 0x49, 0x4d, 0xcb, 0x49, 0x2c, 0x49, 0x55, 0x00, 0x11, 0x00,
        ];
        let flags = TINFL_FLAG_COMPUTE_ADLER32;
        let mut r = DecompressorOxide::new();
        let mut output_buf: [u8; 0] = [];
        // Check that we handle an empty output buffer without panicking.
        // https://github.com/Frommi/miniz_oxide/issues/23
        let res = decompress(&mut r, &encoded, &mut output_buf, 0, flags);
        assert_eq!(res, (TINFLStatus::HasMoreOutput, 2, 0));
    }
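
    // Not part of the original test suite: a minimal sketch exercising the checksum
    // epilogue, reusing the `encoded` stream from `decompress_zlib`. It corrupts the
    // stored adler32 trailer and checks that TINFL_FLAG_IGNORE_ADLER32 suppresses the
    // mismatch error while the default flags report it.
    #[test]
    fn ignore_adler32_flag() {
        let mut encoded = [
            120, 156, 243, 72, 205, 201, 201, 215, 81, 168, 202, 201, 76, 82, 4, 0, 27, 101, 4, 19,
        ];
        // Flip a bit in the last byte of the adler32 trailer.
        let last = encoded.len() - 1;
        encoded[last] ^= 1;

        let flags = TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        let mut out = [0; 32];

        // With the default flags the corrupted checksum is detected.
        let mut r = DecompressorOxide::new();
        let (status, _, _) = decompress(&mut r, &encoded, &mut out, 0, flags);
        assert_eq!(status, TINFLStatus::Adler32Mismatch);

        // With TINFL_FLAG_IGNORE_ADLER32 the checksum is skipped and the data
        // still decompresses.
        let mut r = DecompressorOxide::new();
        let (status, _, out_len) =
            decompress(&mut r, &encoded, &mut out, 0, flags | TINFL_FLAG_IGNORE_ADLER32);
        assert_eq!(status, TINFLStatus::Done);
        assert_eq!(out[..out_len], b"Hello, zlib!"[..]);
    }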
}