| 1 | //! Streaming compression functionality. |
| 2 | |
| 3 | use alloc::boxed::Box; |
| 4 | use core::convert::TryInto; |
| 5 | use core::{cmp, mem}; |
| 6 | |
| 7 | use super::super::*; |
| 8 | use super::deflate_flags::*; |
| 9 | use super::CompressionLevel; |
| 10 | use crate::deflate::buffer::{ |
| 11 | update_hash, HashBuffers, LocalBuf, LZ_CODE_BUF_MASK, LZ_CODE_BUF_SIZE, LZ_DICT_FULL_SIZE, |
| 12 | LZ_HASH_BITS, LZ_HASH_SHIFT, LZ_HASH_SIZE, OUT_BUF_SIZE, |
| 13 | }; |
| 14 | use crate::deflate::stored::compress_stored; |
| 15 | use crate::deflate::zlib; |
| 16 | use crate::shared::{update_adler32, HUFFMAN_LENGTH_ORDER, MZ_ADLER32_INIT}; |
| 17 | use crate::DataFormat; |
| 18 | |
// Currently not bubbled up outside this module, so we can fill it in with more
// context eventually if needed.
| 21 | type Result<T, E = Error> = core::result::Result<T, E>; |
| 22 | pub(crate) struct Error {} |
| 23 | |
| 24 | pub(crate) const MAX_PROBES_MASK: u32 = 0xFFF; |
| 25 | |
| 26 | const MAX_SUPPORTED_HUFF_CODESIZE: usize = 15; |
| 27 | |
// Length code for length values - 256.
// We use an offset to help with bound check avoidance, as we can mask the values
// to below 32, and it also saves some memory as we can use a u8 instead of a u16.
// Conveniently, our table is large enough that we can get away with using an
// offset of 256, which results in very efficient code.
| 33 | const LEN_SYM: [u8; 256] = [ |
| 34 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, |
| 35 | 15, 15, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, |
| 36 | 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, |
| 37 | 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, |
| 38 | 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, |
| 39 | 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, |
| 40 | 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, |
| 41 | 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, |
| 42 | 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, |
| 43 | 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, |
| 44 | 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, |
| 45 | ]; |
| 46 | |
| 47 | const LEN_SYM_OFFSET: usize = 256; |
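
// A small sanity sketch (added for illustration, not part of the original code):
// match lengths are stored minus MIN_MATCH_LEN, so a stored value of 0 (actual
// length 3) maps to deflate length symbol 257, and a stored value of 255 (actual
// length 258) maps to symbol 285, mirroring the lookup in `compress_lz_codes`.
#[cfg(test)]
#[test]
fn len_sym_offset_yields_deflate_length_symbols() {
    assert_eq!(LEN_SYM[0] as usize + LEN_SYM_OFFSET, 257);
    assert_eq!(LEN_SYM[255] as usize + LEN_SYM_OFFSET, 285);
}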
| 48 | |
| 49 | /// Number of extra bits for length values. |
| 50 | #[rustfmt::skip] |
| 51 | const LEN_EXTRA: [u8; 256] = [ |
| 52 | 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, |
| 53 | 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, |
| 54 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, |
| 55 | 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, |
| 56 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, |
| 57 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, |
| 58 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, |
| 59 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, |
| 60 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 61 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 62 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 63 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 64 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 65 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 66 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 67 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0 |
| 68 | ]; |
| 69 | |
| 70 | /// Distance codes for distances smaller than 512. |
| 71 | #[rustfmt::skip] |
| 72 | const SMALL_DIST_SYM: [u8; 512] = [ |
| 73 | 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, |
| 74 | 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, |
| 75 | 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, |
| 76 | 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, |
| 77 | 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, |
| 78 | 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, |
| 79 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, |
| 80 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, |
| 81 | 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, |
| 82 | 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, |
| 83 | 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, |
| 84 | 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, |
| 85 | 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, |
| 86 | 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, |
| 87 | 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, |
| 88 | 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, |
| 89 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 90 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 91 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 92 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 93 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 94 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 95 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 96 | 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, |
| 97 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 98 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 99 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 100 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 101 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 102 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 103 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, |
| 104 | 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17 |
| 105 | ]; |
| 106 | |
| 107 | /// Number of extra bits for distances smaller than 512. |
| 108 | #[rustfmt::skip] |
| 109 | const SMALL_DIST_EXTRA: [u8; 512] = [ |
| 110 | 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, |
| 111 | 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, |
| 112 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 113 | 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, |
| 114 | 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, |
| 115 | 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, |
| 116 | 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, |
| 117 | 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, |
| 118 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 119 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 120 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 121 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 122 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 123 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 124 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, |
| 125 | 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 |
| 126 | ]; |
| 127 | |
/// Distance codes for distances of 512 and above, indexed by `distance >> 8`.
| 129 | #[rustfmt::skip] |
| 130 | const LARGE_DIST_SYM: [u8; 128] = [ |
| 131 | 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, |
| 132 | 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, |
| 133 | 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, |
| 134 | 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, |
| 135 | 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, |
| 136 | 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, |
| 137 | 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, |
| 138 | 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 |
| 139 | ]; |
| 140 | |
/// Number of extra bits for distances of 512 and above.
| 142 | #[rustfmt::skip] |
| 143 | const LARGE_DIST_EXTRA: [u8; 128] = [ |
| 144 | 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, |
| 145 | 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, |
| 146 | 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, |
| 147 | 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, |
| 148 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, |
| 149 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, |
| 150 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, |
| 151 | 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13 |
| 152 | ]; |
| 153 | |
| 154 | #[rustfmt::skip] |
| 155 | const BITMASKS: [u32; 17] = [ |
| 156 | 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, |
| 157 | 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF |
| 158 | ]; |
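
// Illustrative check (added for this listing, not in the original code): each
// entry is simply the mask of the n lowest bits, i.e. `(1 << n) - 1`, used when
// extracting the extra bits of length and distance codes.
#[cfg(test)]
#[test]
fn bitmasks_are_low_bit_masks() {
    for (n, &mask) in BITMASKS.iter().enumerate() {
        assert_eq!(mask, (1u32 << n) - 1);
    }
}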
| 159 | |
| 160 | /// The maximum number of checks for matches in the hash table the compressor will make for each |
| 161 | /// compression level. |
| 162 | pub(crate) const NUM_PROBES: [u16; 11] = [0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500]; |
| 163 | |
#[derive(Copy, Clone)]
| 165 | struct SymFreq { |
| 166 | key: u16, |
| 167 | sym_index: u16, |
| 168 | } |
| 169 | |
| 170 | pub mod deflate_flags { |
| 171 | /// Whether to use a zlib wrapper. |
| 172 | pub const TDEFL_WRITE_ZLIB_HEADER: u32 = 0x0000_1000; |
| 173 | /// Should we compute the adler32 checksum. |
| 174 | pub const TDEFL_COMPUTE_ADLER32: u32 = 0x0000_2000; |
    /// Should we use greedy parsing (as opposed to lazy parsing, where we look
    /// ahead one or more bytes to check for better matches).
| 177 | pub const TDEFL_GREEDY_PARSING_FLAG: u32 = 0x0000_4000; |
| 178 | /// Used in miniz to skip zero-initializing hash and dict. We don't do this here, so |
| 179 | /// this flag is ignored. |
| 180 | pub const TDEFL_NONDETERMINISTIC_PARSING_FLAG: u32 = 0x0000_8000; |
| 181 | /// Only look for matches with a distance of 0. |
| 182 | pub const TDEFL_RLE_MATCHES: u32 = 0x0001_0000; |
| 183 | /// Only use matches that are at least 6 bytes long. |
| 184 | pub const TDEFL_FILTER_MATCHES: u32 = 0x0002_0000; |
| 185 | /// Force the compressor to only output static blocks. (Blocks using the default huffman codes |
| 186 | /// specified in the deflate specification.) |
| 187 | pub const TDEFL_FORCE_ALL_STATIC_BLOCKS: u32 = 0x0004_0000; |
| 188 | /// Force the compressor to only output raw/uncompressed blocks. |
| 189 | pub const TDEFL_FORCE_ALL_RAW_BLOCKS: u32 = 0x0008_0000; |
| 190 | } |
| 191 | |
| 192 | /// Strategy setting for compression. |
| 193 | /// |
| 194 | /// The non-default settings offer some special-case compression variants. |
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
| 197 | pub enum CompressionStrategy { |
| 198 | /// Don't use any of the special strategies. |
| 199 | Default = 0, |
| 200 | /// Only use matches that are at least 5 bytes long. |
| 201 | Filtered = 1, |
| 202 | /// Don't look for matches, only huffman encode the literals. |
| 203 | HuffmanOnly = 2, |
    /// Only look for matches with a distance of 1, i.e. do run-length encoding only.
| 205 | RLE = 3, |
| 206 | /// Only use static/fixed blocks. (Blocks using the default huffman codes |
| 207 | /// specified in the deflate specification.) |
| 208 | Fixed = 4, |
| 209 | } |
| 210 | |
| 211 | impl From<CompressionStrategy> for i32 { |
    #[inline(always)]
| 213 | fn from(value: CompressionStrategy) -> Self { |
| 214 | value as i32 |
| 215 | } |
| 216 | } |
| 217 | |
| 218 | /// A list of deflate flush types. |
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
| 220 | pub enum TDEFLFlush { |
| 221 | /// Normal operation. |
| 222 | /// |
| 223 | /// Compress as much as there is space for, and then return waiting for more input. |
| 224 | None = 0, |
| 225 | |
| 226 | /// Try to flush all the current data and output an empty raw block. |
| 227 | Sync = 2, |
| 228 | |
| 229 | /// Same as [`Sync`][Self::Sync], but reset the dictionary so that the following data does not |
| 230 | /// depend on previous data. |
| 231 | Full = 3, |
| 232 | |
| 233 | /// Try to flush everything and end the deflate stream. |
| 234 | /// |
| 235 | /// On success this will yield a [`TDEFLStatus::Done`] return status. |
| 236 | Finish = 4, |
| 237 | } |
| 238 | |
| 239 | impl From<MZFlush> for TDEFLFlush { |
| 240 | fn from(flush: MZFlush) -> Self { |
| 241 | match flush { |
| 242 | MZFlush::None => TDEFLFlush::None, |
| 243 | MZFlush::Sync => TDEFLFlush::Sync, |
| 244 | MZFlush::Full => TDEFLFlush::Full, |
| 245 | MZFlush::Finish => TDEFLFlush::Finish, |
| 246 | _ => TDEFLFlush::None, // TODO: ??? What to do ??? |
| 247 | } |
| 248 | } |
| 249 | } |
| 250 | |
| 251 | impl TDEFLFlush { |
| 252 | pub const fn new(flush: i32) -> Result<Self, MZError> { |
| 253 | match flush { |
| 254 | 0 => Ok(TDEFLFlush::None), |
| 255 | 2 => Ok(TDEFLFlush::Sync), |
| 256 | 3 => Ok(TDEFLFlush::Full), |
| 257 | 4 => Ok(TDEFLFlush::Finish), |
| 258 | _ => Err(MZError::Param), |
| 259 | } |
| 260 | } |
| 261 | } |
| 262 | |
| 263 | /// Return status of compression. |
#[repr(i32)]
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
| 266 | pub enum TDEFLStatus { |
| 267 | /// Usage error. |
| 268 | /// |
| 269 | /// This indicates that either the [`CompressorOxide`] experienced a previous error, or the |
| 270 | /// stream has already been [`TDEFLFlush::Finish`]'d. |
| 271 | BadParam = -2, |
| 272 | |
| 273 | /// Error putting data into output buffer. |
| 274 | /// |
| 275 | /// This usually indicates a too-small buffer. |
| 276 | PutBufFailed = -1, |
| 277 | |
| 278 | /// Compression succeeded normally. |
| 279 | Okay = 0, |
| 280 | |
| 281 | /// Compression succeeded and the deflate stream was ended. |
| 282 | /// |
| 283 | /// This is the result of calling compression with [`TDEFLFlush::Finish`]. |
| 284 | Done = 1, |
| 285 | } |
| 286 | |
| 287 | const MAX_HUFF_SYMBOLS: usize = 288; |
/// Mask for the hash value used in fast compression mode (limits the hash table to 4096 entries).
| 289 | const LEVEL1_HASH_SIZE_MASK: u32 = 4095; |
/// The number of huffman tables used by the compressor:
/// literal/length, distance, and the code lengths used to encode the other two tables.
| 292 | const MAX_HUFF_TABLES: usize = 3; |
| 293 | /// Literal/length codes |
| 294 | const MAX_HUFF_SYMBOLS_0: usize = 288; |
| 295 | /// Distance codes. |
| 296 | const MAX_HUFF_SYMBOLS_1: usize = 32; |
| 297 | /// Huffman length values. |
| 298 | const MAX_HUFF_SYMBOLS_2: usize = 19; |
| 299 | /// Size of the chained hash table. |
| 300 | pub(crate) const LZ_DICT_SIZE: usize = 32_768; |
| 301 | /// Mask used when stepping through the hash chains. |
| 302 | pub(crate) const LZ_DICT_SIZE_MASK: usize = (LZ_DICT_SIZE as u32 - 1) as usize; |
| 303 | /// The minimum length of a match. |
| 304 | pub(crate) const MIN_MATCH_LEN: u8 = 3; |
| 305 | /// The maximum length of a match. |
| 306 | pub(crate) const MAX_MATCH_LEN: usize = 258; |
| 307 | |
| 308 | pub(crate) const DEFAULT_FLAGS: u32 = NUM_PROBES[4] as u32 | TDEFL_WRITE_ZLIB_HEADER; |
| 309 | |
#[cfg(test)]
#[inline]
| 312 | fn write_u16_le(val: u16, slice: &mut [u8], pos: usize) { |
| 313 | slice[pos] = val as u8; |
| 314 | slice[pos + 1] = (val >> 8) as u8; |
| 315 | } |
| 316 | |
// Read the two bytes starting at pos and interpret them as a u16.
#[inline(always)]
| 319 | const fn read_u16_le<const N: usize>(slice: &[u8; N], pos: usize) -> u16 { |
| 320 | // The compiler is smart enough to optimize this into an unaligned load. |
| 321 | slice[pos] as u16 | ((slice[pos + 1] as u16) << 8) |
| 322 | } |
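
// A quick sketch (added for illustration) of the little-endian interpretation
// performed by the helper above.
#[cfg(test)]
#[test]
fn read_u16_le_is_little_endian() {
    let data = [0x34, 0x12, 0xCD, 0xAB];
    assert_eq!(read_u16_le(&data, 0), 0x1234);
    assert_eq!(read_u16_le(&data, 2), 0xABCD);
}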
| 323 | |
| 324 | /// Main compression struct. |
| 325 | pub struct CompressorOxide { |
| 326 | pub(crate) lz: LZOxide, |
| 327 | pub(crate) params: ParamsOxide, |
| 328 | /// Put HuffmanOxide on the heap with default trick to avoid |
| 329 | /// excessive stack copies. |
| 330 | pub(crate) huff: Box<HuffmanOxide>, |
| 331 | pub(crate) dict: DictOxide, |
| 332 | } |
| 333 | |
| 334 | impl CompressorOxide { |
| 335 | /// Create a new `CompressorOxide` with the given flags. |
| 336 | /// |
| 337 | /// # Notes |
| 338 | /// This function may be changed to take different parameters in the future. |
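    ///
    /// # Example
    /// A minimal sketch of constructing a compressor from the flag constants in
    /// [`deflate_flags`] (illustrative; any valid flag combination works):
    /// ```ignore
    /// let flags = deflate_flags::TDEFL_WRITE_ZLIB_HEADER | deflate_flags::TDEFL_COMPUTE_ADLER32;
    /// let compressor = CompressorOxide::new(flags);
    /// // The zlib header flag means the output will be zlib wrapped:
    /// assert!(matches!(compressor.data_format(), DataFormat::Zlib));
    /// ```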
| 339 | pub fn new(flags: u32) -> Self { |
| 340 | CompressorOxide { |
| 341 | lz: LZOxide::new(), |
| 342 | params: ParamsOxide::new(flags), |
| 343 | huff: Box::default(), |
| 344 | dict: DictOxide::new(flags), |
| 345 | } |
| 346 | } |
| 347 | |
| 348 | /// Get the adler32 checksum of the currently encoded data. |
| 349 | pub const fn adler32(&self) -> u32 { |
| 350 | self.params.adler32 |
| 351 | } |
| 352 | |
| 353 | /// Get the return status of the previous [`compress`](fn.compress.html) |
| 354 | /// call with this compressor. |
| 355 | pub const fn prev_return_status(&self) -> TDEFLStatus { |
| 356 | self.params.prev_return_status |
| 357 | } |
| 358 | |
| 359 | /// Get the raw compressor flags. |
| 360 | /// |
| 361 | /// # Notes |
| 362 | /// This function may be deprecated or changed in the future to use more rust-style flags. |
| 363 | pub const fn flags(&self) -> i32 { |
| 364 | self.params.flags as i32 |
| 365 | } |
| 366 | |
| 367 | /// Returns whether the compressor is wrapping the data in a zlib format or not. |
| 368 | pub const fn data_format(&self) -> DataFormat { |
| 369 | if (self.params.flags & TDEFL_WRITE_ZLIB_HEADER) != 0 { |
| 370 | DataFormat::Zlib |
| 371 | } else { |
| 372 | DataFormat::Raw |
| 373 | } |
| 374 | } |
| 375 | |
| 376 | /// Reset the state of the compressor, keeping the same parameters. |
| 377 | /// |
| 378 | /// This avoids re-allocating data. |
| 379 | pub fn reset(&mut self) { |
        // The LZ buffer and huffman state have no settings or dynamic memory
        // that need to be saved, so we simply replace them.
| 382 | self.lz = LZOxide::new(); |
| 383 | self.params.reset(); |
| 384 | *self.huff = HuffmanOxide::default(); |
| 385 | self.dict.reset(); |
| 386 | } |
| 387 | |
| 388 | /// Set the compression level of the compressor. |
| 389 | /// |
| 390 | /// Using this to change level after compression has started is supported. |
| 391 | /// # Notes |
| 392 | /// The compression strategy will be reset to the default one when this is called. |
| 393 | pub fn set_compression_level(&mut self, level: CompressionLevel) { |
| 394 | let format = self.data_format(); |
| 395 | self.set_format_and_level(format, level as u8); |
| 396 | } |
| 397 | |
| 398 | /// Set the compression level of the compressor using an integer value. |
| 399 | /// |
| 400 | /// Using this to change level after compression has started is supported. |
| 401 | /// # Notes |
| 402 | /// The compression strategy will be reset to the default one when this is called. |
| 403 | pub fn set_compression_level_raw(&mut self, level: u8) { |
| 404 | let format = self.data_format(); |
| 405 | self.set_format_and_level(format, level); |
| 406 | } |
| 407 | |
| 408 | /// Update the compression settings of the compressor. |
| 409 | /// |
| 410 | /// Changing the `DataFormat` after compression has started will result in |
| 411 | /// a corrupted stream. |
| 412 | /// |
| 413 | /// # Notes |
    /// This function is mainly intended for setting the initial settings after e.g. creating with
    /// `default` or after calling `CompressorOxide::reset()`, and behaviour may be changed
    /// to disallow calling it after starting compression in the future.
| 417 | pub fn set_format_and_level(&mut self, data_format: DataFormat, level: u8) { |
| 418 | let flags = create_comp_flags_from_zip_params( |
| 419 | level.into(), |
| 420 | data_format.to_window_bits(), |
| 421 | CompressionStrategy::Default as i32, |
| 422 | ); |
| 423 | self.params.update_flags(flags); |
| 424 | self.dict.update_flags(flags); |
| 425 | } |
| 426 | } |
| 427 | |
| 428 | impl Default for CompressorOxide { |
| 429 | /// Initialize the compressor with a level of 4, zlib wrapper and |
| 430 | /// the default strategy. |
| 431 | fn default() -> Self { |
| 432 | CompressorOxide { |
| 433 | lz: LZOxide::new(), |
| 434 | params: ParamsOxide::new(DEFAULT_FLAGS), |
| 435 | huff: Box::default(), |
| 436 | dict: DictOxide::new(DEFAULT_FLAGS), |
| 437 | } |
| 438 | } |
| 439 | } |
| 440 | |
/// Callback function used in `compress_to_output`.
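///
/// A minimal usage sketch (illustrative only), collecting the compressed output
/// into a `Vec`:
/// ```ignore
/// let mut compressed = alloc::vec::Vec::new();
/// let callback = CallbackFunc {
///     put_buf_func: &mut |data: &[u8]| {
///         compressed.extend_from_slice(data);
///         true // returning false reports a write failure to the compressor
///     },
/// };
/// ```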
| 442 | pub struct CallbackFunc<'a> { |
| 443 | pub put_buf_func: &'a mut dyn FnMut(&[u8]) -> bool, |
| 444 | } |
| 445 | |
| 446 | impl CallbackFunc<'_> { |
| 447 | fn flush_output( |
| 448 | &mut self, |
| 449 | saved_output: SavedOutputBufferOxide, |
| 450 | params: &mut ParamsOxide, |
| 451 | ) -> i32 { |
        // TODO: As this could be unsafe since we can't verify the function pointer,
        // this whole function should maybe be unsafe as well.
| 455 | let call_success: bool = (self.put_buf_func)(¶ms.local_buf.b[0..saved_output.pos]); |
| 456 | |
| 457 | if !call_success { |
| 458 | params.prev_return_status = TDEFLStatus::PutBufFailed; |
| 459 | return params.prev_return_status as i32; |
| 460 | } |
| 461 | |
| 462 | params.flush_remaining as i32 |
| 463 | } |
| 464 | } |
| 465 | |
| 466 | struct CallbackBuf<'a> { |
| 467 | pub out_buf: &'a mut [u8], |
| 468 | } |
| 469 | |
| 470 | impl CallbackBuf<'_> { |
| 471 | fn flush_output( |
| 472 | &mut self, |
| 473 | saved_output: SavedOutputBufferOxide, |
| 474 | params: &mut ParamsOxide, |
| 475 | ) -> i32 { |
| 476 | if saved_output.local { |
            let n = cmp::min(saved_output.pos, self.out_buf.len() - params.out_buf_ofs);
| 478 | (self.out_buf[params.out_buf_ofs..params.out_buf_ofs + n]) |
| 479 | .copy_from_slice(¶ms.local_buf.b[..n]); |
| 480 | |
| 481 | params.out_buf_ofs += n; |
| 482 | if saved_output.pos != n { |
| 483 | params.flush_ofs = n as u32; |
| 484 | params.flush_remaining = (saved_output.pos - n) as u32; |
| 485 | } |
| 486 | } else { |
| 487 | params.out_buf_ofs += saved_output.pos; |
| 488 | } |
| 489 | |
| 490 | params.flush_remaining as i32 |
| 491 | } |
| 492 | } |
| 493 | |
| 494 | enum CallbackOut<'a> { |
| 495 | Func(CallbackFunc<'a>), |
| 496 | Buf(CallbackBuf<'a>), |
| 497 | } |
| 498 | |
| 499 | impl CallbackOut<'_> { |
| 500 | fn new_output_buffer<'b>( |
| 501 | &'b mut self, |
| 502 | local_buf: &'b mut [u8], |
| 503 | out_buf_ofs: usize, |
| 504 | ) -> OutputBufferOxide<'b> { |
| 505 | let is_local; |
| 506 | let buf_len = OUT_BUF_SIZE - 16; |
| 507 | let chosen_buffer = match *self { |
| 508 | CallbackOut::Buf(ref mut cb) if cb.out_buf.len() - out_buf_ofs >= OUT_BUF_SIZE => { |
| 509 | is_local = false; |
| 510 | &mut cb.out_buf[out_buf_ofs..out_buf_ofs + buf_len] |
| 511 | } |
| 512 | _ => { |
| 513 | is_local = true; |
| 514 | &mut local_buf[..buf_len] |
| 515 | } |
| 516 | }; |
| 517 | |
| 518 | OutputBufferOxide { |
| 519 | inner: chosen_buffer, |
| 520 | inner_pos: 0, |
| 521 | local: is_local, |
| 522 | bit_buffer: 0, |
| 523 | bits_in: 0, |
| 524 | } |
| 525 | } |
| 526 | } |
| 527 | |
| 528 | pub(crate) struct CallbackOxide<'a> { |
| 529 | in_buf: Option<&'a [u8]>, |
| 530 | in_buf_size: Option<&'a mut usize>, |
| 531 | out_buf_size: Option<&'a mut usize>, |
| 532 | out: CallbackOut<'a>, |
| 533 | } |
| 534 | |
| 535 | impl<'a> CallbackOxide<'a> { |
| 536 | fn new_callback_buf(in_buf: &'a [u8], out_buf: &'a mut [u8]) -> Self { |
| 537 | CallbackOxide { |
| 538 | in_buf: Some(in_buf), |
| 539 | in_buf_size: None, |
| 540 | out_buf_size: None, |
| 541 | out: CallbackOut::Buf(CallbackBuf { out_buf }), |
| 542 | } |
| 543 | } |
| 544 | |
| 545 | fn new_callback_func(in_buf: &'a [u8], callback_func: CallbackFunc<'a>) -> Self { |
| 546 | CallbackOxide { |
| 547 | in_buf: Some(in_buf), |
| 548 | in_buf_size: None, |
| 549 | out_buf_size: None, |
| 550 | out: CallbackOut::Func(callback_func), |
| 551 | } |
| 552 | } |
| 553 | |
| 554 | fn update_size(&mut self, in_size: Option<usize>, out_size: Option<usize>) { |
| 555 | if let (Some(in_size), Some(size)) = (in_size, self.in_buf_size.as_mut()) { |
| 556 | **size = in_size; |
| 557 | } |
| 558 | |
| 559 | if let (Some(out_size), Some(size)) = (out_size, self.out_buf_size.as_mut()) { |
| 560 | **size = out_size |
| 561 | } |
| 562 | } |
| 563 | |
| 564 | fn flush_output( |
| 565 | &mut self, |
| 566 | saved_output: SavedOutputBufferOxide, |
| 567 | params: &mut ParamsOxide, |
| 568 | ) -> i32 { |
| 569 | if saved_output.pos == 0 { |
| 570 | return params.flush_remaining as i32; |
| 571 | } |
| 572 | |
| 573 | self.update_size(Some(params.src_pos), None); |
| 574 | match self.out { |
| 575 | CallbackOut::Func(ref mut cf) => cf.flush_output(saved_output, params), |
| 576 | CallbackOut::Buf(ref mut cb) => cb.flush_output(saved_output, params), |
| 577 | } |
| 578 | } |
| 579 | |
| 580 | pub(crate) fn buf(&mut self) -> Option<&'a [u8]> { |
| 581 | self.in_buf |
| 582 | } |
| 583 | } |
| 584 | |
| 585 | struct OutputBufferOxide<'a> { |
| 586 | pub inner: &'a mut [u8], |
| 587 | pub inner_pos: usize, |
| 588 | pub local: bool, |
| 589 | |
| 590 | pub bit_buffer: u32, |
| 591 | pub bits_in: u32, |
| 592 | } |
| 593 | |
| 594 | impl OutputBufferOxide<'_> { |
    /// Write bits to the bit buffer, flushing it so that any whole
    /// bytes are output to the underlying buffer.
| 598 | fn put_bits(&mut self, bits: u32, len: u32) { |
| 599 | // TODO: Removing this assertion worsens performance |
| 600 | // Need to figure out why |
| 601 | assert!(bits <= ((1u32 << len) - 1u32)); |
| 602 | self.bit_buffer |= bits << self.bits_in; |
| 603 | self.bits_in += len; |
| 604 | |
| 605 | while self.bits_in >= 8 { |
| 606 | self.inner[self.inner_pos] = self.bit_buffer as u8; |
| 607 | self.inner_pos += 1; |
| 608 | self.bit_buffer >>= 8; |
| 609 | self.bits_in -= 8; |
| 610 | } |
| 611 | } |
| 612 | |
    #[inline]
| 614 | /// Write the provided bits to the bit buffer without flushing |
| 615 | /// anything. Does not check if there is actually space for it. |
| 616 | fn put_bits_no_flush(&mut self, bits: u32, len: u32) { |
| 617 | self.bit_buffer |= bits << self.bits_in; |
| 618 | self.bits_in += len; |
| 619 | } |
| 620 | |
| 621 | const fn save(&self) -> SavedOutputBufferOxide { |
| 622 | SavedOutputBufferOxide { |
| 623 | pos: self.inner_pos, |
| 624 | bit_buffer: self.bit_buffer, |
| 625 | bits_in: self.bits_in, |
| 626 | local: self.local, |
| 627 | } |
| 628 | } |
| 629 | |
| 630 | fn load(&mut self, saved: SavedOutputBufferOxide) { |
| 631 | self.inner_pos = saved.pos; |
| 632 | self.bit_buffer = saved.bit_buffer; |
| 633 | self.bits_in = saved.bits_in; |
| 634 | self.local = saved.local; |
| 635 | } |
| 636 | |
    #[inline]
| 638 | /// Pad the bit buffer to a whole byte with |
| 639 | /// zeroes and write that byte to the output buffer. |
| 640 | fn pad_to_bytes(&mut self) { |
| 641 | if self.bits_in != 0 { |
| 642 | let len = 8 - self.bits_in; |
| 643 | self.put_bits(0, len); |
| 644 | } |
| 645 | } |
| 646 | |
    #[inline]
| 648 | fn write_bytes(&mut self, bytes: &[u8]) { |
| 649 | debug_assert_eq!(self.bits_in, 0); |
| 650 | self.inner[self.inner_pos..self.inner_pos + bytes.len()].copy_from_slice(bytes); |
| 651 | self.inner_pos += bytes.len(); |
| 652 | } |
| 653 | } |
| 654 | |
| 655 | struct SavedOutputBufferOxide { |
| 656 | pub pos: usize, |
| 657 | pub bit_buffer: u32, |
| 658 | pub bits_in: u32, |
| 659 | pub local: bool, |
| 660 | } |
| 661 | |
| 662 | struct BitBuffer { |
| 663 | pub bit_buffer: u64, |
| 664 | pub bits_in: u32, |
| 665 | } |
| 666 | |
| 667 | impl BitBuffer { |
| 668 | fn put_fast(&mut self, bits: u64, len: u32) { |
| 669 | self.bit_buffer |= bits << self.bits_in; |
| 670 | self.bits_in += len; |
| 671 | } |
| 672 | |
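    /// Flush the whole bytes currently in the bit buffer to the output.
    ///
    /// Note that eight bytes are copied into the output unconditionally, but
    /// `inner_pos` is only advanced past the complete bytes; the remaining
    /// 0-7 bits stay in the bit buffer for the next call.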
| 673 | fn flush(&mut self, output: &mut OutputBufferOxide) -> Result<()> { |
        let pos = output.inner_pos;
| 675 | { |
| 676 | // isolation to please borrow checker |
            let inner = &mut output.inner[pos..pos + 8];
            let bytes = u64::to_le_bytes(self.bit_buffer);
| 679 | inner.copy_from_slice(&bytes); |
| 680 | } |
| 681 | match output.inner_pos.checked_add((self.bits_in >> 3) as usize) { |
            Some(n) if n <= output.inner.len() => output.inner_pos = n,
| 683 | _ => return Err(Error {}), |
| 684 | } |
| 685 | self.bit_buffer >>= self.bits_in & !7; |
| 686 | self.bits_in &= 7; |
| 687 | Ok(()) |
| 688 | } |
| 689 | } |
| 690 | |
| 691 | /// A struct containing data about huffman codes and symbol frequencies. |
| 692 | /// |
/// NOTE: Only the literal/lengths have enough symbols to actually use
/// the full array. It's unclear why it's defined like this in miniz;
/// it could be for cache/alignment reasons.
| 696 | pub(crate) struct HuffmanOxide { |
| 697 | /// Number of occurrences of each symbol. |
| 698 | pub count: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], |
| 699 | /// The bits of the huffman code assigned to the symbol |
| 700 | pub codes: [[u16; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], |
| 701 | /// The length of the huffman code assigned to the symbol. |
| 702 | pub code_sizes: [[u8; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], |
| 703 | } |
| 704 | |
| 705 | /// Tables used for literal/lengths in `HuffmanOxide`. |
| 706 | const LITLEN_TABLE: usize = 0; |
| 707 | /// Tables for distances. |
| 708 | const DIST_TABLE: usize = 1; |
| 709 | /// Tables for the run-length encoded huffman lengths for literals/lengths/distances. |
| 710 | const HUFF_CODES_TABLE: usize = 2; |
| 711 | |
| 712 | /// Status of RLE encoding of huffman code lengths. |
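///
/// Deflate compresses the code length table itself using three special symbols
/// (see RFC 1951): code 16 repeats the previous length 3-6 times, code 17
/// encodes a run of 3-10 zero lengths, and code 18 a run of 11-138 zeros.
/// The methods below emit those symbols once a pending run ends.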
| 713 | struct Rle { |
| 714 | pub z_count: u32, |
| 715 | pub repeat_count: u16, |
| 716 | pub prev_code_size: u8, |
| 717 | } |
| 718 | |
| 719 | impl Rle { |
| 720 | fn prev_code_size( |
| 721 | &mut self, |
| 722 | packed_code_sizes: &mut [u8], |
| 723 | packed_pos: &mut usize, |
| 724 | h: &mut HuffmanOxide, |
| 725 | ) -> Result<()> { |
| 726 | let mut write = |buf| write(buf, packed_code_sizes, packed_pos); |
| 727 | let counts = &mut h.count[HUFF_CODES_TABLE]; |
| 728 | if self.repeat_count != 0 { |
| 729 | if self.repeat_count < 3 { |
| 730 | counts[self.prev_code_size as usize] = |
| 731 | counts[self.prev_code_size as usize].wrapping_add(self.repeat_count); |
| 732 | let code = self.prev_code_size; |
| 733 | write(&[code, code, code][..self.repeat_count as usize])?; |
| 734 | } else { |
| 735 | counts[16] = counts[16].wrapping_add(1); |
| 736 | write(&[16, (self.repeat_count - 3) as u8][..])?; |
| 737 | } |
| 738 | self.repeat_count = 0; |
| 739 | } |
| 740 | |
| 741 | Ok(()) |
| 742 | } |
| 743 | |
| 744 | fn zero_code_size( |
| 745 | &mut self, |
| 746 | packed_code_sizes: &mut [u8], |
| 747 | packed_pos: &mut usize, |
| 748 | h: &mut HuffmanOxide, |
| 749 | ) -> Result<()> { |
| 750 | let mut write = |buf| write(buf, packed_code_sizes, packed_pos); |
| 751 | let counts = &mut h.count[HUFF_CODES_TABLE]; |
| 752 | if self.z_count != 0 { |
| 753 | if self.z_count < 3 { |
| 754 | counts[0] = counts[0].wrapping_add(self.z_count as u16); |
| 755 | write(&[0, 0, 0][..self.z_count as usize])?; |
| 756 | } else if self.z_count <= 10 { |
| 757 | counts[17] = counts[17].wrapping_add(1); |
| 758 | write(&[17, (self.z_count - 3) as u8][..])?; |
| 759 | } else { |
| 760 | counts[18] = counts[18].wrapping_add(1); |
| 761 | write(&[18, (self.z_count - 11) as u8][..])?; |
| 762 | } |
| 763 | self.z_count = 0; |
| 764 | } |
| 765 | |
| 766 | Ok(()) |
| 767 | } |
| 768 | } |
| 769 | |
| 770 | fn write(src: &[u8], dst: &mut [u8], dst_pos: &mut usize) -> Result<()> { |
| 771 | match dst.get_mut(*dst_pos..*dst_pos + src.len()) { |
        Some(s) => s.copy_from_slice(src),
| 773 | None => return Err(Error {}), |
| 774 | } |
| 775 | *dst_pos += src.len(); |
| 776 | Ok(()) |
| 777 | } |
| 778 | |
| 779 | impl Default for HuffmanOxide { |
| 780 | fn default() -> Self { |
| 781 | HuffmanOxide { |
| 782 | count: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], |
| 783 | codes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], |
| 784 | code_sizes: [[0; MAX_HUFF_SYMBOLS]; MAX_HUFF_TABLES], |
| 785 | } |
| 786 | } |
| 787 | } |
| 788 | |
| 789 | impl HuffmanOxide { |
| 790 | fn radix_sort_symbols<'a>( |
| 791 | symbols0: &'a mut [SymFreq], |
| 792 | symbols1: &'a mut [SymFreq], |
| 793 | ) -> &'a mut [SymFreq] { |
| 794 | let mut hist = [[0; 256]; 2]; |
| 795 | |
| 796 | for freq in symbols0.iter() { |
| 797 | hist[0][(freq.key & 0xFF) as usize] += 1; |
| 798 | hist[1][((freq.key >> 8) & 0xFF) as usize] += 1; |
| 799 | } |
| 800 | |
| 801 | let mut n_passes = 2; |
| 802 | if symbols0.len() == hist[1][0] { |
| 803 | n_passes -= 1; |
| 804 | } |
| 805 | |
| 806 | let mut current_symbols = symbols0; |
| 807 | let mut new_symbols = symbols1; |
| 808 | |
| 809 | for (pass, hist_item) in hist.iter().enumerate().take(n_passes) { |
| 810 | let mut offsets = [0; 256]; |
| 811 | let mut offset = 0; |
| 812 | for i in 0..256 { |
| 813 | offsets[i] = offset; |
| 814 | offset += hist_item[i]; |
| 815 | } |
| 816 | |
| 817 | for sym in current_symbols.iter() { |
| 818 | let j = ((sym.key >> (pass * 8)) & 0xFF) as usize; |
| 819 | new_symbols[offsets[j]] = *sym; |
| 820 | offsets[j] += 1; |
| 821 | } |
| 822 | |
| 823 | mem::swap(&mut current_symbols, &mut new_symbols); |
| 824 | } |
| 825 | |
| 826 | current_symbols |
| 827 | } |
| 828 | |
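    /// Compute optimal code lengths in place from the frequency-sorted symbol
    /// list. This follows the in-place minimum-redundancy code length approach
    /// of Moffat and Katajainen, as used in the original miniz.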
| 829 | fn calculate_minimum_redundancy(symbols: &mut [SymFreq]) { |
| 830 | match symbols.len() { |
| 831 | 0 => (), |
| 832 | 1 => symbols[0].key = 1, |
| 833 | n => { |
| 834 | symbols[0].key += symbols[1].key; |
| 835 | let mut root = 0; |
| 836 | let mut leaf = 2; |
| 837 | for next in 1..n - 1 { |
| 838 | if (leaf >= n) || (symbols[root].key < symbols[leaf].key) { |
| 839 | symbols[next].key = symbols[root].key; |
| 840 | symbols[root].key = next as u16; |
| 841 | root += 1; |
| 842 | } else { |
| 843 | symbols[next].key = symbols[leaf].key; |
| 844 | leaf += 1; |
| 845 | } |
| 846 | |
| 847 | if (leaf >= n) || (root < next && symbols[root].key < symbols[leaf].key) { |
| 848 | symbols[next].key = symbols[next].key.wrapping_add(symbols[root].key); |
| 849 | symbols[root].key = next as u16; |
| 850 | root += 1; |
| 851 | } else { |
| 852 | symbols[next].key = symbols[next].key.wrapping_add(symbols[leaf].key); |
| 853 | leaf += 1; |
| 854 | } |
| 855 | } |
| 856 | |
| 857 | symbols[n - 2].key = 0; |
| 858 | for next in (0..n - 2).rev() { |
| 859 | symbols[next].key = symbols[symbols[next].key as usize].key + 1; |
| 860 | } |
| 861 | |
| 862 | let mut avbl = 1; |
| 863 | let mut used = 0; |
| 864 | let mut dpth = 0; |
| 865 | let mut root = (n - 2) as i32; |
| 866 | let mut next = (n - 1) as i32; |
| 867 | while avbl > 0 { |
| 868 | while (root >= 0) && (symbols[root as usize].key == dpth) { |
| 869 | used += 1; |
| 870 | root -= 1; |
| 871 | } |
| 872 | while avbl > used { |
| 873 | symbols[next as usize].key = dpth; |
| 874 | next -= 1; |
| 875 | avbl -= 1; |
| 876 | } |
| 877 | avbl = 2 * used; |
| 878 | dpth += 1; |
| 879 | used = 0; |
| 880 | } |
| 881 | } |
| 882 | } |
| 883 | } |
| 884 | |
| 885 | fn enforce_max_code_size(num_codes: &mut [i32], code_list_len: usize, max_code_size: usize) { |
| 886 | if code_list_len <= 1 { |
| 887 | return; |
| 888 | } |
| 889 | |
| 890 | num_codes[max_code_size] += num_codes[max_code_size + 1..].iter().sum::<i32>(); |
| 891 | let total = num_codes[1..=max_code_size] |
| 892 | .iter() |
| 893 | .rev() |
| 894 | .enumerate() |
| 895 | .fold(0u32, |total, (i, &x)| total + ((x as u32) << i)); |
| 896 | |
| 897 | for _ in (1 << max_code_size)..total { |
| 898 | num_codes[max_code_size] -= 1; |
| 899 | for i in (1..max_code_size).rev() { |
| 900 | if num_codes[i] != 0 { |
| 901 | num_codes[i] -= 1; |
| 902 | num_codes[i + 1] += 2; |
| 903 | break; |
| 904 | } |
| 905 | } |
| 906 | } |
| 907 | } |
| 908 | |
| 909 | fn optimize_table( |
| 910 | &mut self, |
| 911 | table_num: usize, |
| 912 | table_len: usize, |
| 913 | code_size_limit: usize, |
| 914 | static_table: bool, |
| 915 | ) { |
| 916 | let mut num_codes = [0i32; 32 + 1]; |
| 917 | let mut next_code = [0u32; MAX_SUPPORTED_HUFF_CODESIZE + 1]; |
| 918 | |
| 919 | if static_table { |
| 920 | for &code_size in &self.code_sizes[table_num][..table_len] { |
| 921 | num_codes[code_size as usize] += 1; |
| 922 | } |
| 923 | } else { |
| 924 | let mut symbols0 = [SymFreq { |
| 925 | key: 0, |
| 926 | sym_index: 0, |
| 927 | }; MAX_HUFF_SYMBOLS]; |
| 928 | let mut symbols1 = [SymFreq { |
| 929 | key: 0, |
| 930 | sym_index: 0, |
| 931 | }; MAX_HUFF_SYMBOLS]; |
| 932 | |
| 933 | let mut num_used_symbols = 0; |
| 934 | for i in 0..table_len { |
| 935 | if self.count[table_num][i] != 0 { |
| 936 | symbols0[num_used_symbols] = SymFreq { |
| 937 | key: self.count[table_num][i], |
| 938 | sym_index: i as u16, |
| 939 | }; |
| 940 | num_used_symbols += 1; |
| 941 | } |
| 942 | } |
| 943 | |
| 944 | let symbols = Self::radix_sort_symbols( |
| 945 | &mut symbols0[..num_used_symbols], |
| 946 | &mut symbols1[..num_used_symbols], |
| 947 | ); |
| 948 | Self::calculate_minimum_redundancy(symbols); |
| 949 | |
| 950 | for symbol in symbols.iter() { |
| 951 | num_codes[symbol.key as usize] += 1; |
| 952 | } |
| 953 | |
| 954 | Self::enforce_max_code_size(&mut num_codes, num_used_symbols, code_size_limit); |
| 955 | |
| 956 | self.code_sizes[table_num].fill(0); |
| 957 | self.codes[table_num].fill(0); |
| 958 | |
| 959 | let mut last = num_used_symbols; |
| 960 | for (i, &num_item) in num_codes |
| 961 | .iter() |
| 962 | .enumerate() |
| 963 | .take(code_size_limit + 1) |
| 964 | .skip(1) |
| 965 | { |
| 966 | let first = last - num_item as usize; |
| 967 | for symbol in &symbols[first..last] { |
| 968 | self.code_sizes[table_num][symbol.sym_index as usize] = i as u8; |
| 969 | } |
| 970 | last = first; |
| 971 | } |
| 972 | } |
| 973 | |
| 974 | let mut j = 0; |
| 975 | next_code[1] = 0; |
| 976 | for i in 2..=code_size_limit { |
| 977 | j = (j + num_codes[i - 1]) << 1; |
| 978 | next_code[i] = j as u32; |
| 979 | } |
| 980 | |
| 981 | for (&code_size, huff_code) in self.code_sizes[table_num] |
| 982 | .iter() |
| 983 | .take(table_len) |
| 984 | .zip(self.codes[table_num].iter_mut().take(table_len)) |
| 985 | { |
| 986 | if code_size == 0 { |
| 987 | continue; |
| 988 | } |
| 989 | |
| 990 | let code = next_code[code_size as usize]; |
| 991 | |
| 992 | next_code[code_size as usize] += 1; |
| 993 | |
| 994 | let rev_code = (code as u16).reverse_bits() >> (16 - code_size); |
| 995 | |
| 996 | *huff_code = rev_code; |
| 997 | } |
| 998 | } |
| 999 | |
| 1000 | fn start_static_block(&mut self, output: &mut OutputBufferOxide) { |
| 1001 | self.code_sizes[LITLEN_TABLE][0..144].fill(8); |
| 1002 | self.code_sizes[LITLEN_TABLE][144..256].fill(9); |
| 1003 | self.code_sizes[LITLEN_TABLE][256..280].fill(7); |
| 1004 | self.code_sizes[LITLEN_TABLE][280..288].fill(8); |
| 1005 | |
| 1006 | self.code_sizes[DIST_TABLE][..32].fill(5); |
| 1007 | |
| 1008 | self.optimize_table(LITLEN_TABLE, 288, 15, true); |
| 1009 | self.optimize_table(DIST_TABLE, 32, 15, true); |
| 1010 | |
| 1011 | output.put_bits(0b01, 2) |
| 1012 | } |
| 1013 | |
| 1014 | fn start_dynamic_block(&mut self, output: &mut OutputBufferOxide) -> Result<()> { |
        // There will always be one, and only one, end-of-block code.
| 1016 | self.count[0][256] = 1; |
| 1017 | |
| 1018 | self.optimize_table(0, MAX_HUFF_SYMBOLS_0, 15, false); |
| 1019 | self.optimize_table(1, MAX_HUFF_SYMBOLS_1, 15, false); |
| 1020 | |
| 1021 | let num_lit_codes = 286 |
| 1022 | - &self.code_sizes[0][257..286] |
| 1023 | .iter() |
| 1024 | .rev() |
| 1025 | .take_while(|&x| *x == 0) |
| 1026 | .count(); |
| 1027 | |
| 1028 | let num_dist_codes = 30 |
| 1029 | - &self.code_sizes[1][1..30] |
| 1030 | .iter() |
| 1031 | .rev() |
| 1032 | .take_while(|&x| *x == 0) |
| 1033 | .count(); |
| 1034 | |
| 1035 | let mut code_sizes_to_pack = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1]; |
| 1036 | let mut packed_code_sizes = [0u8; MAX_HUFF_SYMBOLS_0 + MAX_HUFF_SYMBOLS_1]; |
| 1037 | |
| 1038 | let total_code_sizes_to_pack = num_lit_codes + num_dist_codes; |
| 1039 | |
| 1040 | code_sizes_to_pack[..num_lit_codes].copy_from_slice(&self.code_sizes[0][..num_lit_codes]); |
| 1041 | |
| 1042 | code_sizes_to_pack[num_lit_codes..total_code_sizes_to_pack] |
| 1043 | .copy_from_slice(&self.code_sizes[1][..num_dist_codes]); |
| 1044 | |
| 1045 | let mut rle = Rle { |
| 1046 | z_count: 0, |
| 1047 | repeat_count: 0, |
| 1048 | prev_code_size: 0xFF, |
| 1049 | }; |
| 1050 | |
| 1051 | self.count[HUFF_CODES_TABLE][..MAX_HUFF_SYMBOLS_2].fill(0); |
| 1052 | |
| 1053 | let mut packed_pos = 0; |
| 1054 | for &code_size in &code_sizes_to_pack[..total_code_sizes_to_pack] { |
| 1055 | if code_size == 0 { |
| 1056 | rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1057 | rle.z_count += 1; |
| 1058 | if rle.z_count == 138 { |
| 1059 | rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1060 | } |
| 1061 | } else { |
| 1062 | rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1063 | if code_size != rle.prev_code_size { |
| 1064 | rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1065 | self.count[HUFF_CODES_TABLE][code_size as usize] = |
| 1066 | self.count[HUFF_CODES_TABLE][code_size as usize].wrapping_add(1); |
| 1067 | write(&[code_size], &mut packed_code_sizes, &mut packed_pos)?; |
| 1068 | } else { |
| 1069 | rle.repeat_count += 1; |
| 1070 | if rle.repeat_count == 6 { |
| 1071 | rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1072 | } |
| 1073 | } |
| 1074 | } |
| 1075 | rle.prev_code_size = code_size; |
| 1076 | } |
| 1077 | |
| 1078 | if rle.repeat_count != 0 { |
| 1079 | rle.prev_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1080 | } else { |
| 1081 | rle.zero_code_size(&mut packed_code_sizes, &mut packed_pos, self)?; |
| 1082 | } |
| 1083 | |
| 1084 | self.optimize_table(2, MAX_HUFF_SYMBOLS_2, 7, false); |
| 1085 | |
| 1086 | output.put_bits_no_flush(2, 2); |
| 1087 | |
| 1088 | output.put_bits_no_flush((num_lit_codes - 257) as u32, 5); |
| 1089 | output.put_bits_no_flush((num_dist_codes - 1) as u32, 5); |
| 1090 | |
| 1091 | let mut num_bit_lengths = 18 |
| 1092 | - HUFFMAN_LENGTH_ORDER |
| 1093 | .iter() |
| 1094 | .rev() |
| 1095 | .take_while(|&swizzle| self.code_sizes[HUFF_CODES_TABLE][*swizzle as usize] == 0) |
| 1096 | .count(); |
| 1097 | |
| 1098 | num_bit_lengths = cmp::max(4, num_bit_lengths + 1); |
| 1099 | output.put_bits(num_bit_lengths as u32 - 4, 4); |
| 1100 | for &swizzle in &HUFFMAN_LENGTH_ORDER[..num_bit_lengths] { |
| 1101 | output.put_bits( |
| 1102 | u32::from(self.code_sizes[HUFF_CODES_TABLE][swizzle as usize]), |
| 1103 | 3, |
| 1104 | ); |
| 1105 | } |
| 1106 | |
| 1107 | let mut packed_code_size_index = 0; |
| 1108 | while packed_code_size_index < packed_pos { |
| 1109 | let code = packed_code_sizes[packed_code_size_index] as usize; |
| 1110 | packed_code_size_index += 1; |
| 1111 | assert!(code < MAX_HUFF_SYMBOLS_2); |
| 1112 | output.put_bits( |
| 1113 | u32::from(self.codes[HUFF_CODES_TABLE][code]), |
| 1114 | u32::from(self.code_sizes[HUFF_CODES_TABLE][code]), |
| 1115 | ); |
| 1116 | if code >= 16 { |
| 1117 | output.put_bits( |
| 1118 | u32::from(packed_code_sizes[packed_code_size_index]), |
| 1119 | [2, 3, 7][code - 16], |
| 1120 | ); |
| 1121 | packed_code_size_index += 1; |
| 1122 | } |
| 1123 | } |
| 1124 | |
| 1125 | Ok(()) |
| 1126 | } |
| 1127 | } |
| 1128 | |
| 1129 | pub(crate) struct DictOxide { |
    /// The maximum number of checks in the hash chain, for the initial
    /// and the lazy match respectively.
| 1132 | pub max_probes: [u32; 2], |
| 1133 | /// Buffer of input data. |
| 1134 | /// Padded with 1 byte to simplify matching code in `compress_fast`. |
| 1135 | pub b: HashBuffers, |
| 1136 | |
| 1137 | pub code_buf_dict_pos: usize, |
| 1138 | pub lookahead_size: usize, |
| 1139 | pub lookahead_pos: usize, |
| 1140 | pub size: usize, |
| 1141 | loop_len: u8, |
| 1142 | } |
| 1143 | |
| 1144 | const fn probes_from_flags(flags: u32) -> [u32; 2] { |
| 1145 | [ |
| 1146 | 1 + ((flags & 0xFFF) + 2) / 3, |
| 1147 | 1 + (((flags & 0xFFF) >> 2) + 2) / 3, |
| 1148 | ] |
| 1149 | } |
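
// Illustrative sketch (added for this listing, not in the original code): the
// low 12 bits of the flags hold the probe count from `NUM_PROBES`, which the
// function above splits into probe budgets for the initial and the lazy match
// search. Expected values are computed directly from the formula above.
#[cfg(test)]
#[test]
fn probes_from_flags_splits_probe_budget() {
    assert_eq!(probes_from_flags(NUM_PROBES[4] as u32), [7, 3]);
    assert_eq!(probes_from_flags(NUM_PROBES[10] as u32), [501, 126]);
}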
| 1150 | |
| 1151 | impl DictOxide { |
| 1152 | fn new(flags: u32) -> Self { |
| 1153 | DictOxide { |
| 1154 | max_probes: probes_from_flags(flags), |
| 1155 | b: HashBuffers::default(), |
| 1156 | code_buf_dict_pos: 0, |
| 1157 | lookahead_size: 0, |
| 1158 | lookahead_pos: 0, |
| 1159 | size: 0, |
| 1160 | loop_len: 32, |
| 1161 | } |
| 1162 | } |
| 1163 | |
| 1164 | fn update_flags(&mut self, flags: u32) { |
| 1165 | self.max_probes = probes_from_flags(flags); |
| 1166 | } |
| 1167 | |
| 1168 | fn reset(&mut self) { |
| 1169 | self.b.reset(); |
| 1170 | self.code_buf_dict_pos = 0; |
| 1171 | self.lookahead_size = 0; |
| 1172 | self.lookahead_pos = 0; |
| 1173 | self.size = 0; |
| 1174 | } |
| 1175 | |
    /// Do an unaligned read of the data at `pos` in the dictionary and
    /// interpret it as a little-endian `u32`.
    #[inline]
| 1179 | fn read_unaligned_u32(&self, pos: usize) -> u32 { |
| 1180 | // Masking the value here helps avoid bounds checks. |
| 1181 | let pos = pos & LZ_DICT_SIZE_MASK; |
| 1182 | let end = pos + 4; |
        // Somehow this assertion makes things faster.
        // TODO: As of May 2024 this does not seem to make any difference,
        // so consider removing it.
| 1186 | assert!(end < LZ_DICT_FULL_SIZE); |
| 1187 | |
| 1188 | let bytes: [u8; 4] = self.b.dict[pos..end].try_into().unwrap(); |
| 1189 | u32::from_le_bytes(bytes) |
| 1190 | } |
| 1191 | |
    /// Do an unaligned read of the data at `pos` in the dictionary and
    /// interpret it as a little-endian `u64`.
    #[inline]
| 1195 | fn read_unaligned_u64(&self, pos: usize) -> u64 { |
| 1196 | // Help evade bounds/panic code check by masking the position value |
| 1197 | // This provides a small speedup at the cost of an instruction or two instead of |
| 1198 | // having to use unsafe. |
| 1199 | let pos = pos & LZ_DICT_SIZE_MASK; |
| 1200 | let bytes: [u8; 8] = self.b.dict[pos..pos + 8].try_into().unwrap(); |
| 1201 | u64::from_le_bytes(bytes) |
| 1202 | } |
| 1203 | |
| 1204 | /// Try to find a match for the data at lookahead_pos in the dictionary that is |
| 1205 | /// longer than `match_len`. |
| 1206 | /// Returns a tuple containing (match_distance, match_length). Will be equal to the input |
| 1207 | /// values if no better matches were found. |
| 1208 | fn find_match( |
| 1209 | &self, |
| 1210 | lookahead_pos: usize, |
| 1211 | max_dist: usize, |
| 1212 | max_match_len: u32, |
| 1213 | mut match_dist: u32, |
| 1214 | mut match_len: u32, |
| 1215 | ) -> (u32, u32) { |
        // Clamp the match len and max_match_len to be valid. (They should be valid
        // when this is called, but clamp them for now just in case, for safety reasons.)
| 1218 | // This should normally end up as at worst conditional moves, |
| 1219 | // so it shouldn't slow us down much. |
| 1220 | // TODO: Statically verify these so we don't need to do this. |
| 1221 | let max_match_len = cmp::min(MAX_MATCH_LEN as u32, max_match_len); |
| 1222 | match_len = cmp::max(match_len, 1); |
| 1223 | |
| 1224 | // If we already have a match of the full length don't bother searching for another one. |
| 1225 | if max_match_len <= match_len { |
| 1226 | return (match_dist, match_len); |
| 1227 | } |
| 1228 | |
| 1229 | let pos = lookahead_pos & LZ_DICT_SIZE_MASK; |
| 1230 | let mut probe_pos = pos; |
| 1231 | // Number of probes into the hash chains. |
| 1232 | let mut num_probes_left = if match_len < 32 { |
| 1233 | self.max_probes[0] |
| 1234 | } else { |
| 1235 | self.max_probes[1] |
| 1236 | }; |
| 1237 | |
| 1238 | // Read the last byte of the current match, and the next one, used to compare matches. |
| 1239 | let mut c01: u16 = read_u16_le(&self.b.dict, pos + match_len as usize - 1); |
        // Read the two bytes at the start position of the current match.
| 1241 | let s01: u16 = read_u16_le(&self.b.dict, pos); |
| 1242 | |
| 1243 | 'outer: loop { |
| 1244 | let mut dist; |
| 1245 | 'found: loop { |
| 1246 | num_probes_left -= 1; |
| 1247 | if num_probes_left == 0 { |
| 1248 | // We have done as many probes in the hash chain as the current compression |
| 1249 | // settings allow, so return the best match we found, if any. |
| 1250 | return (match_dist, match_len); |
| 1251 | } |
| 1252 | |
| 1253 | for _ in 0..3 { |
| 1254 | let next_probe_pos = self.b.next[probe_pos] as usize; |
| 1255 | |
| 1256 | dist = (lookahead_pos - next_probe_pos) & 0xFFFF; |
                    // Optimization: The last condition should never be hit, but it helps the
                    // compiler avoid the bounds check in the read_u16_le call and the extra
                    // instructions for branching to a panic after it; instead it just adds the
                    // one extra check here, saving some instructions and thus improving
                    // performance a bit.
                    // We may want to investigate whether we can avoid it entirely, but as of now
                    // the compiler isn't able to deduce that match_len - 1 is bounded to [1, 257].
                    // Disable the clippy lint, as the check needs to be written in this specific
                    // way rather than in terms of MAX_MATCH_LEN to work,
                    // because the compiler isn't super smart....
                    #[allow(clippy::int_plus_one)]
| 1267 | if next_probe_pos == 0 |
| 1268 | || dist > max_dist |
| 1269 | || match_len as usize - 1 >= MAX_MATCH_LEN |
| 1270 | { |
| 1271 | // We reached the end of the hash chain, or the next value is further away |
| 1272 | // than the maximum allowed distance, so return the best match we found, if |
| 1273 | // any. |
| 1274 | return (match_dist, match_len); |
| 1275 | } |
| 1276 | |
| 1277 | // Mask the position value to get the position in the hash chain of the next |
| 1278 | // position to match against. |
| 1279 | probe_pos = next_probe_pos & LZ_DICT_SIZE_MASK; |
| 1280 | |
| 1281 | if read_u16_le(&self.b.dict, probe_pos + match_len as usize - 1) == c01 { |
| 1282 | break 'found; |
| 1283 | } |
| 1284 | } |
| 1285 | } |
| 1286 | |
| 1287 | if dist == 0 { |
| 1288 | // We've looked through the whole match range, so return the best match we |
| 1289 | // found. |
| 1290 | return (match_dist, match_len); |
| 1291 | } |
| 1292 | |
| 1293 | // Check if the two first bytes match. |
| 1294 | if read_u16_le(&self.b.dict, probe_pos) != s01 { |
| 1295 | continue; |
| 1296 | } |
| 1297 | |
| 1298 | let mut p = pos + 2; |
| 1299 | let mut q = probe_pos + 2; |
| 1300 | // The first two bytes matched, so check the full length of the match. |
            // TODO: This is a workaround for an upstream issue introduced after an LLVM
            // upgrade in Rust 1.82. The compiler is too smart and ends up unrolling the loop,
            // which makes performance worse. Using a variable instead of a constant for the
            // loop bound seems to prevent that and gets back at least some of the loss.
| 1304 | for _ in 0..self.loop_len as i32 { |
| 1305 | let p_data: u64 = self.read_unaligned_u64(p); |
| 1306 | let q_data: u64 = self.read_unaligned_u64(q); |
| 1307 | // Compare of 8 bytes at a time by using unaligned loads of 64-bit integers. |
| 1308 | let xor_data = p_data ^ q_data; |
| 1309 | if xor_data == 0 { |
| 1310 | p += 8; |
| 1311 | q += 8; |
| 1312 | } else { |
                    // If not all of the last 8 bytes matched, check how many of them did.
| 1314 | let trailing = xor_data.trailing_zeros(); |
| 1315 | |
| 1316 | let probe_len = p - pos + (trailing as usize >> 3); |
| 1317 | if probe_len > match_len as usize { |
| 1318 | match_dist = dist as u32; |
| 1319 | match_len = cmp::min(max_match_len, probe_len as u32); |
| 1320 | if match_len >= max_match_len { |
                            // We found a match that had the maximum allowed length,
                            // so there is no point in searching further.
| 1323 | return (match_dist, match_len); |
| 1324 | } |
                        // We found a better match, so save the last two bytes for further match
                        // comparisons.
                        // Optimization: using saturating_sub lets the compiler evade the bounds
                        // check, at the cost of some extra instructions, since it avoids any
                        // possibility of wraparound.
                        // TODO: see if we can find a better way to do this, since it is still a
                        // bit costly.
| 1330 | c01 = |
| 1331 | read_u16_le(&self.b.dict, (pos + match_len as usize).saturating_sub(1)); |
| 1332 | } |
| 1333 | continue 'outer; |
| 1334 | } |
| 1335 | } |
| 1336 | |
| 1337 | return (dist as u32, cmp::min(max_match_len, MAX_MATCH_LEN as u32)); |
| 1338 | } |
| 1339 | } |
| 1340 | } |
| 1341 | |
| 1342 | pub(crate) struct ParamsOxide { |
| 1343 | pub flags: u32, |
| 1344 | pub greedy_parsing: bool, |
| 1345 | pub block_index: u32, |
| 1346 | |
| 1347 | pub saved_match_dist: u32, |
| 1348 | pub saved_match_len: u32, |
| 1349 | pub saved_lit: u8, |
| 1350 | |
| 1351 | pub flush: TDEFLFlush, |
| 1352 | pub flush_ofs: u32, |
| 1353 | pub flush_remaining: u32, |
| 1354 | pub finished: bool, |
| 1355 | |
| 1356 | pub adler32: u32, |
| 1357 | |
| 1358 | pub src_pos: usize, |
| 1359 | |
| 1360 | pub out_buf_ofs: usize, |
| 1361 | pub prev_return_status: TDEFLStatus, |
| 1362 | |
| 1363 | pub saved_bit_buffer: u32, |
| 1364 | pub saved_bits_in: u32, |
| 1365 | |
| 1366 | pub local_buf: Box<LocalBuf>, |
| 1367 | } |
| 1368 | |
| 1369 | impl ParamsOxide { |
| 1370 | fn new(flags: u32) -> Self { |
| 1371 | ParamsOxide { |
| 1372 | flags, |
| 1373 | greedy_parsing: flags & TDEFL_GREEDY_PARSING_FLAG != 0, |
| 1374 | block_index: 0, |
| 1375 | saved_match_dist: 0, |
| 1376 | saved_match_len: 0, |
| 1377 | saved_lit: 0, |
| 1378 | flush: TDEFLFlush::None, |
| 1379 | flush_ofs: 0, |
| 1380 | flush_remaining: 0, |
| 1381 | finished: false, |
| 1382 | adler32: MZ_ADLER32_INIT, |
| 1383 | src_pos: 0, |
| 1384 | out_buf_ofs: 0, |
| 1385 | prev_return_status: TDEFLStatus::Okay, |
| 1386 | saved_bit_buffer: 0, |
| 1387 | saved_bits_in: 0, |
| 1388 | local_buf: Box::default(), |
| 1389 | } |
| 1390 | } |
| 1391 | |
| 1392 | fn update_flags(&mut self, flags: u32) { |
| 1393 | self.flags = flags; |
| 1394 | self.greedy_parsing = self.flags & TDEFL_GREEDY_PARSING_FLAG != 0; |
| 1395 | } |
| 1396 | |
| 1397 | /// Reset state, saving settings. |
| 1398 | fn reset(&mut self) { |
| 1399 | self.block_index = 0; |
| 1400 | self.saved_match_len = 0; |
| 1401 | self.saved_match_dist = 0; |
| 1402 | self.saved_lit = 0; |
| 1403 | self.flush = TDEFLFlush::None; |
| 1404 | self.flush_ofs = 0; |
| 1405 | self.flush_remaining = 0; |
| 1406 | self.finished = false; |
| 1407 | self.adler32 = MZ_ADLER32_INIT; |
| 1408 | self.src_pos = 0; |
| 1409 | self.out_buf_ofs = 0; |
| 1410 | self.prev_return_status = TDEFLStatus::Okay; |
| 1411 | self.saved_bit_buffer = 0; |
| 1412 | self.saved_bits_in = 0; |
| 1413 | self.local_buf.b = [0; OUT_BUF_SIZE]; |
| 1414 | } |
| 1415 | } |
| 1416 | |
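/// Buffer of LZ codes produced by the match-finding stage and later consumed by
/// `compress_lz_codes`: a flag byte is planted first, and each of its 8 bits
/// records whether the entry that follows is a single literal byte (bit clear)
/// or a three-byte match code of a length byte plus a 16-bit little-endian
/// distance (bit set).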
| 1417 | pub(crate) struct LZOxide { |
| 1418 | pub codes: [u8; LZ_CODE_BUF_SIZE], |
| 1419 | pub code_position: usize, |
| 1420 | pub flag_position: usize, |
| 1421 | |
| 1422 | // The total number of bytes in the current block. |
| 1423 | pub total_bytes: u32, |
| 1424 | pub num_flags_left: u32, |
| 1425 | } |
| 1426 | |
| 1427 | impl LZOxide { |
| 1428 | const fn new() -> Self { |
| 1429 | LZOxide { |
| 1430 | codes: [0; LZ_CODE_BUF_SIZE], |
| 1431 | code_position: 1, |
| 1432 | flag_position: 0, |
| 1433 | total_bytes: 0, |
| 1434 | num_flags_left: 8, |
| 1435 | } |
| 1436 | } |
| 1437 | |
| 1438 | fn write_code(&mut self, val: u8) { |
| 1439 | // Perf - go via u16 to help evade bounds check |
| 1440 | // TODO: see if we can use u16 for flag_position in general. |
| 1441 | self.codes[usize::from(self.code_position as u16)] = val; |
| 1442 | self.code_position += 1; |
| 1443 | } |
| 1444 | |
| 1445 | fn init_flag(&mut self) { |
| 1446 | if self.num_flags_left == 8 { |
| 1447 | *self.get_flag() = 0; |
| 1448 | self.code_position -= 1; |
| 1449 | } else { |
| 1450 | *self.get_flag() >>= self.num_flags_left; |
| 1451 | } |
| 1452 | } |
| 1453 | |
| 1454 | fn get_flag(&mut self) -> &mut u8 { |
| 1455 | // Perf - go via u16 to help evade bounds check |
| 1456 | // TODO: see if we can use u16 for flag_position in general. |
| 1457 | &mut self.codes[usize::from(self.flag_position as u16)] |
| 1458 | } |
| 1459 | |
| 1460 | fn plant_flag(&mut self) { |
| 1461 | self.flag_position = self.code_position; |
| 1462 | self.code_position += 1; |
| 1463 | } |
| 1464 | |
| 1465 | fn consume_flag(&mut self) { |
| 1466 | self.num_flags_left -= 1; |
| 1467 | if self.num_flags_left == 0 { |
| 1468 | self.num_flags_left = 8; |
| 1469 | self.plant_flag(); |
| 1470 | } |
| 1471 | } |
| 1472 | } |
| 1473 | |
| 1474 | fn compress_lz_codes( |
| 1475 | huff: &HuffmanOxide, |
| 1476 | output: &mut OutputBufferOxide, |
| 1477 | lz_code_buf: &[u8; LZ_CODE_BUF_SIZE], |
| 1478 | lz_code_buf_used_len: usize, |
| 1479 | ) -> Result<bool> { |
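    // `flags` is used as a shift register holding the current flag byte plus a
    // sentinel bit: it starts out as 1 (empty) so a fresh flag byte is loaded on the
    // first iteration, and the 0x100 sentinel shifts down to 1 again exactly when all
    // 8 flags have been consumed.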
| 1480 | let mut flags = 1; |
| 1481 | let mut bb = BitBuffer { |
| 1482 | bit_buffer: u64::from(output.bit_buffer), |
| 1483 | bits_in: output.bits_in, |
| 1484 | }; |
| 1485 | |
    // Help the compiler see that this variable won't be larger than
    // the buffer length, since the constants won't propagate through the function call.
| 1488 | let lz_code_buf_used_len = cmp::min(lz_code_buf.len(), lz_code_buf_used_len); |
| 1489 | |
| 1490 | let mut i: usize = 0; |
| 1491 | while i < lz_code_buf_used_len { |
| 1492 | if flags == 1 { |
| 1493 | flags = u32::from(lz_code_buf[i]) | 0x100; |
| 1494 | i += 1; |
| 1495 | } |
| 1496 | |
| 1497 | // The lz code was a length code |
| 1498 | if flags & 1 == 1 { |
| 1499 | flags >>= 1; |
| 1500 | |
| 1501 | let sym; |
| 1502 | let num_extra_bits; |
| 1503 | |
| 1504 | let match_len = lz_code_buf[i & LZ_CODE_BUF_MASK] as usize; |
| 1505 | |
| 1506 | let match_dist = lz_code_buf[(i + 1) & LZ_CODE_BUF_MASK] as u16 |
| 1507 | | ((lz_code_buf[(i + 2) & LZ_CODE_BUF_MASK] as u16) << 8); |
| 1508 | |
| 1509 | i += 3; |
| 1510 | |
| 1511 | debug_assert!(huff.code_sizes[0][LEN_SYM[match_len] as usize + LEN_SYM_OFFSET] != 0); |
| 1512 | let len_sym = (LEN_SYM[match_len] & 31) as usize + LEN_SYM_OFFSET; |
| 1513 | |
| 1514 | bb.put_fast( |
| 1515 | u64::from(huff.codes[0][len_sym]), |
| 1516 | u32::from(huff.code_sizes[0][len_sym]), |
| 1517 | ); |
| 1518 | bb.put_fast( |
| 1519 | match_len as u64 & u64::from(BITMASKS[(LEN_EXTRA[match_len] & 7) as usize]), |
| 1520 | u32::from(LEN_EXTRA[match_len]), |
| 1521 | ); |
| 1522 | |
| 1523 | if match_dist < 512 { |
| 1524 | sym = SMALL_DIST_SYM[match_dist as usize] as usize; |
| 1525 | num_extra_bits = SMALL_DIST_EXTRA[match_dist as usize] as usize; |
| 1526 | } else { |
| 1527 | sym = LARGE_DIST_SYM[(match_dist >> 8) as usize] as usize; |
| 1528 | num_extra_bits = LARGE_DIST_EXTRA[(match_dist >> 8) as usize] as usize; |
| 1529 | } |
| 1530 | |
| 1531 | debug_assert!(huff.code_sizes[1][sym] != 0); |
| 1532 | bb.put_fast( |
| 1533 | u64::from(huff.codes[1][sym]), |
| 1534 | u32::from(huff.code_sizes[1][sym]), |
| 1535 | ); |
| 1536 | bb.put_fast( |
| 1537 | u64::from(match_dist) & u64::from(BITMASKS[num_extra_bits & 15]), |
| 1538 | num_extra_bits as u32, |
| 1539 | ); |
| 1540 | } else { |
| 1541 | // The lz code was a literal |
| 1542 | for _ in 0..3 { |
| 1543 | flags >>= 1; |
| 1544 | let lit = lz_code_buf[i & LZ_CODE_BUF_MASK]; |
| 1545 | i += 1; |
| 1546 | |
| 1547 | debug_assert!(huff.code_sizes[0][lit as usize] != 0); |
| 1548 | bb.put_fast( |
| 1549 | u64::from(huff.codes[0][lit as usize]), |
| 1550 | u32::from(huff.code_sizes[0][lit as usize]), |
| 1551 | ); |
| 1552 | |
| 1553 | if flags & 1 == 1 || i >= lz_code_buf_used_len { |
| 1554 | break; |
| 1555 | } |
| 1556 | } |
| 1557 | } |
| 1558 | |
| 1559 | bb.flush(output)?; |
| 1560 | } |
| 1561 | |
| 1562 | output.bits_in = 0; |
| 1563 | output.bit_buffer = 0; |
| 1564 | while bb.bits_in != 0 { |
| 1565 | let n = cmp::min(bb.bits_in, 16); |
| 1566 | output.put_bits(bb.bit_buffer as u32 & BITMASKS[n as usize], n); |
| 1567 | bb.bit_buffer >>= n; |
| 1568 | bb.bits_in -= n; |
| 1569 | } |
| 1570 | |
| 1571 | // Output the end of block symbol. |
| 1572 | output.put_bits( |
| 1573 | u32::from(huff.codes[0][256]), |
| 1574 | u32::from(huff.code_sizes[0][256]), |
| 1575 | ); |
| 1576 | |
| 1577 | Ok(true) |
| 1578 | } |
| 1579 | |
| 1580 | fn compress_block( |
| 1581 | huff: &mut HuffmanOxide, |
| 1582 | output: &mut OutputBufferOxide, |
| 1583 | lz: &LZOxide, |
| 1584 | static_block: bool, |
| 1585 | ) -> Result<bool> { |
| 1586 | if static_block { |
| 1587 | huff.start_static_block(output); |
| 1588 | } else { |
| 1589 | huff.start_dynamic_block(output)?; |
| 1590 | } |
| 1591 | |
    compress_lz_codes(huff, output, &lz.codes, lz.code_position)
| 1593 | } |
| 1594 | |
| 1595 | pub(crate) fn flush_block( |
| 1596 | d: &mut CompressorOxide, |
| 1597 | callback: &mut CallbackOxide, |
| 1598 | flush: TDEFLFlush, |
| 1599 | ) -> Result<i32> { |
| 1600 | let mut saved_buffer; |
| 1601 | { |
| 1602 | let mut output = callback |
| 1603 | .out |
| 1604 | .new_output_buffer(&mut d.params.local_buf.b, d.params.out_buf_ofs); |
| 1605 | output.bit_buffer = d.params.saved_bit_buffer; |
| 1606 | output.bits_in = d.params.saved_bits_in; |
| 1607 | |
| 1608 | // TODO: Don't think this second condition should be here but need to verify. |
| 1609 | let use_raw_block = (d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0) |
| 1610 | && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos) <= d.dict.size; |
| 1611 | debug_assert_eq!( |
| 1612 | use_raw_block, |
| 1613 | d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0 |
| 1614 | ); |
| 1615 | |
| 1616 | assert!(d.params.flush_remaining == 0); |
| 1617 | d.params.flush_ofs = 0; |
| 1618 | d.params.flush_remaining = 0; |
| 1619 | |
| 1620 | d.lz.init_flag(); |
| 1621 | |
| 1622 | // If we are at the start of the stream, write the zlib header if requested. |
| 1623 | if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 && d.params.block_index == 0 { |
| 1624 | let header = zlib::header_from_flags(d.params.flags); |
| 1625 | output.put_bits_no_flush(header[0].into(), 8); |
| 1626 | output.put_bits(header[1].into(), 8); |
| 1627 | } |
| 1628 | |
| 1629 | // Output the block header. |
| 1630 | output.put_bits((flush == TDEFLFlush::Finish) as u32, 1); |
| 1631 | |
| 1632 | saved_buffer = output.save(); |
| 1633 | |
| 1634 | let comp_success = if !use_raw_block { |
| 1635 | let use_static = |
| 1636 | (d.params.flags & TDEFL_FORCE_ALL_STATIC_BLOCKS != 0) || (d.lz.total_bytes < 48); |
| 1637 | compress_block(&mut d.huff, &mut output, &d.lz, use_static)? |
| 1638 | } else { |
| 1639 | false |
| 1640 | }; |
| 1641 | |
        // If we failed to compress anything and the compressed output would take up more
        // space than the input data, output a stored block instead, which has at most
        // 5 bytes of overhead.
        // We only use some simple heuristics for now.
        // A stored block has an overhead of at least 4 bytes for the block length fields,
        // but usually more, since those fields have to start at a byte boundary and thus
        // require padding, for a total overhead of up to 5 bytes.
        // As a static block has an overhead of at most 1 bit per byte
        // (literals are coded with either 8 or 9 bits), a stored block will
        // never take up less space if the number of input bytes is less than 32.
| 1651 | let expanded = (d.lz.total_bytes > 32) |
| 1652 | && (output.inner_pos - saved_buffer.pos + 1 >= (d.lz.total_bytes as usize)) |
| 1653 | && (d.dict.lookahead_pos - d.dict.code_buf_dict_pos <= d.dict.size); |
| 1654 | |
| 1655 | if use_raw_block || expanded { |
| 1656 | output.load(saved_buffer); |
| 1657 | |
| 1658 | // Block header. |
| 1659 | output.put_bits(0, 2); |
| 1660 | |
| 1661 | // Block length has to start on a byte boundary, so pad. |
| 1662 | output.pad_to_bytes(); |
| 1663 | |
| 1664 | // Block length and ones complement of block length. |
| 1665 | output.put_bits(d.lz.total_bytes & 0xFFFF, 16); |
| 1666 | output.put_bits(!d.lz.total_bytes & 0xFFFF, 16); |
| 1667 | |
| 1668 | // Write the actual bytes. |
| 1669 | let start = d.dict.code_buf_dict_pos & LZ_DICT_SIZE_MASK; |
| 1670 | let end = (d.dict.code_buf_dict_pos + d.lz.total_bytes as usize) & LZ_DICT_SIZE_MASK; |
| 1671 | let dict = &mut d.dict.b.dict; |
| 1672 | if start < end { |
| 1673 | // The data does not wrap around. |
| 1674 | output.write_bytes(&dict[start..end]); |
| 1675 | } else if d.lz.total_bytes > 0 { |
| 1676 | // The data wraps around and the input was not 0 bytes. |
| 1677 | output.write_bytes(&dict[start..LZ_DICT_SIZE]); |
| 1678 | output.write_bytes(&dict[..end]); |
| 1679 | } |
| 1680 | } else if !comp_success { |
| 1681 | output.load(saved_buffer); |
| 1682 | compress_block(&mut d.huff, &mut output, &d.lz, true)?; |
| 1683 | } |
| 1684 | |
| 1685 | if flush != TDEFLFlush::None { |
| 1686 | if flush == TDEFLFlush::Finish { |
| 1687 | output.pad_to_bytes(); |
| 1688 | if d.params.flags & TDEFL_WRITE_ZLIB_HEADER != 0 { |
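                    // Write the adler32 checksum of the uncompressed data as the
                    // zlib trailer, in big-endian byte order.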
| 1689 | let mut adler = d.params.adler32; |
| 1690 | for _ in 0..4 { |
| 1691 | output.put_bits((adler >> 24) & 0xFF, 8); |
| 1692 | adler <<= 8; |
| 1693 | } |
| 1694 | } |
| 1695 | } else { |
| 1696 | // Sync or Full flush. |
| 1697 | // Output an empty raw block. |
| 1698 | output.put_bits(0, 3); |
| 1699 | output.pad_to_bytes(); |
| 1700 | output.put_bits(0, 16); |
| 1701 | output.put_bits(0xFFFF, 16); |
| 1702 | } |
| 1703 | } |
| 1704 | |
| 1705 | d.huff.count[0][..MAX_HUFF_SYMBOLS_0].fill(0); |
| 1706 | d.huff.count[1][..MAX_HUFF_SYMBOLS_1].fill(0); |
| 1707 | |
| 1708 | // Clear LZ buffer for the next block. |
| 1709 | d.lz.code_position = 1; |
| 1710 | d.lz.flag_position = 0; |
| 1711 | d.lz.num_flags_left = 8; |
| 1712 | d.dict.code_buf_dict_pos += d.lz.total_bytes as usize; |
| 1713 | d.lz.total_bytes = 0; |
| 1714 | d.params.block_index += 1; |
| 1715 | |
| 1716 | saved_buffer = output.save(); |
| 1717 | |
| 1718 | d.params.saved_bit_buffer = saved_buffer.bit_buffer; |
| 1719 | d.params.saved_bits_in = saved_buffer.bits_in; |
| 1720 | } |
| 1721 | |
| 1722 | Ok(callback.flush_output(saved_buffer, &mut d.params)) |
| 1723 | } |
| 1724 | |
| 1725 | pub(crate) fn record_literal(h: &mut HuffmanOxide, lz: &mut LZOxide, lit: u8) { |
| 1726 | lz.total_bytes += 1; |
    lz.write_code(lit);
| 1728 | |
| 1729 | *lz.get_flag() >>= 1; |
| 1730 | lz.consume_flag(); |
| 1731 | |
| 1732 | h.count[0][lit as usize] += 1; |
| 1733 | } |
| 1734 | |
| 1735 | fn record_match(h: &mut HuffmanOxide, lz: &mut LZOxide, match_len: u32, mut match_dist: u32) { |
| 1736 | debug_assert!(match_len >= MIN_MATCH_LEN.into()); |
| 1737 | debug_assert!(match_dist >= 1); |
| 1738 | debug_assert!(match_dist as usize <= LZ_DICT_SIZE); |
| 1739 | |
| 1740 | lz.total_bytes += match_len; |
| 1741 | match_dist -= 1; |
| 1742 | let match_len = (match_len - u32::from(MIN_MATCH_LEN)) as u8; |
| 1743 | lz.write_code(match_len); |
| 1744 | lz.write_code(match_dist as u8); |
| 1745 | lz.write_code((match_dist >> 8) as u8); |
| 1746 | |
| 1747 | *lz.get_flag() >>= 1; |
| 1748 | *lz.get_flag() |= 0x80; |
| 1749 | lz.consume_flag(); |
| 1750 | |
| 1751 | let symbol = if match_dist < 512 { |
| 1752 | SMALL_DIST_SYM[match_dist as usize] |
| 1753 | } else { |
| 1754 | LARGE_DIST_SYM[((match_dist >> 8) & 127) as usize] |
| 1755 | } as usize; |
| 1756 | h.count[1][symbol] += 1; |
| 1757 | // Mask the values from LEN_SYM here as the compiler isn't quite smart enough to infer |
| 1758 | // that it only contains values smaller than 32. |
| 1759 | h.count[0][(LEN_SYM[match_len as usize] as usize & 31) + LEN_SYM_OFFSET] += 1; |
| 1760 | } |
| 1761 | |
| 1762 | fn compress_normal(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool { |
| 1763 | let in_buf = match callback.in_buf { |
| 1764 | None => return true, |
| 1765 | Some(in_buf) => in_buf, |
| 1766 | }; |
| 1767 | |
| 1768 | let mut src_pos = d.params.src_pos; |
| 1769 | let mut lookahead_size = d.dict.lookahead_size; |
| 1770 | let mut lookahead_pos = d.dict.lookahead_pos; |
| 1771 | let mut saved_lit = d.params.saved_lit; |
| 1772 | let mut saved_match_dist = d.params.saved_match_dist; |
| 1773 | let mut saved_match_len = d.params.saved_match_len; |
| 1774 | |
| 1775 | while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size != 0) { |
| 1776 | let src_buf_left = in_buf.len() - src_pos; |
| 1777 | let num_bytes_to_process = cmp::min(src_buf_left, MAX_MATCH_LEN - lookahead_size); |
| 1778 | |
| 1779 | if lookahead_size + d.dict.size >= usize::from(MIN_MATCH_LEN) - 1 |
| 1780 | && num_bytes_to_process > 0 |
| 1781 | { |
| 1782 | let dictb = &mut d.dict.b; |
| 1783 | |
| 1784 | let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK; |
| 1785 | let mut ins_pos = lookahead_pos + lookahead_size - 2; |
| 1786 | // Start the hash value from the first two bytes |
| 1787 | let mut hash = update_hash( |
| 1788 | u16::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK]), |
| 1789 | dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK], |
| 1790 | ); |
| 1791 | |
| 1792 | lookahead_size += num_bytes_to_process; |
| 1793 | |
| 1794 | for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] { |
                // Add byte to the dictionary buffer.
| 1796 | dictb.dict[dst_pos] = c; |
| 1797 | if dst_pos < MAX_MATCH_LEN - 1 { |
| 1798 | dictb.dict[LZ_DICT_SIZE + dst_pos] = c; |
| 1799 | } |
| 1800 | |
| 1801 | // Generate hash from the current byte, |
| 1802 | hash = update_hash(hash, c); |
| 1803 | dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize]; |
| 1804 | // and insert it into the hash chain. |
| 1805 | dictb.hash[hash as usize] = ins_pos as u16; |
| 1806 | dst_pos = (dst_pos + 1) & LZ_DICT_SIZE_MASK; |
| 1807 | ins_pos += 1; |
| 1808 | } |
| 1809 | |
| 1810 | src_pos += num_bytes_to_process; |
| 1811 | } else { |
| 1812 | let dictb = &mut d.dict.b; |
| 1813 | for &c in &in_buf[src_pos..src_pos + num_bytes_to_process] { |
| 1814 | let dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK; |
| 1815 | dictb.dict[dst_pos] = c; |
| 1816 | if dst_pos < MAX_MATCH_LEN - 1 { |
| 1817 | dictb.dict[LZ_DICT_SIZE + dst_pos] = c; |
| 1818 | } |
| 1819 | |
| 1820 | lookahead_size += 1; |
| 1821 | if lookahead_size + d.dict.size >= MIN_MATCH_LEN.into() { |
| 1822 | let ins_pos = lookahead_pos + lookahead_size - 3; |
| 1823 | let hash = ((u32::from(dictb.dict[ins_pos & LZ_DICT_SIZE_MASK]) |
| 1824 | << (LZ_HASH_SHIFT * 2)) |
| 1825 | ^ ((u32::from(dictb.dict[(ins_pos + 1) & LZ_DICT_SIZE_MASK]) |
| 1826 | << LZ_HASH_SHIFT) |
| 1827 | ^ u32::from(c))) |
| 1828 | & (LZ_HASH_SIZE as u32 - 1); |
| 1829 | |
| 1830 | dictb.next[ins_pos & LZ_DICT_SIZE_MASK] = dictb.hash[hash as usize]; |
| 1831 | dictb.hash[hash as usize] = ins_pos as u16; |
| 1832 | } |
| 1833 | } |
| 1834 | |
| 1835 | src_pos += num_bytes_to_process; |
| 1836 | } |
| 1837 | |
| 1838 | d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size); |
| 1839 | if d.params.flush == TDEFLFlush::None && lookahead_size < MAX_MATCH_LEN { |
| 1840 | break; |
| 1841 | } |
| 1842 | |
| 1843 | let mut len_to_move = 1; |
| 1844 | let mut cur_match_dist = 0; |
| 1845 | let mut cur_match_len = if saved_match_len != 0 { |
| 1846 | saved_match_len |
| 1847 | } else { |
| 1848 | u32::from(MIN_MATCH_LEN) - 1 |
| 1849 | }; |
| 1850 | let cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK; |
| 1851 | if d.params.flags & TDEFL_RLE_MATCHES != 0 { |
| 1852 | // If TDEFL_RLE_MATCHES is set, we only look for repeating sequences of the current byte. |
| 1853 | if d.dict.size != 0 { |
| 1854 | let c = d.dict.b.dict[(cur_pos.wrapping_sub(1)) & LZ_DICT_SIZE_MASK]; |
| 1855 | cur_match_len = d.dict.b.dict[cur_pos..(cur_pos + lookahead_size)] |
| 1856 | .iter() |
| 1857 | .take_while(|&x| *x == c) |
| 1858 | .count() as u32; |
| 1859 | if cur_match_len < MIN_MATCH_LEN.into() { |
| 1860 | cur_match_len = 0 |
| 1861 | } else { |
| 1862 | cur_match_dist = 1 |
| 1863 | } |
| 1864 | } |
| 1865 | } else { |
| 1866 | // Try to find a match for the bytes at the current position. |
| 1867 | let dist_len = d.dict.find_match( |
| 1868 | lookahead_pos, |
| 1869 | d.dict.size, |
| 1870 | lookahead_size as u32, |
| 1871 | cur_match_dist, |
| 1872 | cur_match_len, |
| 1873 | ); |
| 1874 | cur_match_dist = dist_len.0; |
| 1875 | cur_match_len = dist_len.1; |
| 1876 | } |
| 1877 | |
| 1878 | let far_and_small = cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024; |
| 1879 | let filter_small = d.params.flags & TDEFL_FILTER_MATCHES != 0 && cur_match_len <= 5; |
| 1880 | if far_and_small || filter_small || cur_pos == cur_match_dist as usize { |
| 1881 | cur_match_dist = 0; |
| 1882 | cur_match_len = 0; |
| 1883 | } |
| 1884 | |
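        // Lazy matching: decide whether to emit the match we deferred on the previous
        // byte, or to defer again because the match at the current position is longer.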
| 1885 | if saved_match_len != 0 { |
| 1886 | if cur_match_len > saved_match_len { |
| 1887 | record_literal(&mut d.huff, &mut d.lz, saved_lit); |
| 1888 | if cur_match_len >= 128 { |
| 1889 | record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist); |
| 1890 | saved_match_len = 0; |
| 1891 | len_to_move = cur_match_len as usize; |
| 1892 | } else { |
| 1893 | saved_lit = d.dict.b.dict[cur_pos]; |
| 1894 | saved_match_dist = cur_match_dist; |
| 1895 | saved_match_len = cur_match_len; |
| 1896 | } |
| 1897 | } else { |
| 1898 | record_match(&mut d.huff, &mut d.lz, saved_match_len, saved_match_dist); |
| 1899 | len_to_move = (saved_match_len - 1) as usize; |
| 1900 | saved_match_len = 0; |
| 1901 | } |
| 1902 | } else if cur_match_dist == 0 { |
| 1903 | record_literal( |
| 1904 | &mut d.huff, |
| 1905 | &mut d.lz, |
| 1906 | d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)], |
| 1907 | ); |
| 1908 | } else if d.params.greedy_parsing |
| 1909 | || (d.params.flags & TDEFL_RLE_MATCHES != 0) |
| 1910 | || cur_match_len >= 128 |
| 1911 | { |
            // Record the match right away when using greedy parsing or RLE-only matching, or
            // when the match is long (>= 128 bytes); with lazy matching, shorter matches are
            // deferred so we can check for a longer match at the next byte first.
| 1914 | record_match(&mut d.huff, &mut d.lz, cur_match_len, cur_match_dist); |
| 1915 | len_to_move = cur_match_len as usize; |
| 1916 | } else { |
| 1917 | saved_lit = d.dict.b.dict[cmp::min(cur_pos, d.dict.b.dict.len() - 1)]; |
| 1918 | saved_match_dist = cur_match_dist; |
| 1919 | saved_match_len = cur_match_len; |
| 1920 | } |
| 1921 | |
| 1922 | lookahead_pos += len_to_move; |
| 1923 | assert!(lookahead_size >= len_to_move); |
| 1924 | lookahead_size -= len_to_move; |
| 1925 | d.dict.size = cmp::min(d.dict.size + len_to_move, LZ_DICT_SIZE); |
| 1926 | |
| 1927 | let lz_buf_tight = d.lz.code_position > LZ_CODE_BUF_SIZE - 8; |
| 1928 | let fat = ((d.lz.code_position * 115) >> 7) >= d.lz.total_bytes as usize; |
| 1929 | let buf_fat = (d.lz.total_bytes > 31 * 1024) && fat; |
| 1930 | |
| 1931 | if lz_buf_tight || buf_fat { |
| 1932 | d.params.src_pos = src_pos; |
| 1933 | // These values are used in flush_block, so we need to write them back here. |
| 1934 | d.dict.lookahead_size = lookahead_size; |
| 1935 | d.dict.lookahead_pos = lookahead_pos; |
| 1936 | |
| 1937 | let n = flush_block(d, callback, TDEFLFlush::None) |
| 1938 | .unwrap_or(TDEFLStatus::PutBufFailed as i32); |
| 1939 | if n != 0 { |
| 1940 | d.params.saved_lit = saved_lit; |
| 1941 | d.params.saved_match_dist = saved_match_dist; |
| 1942 | d.params.saved_match_len = saved_match_len; |
| 1943 | return n > 0; |
| 1944 | } |
| 1945 | } |
| 1946 | } |
| 1947 | |
| 1948 | d.params.src_pos = src_pos; |
| 1949 | d.dict.lookahead_size = lookahead_size; |
| 1950 | d.dict.lookahead_pos = lookahead_pos; |
| 1951 | d.params.saved_lit = saved_lit; |
| 1952 | d.params.saved_match_dist = saved_match_dist; |
| 1953 | d.params.saved_match_len = saved_match_len; |
| 1954 | true |
| 1955 | } |
| 1956 | |
| 1957 | const COMP_FAST_LOOKAHEAD_SIZE: usize = 4096; |
| 1958 | |
| 1959 | fn compress_fast(d: &mut CompressorOxide, callback: &mut CallbackOxide) -> bool { |
| 1960 | let mut src_pos = d.params.src_pos; |
| 1961 | let mut lookahead_size = d.dict.lookahead_size; |
| 1962 | let mut lookahead_pos = d.dict.lookahead_pos; |
| 1963 | |
| 1964 | let mut cur_pos = lookahead_pos & LZ_DICT_SIZE_MASK; |
| 1965 | let in_buf = match callback.in_buf { |
| 1966 | None => return true, |
| 1967 | Some(in_buf) => in_buf, |
| 1968 | }; |
| 1969 | |
| 1970 | debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2); |
| 1971 | |
| 1972 | while src_pos < in_buf.len() || (d.params.flush != TDEFLFlush::None && lookahead_size > 0) { |
| 1973 | let mut dst_pos = (lookahead_pos + lookahead_size) & LZ_DICT_SIZE_MASK; |
| 1974 | let mut num_bytes_to_process = cmp::min( |
| 1975 | in_buf.len() - src_pos, |
| 1976 | COMP_FAST_LOOKAHEAD_SIZE - lookahead_size, |
| 1977 | ); |
| 1978 | lookahead_size += num_bytes_to_process; |
| 1979 | |
| 1980 | while num_bytes_to_process != 0 { |
| 1981 | let n = cmp::min(LZ_DICT_SIZE - dst_pos, num_bytes_to_process); |
| 1982 | d.dict.b.dict[dst_pos..dst_pos + n].copy_from_slice(&in_buf[src_pos..src_pos + n]); |
| 1983 | |
| 1984 | if dst_pos < MAX_MATCH_LEN - 1 { |
| 1985 | let m = cmp::min(n, MAX_MATCH_LEN - 1 - dst_pos); |
| 1986 | d.dict.b.dict[dst_pos + LZ_DICT_SIZE..dst_pos + LZ_DICT_SIZE + m] |
| 1987 | .copy_from_slice(&in_buf[src_pos..src_pos + m]); |
| 1988 | } |
| 1989 | |
| 1990 | src_pos += n; |
| 1991 | dst_pos = (dst_pos + n) & LZ_DICT_SIZE_MASK; |
| 1992 | num_bytes_to_process -= n; |
| 1993 | } |
| 1994 | |
| 1995 | d.dict.size = cmp::min(LZ_DICT_SIZE - lookahead_size, d.dict.size); |
| 1996 | if d.params.flush == TDEFLFlush::None && lookahead_size < COMP_FAST_LOOKAHEAD_SIZE { |
| 1997 | break; |
| 1998 | } |
| 1999 | |
| 2000 | while lookahead_size >= 4 { |
| 2001 | let mut cur_match_len = 1; |
| 2002 | |
| 2003 | let first_trigram = d.dict.read_unaligned_u32(cur_pos) & 0xFF_FFFF; |
| 2004 | |
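            // Hash the three bytes starting at the current position to find (and
            // replace) the most recent position that started with the same trigram.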
| 2005 | let hash = (first_trigram ^ (first_trigram >> (24 - (LZ_HASH_BITS - 8)))) |
| 2006 | & LEVEL1_HASH_SIZE_MASK; |
| 2007 | |
| 2008 | let mut probe_pos = usize::from(d.dict.b.hash[hash as usize]); |
| 2009 | d.dict.b.hash[hash as usize] = lookahead_pos as u16; |
| 2010 | |
| 2011 | let mut cur_match_dist = (lookahead_pos - probe_pos) as u16; |
| 2012 | if cur_match_dist as usize <= d.dict.size { |
| 2013 | probe_pos &= LZ_DICT_SIZE_MASK; |
| 2014 | |
| 2015 | let trigram = d.dict.read_unaligned_u32(probe_pos) & 0xFF_FFFF; |
| 2016 | |
| 2017 | if first_trigram == trigram { |
| 2018 | // Trigram was tested, so we can start with "+ 3" displacement. |
| 2019 | let mut p = cur_pos + 3; |
| 2020 | let mut q = probe_pos + 3; |
| 2021 | cur_match_len = (|| { |
| 2022 | for _ in 0..32 { |
| 2023 | let p_data: u64 = d.dict.read_unaligned_u64(p); |
| 2024 | let q_data: u64 = d.dict.read_unaligned_u64(q); |
| 2025 | let xor_data = p_data ^ q_data; |
| 2026 | if xor_data == 0 { |
| 2027 | p += 8; |
| 2028 | q += 8; |
| 2029 | } else { |
| 2030 | let trailing = xor_data.trailing_zeros(); |
| 2031 | return p as u32 - cur_pos as u32 + (trailing >> 3); |
| 2032 | } |
| 2033 | } |
| 2034 | |
| 2035 | if cur_match_dist == 0 { |
| 2036 | 0 |
| 2037 | } else { |
| 2038 | MAX_MATCH_LEN as u32 |
| 2039 | } |
| 2040 | })(); |
| 2041 | |
| 2042 | if cur_match_len < MIN_MATCH_LEN.into() |
| 2043 | || (cur_match_len == MIN_MATCH_LEN.into() && cur_match_dist >= 8 * 1024) |
| 2044 | { |
| 2045 | let lit = first_trigram as u8; |
| 2046 | cur_match_len = 1; |
| 2047 | d.lz.write_code(lit); |
| 2048 | *d.lz.get_flag() >>= 1; |
| 2049 | d.huff.count[0][lit as usize] += 1; |
| 2050 | } else { |
| 2051 | // Limit the match to the length of the lookahead so we don't create a match |
| 2052 | // that ends after the end of the input data. |
| 2053 | cur_match_len = cmp::min(cur_match_len, lookahead_size as u32); |
| 2054 | debug_assert!(cur_match_len >= MIN_MATCH_LEN.into()); |
| 2055 | debug_assert!(cur_match_len <= MAX_MATCH_LEN as u32); |
| 2056 | debug_assert!(cur_match_dist >= 1); |
| 2057 | debug_assert!(cur_match_dist as usize <= LZ_DICT_SIZE); |
| 2058 | cur_match_dist -= 1; |
| 2059 | |
| 2060 | d.lz.write_code((cur_match_len - u32::from(MIN_MATCH_LEN)) as u8); |
| 2061 | d.lz.write_code(cur_match_dist as u8); |
| 2062 | d.lz.write_code((cur_match_dist >> 8) as u8); |
| 2063 | |
| 2064 | *d.lz.get_flag() >>= 1; |
| 2065 | *d.lz.get_flag() |= 0x80; |
| 2066 | if cur_match_dist < 512 { |
| 2067 | d.huff.count[1][SMALL_DIST_SYM[cur_match_dist as usize] as usize] += 1; |
| 2068 | } else { |
| 2069 | d.huff.count[1] |
| 2070 | [LARGE_DIST_SYM[(cur_match_dist >> 8) as usize] as usize] += 1; |
| 2071 | } |
| 2072 | |
| 2073 | d.huff.count[0][(LEN_SYM |
| 2074 | [(cur_match_len - u32::from(MIN_MATCH_LEN)) as usize & 255] |
| 2075 | as usize |
| 2076 | & 31) |
| 2077 | + LEN_SYM_OFFSET] += 1; |
| 2078 | } |
| 2079 | } else { |
| 2080 | d.lz.write_code(first_trigram as u8); |
| 2081 | *d.lz.get_flag() >>= 1; |
| 2082 | d.huff.count[0][first_trigram as u8 as usize] += 1; |
| 2083 | } |
| 2084 | |
| 2085 | d.lz.consume_flag(); |
| 2086 | d.lz.total_bytes += cur_match_len; |
| 2087 | lookahead_pos += cur_match_len as usize; |
| 2088 | d.dict.size = cmp::min(d.dict.size + cur_match_len as usize, LZ_DICT_SIZE); |
| 2089 | cur_pos = (cur_pos + cur_match_len as usize) & LZ_DICT_SIZE_MASK; |
| 2090 | lookahead_size -= cur_match_len as usize; |
| 2091 | |
| 2092 | if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 { |
| 2093 | // These values are used in flush_block, so we need to write them back here. |
| 2094 | d.dict.lookahead_size = lookahead_size; |
| 2095 | d.dict.lookahead_pos = lookahead_pos; |
| 2096 | |
| 2097 | let n = match flush_block(d, callback, TDEFLFlush::None) { |
| 2098 | Err(_) => { |
| 2099 | d.params.src_pos = src_pos; |
| 2100 | d.params.prev_return_status = TDEFLStatus::PutBufFailed; |
| 2101 | return false; |
| 2102 | } |
| 2103 | Ok(status) => status, |
| 2104 | }; |
| 2105 | if n != 0 { |
| 2106 | d.params.src_pos = src_pos; |
| 2107 | return n > 0; |
| 2108 | } |
| 2109 | debug_assert!(d.lz.code_position < LZ_CODE_BUF_SIZE - 2); |
| 2110 | |
| 2111 | lookahead_size = d.dict.lookahead_size; |
| 2112 | lookahead_pos = d.dict.lookahead_pos; |
| 2113 | } |
| 2114 | } |
| 2115 | } |
| 2116 | |
| 2117 | while lookahead_size != 0 { |
| 2118 | let lit = d.dict.b.dict[cur_pos]; |
| 2119 | d.lz.total_bytes += 1; |
| 2120 | d.lz.write_code(lit); |
| 2121 | *d.lz.get_flag() >>= 1; |
| 2122 | d.lz.consume_flag(); |
| 2123 | |
| 2124 | d.huff.count[0][lit as usize] += 1; |
| 2125 | lookahead_pos += 1; |
| 2126 | d.dict.size = cmp::min(d.dict.size + 1, LZ_DICT_SIZE); |
| 2127 | cur_pos = (cur_pos + 1) & LZ_DICT_SIZE_MASK; |
| 2128 | lookahead_size -= 1; |
| 2129 | |
| 2130 | if d.lz.code_position > LZ_CODE_BUF_SIZE - 8 { |
| 2131 | // These values are used in flush_block, so we need to write them back here. |
| 2132 | d.dict.lookahead_size = lookahead_size; |
| 2133 | d.dict.lookahead_pos = lookahead_pos; |
| 2134 | |
| 2135 | let n = match flush_block(d, callback, TDEFLFlush::None) { |
| 2136 | Err(_) => { |
| 2137 | d.params.prev_return_status = TDEFLStatus::PutBufFailed; |
| 2138 | d.params.src_pos = src_pos; |
| 2139 | return false; |
| 2140 | } |
| 2141 | Ok(status) => status, |
| 2142 | }; |
| 2143 | if n != 0 { |
| 2144 | d.params.src_pos = src_pos; |
| 2145 | return n > 0; |
| 2146 | } |
| 2147 | |
| 2148 | lookahead_size = d.dict.lookahead_size; |
| 2149 | lookahead_pos = d.dict.lookahead_pos; |
| 2150 | } |
| 2151 | } |
| 2152 | } |
| 2153 | |
| 2154 | d.params.src_pos = src_pos; |
| 2155 | d.dict.lookahead_size = lookahead_size; |
| 2156 | d.dict.lookahead_pos = lookahead_pos; |
| 2157 | true |
| 2158 | } |
| 2159 | |
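/// Copy as much pending output as will fit from the local buffer to the caller's
/// output buffer (when output goes to a buffer rather than a callback), and report
/// the resulting status and buffer positions.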
| 2160 | fn flush_output_buffer(c: &mut CallbackOxide, p: &mut ParamsOxide) -> (TDEFLStatus, usize, usize) { |
| 2161 | let mut res: (TDEFLStatus, usize, usize) = (TDEFLStatus::Okay, p.src_pos, 0); |
    if let CallbackOut::Buf(ref mut cb) = c.out {
        let n = cmp::min(cb.out_buf.len() - p.out_buf_ofs, p.flush_remaining as usize);
| 2164 | if n != 0 { |
| 2165 | cb.out_buf[p.out_buf_ofs..p.out_buf_ofs + n] |
| 2166 | .copy_from_slice(&p.local_buf.b[p.flush_ofs as usize..p.flush_ofs as usize + n]); |
| 2167 | } |
| 2168 | p.flush_ofs += n as u32; |
| 2169 | p.flush_remaining -= n as u32; |
| 2170 | p.out_buf_ofs += n; |
| 2171 | res.2 = p.out_buf_ofs; |
| 2172 | } |
| 2173 | |
| 2174 | if p.finished && p.flush_remaining == 0 { |
| 2175 | res.0 = TDEFLStatus::Done |
| 2176 | } |
| 2177 | res |
| 2178 | } |
| 2179 | |
| 2180 | /// Main compression function. Tries to compress as much as possible from `in_buf` and |
| 2181 | /// puts compressed output into `out_buf`. |
| 2182 | /// |
/// The value of `flush` determines if the compressor should attempt to flush all output,
/// and whether it should also try to finish the stream.
| 2185 | /// |
| 2186 | /// Use [`TDEFLFlush::Finish`] on the final call to signal that the stream is finishing. |
| 2187 | /// |
| 2188 | /// Note that this function does not keep track of whether a flush marker has been output, so |
| 2189 | /// if called using [`TDEFLFlush::Sync`], the caller needs to ensure there is enough space in the |
| 2190 | /// output buffer if they want to avoid repeated flush markers. |
| 2191 | /// See #105 for details. |
| 2192 | /// |
| 2193 | /// # Returns |
| 2194 | /// Returns a tuple containing the current status of the compressor, the current position |
| 2195 | /// in the input buffer and the current position in the output buffer. |
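///
/// # Example
/// A minimal sketch of one-shot compression (the output buffer size here is an
/// arbitrary choice that happens to be large enough for this input):
/// ```
/// use miniz_oxide::deflate::core::{
///     compress, create_comp_flags_from_zip_params, CompressorOxide, TDEFLFlush, TDEFLStatus,
/// };
///
/// // Level 6, no zlib wrapper, default strategy.
/// let mut d = CompressorOxide::new(create_comp_flags_from_zip_params(6, 0, 0));
/// let data = b"Hello, hello, deflate!";
/// let mut out = [0u8; 128];
/// let (status, bytes_in, bytes_out) = compress(&mut d, data, &mut out, TDEFLFlush::Finish);
/// assert_eq!(status, TDEFLStatus::Done);
/// assert_eq!(bytes_in, data.len());
/// assert!(bytes_out > 0);
/// ```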
| 2196 | pub fn compress( |
| 2197 | d: &mut CompressorOxide, |
| 2198 | in_buf: &[u8], |
| 2199 | out_buf: &mut [u8], |
| 2200 | flush: TDEFLFlush, |
| 2201 | ) -> (TDEFLStatus, usize, usize) { |
| 2202 | compress_inner( |
| 2203 | d, |
| 2204 | &mut CallbackOxide::new_callback_buf(in_buf, out_buf), |
| 2205 | flush, |
| 2206 | ) |
| 2207 | } |
| 2208 | |
/// Main compression function. Writes compressed output via the supplied callback.
| 2210 | /// |
| 2211 | /// # Returns |
/// Returns a tuple containing the current status of the compressor and the current position
/// in the input buffer.
| 2214 | /// |
| 2215 | /// The caller is responsible for ensuring the `CallbackFunc` struct will not cause undefined |
| 2216 | /// behaviour. |
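///
/// # Example
/// A minimal sketch that collects the compressed output into a `Vec`; returning
/// `true` from the callback signals that the output was successfully written:
/// ```
/// use miniz_oxide::deflate::core::{
///     compress_to_output, create_comp_flags_from_zip_params, CompressorOxide, TDEFLFlush,
///     TDEFLStatus,
/// };
///
/// let mut d = CompressorOxide::new(create_comp_flags_from_zip_params(6, 0, 0));
/// let mut encoded = Vec::new();
/// let (status, bytes_in) =
///     compress_to_output(&mut d, b"bytes to compress", TDEFLFlush::Finish, |out| {
///         encoded.extend_from_slice(out);
///         true
///     });
/// assert_eq!(status, TDEFLStatus::Done);
/// assert_eq!(bytes_in, b"bytes to compress".len());
/// ```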
| 2217 | pub fn compress_to_output( |
| 2218 | d: &mut CompressorOxide, |
| 2219 | in_buf: &[u8], |
| 2220 | flush: TDEFLFlush, |
| 2221 | mut callback_func: impl FnMut(&[u8]) -> bool, |
| 2222 | ) -> (TDEFLStatus, usize) { |
    let res = compress_inner(
| 2224 | d, |
| 2225 | &mut CallbackOxide::new_callback_func( |
| 2226 | in_buf, |
| 2227 | CallbackFunc { |
| 2228 | put_buf_func: &mut callback_func, |
| 2229 | }, |
| 2230 | ), |
| 2231 | flush, |
| 2232 | ); |
| 2233 | |
| 2234 | (res.0, res.1) |
| 2235 | } |
| 2236 | |
| 2237 | fn compress_inner( |
| 2238 | d: &mut CompressorOxide, |
| 2239 | callback: &mut CallbackOxide, |
| 2240 | flush: TDEFLFlush, |
| 2241 | ) -> (TDEFLStatus, usize, usize) { |
| 2242 | d.params.out_buf_ofs = 0; |
| 2243 | d.params.src_pos = 0; |
| 2244 | |
| 2245 | let prev_ok = d.params.prev_return_status == TDEFLStatus::Okay; |
| 2246 | let flush_finish_once = d.params.flush != TDEFLFlush::Finish || flush == TDEFLFlush::Finish; |
| 2247 | |
| 2248 | d.params.flush = flush; |
| 2249 | if !prev_ok || !flush_finish_once { |
| 2250 | d.params.prev_return_status = TDEFLStatus::BadParam; |
| 2251 | return (d.params.prev_return_status, 0, 0); |
| 2252 | } |
| 2253 | |
| 2254 | if d.params.flush_remaining != 0 || d.params.finished { |
| 2255 | let res = flush_output_buffer(callback, &mut d.params); |
| 2256 | d.params.prev_return_status = res.0; |
| 2257 | return res; |
| 2258 | } |
| 2259 | |
| 2260 | let one_probe = d.params.flags & MAX_PROBES_MASK == 1; |
| 2261 | let greedy = d.params.flags & TDEFL_GREEDY_PARSING_FLAG != 0; |
| 2262 | let filter_or_rle = d.params.flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS) != 0; |
| 2263 | |
| 2264 | let raw = d.params.flags & TDEFL_FORCE_ALL_RAW_BLOCKS != 0; |
| 2265 | |
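    // Pick the compression routine: stored blocks only when requested, the fast mode
    // for a single probe with greedy parsing and no filter/RLE flags, and the normal
    // (hash chain) mode otherwise.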
| 2266 | let compress_success = if raw { |
| 2267 | compress_stored(d, callback) |
| 2268 | } else if one_probe && greedy && !filter_or_rle { |
| 2269 | compress_fast(d, callback) |
| 2270 | } else { |
| 2271 | compress_normal(d, callback) |
| 2272 | }; |
| 2273 | |
| 2274 | if !compress_success { |
| 2275 | return ( |
| 2276 | d.params.prev_return_status, |
| 2277 | d.params.src_pos, |
| 2278 | d.params.out_buf_ofs, |
| 2279 | ); |
| 2280 | } |
| 2281 | |
| 2282 | if let Some(in_buf) = callback.in_buf { |
| 2283 | if d.params.flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32) != 0 { |
| 2284 | d.params.adler32 = update_adler32(d.params.adler32, &in_buf[..d.params.src_pos]); |
| 2285 | } |
| 2286 | } |
| 2287 | |
| 2288 | let flush_none = d.params.flush == TDEFLFlush::None; |
| 2289 | let in_left = callback.in_buf.map_or(0, |buf| buf.len()) - d.params.src_pos; |
| 2290 | let remaining = in_left != 0 || d.params.flush_remaining != 0; |
| 2291 | if !flush_none && d.dict.lookahead_size == 0 && !remaining { |
| 2292 | let flush = d.params.flush; |
| 2293 | match flush_block(d, callback, flush) { |
| 2294 | Err(_) => { |
| 2295 | d.params.prev_return_status = TDEFLStatus::PutBufFailed; |
| 2296 | return ( |
| 2297 | d.params.prev_return_status, |
| 2298 | d.params.src_pos, |
| 2299 | d.params.out_buf_ofs, |
| 2300 | ); |
| 2301 | } |
| 2302 | Ok(x) if x < 0 => { |
| 2303 | return ( |
| 2304 | d.params.prev_return_status, |
| 2305 | d.params.src_pos, |
| 2306 | d.params.out_buf_ofs, |
| 2307 | ) |
| 2308 | } |
| 2309 | _ => { |
| 2310 | d.params.finished = d.params.flush == TDEFLFlush::Finish; |
| 2311 | if d.params.flush == TDEFLFlush::Full { |
| 2312 | d.dict.b.hash.fill(0); |
| 2313 | d.dict.b.next.fill(0); |
| 2314 | d.dict.size = 0; |
| 2315 | } |
| 2316 | } |
| 2317 | } |
| 2318 | } |
| 2319 | |
| 2320 | let res = flush_output_buffer(callback, &mut d.params); |
| 2321 | d.params.prev_return_status = res.0; |
| 2322 | |
| 2323 | res |
| 2324 | } |
| 2325 | |
| 2326 | /// Create a set of compression flags using parameters used by zlib and other compressors. |
/// Mainly intended to ease transition from C libraries, as it deals with raw integers.
| 2328 | /// |
| 2329 | /// # Parameters |
/// `level` determines the compression level. Clamped to a maximum of 10; negative values
/// result in `CompressionLevel::DefaultLevel`.
/// `window_bits`: If above 0, wraps the stream in a zlib wrapper; if 0 or negative, outputs a
/// raw deflate stream.
| 2334 | /// `strategy`: Sets the strategy if this conforms to any of the values in `CompressionStrategy`. |
| 2335 | /// |
| 2336 | /// # Notes |
| 2337 | /// This function may be removed or moved to the `miniz_oxide_c_api` in the future. |
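///
/// # Example
/// A small sketch using raw integer parameters the way a C caller would (strategy `0`
/// corresponds to `CompressionStrategy::Default`):
/// ```
/// use miniz_oxide::deflate::core::create_comp_flags_from_zip_params;
///
/// // Level 6 with a zlib wrapper vs. the same level as a raw deflate stream.
/// let zlib_flags = create_comp_flags_from_zip_params(6, 15, 0);
/// let raw_flags = create_comp_flags_from_zip_params(6, -15, 0);
/// assert_ne!(zlib_flags, raw_flags);
/// ```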
| 2338 | pub fn create_comp_flags_from_zip_params(level: i32, window_bits: i32, strategy: i32) -> u32 { |
| 2339 | let num_probes = (if level >= 0 { |
| 2340 | cmp::min(10, level) |
| 2341 | } else { |
| 2342 | CompressionLevel::DefaultLevel as i32 |
| 2343 | }) as usize; |
| 2344 | let greedy = if level <= 3 { |
| 2345 | TDEFL_GREEDY_PARSING_FLAG |
| 2346 | } else { |
| 2347 | 0 |
| 2348 | }; |
| 2349 | let mut comp_flags = u32::from(NUM_PROBES[num_probes]) | greedy; |
| 2350 | |
| 2351 | if window_bits > 0 { |
| 2352 | comp_flags |= TDEFL_WRITE_ZLIB_HEADER; |
| 2353 | } |
| 2354 | |
| 2355 | if level == 0 { |
| 2356 | comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS; |
| 2357 | } else if strategy == CompressionStrategy::Filtered as i32 { |
| 2358 | comp_flags |= TDEFL_FILTER_MATCHES; |
| 2359 | } else if strategy == CompressionStrategy::HuffmanOnly as i32 { |
| 2360 | comp_flags &= !MAX_PROBES_MASK; |
| 2361 | } else if strategy == CompressionStrategy::Fixed as i32 { |
| 2362 | comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS; |
| 2363 | } else if strategy == CompressionStrategy::RLE as i32 { |
| 2364 | comp_flags |= TDEFL_RLE_MATCHES; |
| 2365 | } |
| 2366 | |
| 2367 | comp_flags |
| 2368 | } |
| 2369 | |
#[cfg(test)]
| 2371 | mod test { |
| 2372 | use super::{ |
| 2373 | compress_to_output, create_comp_flags_from_zip_params, read_u16_le, write_u16_le, |
| 2374 | CompressionStrategy, CompressorOxide, TDEFLFlush, TDEFLStatus, DEFAULT_FLAGS, |
| 2375 | MZ_DEFAULT_WINDOW_BITS, |
| 2376 | }; |
| 2377 | use crate::inflate::decompress_to_vec; |
| 2378 | use alloc::vec; |
| 2379 | |
    #[test]
| 2381 | fn u16_to_slice() { |
| 2382 | let mut slice = [0, 0]; |
| 2383 | write_u16_le(2000, &mut slice, 0); |
| 2384 | assert_eq!(slice, [208, 7]); |
| 2385 | } |
| 2386 | |
    #[test]
| 2388 | fn u16_from_slice() { |
| 2389 | let slice = [208, 7]; |
| 2390 | assert_eq!(read_u16_le(&slice, 0), 2000); |
| 2391 | } |
| 2392 | |
    #[test]
| 2394 | fn compress_output() { |
| 2395 | assert_eq!( |
| 2396 | DEFAULT_FLAGS, |
| 2397 | create_comp_flags_from_zip_params( |
| 2398 | 4, |
| 2399 | MZ_DEFAULT_WINDOW_BITS, |
| 2400 | CompressionStrategy::Default as i32 |
| 2401 | ) |
| 2402 | ); |
| 2403 | |
| 2404 | let slice = [ |
| 2405 | 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, |
| 2406 | ]; |
| 2407 | let mut encoded = vec![]; |
| 2408 | let flags = create_comp_flags_from_zip_params(6, 0, 0); |
| 2409 | let mut d = CompressorOxide::new(flags); |
| 2410 | let (status, in_consumed) = |
| 2411 | compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| { |
| 2412 | encoded.extend_from_slice(out); |
| 2413 | true |
| 2414 | }); |
| 2415 | |
| 2416 | assert_eq!(status, TDEFLStatus::Done); |
| 2417 | assert_eq!(in_consumed, slice.len()); |
| 2418 | |
| 2419 | let decoded = decompress_to_vec(&encoded[..]).unwrap(); |
| 2420 | assert_eq!(&decoded[..], &slice[..]); |
| 2421 | } |
| 2422 | |
    #[test]
| 2424 | /// Check fast compress mode |
| 2425 | fn compress_fast() { |
| 2426 | let slice = [ |
| 2427 | 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, |
| 2428 | ]; |
| 2429 | let mut encoded = vec![]; |
| 2430 | let flags = create_comp_flags_from_zip_params(1, 0, 0); |
| 2431 | let mut d = CompressorOxide::new(flags); |
| 2432 | let (status, in_consumed) = |
| 2433 | compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| { |
| 2434 | encoded.extend_from_slice(out); |
| 2435 | true |
| 2436 | }); |
| 2437 | |
| 2438 | assert_eq!(status, TDEFLStatus::Done); |
| 2439 | assert_eq!(in_consumed, slice.len()); |
| 2440 | |
| 2441 | // Needs to be altered if algorithm improves. |
| 2442 | assert_eq!( |
| 2443 | &encoded[..], |
| 2444 | [99, 100, 98, 102, 1, 98, 48, 98, 3, 147, 204, 76, 204, 140, 76, 204, 0] |
| 2445 | ); |
| 2446 | |
| 2447 | let decoded = decompress_to_vec(&encoded[..]).unwrap(); |
| 2448 | assert_eq!(&decoded[..], &slice[..]); |
| 2449 | } |
| 2450 | |
    #[test]
| 2452 | fn zlib_window_bits() { |
| 2453 | use crate::inflate::stream::{inflate, InflateState}; |
| 2454 | use crate::DataFormat; |
| 2455 | use alloc::boxed::Box; |
| 2456 | let slice = [ |
| 2457 | 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 6, 1, 2, 3, 1, 2, 3, 2, 3, 1, 2, 3, 35, 22, 22, 2, |
| 2458 | 6, 2, 6, |
| 2459 | ]; |
| 2460 | let mut encoded = vec![]; |
| 2461 | let flags = create_comp_flags_from_zip_params(2, 1, CompressionStrategy::RLE.into()); |
| 2462 | let mut d = CompressorOxide::new(flags); |
| 2463 | let (status, in_consumed) = |
| 2464 | compress_to_output(&mut d, &slice, TDEFLFlush::Finish, |out: &[u8]| { |
| 2465 | encoded.extend_from_slice(out); |
| 2466 | true |
| 2467 | }); |
| 2468 | |
| 2469 | assert_eq!(status, TDEFLStatus::Done); |
| 2470 | assert_eq!(in_consumed, slice.len()); |
| 2471 | |
| 2472 | let mut output = vec![0; slice.len()]; |
| 2473 | |
| 2474 | let mut decompressor = Box::new(InflateState::new(DataFormat::Zlib)); |
| 2475 | |
| 2476 | let mut out_slice = output.as_mut_slice(); |
        // Feed 1 byte at a time with no back buffer to test that RLE encoding has been used.
| 2478 | for i in 0..encoded.len() { |
| 2479 | let result = inflate( |
| 2480 | &mut decompressor, |
| 2481 | &encoded[i..i + 1], |
| 2482 | out_slice, |
| 2483 | crate::MZFlush::None, |
| 2484 | ); |
| 2485 | out_slice = &mut out_slice[result.bytes_written..]; |
| 2486 | } |
| 2487 | let cmf = decompressor.decompressor().zlib_header().0; |
| 2488 | assert_eq!(cmf, 8); |
| 2489 | assert_eq!(output, slice) |
| 2490 | } |
| 2491 | } |
| 2492 | |