//! Composable structures to handle writing an image.


use std::fmt::Debug;
use std::io::Seek;
use std::iter::Peekable;
use std::ops::Not;
use std::sync::mpsc;
use rayon_core::{ThreadPool, ThreadPoolBuildError};

use smallvec::alloc::collections::BTreeMap;

use crate::block::UncompressedBlock;
use crate::block::chunk::Chunk;
use crate::compression::Compression;
use crate::error::{Error, Result, UnitResult, usize_to_u64};
use crate::io::{Data, Tracking, Write};
use crate::meta::{Headers, MetaData, OffsetTables};
use crate::meta::attribute::LineOrder;

/// Write an exr file by writing one chunk after another in a closure.
/// In the closure, you are provided a chunk writer, which should be used to write all the chunks.
/// Assumes that your write destination is buffered.
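///
/// A minimal sketch of the intended usage; the `headers` and the `(index, block)`
/// iterator are assumed to be constructed elsewhere, for example with
/// `MetaData::collect_ordered_blocks(...)`:
///
/// ```no_run
/// # fn sketch(headers: exr::meta::Headers, blocks: Vec<(usize, exr::block::UncompressedBlock)>) -> exr::error::UnitResult {
/// use std::{fs::File, io::BufWriter};
/// use exr::block::writer::{write_chunks_with, ChunksWriter};
///
/// let file = BufWriter::new(File::create("image.exr")?);
///
/// write_chunks_with(file, headers, true, |meta, chunk_writer| {
///     // compress each block in this thread and write the resulting chunk to the file
///     let mut compressor = chunk_writer.sequential_blocks_compressor(&meta);
///     for (block_index_in_header, block) in blocks {
///         compressor.compress_block(block_index_in_header, block)?;
///     }
///     Ok(())
/// })
/// # }
/// ```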
pub fn write_chunks_with<W: Write + Seek>(
    buffered_write: W, headers: Headers, pedantic: bool,
    write_chunks: impl FnOnce(MetaData, &mut ChunkWriter<W>) -> UnitResult
) -> UnitResult {
    // this closure approach ensures that after writing all chunks, the file is always completed, checked, and flushed
    let (meta, mut writer) = ChunkWriter::new_for_buffered(buffered_write, headers, pedantic)?;
    write_chunks(meta, &mut writer)?;
    writer.complete_meta_data()
}

/// Can consume compressed pixel chunks, writing them to a file.
/// Use `sequential_blocks_compressor` or `parallel_blocks_compressor` to compress your data,
/// or use `compress_all_blocks_sequential` or `compress_all_blocks_parallel`.
/// Use `on_progress` to obtain a new writer
/// that triggers a callback for each block.
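///
/// A minimal sketch combining a progress callback with parallel compression inside
/// the `write_chunks_with` closure; the `headers` and `blocks` values are assumed to exist already:
///
/// ```no_run
/// # fn sketch(headers: exr::meta::Headers, blocks: Vec<(usize, exr::block::UncompressedBlock)>) -> exr::error::UnitResult {
/// use std::{fs::File, io::BufWriter};
/// use exr::block::writer::{write_chunks_with, ChunksWriter};
///
/// let file = BufWriter::new(File::create("image.exr")?);
///
/// write_chunks_with(file, headers, true, |meta, chunk_writer| {
///     chunk_writer
///         .on_progress(|progress| println!("written: {:.1}%", progress * 100.0))
///         .compress_all_blocks_parallel(&meta, blocks.into_iter())
/// })
/// # }
/// ```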
// #[must_use]
#[derive(Debug)]
#[must_use]
pub struct ChunkWriter<W> {
    header_count: usize,
    byte_writer: Tracking<W>,
    chunk_indices_byte_location: std::ops::Range<usize>,
    chunk_indices_increasing_y: OffsetTables,
    chunk_count: usize, // TODO compose?
}

/// A new writer that triggers a callback
/// for each block written to the inner writer.
#[derive(Debug)]
#[must_use]
pub struct OnProgressChunkWriter<'w, W, F> {
    chunk_writer: &'w mut W,
    written_chunks: usize,
    on_progress: F,
}

/// Write chunks to a byte destination.
/// Then write each chunk with `writer.write_chunk(chunk)`.
pub trait ChunksWriter: Sized {

    /// The total number of chunks that the complete file will contain.
    fn total_chunks_count(&self) -> usize;

    /// Writes a single chunk to the file.
    /// Errors when the chunk at this index was already written;
    /// any more calls will result in an error and have no effect.
    /// If writing results in an error, the file and the writer
    /// may remain in an invalid state and should not be used further.
    fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult;

    /// Obtain a new writer that calls the specified closure for each block that is written to this writer.
    fn on_progress<F>(&mut self, on_progress: F) -> OnProgressChunkWriter<'_, Self, F> where F: FnMut(f64) {
        OnProgressChunkWriter { chunk_writer: self, written_chunks: 0, on_progress }
    }

    /// Obtain a new writer that can compress blocks to chunks, which are then passed to this writer.
    fn sequential_blocks_compressor<'w>(&'w mut self, meta: &'w MetaData) -> SequentialBlocksCompressor<'w, Self> {
        SequentialBlocksCompressor::new(meta, self)
    }

    /// Obtain a new writer that can compress blocks to chunks on multiple threads, which are then passed to this writer.
    /// Returns `None` if the sequential compressor should be used instead (thread pool creation failure or too large performance overhead).
    fn parallel_blocks_compressor<'w>(&'w mut self, meta: &'w MetaData) -> Option<ParallelBlocksCompressor<'w, Self>> {
        ParallelBlocksCompressor::new(meta, self)
    }

    /// Compresses all blocks to the file.
    /// The index of the block must be in increasing line order within the header.
    /// Obtain iterator with `MetaData::collect_ordered_blocks(...)` or similar methods.
    fn compress_all_blocks_sequential(mut self, meta: &MetaData, blocks: impl Iterator<Item=(usize, UncompressedBlock)>) -> UnitResult {
        let mut writer = self.sequential_blocks_compressor(meta);

        // TODO check block order if line order is not unspecified!
        for (index_in_header_increasing_y, block) in blocks {
            writer.compress_block(index_in_header_increasing_y, block)?;
        }

        // TODO debug_assert_eq!(self.is_complete());
        Ok(())
    }

    /// Compresses all blocks to the file.
    /// The index of the block must be in increasing line order within the header.
    /// Obtain iterator with `MetaData::collect_ordered_blocks(...)` or similar methods.
    /// Will fall back to sequential processing where threads are not available, or where it would not speed up the process.
    fn compress_all_blocks_parallel(mut self, meta: &MetaData, blocks: impl Iterator<Item=(usize, UncompressedBlock)>) -> UnitResult {
        let mut parallel_writer = match self.parallel_blocks_compressor(meta) {
            None => return self.compress_all_blocks_sequential(meta, blocks),
            Some(writer) => writer,
        };

        // TODO check block order if line order is not unspecified!
        for (index_in_header_increasing_y, block) in blocks {
            parallel_writer.add_block_to_compression_queue(index_in_header_increasing_y, block)?;
        }

        // TODO debug_assert_eq!(self.is_complete());
        Ok(())
    }
}


impl<W> ChunksWriter for ChunkWriter<W> where W: Write + Seek {

    /// The total number of chunks that the complete file will contain.
    fn total_chunks_count(&self) -> usize { self.chunk_count }

    /// Writes a single chunk to the file.
    /// Errors when the chunk at this index was already written;
    /// any more calls will result in an error and have no effect.
    /// If writing results in an error, the file and the writer
    /// may remain in an invalid state and should not be used further.
    fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult {
        let header_chunk_indices = &mut self.chunk_indices_increasing_y[chunk.layer_index];

        if index_in_header_increasing_y >= header_chunk_indices.len() {
            return Err(Error::invalid("too large chunk index"));
        }

        let chunk_index_slot = &mut header_chunk_indices[index_in_header_increasing_y];
        if *chunk_index_slot != 0 {
            return Err(Error::invalid(format!("chunk at index {} is already written", index_in_header_increasing_y)));
        }

        *chunk_index_slot = usize_to_u64(self.byte_writer.byte_position());
        chunk.write(&mut self.byte_writer, self.header_count)?;
        Ok(())
    }
}

impl<W> ChunkWriter<W> where W: Write + Seek {
    // -- the following functions are private, because they must be called in a strict order --

    /// Writes the meta data and zeroed offset tables as a placeholder.
    fn new_for_buffered(buffered_byte_writer: W, headers: Headers, pedantic: bool) -> Result<(MetaData, Self)> {
        let mut write = Tracking::new(buffered_byte_writer);
        let requirements = MetaData::write_validating_to_buffered(&mut write, headers.as_slice(), pedantic)?;

        // TODO: use increasing line order where possible, but this requires us to know whether we want to be parallel right now
        /*// if non-parallel compression, we always use increasing order anyways
        if !parallel || !has_compression {
            for header in &mut headers {
                if header.line_order == LineOrder::Unspecified {
                    header.line_order = LineOrder::Increasing;
                }
            }
        }*/

        let offset_table_size: usize = headers.iter().map(|header| header.chunk_count).sum();

        let offset_table_start_byte = write.byte_position();
        let offset_table_end_byte = write.byte_position() + offset_table_size * u64::BYTE_SIZE;

        // skip offset tables, filling with 0, will be updated after the last chunk has been written
        write.seek_write_to(offset_table_end_byte)?;

        let header_count = headers.len();
        let chunk_indices_increasing_y = headers.iter()
            .map(|header| vec![0_u64; header.chunk_count]).collect();

        let meta_data = MetaData { requirements, headers };

        Ok((meta_data, ChunkWriter {
            header_count,
            byte_writer: write,
            chunk_count: offset_table_size,
            chunk_indices_byte_location: offset_table_start_byte .. offset_table_end_byte,
            chunk_indices_increasing_y,
        }))
    }

    /// Seek back to the meta data, write offset tables, and flush the byte writer.
    /// Leaves the writer seeked to the middle of the file.
    fn complete_meta_data(mut self) -> UnitResult {
        if self.chunk_indices_increasing_y.iter().flatten().any(|&index| index == 0) {
            return Err(Error::invalid("some chunks are not written yet"))
        }

        // write all offset tables
        debug_assert_ne!(self.byte_writer.byte_position(), self.chunk_indices_byte_location.end, "offset table has already been updated");
        self.byte_writer.seek_write_to(self.chunk_indices_byte_location.start)?;

        for table in self.chunk_indices_increasing_y {
            u64::write_slice(&mut self.byte_writer, table.as_slice())?;
        }

        self.byte_writer.flush()?; // make sure we catch all (possibly delayed) io errors before returning
        Ok(())
    }

}


impl<'w, W, F> ChunksWriter for OnProgressChunkWriter<'w, W, F> where W: 'w + ChunksWriter, F: FnMut(f64) {
    fn total_chunks_count(&self) -> usize {
        self.chunk_writer.total_chunks_count()
    }

    fn write_chunk(&mut self, index_in_header_increasing_y: usize, chunk: Chunk) -> UnitResult {
        let total_chunks = self.total_chunks_count();
        let on_progress = &mut self.on_progress;

        // guarantee on_progress being called with 0 once
        if self.written_chunks == 0 { on_progress(0.0); }

        self.chunk_writer.write_chunk(index_in_header_increasing_y, chunk)?;

        self.written_chunks += 1;

        on_progress({
            // guarantee finishing with progress 1.0 for last block at least once, float division might slightly differ from 1.0
            if self.written_chunks == total_chunks { 1.0 }
            else { self.written_chunks as f64 / total_chunks as f64 }
        });

        Ok(())
    }
}


/// Write blocks that appear in any order and reorder them before writing.
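///
/// A minimal sketch of manual use; the `meta`, `chunk_writer`, and the incoming
/// `(chunk_index_in_file, chunk_y_index, chunk)` triples are assumed to come from elsewhere:
///
/// ```no_run
/// # fn sketch<W: exr::block::writer::ChunksWriter>(
/// #     meta: &exr::meta::MetaData, chunk_writer: &mut W,
/// #     unordered_chunks: Vec<(usize, usize, exr::block::chunk::Chunk)>
/// # ) -> exr::error::UnitResult {
/// use exr::block::writer::SortedBlocksWriter;
///
/// let mut sorted_writer = SortedBlocksWriter::new(meta, chunk_writer);
///
/// for (chunk_index_in_file, chunk_y_index, chunk) in unordered_chunks {
///     // writes the chunk immediately if it is next in line, otherwise stashes it
///     sorted_writer.write_or_stash_chunk(chunk_index_in_file, chunk_y_index, chunk)?;
/// }
/// Ok(())
/// # }
/// ```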
#[derive(Debug)]
#[must_use]
pub struct SortedBlocksWriter<'w, W> {
    chunk_writer: &'w mut W,
    pending_chunks: BTreeMap<usize, (usize, Chunk)>,
    unwritten_chunk_indices: Peekable<std::ops::Range<usize>>,
    requires_sorting: bool, // using this instead of Option, because of borrowing
}


impl<'w, W> SortedBlocksWriter<'w, W> where W: ChunksWriter {

    /// New sorting writer. If no header requires a specific line order,
    /// chunks are simply passed through to the inner writer without sorting.
    pub fn new(meta_data: &MetaData, chunk_writer: &'w mut W) -> SortedBlocksWriter<'w, W> {
        let requires_sorting = meta_data.headers.iter()
            .any(|header| header.line_order != LineOrder::Unspecified);

        let total_chunk_count = chunk_writer.total_chunks_count();

        SortedBlocksWriter {
            pending_chunks: BTreeMap::new(),
            unwritten_chunk_indices: (0 .. total_chunk_count).peekable(),
            requires_sorting,
            chunk_writer
        }
    }

    /// Write the chunk now if it is the next one in the file, otherwise stash it for later.
    /// Whenever a chunk is written, any stashed chunks that directly follow it are also written.
    pub fn write_or_stash_chunk(&mut self, chunk_index_in_file: usize, chunk_y_index: usize, chunk: Chunk) -> UnitResult {
        if self.requires_sorting.not() {
            return self.chunk_writer.write_chunk(chunk_y_index, chunk);
        }

        // write this chunk now if possible
        if self.unwritten_chunk_indices.peek() == Some(&chunk_index_in_file) {
            self.chunk_writer.write_chunk(chunk_y_index, chunk)?;
            self.unwritten_chunk_indices.next().expect("peeked chunk index is missing");

            // write all pending blocks that are immediate successors of this block
            while let Some((next_chunk_y_index, next_chunk)) = self
                .unwritten_chunk_indices.peek().cloned()
                .and_then(|id| self.pending_chunks.remove(&id))
            {
                self.chunk_writer.write_chunk(next_chunk_y_index, next_chunk)?;
                self.unwritten_chunk_indices.next().expect("peeked chunk index is missing");
            }
        }

        else {
            // the argument block is not to be written now,
            // and all the pending blocks are not next up either,
            // so just stash this block
            self.pending_chunks.insert(chunk_index_in_file, (chunk_y_index, chunk));
        }

        Ok(())
    }

    /// Where the chunks will be written to.
    pub fn inner_chunks_writer(&self) -> &W {
        &self.chunk_writer
    }
}



/// Compress blocks to a chunk writer in this thread.
#[derive(Debug)]
#[must_use]
pub struct SequentialBlocksCompressor<'w, W> {
    meta: &'w MetaData,
    chunks_writer: &'w mut W,
}

impl<'w, W> SequentialBlocksCompressor<'w, W> where W: 'w + ChunksWriter {

    /// New blocks writer.
    pub fn new(meta: &'w MetaData, chunks_writer: &'w mut W) -> Self { Self { meta, chunks_writer } }

    /// This is where the compressed blocks are written to.
    pub fn inner_chunks_writer(&'w self) -> &'w W { self.chunks_writer }

    /// Compress a single block immediately. The index of the block must be in increasing line order.
    pub fn compress_block(&mut self, index_in_header_increasing_y: usize, block: UncompressedBlock) -> UnitResult {
        self.chunks_writer.write_chunk(
            index_in_header_increasing_y,
            block.compress_to_chunk(&self.meta.headers)?
        )
    }
}

/// Compress blocks to a chunk writer with multiple threads.
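///
/// A minimal sketch of feeding blocks into the compressor by hand; the `meta`,
/// `chunk_writer`, and `blocks` values are assumed to be prepared elsewhere, and
/// `compress_all_blocks_parallel` wraps this same pattern:
///
/// ```no_run
/// # fn sketch<W: exr::block::writer::ChunksWriter>(
/// #     meta: &exr::meta::MetaData, chunk_writer: &mut W,
/// #     blocks: Vec<(usize, exr::block::UncompressedBlock)>
/// # ) -> exr::error::UnitResult {
/// use exr::block::writer::ParallelBlocksCompressor;
///
/// match ParallelBlocksCompressor::new(meta, chunk_writer) {
///     // `None` means the thread pool is unavailable or no header uses compression
///     None => { /* fall back to a SequentialBlocksCompressor here */ }
///
///     Some(mut compressor) => {
///         for (index_in_header_increasing_y, block) in blocks {
///             // waits for a free slot if too many blocks are already compressing
///             compressor.add_block_to_compression_queue(index_in_header_increasing_y, block)?;
///         }
///         // flush anything still in flight (only needed if fewer blocks than expected were added)
///         compressor.write_all_queued_chunks()?;
///     }
/// }
/// Ok(())
/// # }
/// ```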
#[derive(Debug)]
#[must_use]
pub struct ParallelBlocksCompressor<'w, W> {
    meta: &'w MetaData,
    sorted_writer: SortedBlocksWriter<'w, W>,

    sender: mpsc::Sender<Result<(usize, usize, Chunk)>>,
    receiver: mpsc::Receiver<Result<(usize, usize, Chunk)>>,
    pool: rayon_core::ThreadPool,

    currently_compressing_count: usize,
    written_chunk_count: usize, // used to check for last chunk
    max_threads: usize,
    next_incoming_chunk_index: usize, // used to remember original chunk order
}

impl<'w, W> ParallelBlocksCompressor<'w, W> where W: 'w + ChunksWriter {

    /// New blocks writer. Returns `None` if sequential compression should be used.
    /// Use `new_with_thread_pool` to customize the threadpool.
    pub fn new(meta: &'w MetaData, chunks_writer: &'w mut W) -> Option<Self> {
        Self::new_with_thread_pool(meta, chunks_writer, ||{
            rayon_core::ThreadPoolBuilder::new()
                .thread_name(|index| format!("OpenEXR Block Compressor Thread # {}", index))
                .build()
        })
    }

    /// New blocks writer. Returns `None` if sequential compression should be used.
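    ///
    /// A minimal sketch with a custom thread pool; `meta` and `chunk_writer` are assumed
    /// to exist, and `rayon_core::ThreadPoolBuilder` is the same builder used by `new`:
    ///
    /// ```no_run
    /// # fn sketch<'w, W: exr::block::writer::ChunksWriter>(
    /// #     meta: &'w exr::meta::MetaData, chunk_writer: &'w mut W
    /// # ) -> Option<exr::block::writer::ParallelBlocksCompressor<'w, W>> {
    /// use exr::block::writer::ParallelBlocksCompressor;
    ///
    /// ParallelBlocksCompressor::new_with_thread_pool(meta, chunk_writer, || {
    ///     rayon_core::ThreadPoolBuilder::new()
    ///         .num_threads(2) // limit compression to two worker threads
    ///         .build()
    /// })
    /// # }
    /// ```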
    pub fn new_with_thread_pool<CreatePool>(
        meta: &'w MetaData, chunks_writer: &'w mut W, try_create_thread_pool: CreatePool)
        -> Option<Self>
        where CreatePool: FnOnce() -> std::result::Result<ThreadPool, ThreadPoolBuildError>
    {
        if meta.headers.iter().all(|head| head.compression == Compression::Uncompressed) {
            return None;
        }

        // in case thread pool creation fails (for example on WASM currently),
        // we revert to sequential compression
        let pool = match try_create_thread_pool() {
            Ok(pool) => pool,

            // TODO print warning?
            Err(_) => return None,
        };

        let max_threads = pool.current_num_threads().max(1).min(chunks_writer.total_chunks_count()) + 2; // ca one block for each thread at all times
        let (send, recv) = mpsc::channel(); // TODO bounded channel simplifies logic?

        Some(Self {
            sorted_writer: SortedBlocksWriter::new(meta, chunks_writer),
            next_incoming_chunk_index: 0,
            currently_compressing_count: 0,
            written_chunk_count: 0,
            sender: send,
            receiver: recv,
            max_threads,
            pool,
            meta,
        })
    }

    /// This is where the compressed blocks are written to.
    pub fn inner_chunks_writer(&'w self) -> &'w W { self.sorted_writer.inner_chunks_writer() }

    // private, as may underflow counter in release mode
    fn write_next_queued_chunk(&mut self) -> UnitResult {
        debug_assert!(self.currently_compressing_count > 0, "cannot wait for chunks as there are none left");

        let some_compressed_chunk = self.receiver.recv()
            .expect("cannot receive compressed block");

        self.currently_compressing_count -= 1;
        let (chunk_file_index, chunk_y_index, chunk) = some_compressed_chunk?;
        self.sorted_writer.write_or_stash_chunk(chunk_file_index, chunk_y_index, chunk)?;

        self.written_chunk_count += 1;
        Ok(())
    }

    /// Wait until all currently compressing chunks in the compressor have been written.
    pub fn write_all_queued_chunks(&mut self) -> UnitResult {
        while self.currently_compressing_count > 0 {
            self.write_next_queued_chunk()?;
        }

        debug_assert_eq!(self.currently_compressing_count, 0, "counter does not match block count");
        Ok(())
    }

    /// Add a single block to the compressor queue. The index of the block must be in increasing line order.
    /// When calling this function for the last block, this method waits until all the blocks have been written.
    /// This only works when you write as many blocks as the image expects;
    /// otherwise, call `write_all_queued_chunks` yourself after adding the last block.
    /// Waits for a block from the queue to be written, if the queue already has enough items.
    pub fn add_block_to_compression_queue(&mut self, index_in_header_increasing_y: usize, block: UncompressedBlock) -> UnitResult {

        // if pipe is full, block to wait for a slot to free up
        if self.currently_compressing_count >= self.max_threads {
            self.write_next_queued_chunk()?;
        }

        // add the argument chunk to the compression queue
        let index_in_file = self.next_incoming_chunk_index;
        let sender = self.sender.clone();
        let meta = self.meta.clone();

        self.pool.spawn(move ||{
            let compressed_or_err = block.compress_to_chunk(&meta.headers);

            // compression in another thread could already have failed, in which case
            // the error is already being handled and the receiver may be gone,
            // so we simply ignore the result of this send and do nothing
            let _ = sender.send(compressed_or_err.map(move |compressed| (index_in_file, index_in_header_increasing_y, compressed)));
        });

        self.currently_compressing_count += 1;
        self.next_incoming_chunk_index += 1;

        // if this is the last chunk, wait for all chunks to complete before returning
        if self.written_chunk_count + self.currently_compressing_count == self.inner_chunks_writer().total_chunks_count() {
            self.write_all_queued_chunks()?;
            debug_assert_eq!(
                self.written_chunk_count, self.inner_chunks_writer().total_chunks_count(),
                "written chunk count mismatch"
            );
        }


        Ok(())
    }
}