1 |
|
2 | //! Specialized binary input and output.
|
3 | //! Uses the error handling for this crate.
|
4 |
|
5 | #![doc (hidden)]
|
6 | pub use ::std::io::{Read, Write};
|
7 |
|
8 | use half::slice::{HalfFloatSliceExt};
|
9 | use lebe::prelude::*;
|
10 | use ::half::f16;
|
11 | use crate::error::{Error, Result, UnitResult, IoResult};
|
12 | use std::io::{Seek, SeekFrom};
|
13 | use std::path::Path;
|
14 | use std::fs::File;
|
15 | use std::convert::TryFrom;
|
16 |
|
17 |
|
18 | /// Skip reading uninteresting bytes without allocating.
|
19 | #[inline ]
|
20 | pub fn skip_bytes(read: &mut impl Read, count: usize) -> IoResult<()> {
|
21 | let count: u64 = u64::try_from(count).unwrap();
|
22 |
|
23 | let skipped: u64 = std::io::copy(
|
24 | &mut read.by_ref().take(count),
|
25 | &mut std::io::sink()
|
26 | )?;
|
27 |
|
28 | // the reader may have ended before we skipped the desired number of bytes
|
29 | if skipped < count {
|
30 | return Err(std::io::Error::new(
|
31 | kind:std::io::ErrorKind::UnexpectedEof,
|
32 | error:"cannot skip more bytes than exist"
|
33 | ));
|
34 | }
|
35 |
|
36 | debug_assert_eq!(skipped, count, "skip bytes bug" );
|
37 | Ok(())
|
38 | }
|
39 |
|
40 | /// If an error occurs while writing, attempts to delete the partially written file.
|
41 | /// Creates a file just before the first write operation, not when this function is called.
|
42 | #[inline ]
|
43 | pub fn attempt_delete_file_on_write_error<'p>(path: &'p Path, write: impl FnOnce(LateFile<'p>) -> UnitResult) -> UnitResult {
|
44 | match write(LateFile::from(path)) {
|
45 | Err(error: Error) => { // FIXME deletes existing file if creation of new file fails?
|
46 | let _deleted: Result<(), Error> = std::fs::remove_file(path); // ignore deletion errors
|
47 | Err(error)
|
48 | },
|
49 |
|
50 | ok: Result<(), Error> => ok,
|
51 | }
|
52 | }
|
53 |
|
/// A file that is created lazily: the file system is only touched
/// once the first write or seek actually happens.
#[derive (Debug)]
pub struct LateFile<'p> {
    // path at which the file will be created on first use
    path: &'p Path,

    // `None` until the first write or seek creates the file
    file: Option<File>
}
|
59 |
|
60 | impl<'p> From<&'p Path> for LateFile<'p> {
|
61 | fn from(path: &'p Path) -> Self { Self { path, file: None } }
|
62 | }
|
63 |
|
64 | impl<'p> LateFile<'p> {
|
65 | fn file(&mut self) -> std::io::Result<&mut File> {
|
66 | if self.file.is_none() { self.file = Some(File::create(self.path)?); }
|
67 | Ok(self.file.as_mut().unwrap()) // will not be reached if creation fails
|
68 | }
|
69 | }
|
70 |
|
71 | impl<'p> std::io::Write for LateFile<'p> {
|
72 | fn write(&mut self, buffer: &[u8]) -> std::io::Result<usize> {
|
73 | self.file()?.write(buf:buffer)
|
74 | }
|
75 |
|
76 | fn flush(&mut self) -> std::io::Result<()> {
|
77 | if let Some(file: &mut File) = &mut self.file { file.flush() }
|
78 | else { Ok(()) }
|
79 | }
|
80 | }
|
81 |
|
82 | impl<'p> Seek for LateFile<'p> {
|
83 | fn seek(&mut self, position: SeekFrom) -> std::io::Result<u64> {
|
84 | self.file()?.seek(pos:position)
|
85 | }
|
86 | }
|
87 |
|
88 |
|
/// Peek a single byte without consuming it.
#[derive (Debug)]
pub struct PeekRead<T> {

    /// Cannot be exposed as it will not contain peeked values anymore.
    inner: T,

    // the byte that was read ahead, if any;
    // `Some(Err(_))` remembers an io error that occurred while peeking
    peeked: Option<IoResult<u8>>,
}
|
98 |
|
99 | impl<T: Read> PeekRead<T> {
|
100 |
|
101 | /// Wrap a reader to make it peekable.
|
102 | #[inline ]
|
103 | pub fn new(inner: T) -> Self {
|
104 | Self { inner, peeked: None }
|
105 | }
|
106 |
|
107 | /// Read a single byte and return that without consuming it.
|
108 | /// The next `read` call will include that byte.
|
109 | #[inline ]
|
110 | pub fn peek_u8(&mut self) -> &IoResult<u8> {
|
111 | self.peeked = self.peeked.take().or_else(|| Some(u8::read_from_little_endian(&mut self.inner)));
|
112 | self.peeked.as_ref().unwrap() // unwrap cannot fail because we just set it
|
113 | }
|
114 |
|
115 | /// Skip a single byte if it equals the specified value.
|
116 | /// Returns whether the value was found.
|
117 | /// Consumes the peeked result if an error occurred.
|
118 | #[inline ]
|
119 | pub fn skip_if_eq(&mut self, value: u8) -> IoResult<bool> {
|
120 | match self.peek_u8() {
|
121 | Ok(peeked) if *peeked == value => {
|
122 | self.peeked = None; // consume the byte
|
123 | Ok(true)
|
124 | },
|
125 |
|
126 | Ok(_) => Ok(false),
|
127 |
|
128 | // return the error otherwise.
|
129 | // unwrap is safe because this branch cannot be reached otherwise.
|
130 | // we need to take() from self because io errors cannot be cloned.
|
131 | Err(_) => Err(self.peeked.take().unwrap().err().unwrap())
|
132 | }
|
133 | }
|
134 | }
|
135 |
|
136 |
|
137 | impl<T: Read> Read for PeekRead<T> {
|
138 | fn read(&mut self, target_buffer: &mut [u8]) -> IoResult<usize> {
|
139 | if target_buffer.is_empty() {
|
140 | return Ok(0)
|
141 | }
|
142 |
|
143 | match self.peeked.take() {
|
144 | None => self.inner.read(buf:target_buffer),
|
145 | Some(peeked: Result) => {
|
146 | target_buffer[0] = peeked?;
|
147 |
|
148 | // indexing [1..] is safe because an empty buffer already returned ok
|
149 | Ok(1 + self.inner.read(&mut target_buffer[1..])?)
|
150 | }
|
151 | }
|
152 | }
|
153 | }
|
154 |
|
155 | impl<T: Read + Seek> PeekRead<Tracking<T>> {
|
156 |
|
157 | /// Seek this read to the specified byte position.
|
158 | /// Discards any previously peeked value.
|
159 | pub fn skip_to(&mut self, position: usize) -> std::io::Result<()> {
|
160 | self.inner.seek_read_to(target_position:position)?;
|
161 | self.peeked = None;
|
162 | Ok(())
|
163 | }
|
164 | }
|
165 |
|
impl<T: Read> PeekRead<Tracking<T>> {

    /// Current number of bytes read.
    /// Delegates to the tracked inner reader; a buffered peeked byte
    /// has already been counted by the inner `Tracking` read.
    pub fn byte_position(&self) -> usize {
        self.inner.byte_position()
    }
}
|
173 |
|
/// Keep track of what byte we are at.
/// Used to skip back to a previous place after writing some information.
#[derive (Debug)]
pub struct Tracking<T> {

    /// Do not expose to prevent seeking without updating position
    inner: T,

    // number of bytes read or written so far,
    // updated by the Read/Write impls below
    position: usize,
}
|
184 |
|
185 | impl<T: Read> Read for Tracking<T> {
|
186 | fn read(&mut self, buffer: &mut [u8]) -> std::io::Result<usize> {
|
187 | let count: usize = self.inner.read(buf:buffer)?;
|
188 | self.position += count;
|
189 | Ok(count)
|
190 | }
|
191 | }
|
192 |
|
193 | impl<T: Write> Write for Tracking<T> {
|
194 | fn write(&mut self, buffer: &[u8]) -> std::io::Result<usize> {
|
195 | let count: usize = self.inner.write(buf:buffer)?;
|
196 | self.position += count;
|
197 | Ok(count)
|
198 | }
|
199 |
|
200 | fn flush(&mut self) -> std::io::Result<()> {
|
201 | self.inner.flush()
|
202 | }
|
203 | }
|
204 |
|
205 | impl<T> Tracking<T> {
|
206 |
|
207 | /// If `inner` is a reference, if must never be seeked directly,
|
208 | /// but only through this `Tracking` instance.
|
209 | pub fn new(inner: T) -> Self {
|
210 | Tracking { inner, position: 0 }
|
211 | }
|
212 |
|
213 | /// Current number of bytes written or read.
|
214 | pub fn byte_position(&self) -> usize {
|
215 | self.position
|
216 | }
|
217 | }
|
218 |
|
219 | impl<T: Read + Seek> Tracking<T> {
|
220 |
|
221 | /// Set the reader to the specified byte position.
|
222 | /// If it is only a couple of bytes, no seek system call is performed.
|
223 | pub fn seek_read_to(&mut self, target_position: usize) -> std::io::Result<()> {
|
224 | let delta: i128 = target_position as i128 - self.position as i128; // FIXME panicked at 'attempt to subtract with overflow'
|
225 | debug_assert!(delta.abs() < usize::MAX as i128);
|
226 |
|
227 | if delta > 0 && delta < 16 { // TODO profile that this is indeed faster than a syscall! (should be because of bufread buffer discard)
|
228 | skip_bytes(self, count:delta as usize)?;
|
229 | self.position += delta as usize;
|
230 | }
|
231 | else if delta != 0 {
|
232 | self.inner.seek(pos:SeekFrom::Start(u64::try_from(target_position).unwrap()))?;
|
233 | self.position = target_position;
|
234 | }
|
235 |
|
236 | Ok(())
|
237 | }
|
238 | }
|
239 |
|
240 | impl<T: Write + Seek> Tracking<T> {
|
241 |
|
242 | /// Move the writing cursor to the specified target byte index.
|
243 | /// If seeking forward, this will write zeroes.
|
244 | pub fn seek_write_to(&mut self, target_position: usize) -> std::io::Result<()> {
|
245 | if target_position < self.position {
|
246 | self.inner.seek(pos:SeekFrom::Start(u64::try_from(target_position).unwrap()))?;
|
247 | }
|
248 | else if target_position > self.position {
|
249 | std::io::copy(
|
250 | &mut std::io::repeat(0).take(u64::try_from(target_position - self.position).unwrap()),
|
251 | self
|
252 | )?;
|
253 | }
|
254 |
|
255 | self.position = target_position;
|
256 | Ok(())
|
257 | }
|
258 | }
|
259 |
|
260 |
|
261 | /// Generic trait that defines common binary operations such as reading and writing for this type.
|
262 | pub trait Data: Sized + Default + Clone {
|
263 |
|
264 | /// Number of bytes this would consume in an exr file.
|
265 | const BYTE_SIZE: usize = ::std::mem::size_of::<Self>();
|
266 |
|
267 | /// Read a value of type `Self`.
|
268 | fn read(read: &mut impl Read) -> Result<Self>;
|
269 |
|
270 | /// Read as many values of type `Self` as fit into the specified slice.
|
271 | /// If the slice cannot be filled completely, returns `Error::Invalid`.
|
272 | fn read_slice(read: &mut impl Read, slice: &mut[Self]) -> UnitResult;
|
273 |
|
274 | /// Read as many values of type `Self` as specified with `data_size`.
|
275 | ///
|
276 | /// This method will not allocate more memory than `soft_max` at once.
|
277 | /// If `hard_max` is specified, it will never read any more than that.
|
278 | /// Returns `Error::Invalid` if reader does not contain the desired number of elements.
|
279 | #[inline ]
|
280 | fn read_vec(read: &mut impl Read, data_size: usize, soft_max: usize, hard_max: Option<usize>, purpose: &'static str) -> Result<Vec<Self>> {
|
281 | let mut vec = Vec::with_capacity(data_size.min(soft_max));
|
282 | Self::read_into_vec(read, &mut vec, data_size, soft_max, hard_max, purpose)?;
|
283 | Ok(vec)
|
284 | }
|
285 |
|
286 | /// Write this value to the writer.
|
287 | fn write(self, write: &mut impl Write) -> UnitResult;
|
288 |
|
289 | /// Write all values of that slice to the writer.
|
290 | fn write_slice(write: &mut impl Write, slice: &[Self]) -> UnitResult;
|
291 |
|
292 |
|
293 | /// Read as many values of type `Self` as specified with `data_size` into the provided vector.
|
294 | ///
|
295 | /// This method will not allocate more memory than `soft_max` at once.
|
296 | /// If `hard_max` is specified, it will never read any more than that.
|
297 | /// Returns `Error::Invalid` if reader does not contain the desired number of elements.
|
298 | #[inline ]
|
299 | fn read_into_vec(read: &mut impl Read, data: &mut Vec<Self>, data_size: usize, soft_max: usize, hard_max: Option<usize>, purpose: &'static str) -> UnitResult {
|
300 | if let Some(max) = hard_max {
|
301 | if data_size > max {
|
302 | return Err(Error::invalid(purpose))
|
303 | }
|
304 | }
|
305 |
|
306 | let soft_max = hard_max.unwrap_or(soft_max).min(soft_max);
|
307 | let end = data.len() + data_size;
|
308 |
|
309 | // do not allocate more than $chunks memory at once
|
310 | // (most of the time, this loop will run only once)
|
311 | while data.len() < end {
|
312 | let chunk_start = data.len();
|
313 | let chunk_end = (chunk_start + soft_max).min(data_size);
|
314 |
|
315 | data.resize(chunk_end, Self::default());
|
316 | Self::read_slice(read, &mut data[chunk_start .. chunk_end])?; // safe because of `min(data_size)``
|
317 | }
|
318 |
|
319 | Ok(())
|
320 | }
|
321 |
|
322 | /// Write the length of the slice and then its contents.
|
323 | #[inline ]
|
324 | fn write_i32_sized_slice<W: Write>(write: &mut W, slice: &[Self]) -> UnitResult {
|
325 | i32::try_from(slice.len())?.write(write)?;
|
326 | Self::write_slice(write, slice)
|
327 | }
|
328 |
|
329 | /// Read the desired element count and then read that many items into a vector.
|
330 | ///
|
331 | /// This method will not allocate more memory than `soft_max` at once.
|
332 | /// If `hard_max` is specified, it will never read any more than that.
|
333 | /// Returns `Error::Invalid` if reader does not contain the desired number of elements.
|
334 | #[inline ]
|
335 | fn read_i32_sized_vec(read: &mut impl Read, soft_max: usize, hard_max: Option<usize>, purpose: &'static str) -> Result<Vec<Self>> {
|
336 | let size = usize::try_from(i32::read(read)?)?;
|
337 | Self::read_vec(read, size, soft_max, hard_max, purpose)
|
338 | }
|
339 |
|
340 | /// Fill the slice with this value.
|
341 | #[inline ]
|
342 | fn fill_slice(self, slice: &mut [Self]) where Self: Copy {
|
343 | // hopefully compiles down to a single memset call
|
344 | for value in slice {
|
345 | *value = self;
|
346 | }
|
347 | }
|
348 | }
|
349 |
|
350 |
|
351 | macro_rules! implement_data_for_primitive {
|
352 | ($kind: ident) => {
|
353 | impl Data for $kind {
|
354 | #[inline]
|
355 | fn read(read: &mut impl Read) -> Result<Self> {
|
356 | Ok(read.read_from_little_endian()?)
|
357 | }
|
358 |
|
359 | #[inline]
|
360 | fn write(self, write: &mut impl Write) -> Result<()> {
|
361 | write.write_as_little_endian(&self)?;
|
362 | Ok(())
|
363 | }
|
364 |
|
365 | #[inline]
|
366 | fn read_slice(read: &mut impl Read, slice: &mut [Self]) -> Result<()> {
|
367 | read.read_from_little_endian_into(slice)?;
|
368 | Ok(())
|
369 | }
|
370 |
|
371 | #[inline]
|
372 | fn write_slice(write: &mut impl Write, slice: &[Self]) -> Result<()> {
|
373 | write.write_as_little_endian(slice)?;
|
374 | Ok(())
|
375 | }
|
376 | }
|
377 | };
|
378 | }
|
379 |
|
380 | implement_data_for_primitive!(u8);
|
381 | implement_data_for_primitive!(i8);
|
382 | implement_data_for_primitive!(i16);
|
383 | implement_data_for_primitive!(u16);
|
384 | implement_data_for_primitive!(u32);
|
385 | implement_data_for_primitive!(i32);
|
386 | implement_data_for_primitive!(i64);
|
387 | implement_data_for_primitive!(u64);
|
388 | implement_data_for_primitive!(f32);
|
389 | implement_data_for_primitive!(f64);
|
390 |
|
391 |
|
392 | impl Data for f16 {
|
393 | #[inline ]
|
394 | fn read(read: &mut impl Read) -> Result<Self> {
|
395 | u16::read(read).map(op:f16::from_bits)
|
396 | }
|
397 |
|
398 | #[inline ]
|
399 | fn read_slice(read: &mut impl Read, slice: &mut [Self]) -> Result<()> {
|
400 | let bits: &mut [u16] = slice.reinterpret_cast_mut();
|
401 | u16::read_slice(read, slice:bits)
|
402 | }
|
403 |
|
404 | #[inline ]
|
405 | fn write(self, write: &mut impl Write) -> Result<()> {
|
406 | self.to_bits().write(write)
|
407 | }
|
408 |
|
409 | #[inline ]
|
410 | fn write_slice(write: &mut impl Write, slice: &[Self]) -> Result<()> {
|
411 | let bits: &[u16] = slice.reinterpret_cast();
|
412 | u16::write_slice(write, slice:bits)
|
413 | }
|
414 | }
|
415 |
|
416 |
|
#[cfg (test)]
mod test {
    use crate::io::PeekRead;
    use std::io::Read;

    // exercises peeking, reading, and the sticky end-of-stream error of PeekRead
    #[test ]
    fn peek(){
        use lebe::prelude::*;
        let buffer: &[u8] = &[0,1,2,3];
        let mut peek = PeekRead::new(buffer);

        // repeated peeks return the same byte without consuming it
        assert_eq!(peek.peek_u8().as_ref().unwrap(), &0);
        assert_eq!(peek.peek_u8().as_ref().unwrap(), &0);
        assert_eq!(peek.peek_u8().as_ref().unwrap(), &0);
        // a real read consumes the previously peeked byte
        assert_eq!(u8::read_from_little_endian(&mut peek).unwrap(), 0_u8);

        // a plain read consumes bytes 1 and 2
        assert_eq!(peek.read(&mut [0,0]).unwrap(), 2);

        assert_eq!(peek.peek_u8().as_ref().unwrap(), &3);
        assert_eq!(u8::read_from_little_endian(&mut peek).unwrap(), 3_u8);

        // the buffer is exhausted: peeking stores the error and keeps returning it
        assert!(peek.peek_u8().is_err());
        assert!(peek.peek_u8().is_err());
        assert!(peek.peek_u8().is_err());
        assert!(peek.peek_u8().is_err());

        assert!(u8::read_from_little_endian(&mut peek).is_err());
    }
}
|
446 |
|
447 |
|
448 | |