
//! Describes all meta data possible in an exr file.
//! Contains functionality to read and write meta data from bytes.
//! Browse the `exr::image` module to get started with the high-level interface.

pub mod attribute;
pub mod header;


use crate::io::*;
use ::smallvec::SmallVec;
use self::attribute::*;
use crate::block::chunk::{TileCoordinates, CompressedBlock};
use crate::error::*;
use std::fs::File;
use std::io::{BufReader};
use crate::math::*;
use std::collections::{HashSet};
use std::convert::TryFrom;
use crate::meta::header::{Header};
use crate::block::{BlockIndex, UncompressedBlock};


// TODO rename MetaData to ImageInfo?

/// Contains the complete meta data of an exr image.
/// Defines how the image is split up in the file,
/// the number and type of images and channels,
/// and various other attributes.
/// The usage of custom attributes is encouraged.
#[derive(Debug, Clone, PartialEq)]
pub struct MetaData {

    /// Some flags summarizing the features that must be supported to decode the file.
    pub requirements: Requirements,

    /// One header to describe each layer in this file.
    // TODO rename to layer descriptions?
    pub headers: Headers,
}


/// List of `Header`s.
pub type Headers = SmallVec<[Header; 3]>;

/// List of `OffsetTable`s.
pub type OffsetTables = SmallVec<[OffsetTable; 3]>;


/// The offset table is an ordered list of indices referencing pixel data in the exr file.
/// For each pixel tile in the image, an index exists, which points to the byte-location
/// of the corresponding pixel data in the file. That index can be used to load specific
/// portions of an image without processing all bytes in a file. For each header,
/// an offset table exists with its indices ordered by `LineOrder::Increasing`.
// If the multipart bit is unset and the chunkCount attribute is not present,
// the number of entries in the chunk table is computed using the
// dataWindow, tileDesc, and compression attribute.
//
// If the multipart bit is set, the header must contain a
// chunkCount attribute, that contains the length of the offset table.
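/// A hedged usage sketch (`"image.exr"` is a placeholder path): read the meta data
/// first, then the offset tables, which directly follow the headers in the byte stream.
/// ```no_run
/// use exr::meta::MetaData;
/// use exr::io::PeekRead;
/// use std::{fs::File, io::BufReader};
///
/// let mut read = PeekRead::new(BufReader::new(File::open("image.exr").unwrap()));
/// let meta_data = MetaData::read_from_buffered(&mut read, true).unwrap();
///
/// // one offset table exists per header
/// let tables = MetaData::read_offset_tables(&mut read, &meta_data.headers).unwrap();
/// assert_eq!(tables.len(), meta_data.headers.len());
/// ```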
pub type OffsetTable = Vec<u64>;


/// A summary of requirements that must be met to read this exr file.
/// Used to determine whether this file can be read by a given reader.
/// It includes the OpenEXR version number. This library aims to support version `2.0`.
#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)]
pub struct Requirements {

    /// This library supports reading version 1 and 2, and writing version 2.
    // TODO write version 1 for simple images
    pub file_format_version: u8,

    /// If true, this image has tiled blocks and contains only a single layer.
    /// If false and not deep and not multilayer, this image is a single layer image with scan line blocks.
    pub is_single_layer_and_tiled: bool,

    // in c or bad c++ this might have been relevant (omg is he allowed to say that)
    /// Whether this file has strings with a length greater than 31.
    /// Strings can never be longer than 255.
    pub has_long_names: bool,

    /// This image contains at least one layer with deep data.
    pub has_deep_data: bool,

    /// Whether this file contains multiple layers.
    pub has_multiple_layers: bool,
}


/// Locates a rectangular section of pixels in an image.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct TileIndices {

    /// Index of the tile.
    pub location: TileCoordinates,

    /// Pixel size of the tile.
    pub size: Vec2<usize>,
}

/// How the image pixels are split up into separate blocks.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum BlockDescription {

    /// The image is divided into scan line blocks.
    /// The number of scan lines in a block depends on the compression method.
    ScanLines,

    /// The image is divided into tile blocks.
    /// Also specifies the size of each tile in the image
    /// and whether this image contains multiple resolution levels.
    Tiles(TileDescription)
}


/*impl TileIndices {
    pub fn cmp(&self, other: &Self) -> Ordering {
        match self.location.level_index.1.cmp(&other.location.level_index.1) {
            Ordering::Equal => {
                match self.location.level_index.0.cmp(&other.location.level_index.0) {
                    Ordering::Equal => {
                        match self.location.tile_index.1.cmp(&other.location.tile_index.1) {
                            Ordering::Equal => {
                                self.location.tile_index.0.cmp(&other.location.tile_index.0)
                            },

                            other => other,
                        }
                    },

                    other => other
                }
            },

            other => other
        }
    }
}*/

impl BlockDescription {

    /// Whether this image is tiled. If false, this image is divided into scan line blocks.
    pub fn has_tiles(&self) -> bool {
        match self {
            BlockDescription::Tiles { .. } => true,
            _ => false
        }
    }
}





/// The first four bytes of each exr file.
/// Used to abort reading non-exr files.
pub mod magic_number {
    use super::*;

    /// The first four bytes of each exr file.
    pub const BYTES: [u8; 4] = [0x76, 0x2f, 0x31, 0x01];

    /// Without validation, write this instance to the byte stream.
    pub fn write(write: &mut impl Write) -> Result<()> {
        u8::write_slice(write, &self::BYTES)
    }

    /// Consumes four bytes from the reader and returns whether the file may be an exr file.
    // TODO check if exr before allocating BufRead
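    /// A small doctest sketch with an obviously wrong file identifier:
    /// ```
    /// use exr::meta::magic_number;
    /// // four zero bytes are not the exr magic number
    /// assert_eq!(magic_number::is_exr(&mut [0_u8; 4].as_slice()).unwrap(), false);
    /// ```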
    pub fn is_exr(read: &mut impl Read) -> Result<bool> {
        let mut magic_num = [0; 4];
        u8::read_slice(read, &mut magic_num)?;
        Ok(magic_num == self::BYTES)
    }

    /// Validate this image. If it is an exr file, return `Ok(())`.
    pub fn validate_exr(read: &mut impl Read) -> UnitResult {
        if self::is_exr(read)? {
            Ok(())

        } else {
            Err(Error::invalid("file identifier missing"))
        }
    }
}

/// A `0_u8` at the end of a sequence.
pub mod sequence_end {
    use super::*;

    /// Number of bytes this would consume in an exr file.
    pub fn byte_size() -> usize {
        1
    }

    /// Without validation, write this instance to the byte stream.
    pub fn write<W: Write>(write: &mut W) -> UnitResult {
        0_u8.write(write)
    }

    /// Peeks the next byte. If it is zero, consumes the byte and returns true.
    pub fn has_come(read: &mut PeekRead<impl Read>) -> Result<bool> {
        Ok(read.skip_if_eq(0)?)
    }
}

fn missing_attribute(name: &str) -> Error {
    Error::invalid(format!("missing or invalid {} attribute", name))
}


/// Compute the number of tiles required to contain all values.
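/// A small doctest with illustrative values (not taken from any real file):
/// ```
/// // 100 pixels, split into tiles of 32 pixels, require 4 tiles,
/// // the last of which is only partially filled
/// assert_eq!(exr::meta::compute_block_count(100, 32), 4);
/// ```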
pub fn compute_block_count(full_res: usize, tile_size: usize) -> usize {
    // round up, because if the image is not evenly divisible by the tiles,
    // we add another tile at the end (which is only partially used)
    RoundingMode::Up.divide(full_res, tile_size)
}

/// Compute the start position and size of a block inside a dimension.
#[inline]
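/// A small doctest with illustrative values:
/// ```
/// // block 3 of 32-pixel blocks in a 100-pixel dimension
/// // starts at pixel 96 and spans only the remaining 4 pixels
/// assert_eq!(exr::meta::calculate_block_position_and_size(100, 32, 3).unwrap(), (96, 4));
/// ```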
pub fn calculate_block_position_and_size(total_size: usize, block_size: usize, block_index: usize) -> Result<(usize, usize)> {
    let block_position = block_size * block_index;

    Ok((
        block_position,
        calculate_block_size(total_size, block_size, block_position)?
    ))
}

/// Calculate the size of a single block. If this is the last block,
/// this returns only the remaining size, which is smaller than the
/// default block size when the image is not evenly divisible into blocks.
// TODO use this method everywhere instead of convoluted formulas
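/// A small doctest with illustrative values:
/// ```
/// // blocks that fit entirely keep the default size, the last block is clamped
/// assert_eq!(exr::meta::calculate_block_size(100, 32, 64).unwrap(), 32);
/// assert_eq!(exr::meta::calculate_block_size(100, 32, 96).unwrap(), 4);
///
/// // a block starting outside of the dimension is invalid
/// assert!(exr::meta::calculate_block_size(100, 32, 100).is_err());
/// ```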
#[inline]
pub fn calculate_block_size(total_size: usize, block_size: usize, block_position: usize) -> Result<usize> {
    if block_position >= total_size {
        return Err(Error::invalid("block index"))
    }

    if block_position + block_size <= total_size {
        Ok(block_size)
    }
    else {
        Ok(total_size - block_position)
    }
}


/// Calculate the number of mip levels in a given resolution.
// TODO this should be cached? log2 may be very expensive
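/// A small doctest with an illustrative resolution:
/// ```
/// use exr::math::RoundingMode;
/// // a 1024 pixel axis has 11 levels: 1024, 512, 256, ..., 2, 1
/// assert_eq!(exr::meta::compute_level_count(RoundingMode::Down, 1024), 11);
/// ```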
pub fn compute_level_count(round: RoundingMode, full_res: usize) -> usize {
    usize::try_from(round.log2(u32::try_from(full_res).unwrap())).unwrap() + 1
}

/// Calculate the size of a single mip level by index.
// TODO this should be cached? log2 may be very expensive
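/// A small doctest with illustrative values:
/// ```
/// use exr::math::RoundingMode;
/// // level 0 is the full resolution, level 2 has been halved twice
/// assert_eq!(exr::meta::compute_level_size(RoundingMode::Down, 1000, 0), 1000);
/// assert_eq!(exr::meta::compute_level_size(RoundingMode::Down, 1000, 2), 250);
/// ```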
pub fn compute_level_size(round: RoundingMode, full_res: usize, level_index: usize) -> usize {
    assert!(level_index < std::mem::size_of::<usize>() * 8, "largest level size exceeds maximum integer value");
    round.divide(full_res, 1 << level_index).max(1)
}

/// Iterates over all rip map level resolutions of a given size, including the indices of each level.
/// The order of iteration conforms to `LineOrder::Increasing`.
// TODO cache these?
// TODO compute these directly instead of summing up an iterator?
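/// A small doctest sketch; x and y level counts are combined independently:
/// ```
/// use exr::math::{Vec2, RoundingMode};
/// let levels: Vec<_> = exr::meta::rip_map_levels(RoundingMode::Down, Vec2(4, 2)).collect();
///
/// // three x levels times two y levels for a 4x2 image
/// assert_eq!(levels.len(), 3 * 2);
/// assert_eq!(levels.first(), Some(&(Vec2(0, 0), Vec2(4, 2))));
/// ```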
pub fn rip_map_levels(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=(Vec2<usize>, Vec2<usize>)> {
    rip_map_indices(round, max_resolution).map(move |level_indices| {
        // TODO progressively divide instead??
        let width = compute_level_size(round, max_resolution.width(), level_indices.x());
        let height = compute_level_size(round, max_resolution.height(), level_indices.y());
        (level_indices, Vec2(width, height))
    })
}

/// Iterates over all mip map level resolutions of a given size, including the indices of each level.
/// The order of iteration conforms to `LineOrder::Increasing`.
// TODO cache all these level values when computing table offset size??
// TODO compute these directly instead of summing up an iterator?
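/// A small doctest sketch; each level halves both dimensions, never shrinking below one pixel:
/// ```
/// use exr::math::{Vec2, RoundingMode};
/// let levels: Vec<_> = exr::meta::mip_map_levels(RoundingMode::Down, Vec2(4, 2)).collect();
/// assert_eq!(levels, vec![ (0, Vec2(4, 2)), (1, Vec2(2, 1)), (2, Vec2(1, 1)) ]);
/// ```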
pub fn mip_map_levels(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=(usize, Vec2<usize>)> {
    mip_map_indices(round, max_resolution)
        .map(move |level_index|{
            // TODO progressively divide instead??
            let width = compute_level_size(round, max_resolution.width(), level_index);
            let height = compute_level_size(round, max_resolution.height(), level_index);
            (level_index, Vec2(width, height))
        })
}

/// Iterates over all rip map level indices of a given size.
/// The order of iteration conforms to `LineOrder::Increasing`.
pub fn rip_map_indices(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=Vec2<usize>> {
    let (width, height) = (
        compute_level_count(round, max_resolution.width()),
        compute_level_count(round, max_resolution.height())
    );

    (0..height).flat_map(move |y_level|{
        (0..width).map(move |x_level|{
            Vec2(x_level, y_level)
        })
    })
}

/// Iterates over all mip map level indices of a given size.
/// The order of iteration conforms to `LineOrder::Increasing`.
pub fn mip_map_indices(round: RoundingMode, max_resolution: Vec2<usize>) -> impl Iterator<Item=usize> {
    0..compute_level_count(round, max_resolution.width().max(max_resolution.height()))
}

/// Compute the number of chunks that an image is divided into. May be an expensive operation.
// If not multilayer and chunkCount not present,
// the number of entries in the chunk table is computed
// using the dataWindow and tileDesc attributes and the compression format
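/// A small doctest with illustrative values:
/// ```
/// use exr::math::Vec2;
/// use exr::compression::Compression;
/// use exr::meta::{compute_chunk_count, BlockDescription};
///
/// // uncompressed scan line images store a single line per block
/// let chunks = compute_chunk_count(Compression::Uncompressed, Vec2(2048, 512), BlockDescription::ScanLines);
/// assert_eq!(chunks, 512);
/// ```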
pub fn compute_chunk_count(compression: Compression, data_size: Vec2<usize>, blocks: BlockDescription) -> usize {

    if let BlockDescription::Tiles(tiles) = blocks {
        let round = tiles.rounding_mode;
        let Vec2(tile_width, tile_height) = tiles.tile_size;

        // TODO cache all these level values??
        use crate::meta::attribute::LevelMode::*;
        match tiles.level_mode {
            Singular => {
                let tiles_x = compute_block_count(data_size.width(), tile_width);
                let tiles_y = compute_block_count(data_size.height(), tile_height);
                tiles_x * tiles_y
            }

            MipMap => {
                mip_map_levels(round, data_size).map(|(_, Vec2(level_width, level_height))| {
                    compute_block_count(level_width, tile_width) * compute_block_count(level_height, tile_height)
                }).sum()
            },

            RipMap => {
                rip_map_levels(round, data_size).map(|(_, Vec2(level_width, level_height))| {
                    compute_block_count(level_width, tile_width) * compute_block_count(level_height, tile_height)
                }).sum()
            }
        }
    }

    // scan line blocks never have mip maps
    else {
        compute_block_count(data_size.height(), compression.scan_lines_per_block())
    }
}



impl MetaData {

    /// Read the exr meta data from a file.
    /// Use `read_from_unbuffered` instead if you do not have a file.
    /// Does not validate the meta data.
    #[must_use]
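    /// A hedged usage sketch (`"image.exr"` is a placeholder path):
    /// ```no_run
    /// let meta_data = exr::meta::MetaData::read_from_file("image.exr", true).unwrap();
    /// for header in &meta_data.headers {
    ///     println!("layer {:?} has size {:?}", header.own_attributes.layer_name, header.layer_size);
    /// }
    /// ```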
    pub fn read_from_file(path: impl AsRef<::std::path::Path>, pedantic: bool) -> Result<Self> {
        Self::read_from_unbuffered(File::open(path)?, pedantic)
    }

    /// Buffer the reader and then read the exr meta data from it.
    /// Use `read_from_buffered` if your reader is an in-memory reader.
    /// Use `read_from_file` if you have a file path.
    /// Does not validate the meta data.
    #[must_use]
    pub fn read_from_unbuffered(unbuffered: impl Read, pedantic: bool) -> Result<Self> {
        Self::read_from_buffered(BufReader::new(unbuffered), pedantic)
    }

    /// Read the exr meta data from a reader.
    /// Use `read_from_file` if you have a file path.
    /// Use `read_from_unbuffered` if this is not an in-memory reader.
    /// Does not validate the meta data.
    #[must_use]
    pub fn read_from_buffered(buffered: impl Read, pedantic: bool) -> Result<Self> {
        let mut read = PeekRead::new(buffered);
        MetaData::read_unvalidated_from_buffered_peekable(&mut read, pedantic)
    }

    /// Does __not validate__ the meta data completely.
    #[must_use]
    pub(crate) fn read_unvalidated_from_buffered_peekable(read: &mut PeekRead<impl Read>, pedantic: bool) -> Result<Self> {
        magic_number::validate_exr(read)?;

        let requirements = Requirements::read(read)?;

        // do this check now in order to fast-fail for newer versions and features than version 2
        requirements.validate()?;

        let headers = Header::read_all(read, &requirements, pedantic)?;

        // TODO check if supporting requirements 2 always implies supporting requirements 1
        Ok(MetaData { requirements, headers })
    }

    /// Validates the meta data.
    #[must_use]
    pub(crate) fn read_validated_from_buffered_peekable(
        read: &mut PeekRead<impl Read>, pedantic: bool
    ) -> Result<Self> {
        let meta_data = Self::read_unvalidated_from_buffered_peekable(read, !pedantic)?;
        MetaData::validate(meta_data.headers.as_slice(), pedantic)?;
        Ok(meta_data)
    }
    /// Validates the meta data and writes it to the stream.
    /// If pedantic, returns errors for files that may produce errors in other exr readers.
    /// Returns the automatically detected minimum requirement flags.
    pub(crate) fn write_validating_to_buffered(write: &mut impl Write, headers: &[Header], pedantic: bool) -> Result<Requirements> {
        // pedantic validation to not allow slightly invalid files
        // that still could be read correctly in theory
        let minimal_requirements = Self::validate(headers, pedantic)?;

        magic_number::write(write)?;
        minimal_requirements.write(write)?;
        Header::write_all(headers, write, minimal_requirements.has_multiple_layers)?;
        Ok(minimal_requirements)
    }

    /// Read one offset table from the reader for each header.
    pub fn read_offset_tables(read: &mut PeekRead<impl Read>, headers: &Headers) -> Result<OffsetTables> {
        headers.iter()
            .map(|header| u64::read_vec(read, header.chunk_count, u16::MAX as usize, None, "offset table size"))
            .collect()
    }

    /// Skip the offset tables by advancing the reader by the required byte count.
    // TODO use seek for large (probably all) tables!
    pub fn skip_offset_tables(read: &mut PeekRead<impl Read>, headers: &Headers) -> Result<usize> {
        let chunk_count: usize = headers.iter().map(|header| header.chunk_count).sum();
        crate::io::skip_bytes(read, chunk_count * u64::BYTE_SIZE)?; // TODO this should seek for large tables
        Ok(chunk_count)
    }

    /// This iterator tells you the block indices of all blocks that must be in the image.
    /// The order of the blocks depends on the `LineOrder` attribute
    /// (unspecified line order is treated the same as increasing line order).
    /// The blocks written to the file must be exactly in this order,
    /// except for when the `LineOrder` is unspecified.
    /// The index represents the block index, in increasing line order, within the header.
    pub fn enumerate_ordered_header_block_indices(&self) -> impl '_ + Iterator<Item=(usize, BlockIndex)> {
        crate::block::enumerate_ordered_header_block_indices(&self.headers)
    }
    /// Go through all the block indices in the correct order and call the specified closure for each of these blocks.
    /// That way, the block indices are filled with real block data and returned as an iterator.
    /// The closure returns an `UncompressedBlock` for each block index.
    pub fn collect_ordered_blocks<'s>(&'s self, mut get_block: impl 's + FnMut(BlockIndex) -> UncompressedBlock)
        -> impl 's + Iterator<Item=(usize, UncompressedBlock)>
    {
        self.enumerate_ordered_header_block_indices().map(move |(index_in_header, block_index)|{
            (index_in_header, get_block(block_index))
        })
    }

    /// Go through all the block indices in the correct order and call the specified closure for each of these blocks.
    /// That way, the block indices are filled with real block data and returned as an iterator.
    /// The closure returns the byte data for each block index.
    pub fn collect_ordered_block_data<'s>(&'s self, mut get_block_data: impl 's + FnMut(BlockIndex) -> Vec<u8>)
        -> impl 's + Iterator<Item=(usize, UncompressedBlock)>
    {
        self.collect_ordered_blocks(move |block_index|
            UncompressedBlock { index: block_index, data: get_block_data(block_index) }
        )
    }

    /// Validates this meta data. Returns the minimal possible requirements.
    pub fn validate(headers: &[Header], pedantic: bool) -> Result<Requirements> {
        if headers.len() == 0 {
            return Err(Error::invalid("at least one layer is required"));
        }

        let deep = false; // TODO deep data
        let is_multilayer = headers.len() > 1;
        let first_header_has_tiles = headers.iter().next()
            .map_or(false, |header| header.blocks.has_tiles());

        let mut minimal_requirements = Requirements {
            // according to the spec, version 2 should only be necessary if `is_multilayer || deep`.
            // but the current open exr library does not support images with version 1, so always use version 2.
            file_format_version: 2,

            // start as low as possible, later increasing if required
            has_long_names: false,

            is_single_layer_and_tiled: !is_multilayer && first_header_has_tiles,
            has_multiple_layers: is_multilayer,
            has_deep_data: deep,
        };

        for header in headers {
            if header.deep { // TODO deep data (and then remove this check)
                return Err(Error::unsupported("deep data not supported yet"));
            }

            header.validate(is_multilayer, &mut minimal_requirements.has_long_names, pedantic)?;
        }

        // TODO validation fn!
        /*if let Some(max) = max_pixel_bytes {
            let byte_size: usize = headers.iter()
                .map(|header| header.total_pixel_bytes())
                .sum();

            if byte_size > max {
                return Err(Error::invalid("image larger than specified maximum"));
            }
        }*/

        if pedantic { // check for duplicate header names
            let mut header_names = HashSet::with_capacity(headers.len());
            for header in headers {
                if !header_names.insert(&header.own_attributes.layer_name) {
                    return Err(Error::invalid(format!(
                        "duplicate layer name: `{}`",
                        header.own_attributes.layer_name.as_ref().expect("header validation bug")
                    )));
                }
            }
        }

        if pedantic {
            let must_share = headers.iter().flat_map(|header| header.own_attributes.other.iter())
                .any(|(_, value)| value.to_chromaticities().is_ok() || value.to_time_code().is_ok());

            if must_share {
                return Err(Error::invalid("chromaticities and time code attributes must not exist in own attributes but must be shared instead"));
            }
        }

        if pedantic && headers.len() > 1 { // check for attributes that should not differ in between headers
            let first_header = headers.first().expect("header count validation bug");
            let first_header_attributes = &first_header.shared_attributes;

            for header in &headers[1..] {
                if &header.shared_attributes != first_header_attributes {
                    return Err(Error::invalid("display window, pixel aspect, chromaticities, and time code attributes must be equal for all headers"))
                }
            }
        }

        debug_assert!(minimal_requirements.validate().is_ok(), "inferred requirements are invalid");
        Ok(minimal_requirements)
    }
}




impl Requirements {

    // this is actually used for control flow, as the number of headers may be 1 in a multilayer file
    /// Is this file declared to contain multiple layers?
    pub fn is_multilayer(&self) -> bool {
        self.has_multiple_layers
    }

    /// Read the value without validating.
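    /// A small doctest: version 2 with only the multi-layer bit (bit 12) set,
    /// encoded as a little-endian `u32`:
    /// ```
    /// use exr::meta::Requirements;
    /// let bytes = 0x1002_u32.to_le_bytes();
    /// let requirements = Requirements::read(&mut bytes.as_slice()).unwrap();
    /// assert_eq!(requirements.file_format_version, 2);
    /// assert!(requirements.has_multiple_layers);
    /// ```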
    pub fn read<R: Read>(read: &mut R) -> Result<Self> {
        use ::bit_field::BitField;

        let version_and_flags = u32::read(read)?;

        // take the 8 least significant bits, they contain the file format version number
        let version = (version_and_flags & 0x00FF) as u8;

        // the 24 most significant bits are treated as a set of boolean flags
        let is_single_tile = version_and_flags.get_bit(9);
        let has_long_names = version_and_flags.get_bit(10);
        let has_deep_data = version_and_flags.get_bit(11);
        let has_multiple_layers = version_and_flags.get_bit(12);

        // all remaining bits except 9, 10, 11 and 12 are reserved and should be 0
        // if a file has any of these bits set to 1, it means this file contains
        // a feature that we don't support
        let unknown_flags = version_and_flags >> 13; // all bits above bit 12, which we already parsed

        if unknown_flags != 0 { // TODO test if this correctly detects unsupported files
            return Err(Error::unsupported("too new file feature flags"));
        }

        let version = Requirements {
            file_format_version: version,
            is_single_layer_and_tiled: is_single_tile, has_long_names,
            has_deep_data, has_multiple_layers,
        };

        Ok(version)
    }

    /// Without validation, write this instance to the byte stream.
    pub fn write<W: Write>(self, write: &mut W) -> UnitResult {
        use ::bit_field::BitField;

        // the 8 least significant bits contain the file format version number
        // and the flags are set to 0
        let mut version_and_flags = self.file_format_version as u32;

        // the 24 most significant bits are treated as a set of boolean flags
        version_and_flags.set_bit(9, self.is_single_layer_and_tiled);
        version_and_flags.set_bit(10, self.has_long_names);
        version_and_flags.set_bit(11, self.has_deep_data);
        version_and_flags.set_bit(12, self.has_multiple_layers);
        // all remaining bits except 9, 10, 11 and 12 are reserved and should be 0

        version_and_flags.write(write)?;
        Ok(())
    }

    /// Validate this instance.
    pub fn validate(&self) -> UnitResult {
        if self.file_format_version == 2 {

            match (
                self.is_single_layer_and_tiled, self.has_deep_data, self.has_multiple_layers,
                self.file_format_version
            ) {
                // Single-part scan line. One normal scan line image.
                (false, false, false, 1..=2) => Ok(()),

                // Single-part tile. One normal tiled image.
                (true, false, false, 1..=2) => Ok(()),

                // Multi-part (new in 2.0).
                // Multiple normal images (scan line and/or tiled).
                (false, false, true, 2) => Ok(()),

                // Single-part deep data (new in 2.0).
                // One deep tile or deep scan line part
                (false, true, false, 2) => Ok(()),

                // Multi-part deep data (new in 2.0).
                // Multiple parts (any combination of:
                // tiles, scan lines, deep tiles and/or deep scan lines).
                (false, true, true, 2) => Ok(()),

                _ => Err(Error::invalid("file feature flags"))
            }
        }
        else {
            Err(Error::unsupported("file versions other than 2.0 are not supported"))
        }
    }
}


#[cfg(test)]
mod test {
    use super::*;
    use crate::meta::header::{ImageAttributes, LayerAttributes};

    #[test]
    fn round_trip_requirements() {
        let requirements = Requirements {
            file_format_version: 2,
            is_single_layer_and_tiled: true,
            has_long_names: false,
            has_deep_data: true,
            has_multiple_layers: false
        };

        let mut data: Vec<u8> = Vec::new();
        requirements.write(&mut data).unwrap();
        let read = Requirements::read(&mut data.as_slice()).unwrap();
        assert_eq!(requirements, read);
    }

    #[test]
    fn round_trip(){
        let header = Header {
            channels: ChannelList::new(smallvec![
                ChannelDescription {
                    name: Text::from("main"),
                    sample_type: SampleType::U32,
                    quantize_linearly: false,
                    sampling: Vec2(1, 1)
                }
            ]),
            compression: Compression::Uncompressed,
            line_order: LineOrder::Increasing,
            deep_data_version: Some(1),
            chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
            max_samples_per_pixel: Some(4),
            shared_attributes: ImageAttributes {
                pixel_aspect: 3.0,
                .. ImageAttributes::new(IntegerBounds {
                    position: Vec2(2,1),
                    size: Vec2(11, 9)
                })
            },

            blocks: BlockDescription::ScanLines,
            deep: false,
            layer_size: Vec2(2000, 333),
            own_attributes: LayerAttributes {
                layer_name: Some(Text::from("test name lol")),
                layer_position: Vec2(3, -5),
                screen_window_center: Vec2(0.3, 99.0),
                screen_window_width: 0.19,
                .. Default::default()
            }
        };

        let meta = MetaData {
            requirements: Requirements {
                file_format_version: 2,
                is_single_layer_and_tiled: false,
                has_long_names: false,
                has_deep_data: false,
                has_multiple_layers: false
            },
            headers: smallvec![ header ],
        };


        let mut data: Vec<u8> = Vec::new();
        MetaData::write_validating_to_buffered(&mut data, meta.headers.as_slice(), true).unwrap();
        let meta2 = MetaData::read_from_buffered(data.as_slice(), false).unwrap();
        MetaData::validate(meta2.headers.as_slice(), true).unwrap();
        assert_eq!(meta, meta2);
    }

    #[test]
    fn infer_low_requirements() {
        let header_version_1_short_names = Header {
            channels: ChannelList::new(smallvec![
                ChannelDescription {
                    name: Text::from("main"),
                    sample_type: SampleType::U32,
                    quantize_linearly: false,
                    sampling: Vec2(1, 1)
                }
            ]),
            compression: Compression::Uncompressed,
            line_order: LineOrder::Increasing,
            deep_data_version: Some(1),
            chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
            max_samples_per_pixel: Some(4),
            shared_attributes: ImageAttributes {
                pixel_aspect: 3.0,
                .. ImageAttributes::new(IntegerBounds {
                    position: Vec2(2,1),
                    size: Vec2(11, 9)
                })
            },
            blocks: BlockDescription::ScanLines,
            deep: false,
            layer_size: Vec2(2000, 333),
            own_attributes: LayerAttributes {
                other: vec![
                    (Text::try_from("x").unwrap(), AttributeValue::F32(3.0)),
                    (Text::try_from("y").unwrap(), AttributeValue::F32(-1.0)),
                ].into_iter().collect(),
                .. Default::default()
            }
        };

        let low_requirements = MetaData::validate(
            &[header_version_1_short_names], true
        ).unwrap();

        assert_eq!(low_requirements.has_long_names, false);
        assert_eq!(low_requirements.file_format_version, 2); // always have version 2
        assert_eq!(low_requirements.has_deep_data, false);
        assert_eq!(low_requirements.has_multiple_layers, false);
    }

    #[test]
    fn infer_high_requirements() {
        let header_version_2_long_names = Header {
            channels: ChannelList::new(smallvec![
                ChannelDescription {
                    name: Text::new_or_panic("main"),
                    sample_type: SampleType::U32,
                    quantize_linearly: false,
                    sampling: Vec2(1, 1)
                }
            ]),
            compression: Compression::Uncompressed,
            line_order: LineOrder::Increasing,
            deep_data_version: Some(1),
            chunk_count: compute_chunk_count(Compression::Uncompressed, Vec2(2000, 333), BlockDescription::ScanLines),
            max_samples_per_pixel: Some(4),
            shared_attributes: ImageAttributes {
                pixel_aspect: 3.0,
                .. ImageAttributes::new(IntegerBounds {
                    position: Vec2(2,1),
                    size: Vec2(11, 9)
                })
            },
            blocks: BlockDescription::ScanLines,
            deep: false,
            layer_size: Vec2(2000, 333),
            own_attributes: LayerAttributes {
                layer_name: Some(Text::new_or_panic("oasdasoidfj")),
                other: vec![
                    (Text::new_or_panic("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"), AttributeValue::F32(3.0)),
                    (Text::new_or_panic("y"), AttributeValue::F32(-1.0)),
                ].into_iter().collect(),
                .. Default::default()
            }
        };

        let mut layer_2 = header_version_2_long_names.clone();
        layer_2.own_attributes.layer_name = Some(Text::new_or_panic("anythingelse"));

        let low_requirements = MetaData::validate(
            &[header_version_2_long_names, layer_2], true
        ).unwrap();

        assert_eq!(low_requirements.has_long_names, true);
        assert_eq!(low_requirements.file_format_version, 2);
        assert_eq!(low_requirements.has_deep_data, false);
        assert_eq!(low_requirements.has_multiple_layers, true);
    }
}
