mod table;

use crate::compression::{mod_p, ByteVec};
use crate::error::usize_to_i32;
use crate::io::Data;
use crate::meta::attribute::ChannelList;
use crate::prelude::*;
use std::cmp::min;
use std::mem::size_of;
use table::{EXP_TABLE, LOG_TABLE};
use lebe::io::{ReadPrimitive, WriteEndian};

const BLOCK_SAMPLE_COUNT: usize = 4;

// As B44 compression is only used on f16 channels, we can have a constant for this value.
const BLOCK_X_BYTE_COUNT: usize = BLOCK_SAMPLE_COUNT * size_of::<u16>();

#[inline]
fn convert_from_linear(s: &mut [u16; 16]) {
    for v in s {
        *v = EXP_TABLE[*v as usize];
    }
}

#[inline]
fn convert_to_linear(s: &mut [u16; 16]) {
    for v in s {
        *v = LOG_TABLE[*v as usize];
    }
}

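// `shift_and_round` computes round(x / 2^shift) for non-negative `x`, rounding
// ties to the nearest even value so the quantization error does not drift in
// one direction. For example, `shift_and_round(5, 1)` yields 2, while
// `shift_and_round(7, 1)` yields 4.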
#[inline]
fn shift_and_round(x: i32, shift: i32) -> i32 {
    let x = x << 1;
    let a = (1 << shift) - 1;
    let shift = shift + 1;
    let b = (x >> shift) & 1;
    (x + a + b) >> shift
}

/// Pack a block of 4 by 4 16-bit pixels (32 bytes, the array `s`) into either 14 or 3 bytes.
fn pack(s: [u16; 16], b: &mut [u8], optimize_flat_fields: bool, exact_max: bool) -> usize {

    let mut t = [0u16; 16];

    for i in 0..16 {
        if (s[i] & 0x7c00) == 0x7c00 {
            t[i] = 0x8000;
        } else if (s[i] & 0x8000) != 0 {
            t[i] = !s[i];
        } else {
            t[i] = s[i] | 0x8000;
        }
    }

    let t_max = t.iter().max().unwrap();

    // Compute a set of running differences, r[0] ... r[14]:
    // Find a shift value such that after rounding off the
    // rightmost bits and shifting all differences are between
    // -32 and +31. Then bias the differences so that they
    // end up between 0 and 63.
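    //
    // For example, if after shifting the absolute differences are d[0] = 5 and
    // d[1] = 9, the running difference r[3] = d[0] - d[1] + BIAS is stored as
    // 5 - 9 + 0x20 = 0x1c, which fits into the six available bits.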
    let mut shift = -1;
    let mut d = [0i32; 16];
    let mut r = [0i32; 15];
    let mut r_min: i32;
    let mut r_max: i32;

    const BIAS: i32 = 0x20;

    loop {
        shift += 1;

        // Compute absolute differences, d[0] ... d[15],
        // between t_max and t[0] ... t[15].
        //
        // Shift and round the absolute differences.
        d.iter_mut()
            .zip(&t)
            .for_each(|(d_v, t_v)| *d_v = shift_and_round((t_max - t_v).into(), shift));

        // Convert d[0] .. d[15] into running differences.
        r[0] = d[0] - d[4] + BIAS;
        r[1] = d[4] - d[8] + BIAS;
        r[2] = d[8] - d[12] + BIAS;

        r[3] = d[0] - d[1] + BIAS;
        r[4] = d[4] - d[5] + BIAS;
        r[5] = d[8] - d[9] + BIAS;
        r[6] = d[12] - d[13] + BIAS;

        r[7] = d[1] - d[2] + BIAS;
        r[8] = d[5] - d[6] + BIAS;
        r[9] = d[9] - d[10] + BIAS;
        r[10] = d[13] - d[14] + BIAS;

        r[11] = d[2] - d[3] + BIAS;
        r[12] = d[6] - d[7] + BIAS;
        r[13] = d[10] - d[11] + BIAS;
        r[14] = d[14] - d[15] + BIAS;

        r_min = r[0];
        r_max = r[0];

        r.iter().copied().for_each(|v| {
            if r_min > v {
                r_min = v;
            }

            if r_max < v {
                r_max = v;
            }
        });

        if !(r_min < 0 || r_max > 0x3f) {
            break;
        }
    }

    if r_min == BIAS && r_max == BIAS && optimize_flat_fields {
        // Special case - all pixels have the same value.
        // We encode this in 3 instead of 14 bytes by
        // storing the value 0xfc in the third output byte,
        // which cannot occur in the 14-byte encoding.
        b[0] = (t[0] >> 8) as u8;
        b[1] = t[0] as u8;
        b[2] = 0xfc;

        return 3;
    }

    if exact_max {
        // Adjust t[0] so that the pixel whose value is equal
        // to t_max gets represented as accurately as possible.
        t[0] = t_max - (d[0] << shift) as u16;
    }

    // Pack t[0], shift and r[0] ... r[14] into 14 bytes:
    b[0] = (t[0] >> 8) as u8;
    b[1] = t[0] as u8;

    b[2] = ((shift << 2) | (r[0] >> 4)) as u8;
    b[3] = ((r[0] << 4) | (r[1] >> 2)) as u8;
    b[4] = ((r[1] << 6) | r[2]) as u8;

    b[5] = ((r[3] << 2) | (r[4] >> 4)) as u8;
    b[6] = ((r[4] << 4) | (r[5] >> 2)) as u8;
    b[7] = ((r[5] << 6) | r[6]) as u8;

    b[8] = ((r[7] << 2) | (r[8] >> 4)) as u8;
    b[9] = ((r[8] << 4) | (r[9] >> 2)) as u8;
    b[10] = ((r[9] << 6) | r[10]) as u8;

    b[11] = ((r[11] << 2) | (r[12] >> 4)) as u8;
    b[12] = ((r[12] << 4) | (r[13] >> 2)) as u8;
    b[13] = ((r[13] << 6) | r[14]) as u8;

    14
}

// Tiny macro to read a block array value as a u32.
macro_rules! b32 {
    ($b:expr, $i:expr) => {
        $b[$i] as u32
    };
}

// 0011 1111
const SIX_BITS: u32 = 0x3f;

// Unpack a 14-byte block into 4 by 4 16-bit pixels.
fn unpack14(b: &[u8], s: &mut [u16; 16]) {
    debug_assert_eq!(b.len(), 14);
    debug_assert_ne!(b[2], 0xfc);

    s[0] = ((b32!(b, 0) << 8) | b32!(b, 1)) as u16;

    let shift = b32!(b, 2) >> 2;
    let bias = 0x20 << shift;

    s[4] = (s[0] as u32 + ((((b32!(b, 2) << 4) | (b32!(b, 3) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[8] = (s[4] as u32 + ((((b32!(b, 3) << 2) | (b32!(b, 4) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[12] = (s[8] as u32 + ((b32!(b, 4) & SIX_BITS) << shift) - bias) as u16;

    s[1] = (s[0] as u32 + ((b32!(b, 5) >> 2) << shift) - bias) as u16;
    s[5] = (s[4] as u32 + ((((b32!(b, 5) << 4) | (b32!(b, 6) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[9] = (s[8] as u32 + ((((b32!(b, 6) << 2) | (b32!(b, 7) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[13] = (s[12] as u32 + ((b32!(b, 7) & SIX_BITS) << shift) - bias) as u16;

    s[2] = (s[1] as u32 + ((b32!(b, 8) >> 2) << shift) - bias) as u16;
    s[6] = (s[5] as u32 + ((((b32!(b, 8) << 4) | (b32!(b, 9) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[10] = (s[9] as u32 + ((((b32!(b, 9) << 2) | (b32!(b, 10) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[14] = (s[13] as u32 + ((b32!(b, 10) & SIX_BITS) << shift) - bias) as u16;

    s[3] = (s[2] as u32 + ((b32!(b, 11) >> 2) << shift) - bias) as u16;
    s[7] = (s[6] as u32 + ((((b32!(b, 11) << 4) | (b32!(b, 12) >> 4)) & SIX_BITS) << shift) - bias) as u16;
    s[11] = (s[10] as u32 + ((((b32!(b, 12) << 2) | (b32!(b, 13) >> 6)) & SIX_BITS) << shift) - bias) as u16;
    s[15] = (s[14] as u32 + ((b32!(b, 13) & SIX_BITS) << shift) - bias) as u16;

    for i in 0..16 {
        if (s[i] & 0x8000) != 0 {
            s[i] &= 0x7fff;
        } else {
            s[i] = !s[i];
        }
    }
}

// Unpack a 3-byte block `b` into 4 by 4 identical 16-bit pixels in the `s` array.
fn unpack3(b: &[u8], s: &mut [u16; 16]) {
    // This debug assertion panics for fuzzed images. It appears to be an overly
    // strict check for potential compression errors: with it commented out,
    // decoding simply works (maybe it should return an error instead?).
    // debug_assert_eq!(b[2], 0xfc);

    // Get the 16-bit value from the block.
    let mut value: u16 = ((b32!(b, 0) << 8) | b32!(b, 1)) as u16;

    if (value & 0x8000) != 0 {
        value &= 0x7fff;
    } else {
        value = !value;
    }

    s.fill(value); // All pixels have the same value.
}

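// Per-channel state shared by `compress` and `decompress`: `tmp_start_index` is
// where this channel's samples begin in the temporary planar (non-interleaved)
// buffer, and `tmp_end_index` is a cursor that advances while lines are copied.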
#[derive(Debug)]
struct ChannelData {
    tmp_start_index: usize,
    tmp_end_index: usize,
    resolution: Vec2<usize>,
    y_sampling: usize,
    sample_type: SampleType,
    quantize_linearly: bool,
    samples_per_pixel: usize,
}

// TODO: Unsafe seems to be required to efficiently copy a whole slice of u16 to u8.
// For now, we use a less efficient, yet safe, implementation.
#[inline]
fn memcpy_u16_to_u8(src: &[u16], mut dst: &mut [u8]) {
    use lebe::prelude::*;
    dst.write_as_native_endian(src).expect("byte copy error");
}

#[inline]
fn memcpy_u8_to_u16(mut src: &[u8], dst: &mut [u16]) {
    use lebe::prelude::*;
    src.read_from_native_endian_into(dst).expect("byte copy error");
}

#[inline]
fn cpy_u8(src: &[u16], src_i: usize, dst: &mut [u8], dst_i: usize, n: usize) {
    memcpy_u16_to_u8(&src[src_i..src_i + n], &mut dst[dst_i..dst_i + 2 * n]);
}

pub fn decompress(
    channels: &ChannelList,
    compressed: ByteVec,
    rectangle: IntegerBounds,
    expected_byte_size: usize,
    _pedantic: bool,
) -> Result<ByteVec> {
    debug_assert_eq!(
        expected_byte_size,
        rectangle.size.area() * channels.bytes_per_pixel,
        "expected byte size does not match header" // TODO compute instead of passing argument?
    );

    debug_assert!(!channels.list.is_empty(), "no channels found");

    if compressed.is_empty() {
        return Ok(Vec::new());
    }

    // Extract the channel information needed for decompression.
    let mut channel_data: Vec<ChannelData> = Vec::with_capacity(channels.list.len());
    let mut tmp_read_index = 0;

    for channel in channels.list.iter() {
        let channel = ChannelData {
            tmp_start_index: tmp_read_index,
            tmp_end_index: tmp_read_index,
            resolution: channel.subsampled_resolution(rectangle.size),
            y_sampling: channel.sampling.y(),
            sample_type: channel.sample_type,
            quantize_linearly: channel.quantize_linearly,
            samples_per_pixel: channel.sampling.area(),
        };

        tmp_read_index += channel.resolution.area()
            * channel.samples_per_pixel
            * channel.sample_type.bytes_per_sample();

        channel_data.push(channel);
    }

    // The temporary buffer holds the decompressed B44 data the way it is stored in the
    // compressed buffer (channel by channel). We interleave the final result later.
    let mut tmp = Vec::with_capacity(expected_byte_size);

    // Index in the compressed buffer.
    let mut in_i = 0usize;

    let mut remaining = compressed.len();

    for channel in &channel_data {

        debug_assert_eq!(remaining, compressed.len() - in_i);

        // Compute the information for the current channel.
        let sample_count = channel.resolution.area() * channel.samples_per_pixel;
        let byte_count = sample_count * channel.sample_type.bytes_per_sample();

        // Sample types that do not support B44 compression (u32 and f32) are copied raw.
        // In this branch, the "compressed" array actually contains raw, uncompressed data.
        if channel.sample_type != SampleType::F16 {

            debug_assert_eq!(channel.sample_type.bytes_per_sample(), 4);

            if remaining < byte_count {
                return Err(Error::invalid("not enough data"));
            }

            tmp.extend_from_slice(&compressed[in_i..(in_i + byte_count)]);

            in_i += byte_count;
            remaining -= byte_count;

            continue;
        }

        // HALF channel.
        // The rest of the code assumes we are manipulating u16 (2 byte) values.
        debug_assert_eq!(channel.sample_type, SampleType::F16);
        debug_assert_eq!(channel.sample_type.bytes_per_sample(), size_of::<u16>());

        // Grow the buffer to receive the new uncompressed data.
        tmp.resize(tmp.len() + byte_count, 0);

        let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
        let y_sample_count = channel.resolution.y() * channel.samples_per_pixel;

        let bytes_per_sample = size_of::<u16>();

        let x_byte_count = x_sample_count * bytes_per_sample;
        let cd_start = channel.tmp_start_index;

        for y in (0..y_sample_count).step_by(BLOCK_SAMPLE_COUNT) {
            // Compute the indices into the output (decompressed) buffer. We have 4 rows,
            // because we will uncompress 4 by 4 data blocks.
            let mut row0 = cd_start + y * x_byte_count;
            let mut row1 = row0 + x_byte_count;
            let mut row2 = row1 + x_byte_count;
            let mut row3 = row2 + x_byte_count;

            // Move along the pixel x line, 4 by 4.
            for x in (0..x_sample_count).step_by(BLOCK_SAMPLE_COUNT) {

                // Extract the 4 by 4 block of 16-bit floats from the compressed buffer.
                let mut s = [0u16; 16];

                if remaining < 3 {
                    return Err(Error::invalid("not enough data"));
                }

                // A third byte of 0xfc (shift exponent 63) marks a 3-byte flat block, and
                // shift exponents of 13 or more cannot occur in a valid 14-byte block,
                // so unpack 3 bytes here (ignoring the unused bits) and 14 bytes otherwise.
                if compressed[in_i + 2] >= (13 << 2) {
                    unpack3(&compressed[in_i..(in_i + 3)], &mut s);

                    in_i += 3;
                    remaining -= 3;
                } else {
                    if remaining < 14 {
                        return Err(Error::invalid("not enough data"));
                    }

                    unpack14(&compressed[in_i..(in_i + 14)], &mut s);

                    in_i += 14;
                    remaining -= 14;
                }

                if channel.quantize_linearly {
                    convert_to_linear(&mut s);
                }

                // Number of remaining samples in the line to copy into the temp buffer
                // (without reading past the end of the channel).
                let x_resting_sample_count = match x + 3 < x_sample_count {
                    true => BLOCK_SAMPLE_COUNT,
                    false => x_sample_count - x,
                };

                debug_assert!(x_resting_sample_count > 0);
                debug_assert!(x_resting_sample_count <= BLOCK_SAMPLE_COUNT);

                // Copy the rows (without writing past the end of the channel).
                if y + 3 < y_sample_count {
                    cpy_u8(&s, 0, &mut tmp, row0, x_resting_sample_count);
                    cpy_u8(&s, 4, &mut tmp, row1, x_resting_sample_count);
                    cpy_u8(&s, 8, &mut tmp, row2, x_resting_sample_count);
                    cpy_u8(&s, 12, &mut tmp, row3, x_resting_sample_count);
                } else {
                    debug_assert!(y < y_sample_count);

                    cpy_u8(&s, 0, &mut tmp, row0, x_resting_sample_count);

                    if y + 1 < y_sample_count {
                        cpy_u8(&s, 4, &mut tmp, row1, x_resting_sample_count);
                    }

                    if y + 2 < y_sample_count {
                        cpy_u8(&s, 8, &mut tmp, row2, x_resting_sample_count);
                    }
                }

                // Advance the row indices to the next 4 pixels.
                row0 += BLOCK_X_BYTE_COUNT;
                row1 += BLOCK_X_BYTE_COUNT;
                row2 += BLOCK_X_BYTE_COUNT;
                row3 += BLOCK_X_BYTE_COUNT;
            }
        }
    }

    debug_assert_eq!(tmp.len(), expected_byte_size);

    // Interleave the uncompressed channel data.
    let mut out = Vec::with_capacity(expected_byte_size);

    for y in rectangle.position.y()..rectangle.end().y() {
        for channel in &mut channel_data {
            if mod_p(y, usize_to_i32(channel.y_sampling)) != 0 {
                continue;
            }

            // Find the data location in the temporary buffer.
            let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
            let bytes_per_line = x_sample_count * channel.sample_type.bytes_per_sample();
            let next_tmp_end_index = channel.tmp_end_index + bytes_per_line;
            let channel_bytes = &tmp[channel.tmp_end_index..next_tmp_end_index];

            channel.tmp_end_index = next_tmp_end_index;

            // TODO do not convert endianness for f16-only images
            // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
            // We can support uncompressed data in the machine's native format
            // if all image channels are of type HALF, and if the Xdr and the
            // native representations of a half have the same size.

            if channel.sample_type == SampleType::F16 {
                // TODO simplify this and make it a memcpy on little endian systems
                // https://github.com/AcademySoftwareFoundation/openexr/blob/a03aca31fa1ce85d3f28627dbb3e5ded9494724a/src/lib/OpenEXR/ImfB44Compressor.cpp#L943
                for mut f16_bytes in channel_bytes.chunks(std::mem::size_of::<f16>()) {
                    let native_endian_f16_bits = u16::read_from_little_endian(&mut f16_bytes).expect("memory read failed");
                    out.write_as_native_endian(&native_endian_f16_bits).expect("memory write failed");
                }
            }
            else {
                u8::write_slice(&mut out, channel_bytes)
                    .expect("write to in-memory failed");
            }
        }
    }

    for index in 1..channel_data.len() {
        debug_assert_eq!(
            channel_data[index - 1].tmp_end_index,
            channel_data[index].tmp_start_index
        );
    }

    debug_assert_eq!(out.len(), expected_byte_size);

    // TODO do not convert endianness for f16-only images
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    Ok(super::convert_little_endian_to_current(out, channels, rectangle))
}

pub fn compress(
    channels: &ChannelList,
    uncompressed: ByteVec,
    rectangle: IntegerBounds,
    optimize_flat_fields: bool,
) -> Result<ByteVec> {
    if uncompressed.is_empty() {
        return Ok(Vec::new());
    }

    // TODO do not convert endianness for f16-only images
    // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
    let uncompressed = super::convert_current_to_little_endian(uncompressed, channels, rectangle);
    let uncompressed = uncompressed.as_slice(); // TODO no alloc

    let mut channel_data = Vec::new();

    let mut tmp_end_index = 0;
    for channel in &channels.list {
        let number_samples = channel.subsampled_resolution(rectangle.size);

        let sample_count = number_samples.area();
        let byte_count = sample_count * channel.sample_type.bytes_per_sample();

        let channel = ChannelData {
            tmp_start_index: tmp_end_index,
            tmp_end_index,
            y_sampling: channel.sampling.y(),
            resolution: number_samples,
            sample_type: channel.sample_type,
            quantize_linearly: channel.quantize_linearly,
            samples_per_pixel: channel.sampling.area(),
        };

        tmp_end_index += byte_count;
        channel_data.push(channel);
    }

    let mut tmp = vec![0_u8; uncompressed.len()];

    debug_assert_eq!(tmp_end_index, tmp.len());

    let mut remaining_uncompressed_bytes = uncompressed;

    for y in rectangle.position.y()..rectangle.end().y() {
        for channel in &mut channel_data {
            if mod_p(y, usize_to_i32(channel.y_sampling)) != 0 {
                continue;
            }

            let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
            let bytes_per_line = x_sample_count * channel.sample_type.bytes_per_sample();
            let next_tmp_end_index = channel.tmp_end_index + bytes_per_line;
            let target = &mut tmp[channel.tmp_end_index..next_tmp_end_index];

            channel.tmp_end_index = next_tmp_end_index;

            // TODO do not convert endianness for f16-only images
            // see https://github.com/AcademySoftwareFoundation/openexr/blob/3bd93f85bcb74c77255f28cdbb913fdbfbb39dfe/OpenEXR/IlmImf/ImfTiledOutputFile.cpp#L750-L842
            // We can support uncompressed data in the machine's native format
            // if all image channels are of type HALF, and if the Xdr and the
            // native representations of a half have the same size.

            if channel.sample_type == SampleType::F16 {

                // TODO simplify this and make it a memcpy on little endian systems
                // https://github.com/AcademySoftwareFoundation/openexr/blob/a03aca31fa1ce85d3f28627dbb3e5ded9494724a/src/lib/OpenEXR/ImfB44Compressor.cpp#L640

                for mut out_f16_bytes in target.chunks_mut(2) {
                    let native_endian_f16_bits = u16::read_from_native_endian(&mut remaining_uncompressed_bytes).expect("memory read failed");
                    out_f16_bytes.write_as_little_endian(&native_endian_f16_bits).expect("memory write failed");
                }
            }
            else {
                u8::read_slice(&mut remaining_uncompressed_bytes, target)
                    .expect("in-memory read failed");
            }
        }
    }

    // Generate a generous buffer that we will crop to the proper size once compression is done.
    let mut b44_compressed = vec![0; std::cmp::max(2048, uncompressed.len())];
    let mut b44_end = 0; // Buffer byte index for storing the next compressed values.

    for channel in &channel_data {
        // U32 and F32 channels are copied raw.
        if channel.sample_type != SampleType::F16 {

            debug_assert_eq!(channel.sample_type.bytes_per_sample(), 4);

            // Raw byte copy.
            let slice = &tmp[channel.tmp_start_index..channel.tmp_end_index];
            slice.iter().copied().for_each(|b| {
                b44_compressed[b44_end] = b;
                b44_end += 1;
            });

            continue;
        }

        // HALF channel.
        debug_assert_eq!(channel.sample_type, SampleType::F16);
        debug_assert_eq!(channel.sample_type.bytes_per_sample(), size_of::<u16>());

        let x_sample_count = channel.resolution.x() * channel.samples_per_pixel;
        let y_sample_count = channel.resolution.y() * channel.samples_per_pixel;

        let x_byte_count = x_sample_count * size_of::<u16>();
        let cd_start = channel.tmp_start_index;

        for y in (0..y_sample_count).step_by(BLOCK_SAMPLE_COUNT) {
            // Copy the next 4x4 pixel block into the array `s`. If the width or the
            // height of the pixel data in `tmp` is not divisible by 4, pad the data
            // by repeating the rightmost column and the bottom row.

            // Compute the row indices in the temp buffer.
            let mut row0 = cd_start + y * x_byte_count;
            let mut row1 = row0 + x_byte_count;
            let mut row2 = row1 + x_byte_count;
            let mut row3 = row2 + x_byte_count;

            if y + 3 >= y_sample_count {
                if y + 1 >= y_sample_count {
                    row1 = row0;
                }

                if y + 2 >= y_sample_count {
                    row2 = row1;
                }

                row3 = row2;
            }

            for x in (0..x_sample_count).step_by(BLOCK_SAMPLE_COUNT) {
                let mut s = [0u16; 16];

                if x + 3 >= x_sample_count {
                    let n = x_sample_count - x;

                    for i in 0..BLOCK_SAMPLE_COUNT {
                        let j = min(i, n - 1) * 2;

                        // TODO: Make the [u8; 2] to u16 conversion fast.
                        s[i] = u16::from_ne_bytes([tmp[row0 + j], tmp[row0 + j + 1]]);
                        s[i + 4] = u16::from_ne_bytes([tmp[row1 + j], tmp[row1 + j + 1]]);
                        s[i + 8] = u16::from_ne_bytes([tmp[row2 + j], tmp[row2 + j + 1]]);
                        s[i + 12] = u16::from_ne_bytes([tmp[row3 + j], tmp[row3 + j + 1]]);
                    }
                } else {
                    memcpy_u8_to_u16(&tmp[row0..(row0 + BLOCK_X_BYTE_COUNT)], &mut s[0..4]);
                    memcpy_u8_to_u16(&tmp[row1..(row1 + BLOCK_X_BYTE_COUNT)], &mut s[4..8]);
                    memcpy_u8_to_u16(&tmp[row2..(row2 + BLOCK_X_BYTE_COUNT)], &mut s[8..12]);
                    memcpy_u8_to_u16(&tmp[row3..(row3 + BLOCK_X_BYTE_COUNT)], &mut s[12..16]);
                }

                // Move to the next block.
                row0 += BLOCK_X_BYTE_COUNT;
                row1 += BLOCK_X_BYTE_COUNT;
                row2 += BLOCK_X_BYTE_COUNT;
                row3 += BLOCK_X_BYTE_COUNT;

                // Compress the contents of array `s` and append the results to the output buffer.
                if channel.quantize_linearly {
                    convert_from_linear(&mut s);
                }

                b44_end += pack(
                    s,
                    &mut b44_compressed[b44_end..(b44_end + 14)],
                    optimize_flat_fields,
                    !channel.quantize_linearly,
                );
            }
        }
    }

    b44_compressed.resize(b44_end, 0);

    Ok(b44_compressed)
}

#[cfg(test)]
mod test {
    use crate::compression::b44;
    use crate::compression::b44::{convert_from_linear, convert_to_linear};
    use crate::compression::ByteVec;
    use crate::image::validate_results::ValidateResult;
    use crate::meta::attribute::ChannelList;
    use crate::prelude::f16;
    use crate::prelude::*;

    #[test]
    fn test_convert_from_to_linear() {
        // Create two identical arrays with random floats.
        let mut s1 = [0u16; 16];

        for i in 0..16 {
            s1[i] = f16::from_f32(rand::random::<f32>()).to_bits();
        }

        let s2 = s1.clone();

        // Apply the two conversions, which should roundtrip within a small tolerance.
        convert_from_linear(&mut s1);
        convert_to_linear(&mut s1);

        // And check.
        for (u1, u2) in s1.iter().zip(&s2) {
            let f1 = f16::from_bits(*u1).to_f64();
            let f2 = f16::from_bits(*u2).to_f64();
            assert!((f1 - f2).abs() < 0.01);
        }
    }
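
    // A small extra sanity check: a block of 16 identical values should take
    // the 3-byte flat-field path of `pack`, and `unpack3` should restore the
    // constant block exactly.
    #[test]
    fn pack_flat_block_uses_3_bytes() {
        let s = [f16::from_f32(1.0).to_bits(); 16];

        let mut packed = [0u8; 14];
        let packed_size = b44::pack(s, &mut packed, true, false);
        assert_eq!(packed_size, 3);
        assert_eq!(packed[2], 0xfc);

        let mut unpacked = [0u16; 16];
        b44::unpack3(&packed[..3], &mut unpacked);
        assert_eq!(unpacked, s);
    }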

    fn test_roundtrip_noise_with(
        channels: ChannelList,
        rectangle: IntegerBounds,
    ) -> (ByteVec, ByteVec, ByteVec) {
        let byte_count = channels
            .list
            .iter()
            .map(|c| {
                c.subsampled_resolution(rectangle.size).area() * c.sample_type.bytes_per_sample()
            })
            .sum();

        assert!(byte_count > 0);

        let pixel_bytes: ByteVec = (0..byte_count).map(|_| rand::random()).collect();

        assert_eq!(pixel_bytes.len(), byte_count);

        let compressed = b44::compress(&channels, pixel_bytes.clone(), rectangle, true).unwrap();

        let decompressed =
            b44::decompress(&channels, compressed.clone(), rectangle, pixel_bytes.len(), true).unwrap();

        assert_eq!(decompressed.len(), pixel_bytes.len());

        (pixel_bytes, compressed, decompressed)
    }
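
    // A small extra sanity check: a 4x4 block whose values span only a few ULPs
    // packs with a shift of zero, so no bits are rounded away and `unpack14`
    // should reconstruct the block exactly.
    #[test]
    fn pack_small_gradient_roundtrips_exactly() {
        let mut s = [0u16; 16];
        for i in 0..16 {
            s[i] = f16::from_f32(1.0).to_bits() + i as u16; // 1.0 plus i ULPs
        }

        let mut packed = [0u8; 14];
        let packed_size = b44::pack(s, &mut packed, false, true);
        assert_eq!(packed_size, 14);

        let mut unpacked = [0u16; 16];
        b44::unpack14(&packed, &mut unpacked);
        assert_eq!(unpacked, s);
    }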

    #[test]
    fn roundtrip_noise_f16() {
        let channel = ChannelDescription {
            sample_type: SampleType::F16,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        };

        // Two similar channels.
        let channels = ChannelList::new(smallvec![channel.clone(), channel]);

        let rectangle = IntegerBounds {
            position: Vec2(-30, 100),
            size: Vec2(322, 731),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        // In my tests, B44 gives a size of 44.08% of the original data (this assertion
        // requires enough pixels to be meaningful).
        assert_eq!(pixel_bytes.len(), 941528);
        assert_eq!(compressed.len(), 415044);
        assert_eq!(decompressed.len(), 941528);
    }

    #[test]
    fn roundtrip_noise_f16_tiny() {
        let channel = ChannelDescription {
            sample_type: SampleType::F16,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        };

        // Two similar channels.
        let channels = ChannelList::new(smallvec![channel.clone(), channel]);

        let rectangle = IntegerBounds {
            position: Vec2(0, 0),
            size: Vec2(3, 2),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        // Because B44 works on 4 by 4 blocks, compression is less efficient for tiny images.
        assert_eq!(pixel_bytes.len(), 24);
        assert_eq!(compressed.len(), 28);
        assert_eq!(decompressed.len(), 24);
    }

    #[test]
    fn roundtrip_noise_f32() {
        let channel = ChannelDescription {
            sample_type: SampleType::F32,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        };

        // Two similar channels.
        let channels = ChannelList::new(smallvec![channel.clone(), channel]);

        let rectangle = IntegerBounds {
            position: Vec2(-30, 100),
            size: Vec2(322, 731),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        assert_eq!(pixel_bytes.len(), 1883056);
        assert_eq!(compressed.len(), 1883056);
        assert_eq!(decompressed.len(), 1883056);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_f32_tiny() {
        let channel = ChannelDescription {
            sample_type: SampleType::F32,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        };

        // Two similar channels.
        let channels = ChannelList::new(smallvec![channel.clone(), channel]);

        let rectangle = IntegerBounds {
            position: Vec2(0, 0),
            size: Vec2(3, 2),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        assert_eq!(pixel_bytes.len(), 48);
        assert_eq!(compressed.len(), 48);
        assert_eq!(decompressed.len(), 48);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_u32() {
        let channel = ChannelDescription {
            sample_type: SampleType::U32,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        };

        // Two similar channels.
        let channels = ChannelList::new(smallvec![channel.clone(), channel]);

        let rectangle = IntegerBounds {
            position: Vec2(-30, 100),
            size: Vec2(322, 731),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        assert_eq!(pixel_bytes.len(), 1883056);
        assert_eq!(compressed.len(), 1883056);
        assert_eq!(decompressed.len(), 1883056);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_u32_tiny() {
        let channel = ChannelDescription {
            sample_type: SampleType::U32,
            name: Default::default(),
            quantize_linearly: false,
            sampling: Vec2(1, 1),
        };

        // Two similar channels.
        let channels = ChannelList::new(smallvec![channel.clone(), channel]);

        let rectangle = IntegerBounds {
            position: Vec2(0, 0),
            size: Vec2(3, 2),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        assert_eq!(pixel_bytes.len(), 48);
        assert_eq!(compressed.len(), 48);
        assert_eq!(decompressed.len(), 48);
        assert_eq!(pixel_bytes, decompressed);
    }

    #[test]
    fn roundtrip_noise_mix_f32_f16_u32() {
        let channels = ChannelList::new(smallvec![
            ChannelDescription {
                sample_type: SampleType::F32,
                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1, 1),
            },
            ChannelDescription {
                sample_type: SampleType::F16,
                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1, 1),
            },
            ChannelDescription {
                sample_type: SampleType::U32,
                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1, 1),
            }
        ]);

        let rectangle = IntegerBounds {
            position: Vec2(-30, 100),
            size: Vec2(322, 731),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        assert_eq!(pixel_bytes.len(), 2353820);
        assert_eq!(compressed.len(), 2090578);
        assert_eq!(decompressed.len(), 2353820);
    }

    #[test]
    fn roundtrip_noise_mix_f32_f16_u32_tiny() {
        let channels = ChannelList::new(smallvec![
            ChannelDescription {
                sample_type: SampleType::F32,
                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1, 1),
            },
            ChannelDescription {
                sample_type: SampleType::F16,
                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1, 1),
            },
            ChannelDescription {
                sample_type: SampleType::U32,
                name: Default::default(),
                quantize_linearly: false,
                sampling: Vec2(1, 1),
            }
        ]);

        let rectangle = IntegerBounds {
            position: Vec2(0, 0),
            size: Vec2(3, 2),
        };

        let (pixel_bytes, compressed, decompressed) =
            test_roundtrip_noise_with(channels, rectangle);

        assert_eq!(pixel_bytes.len(), 60);
        assert_eq!(compressed.len(), 62);
        assert_eq!(decompressed.len(), 60);
    }

    #[test]
    fn border_on_multiview() {
        // This case is hard to reproduce synthetically, so we use an actual image file.
        let path = "tests/images/valid/openexr/MultiView/Adjuster.exr";

        let read_image = read()
            .no_deep_data()
            .all_resolution_levels()
            .all_channels()
            .all_layers()
            .all_attributes()
            .non_parallel();

        let image = read_image.clone().from_file(path).unwrap();

        let mut tmp_bytes = Vec::new();
        image
            .write()
            .non_parallel()
            .to_buffered(std::io::Cursor::new(&mut tmp_bytes))
            .unwrap();

        let image2 = read_image
            .from_buffered(std::io::Cursor::new(tmp_bytes))
            .unwrap();

        image.assert_equals_result(&image2);
    }
}