1// Copyright (c) 2018-2022, The rav1e contributors. All rights reserved
2//
3// This source code is subject to the terms of the BSD 2 Clause License and
4// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
5// was not distributed with this source code in the LICENSE file, you can
6// obtain it at www.aomedia.org/license/software. If the Alliance for Open
7// Media Patent License 1.0 was not distributed with this source code in the
8// PATENTS file, you can obtain it at www.aomedia.org/license/patent.
9
10use crate::api::*;
11use crate::context::*;
12use crate::ec::*;
13use crate::lrf::*;
14use crate::partition::*;
15use crate::tiling::MAX_TILE_WIDTH;
16use crate::util::Fixed;
17use crate::util::Pixel;
18
19use crate::DeblockState;
20use crate::FrameInvariants;
21use crate::FrameState;
22use crate::SegmentationState;
23use crate::Sequence;
24
25use arrayvec::ArrayVec;
26use bitstream_io::{BigEndian, BitWrite, BitWriter, LittleEndian};
27
28use std::io;
29
30pub const PRIMARY_REF_NONE: u32 = 7;
31pub const ALL_REF_FRAMES_MASK: u32 = (1 << REF_FRAMES) - 1;
32
33const PRIMARY_REF_BITS: u32 = 3;
34
35#[allow(unused)]
36const OP_POINTS_IDC_BITS: usize = 12;
37#[allow(unused)]
38const LEVEL_MAJOR_MIN: usize = 2;
39#[allow(unused)]
40const LEVEL_MAJOR_BITS: usize = 3;
41#[allow(unused)]
42const LEVEL_MINOR_BITS: usize = 2;
43#[allow(unused)]
44const LEVEL_BITS: usize = LEVEL_MAJOR_BITS + LEVEL_MINOR_BITS;
45
46#[allow(dead_code, non_camel_case_types)]
47#[derive(Debug, Clone, Copy, PartialEq, Eq)]
48pub enum ReferenceMode {
49 SINGLE = 0,
50 COMPOUND = 1,
51 SELECT = 2,
52}
53
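// OBU types from the AV1 bitstream specification; the discriminants are the
// values written into the 4-bit obu_type field of the OBU header.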
54#[allow(non_camel_case_types)]
55#[allow(unused)]
56pub enum ObuType {
57 OBU_SEQUENCE_HEADER = 1,
58 OBU_TEMPORAL_DELIMITER = 2,
59 OBU_FRAME_HEADER = 3,
60 OBU_TILE_GROUP = 4,
61 OBU_METADATA = 5,
62 OBU_FRAME = 6,
63 OBU_REDUNDANT_FRAME_HEADER = 7,
64 OBU_TILE_LIST = 8,
65 OBU_PADDING = 15,
66}
67
68#[derive(Clone, Copy)]
69#[allow(non_camel_case_types)]
70#[allow(unused)]
71pub enum ObuMetaType {
72 OBU_META_HDR_CLL = 1,
73 OBU_META_HDR_MDCV = 2,
74 OBU_META_SCALABILITY = 3,
75 OBU_META_ITUT_T35 = 4,
76 OBU_META_TIMECODE = 5,
77}
78
79impl ObuMetaType {
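  // Fixed payload size in bytes for the metadata types rav1e writes with a
  // constant length: 4 for HDR CLL (two 16-bit fields) and 24 for HDR MDCV
  // (three 16-bit primary pairs, a white point pair and two 32-bit luminances).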
80 const fn size(self) -> u64 {
81 use self::ObuMetaType::*;
82 match self {
83 OBU_META_HDR_CLL => 4,
84 OBU_META_HDR_MDCV => 24,
85 _ => 0,
86 }
87 }
88}
89
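// Unsigned LEB128 as used for OBU sizes: the value is emitted in 7-bit groups,
// least-significant group first, with the top bit of each byte set while more
// bytes follow (e.g. 128 encodes as 0x80 0x01).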
90pub trait ULEB128Writer {
91 fn write_uleb128(&mut self, payload: u64) -> io::Result<()>;
92}
93
94impl<W: io::Write> ULEB128Writer for BitWriter<W, BigEndian> {
95 fn write_uleb128(&mut self, payload: u64) -> io::Result<()> {
96 // NOTE from libaom:
97 // Disallow values larger than 32-bits to ensure consistent behavior on 32 and
98 // 64 bit targets: value is typically used to determine buffer allocation size
99 // when decoded.
100 let mut coded_value: ArrayVec<u8, 8> = ArrayVec::new();
101
102 let mut value = payload as u32;
103 loop {
104 let mut byte = (value & 0x7f) as u8;
105 value >>= 7u8;
106 if value != 0 {
107 // Signal that more bytes follow.
108 byte |= 0x80;
109 }
110 coded_value.push(byte);
111
112 if value == 0 {
113 // We have to break at the end of the loop
114 // because there must be at least one byte written.
115 break;
116 }
117 }
118
119 for byte in coded_value {
120 self.write(8, byte)?;
121 }
122 Ok(())
123 }
124}
125
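// Some byte-aligned fields in AV1, such as tile sizes, are stored as
// fixed-width little-endian values; this helper lets them be emitted from the
// big-endian BitWriter used for everything else.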
126pub trait LEWriter {
127 fn write_le(&mut self, bytes: u32, payload: u64) -> io::Result<()>;
128}
129
130// to write little endian values in a globally big-endian BitWriter
131impl<W: io::Write> LEWriter for BitWriter<W, BigEndian> {
132 fn write_le(&mut self, bytes: u32, value: u64) -> io::Result<()> {
    let mut data = Vec::new();
    let mut bwle = BitWriter::endian(&mut data, LittleEndian);
    bwle.write(bytes * 8, value)?;
136 self.write_bytes(&data)
137 }
138}
139
140pub trait UncompressedHeader {
141 // Start of OBU Headers
142 fn write_obu_header(
143 &mut self, obu_type: ObuType, obu_extension: u32,
144 ) -> io::Result<()>;
145 fn write_sequence_metadata_obu(
146 &mut self, obu_meta_type: ObuMetaType, seq: &Sequence,
147 ) -> io::Result<()>;
148 fn write_sequence_header_obu<T: Pixel>(
149 &mut self, fi: &FrameInvariants<T>,
150 ) -> io::Result<()>;
151 fn write_frame_header_obu<T: Pixel>(
152 &mut self, fi: &FrameInvariants<T>, fs: &FrameState<T>,
153 inter_cfg: &InterConfig,
154 ) -> io::Result<()>;
155 fn write_sequence_header<T: Pixel>(
156 &mut self, fi: &FrameInvariants<T>,
157 ) -> io::Result<()>;
158 fn write_color_config(&mut self, seq: &Sequence) -> io::Result<()>;
159 fn write_t35_metadata_obu(&mut self, t35: &T35) -> io::Result<()>;
160 // End of OBU Headers
161
162 fn write_max_frame_size<T: Pixel>(
163 &mut self, fi: &FrameInvariants<T>,
164 ) -> io::Result<()>;
165 fn write_frame_size<T: Pixel>(
166 &mut self, fi: &FrameInvariants<T>,
167 ) -> io::Result<()>;
168 fn write_render_size<T: Pixel>(
169 &mut self, fi: &FrameInvariants<T>,
170 ) -> io::Result<()>;
171 fn write_frame_size_with_refs<T: Pixel>(
172 &mut self, fi: &FrameInvariants<T>,
173 ) -> io::Result<()>;
174 fn write_deblock_filter_a<T: Pixel>(
175 &mut self, fi: &FrameInvariants<T>, deblock: &DeblockState,
176 ) -> io::Result<()>;
177 fn write_deblock_filter_b<T: Pixel>(
178 &mut self, fi: &FrameInvariants<T>, deblock: &DeblockState,
179 ) -> io::Result<()>;
180 fn write_frame_cdef<T: Pixel>(
181 &mut self, fi: &FrameInvariants<T>,
182 ) -> io::Result<()>;
183 fn write_frame_lrf<T: Pixel>(
184 &mut self, fi: &FrameInvariants<T>, rs: &RestorationState,
185 ) -> io::Result<()>;
186 fn write_segment_data<T: Pixel>(
187 &mut self, fi: &FrameInvariants<T>, segmentation: &SegmentationState,
188 ) -> io::Result<()>;
189 fn write_delta_q(&mut self, delta_q: i8) -> io::Result<()>;
190}
191
192impl<W: io::Write> UncompressedHeader for BitWriter<W, BigEndian> {
193 // Start of OBU Headers
194 // Write OBU Header syntax
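  // Layout: forbidden bit (1), obu_type (4), extension flag (1),
  // obu_has_size_field (1), reserved (1) -- one byte when no extension is used.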
195 fn write_obu_header(
196 &mut self, obu_type: ObuType, obu_extension: u32,
197 ) -> io::Result<()> {
198 self.write_bit(false)?; // forbidden bit.
199 self.write(4, obu_type as u32)?;
200 self.write_bit(obu_extension != 0)?;
201 self.write_bit(true)?; // obu_has_payload_length_field
202 self.write_bit(false)?; // reserved
203
204 if obu_extension != 0 {
205 unimplemented!();
206 //self.write(8, obu_extension & 0xFF)?; size += 8;
207 }
208
209 Ok(())
210 }
211
212 fn write_sequence_metadata_obu(
213 &mut self, obu_meta_type: ObuMetaType, seq: &Sequence,
214 ) -> io::Result<()> {
215 // header
216 self.write_obu_header(ObuType::OBU_METADATA, 0)?;
217
218 // uleb128() - length
219 // we use a constant value to avoid computing the OBU size every time
220 // since it is fixed (depending on the metadata)
221 // +2 is for the metadata_type field and the trailing bits byte
222 self.write_uleb128(obu_meta_type.size() + 2)?;
223
224 // uleb128() - metadata_type (1 byte)
225 self.write_uleb128(obu_meta_type as u64)?;
226
227 match obu_meta_type {
228 ObuMetaType::OBU_META_HDR_CLL => {
229 let cll = seq.content_light.unwrap();
230 self.write(16, cll.max_content_light_level)?;
231 self.write(16, cll.max_frame_average_light_level)?;
232 }
233 ObuMetaType::OBU_META_HDR_MDCV => {
234 let mdcv = seq.mastering_display.unwrap();
235 for i in 0..3 {
236 self.write(16, mdcv.primaries[i].x)?;
237 self.write(16, mdcv.primaries[i].y)?;
238 }
239
240 self.write(16, mdcv.white_point.x)?;
241 self.write(16, mdcv.white_point.y)?;
242
243 self.write(32, mdcv.max_luminance)?;
244 self.write(32, mdcv.min_luminance)?;
245 }
246 _ => {}
247 }
248
249 // trailing bits (1 byte)
250 self.write_bit(true)?;
251 self.byte_align()?;
252
253 Ok(())
254 }
255
256 fn write_t35_metadata_obu(&mut self, t35: &T35) -> io::Result<()> {
257 self.write_obu_header(ObuType::OBU_METADATA, 0)?;
258
259 // metadata type + country code + optional extension + trailing bits
260 self.write_uleb128(
261 t35.data.len() as u64 + if t35.country_code == 0xFF { 4 } else { 3 },
262 )?;
263
264 self.write_uleb128(ObuMetaType::OBU_META_ITUT_T35 as u64)?;
265
266 self.write(8, t35.country_code)?;
267 if t35.country_code == 0xFF {
268 self.write(8, t35.country_code_extension_byte)?;
269 }
270 self.write_bytes(&t35.data)?;
271
272 // trailing bits (1 byte)
273 self.write_bit(true)?;
274 self.byte_align()?;
275
276 Ok(())
277 }
278
279 fn write_sequence_header_obu<T: Pixel>(
280 &mut self, fi: &FrameInvariants<T>,
281 ) -> io::Result<()> {
282 assert!(
283 !fi.sequence.reduced_still_picture_hdr || fi.sequence.still_picture
284 );
285
286 self.write(3, fi.sequence.profile)?; // profile
287 self.write_bit(fi.sequence.still_picture)?; // still_picture
288 self.write_bit(fi.sequence.reduced_still_picture_hdr)?; // reduced_still_picture_header
289
290 assert!(fi.sequence.level_idx[0] <= 31);
291 if fi.sequence.reduced_still_picture_hdr {
292 assert!(!fi.sequence.timing_info_present);
293 assert!(!fi.sequence.decoder_model_info_present_flag);
294 assert_eq!(fi.sequence.operating_points_cnt_minus_1, 0);
295 assert_eq!(fi.sequence.operating_point_idc[0], 0);
296 self.write(5, fi.sequence.level_idx[0])?; // level
297 assert_eq!(fi.sequence.tier[0], 0);
298 } else {
299 self.write_bit(fi.sequence.timing_info_present)?; // timing info present
300
301 if fi.sequence.timing_info_present {
302 self.write(32, fi.sequence.time_base.num)?;
303 self.write(32, fi.sequence.time_base.den)?;
304
305 self.write_bit(true)?; // equal picture interval
306 self.write_bit(true)?; // zero interval
307 self.write_bit(false)?; // decoder model info present flag
308 }
309
310 self.write_bit(false)?; // initial display delay present flag
311 self.write(5, 0)?; // one operating point
312 self.write(12, 0)?; // idc
313 self.write(5, fi.sequence.level_idx[0])?; // level
314 if fi.sequence.level_idx[0] > 7 {
315 self.write(1, 0)?; // tier
316 }
317 }
318
319 self.write_sequence_header(fi)?;
320
321 self.write_color_config(&fi.sequence)?;
322
323 self.write_bit(fi.sequence.film_grain_params_present)?;
324
325 Ok(())
326 }
327
328 fn write_sequence_header<T: Pixel>(
329 &mut self, fi: &FrameInvariants<T>,
330 ) -> io::Result<()> {
331 self.write_max_frame_size(fi)?;
332
333 let seq = &fi.sequence;
334
335 if seq.reduced_still_picture_hdr {
336 assert!(!seq.frame_id_numbers_present_flag);
337 } else {
338 self.write_bit(seq.frame_id_numbers_present_flag)?;
339 }
340
341 if seq.frame_id_numbers_present_flag {
342 // We must always have delta_frame_id_length < frame_id_length,
343 // in order for a frame to be referenced with a unique delta.
344 // Avoid wasting bits by using a coding that enforces this restriction.
345 self.write(4, seq.delta_frame_id_length - 2)?;
346 self.write(3, seq.frame_id_length - seq.delta_frame_id_length - 1)?;
347 }
348
349 self.write_bit(seq.use_128x128_superblock)?;
350 self.write_bit(seq.enable_filter_intra)?;
351 self.write_bit(seq.enable_intra_edge_filter)?;
352
353 if seq.reduced_still_picture_hdr {
354 assert!(!seq.enable_interintra_compound);
355 assert!(!seq.enable_masked_compound);
356 assert!(!seq.enable_warped_motion);
357 assert!(!seq.enable_dual_filter);
358 assert!(!seq.enable_order_hint);
359 assert!(!seq.enable_jnt_comp);
360 assert!(!seq.enable_ref_frame_mvs);
361 assert!(seq.force_screen_content_tools == 2);
362 assert!(seq.force_integer_mv == 2);
363 } else {
364 self.write_bit(seq.enable_interintra_compound)?;
365 self.write_bit(seq.enable_masked_compound)?;
366 self.write_bit(seq.enable_warped_motion)?;
367 self.write_bit(seq.enable_dual_filter)?;
368 self.write_bit(seq.enable_order_hint)?;
369
370 if seq.enable_order_hint {
371 self.write_bit(seq.enable_jnt_comp)?;
372 self.write_bit(seq.enable_ref_frame_mvs)?;
373 }
374
375 if seq.force_screen_content_tools == 2 {
376 self.write_bit(true)?;
377 } else {
378 self.write_bit(false)?;
379 self.write_bit(seq.force_screen_content_tools != 0)?;
380 }
381 if seq.force_screen_content_tools > 0 {
382 if seq.force_integer_mv == 2 {
383 self.write_bit(true)?;
384 } else {
385 self.write_bit(false)?;
386 self.write_bit(seq.force_integer_mv != 0)?;
387 }
388 } else {
389 assert!(seq.force_integer_mv == 2);
390 }
391 if seq.enable_order_hint {
392 self.write(3, seq.order_hint_bits_minus_1)?;
393 }
394 }
395
396 self.write_bit(seq.enable_superres)?;
397 self.write_bit(seq.enable_cdef)?;
398 self.write_bit(seq.enable_restoration)?;
399
400 Ok(())
401 }
402
403 // <https://aomediacodec.github.io/av1-spec/#color-config-syntax>
404 fn write_color_config(&mut self, seq: &Sequence) -> io::Result<()> {
405 let high_bitdepth = seq.bit_depth > 8;
406 self.write_bit(high_bitdepth)?;
407 if seq.profile == 2 && high_bitdepth {
408 self.write_bit(seq.bit_depth == 12)?; // twelve_bit
409 }
410
411 let monochrome = seq.chroma_sampling == ChromaSampling::Cs400;
412 if seq.profile == 1 {
413 assert!(!monochrome);
414 } else {
415 self.write_bit(monochrome)?; // mono_chrome
416 }
417
418 // color_description_present_flag
419 self.write_bit(seq.color_description.is_some())?;
420 let mut srgb_triple = false;
421 if let Some(color_description) = seq.color_description {
422 self.write(8, color_description.color_primaries as u8)?;
423 self.write(8, color_description.transfer_characteristics as u8)?;
424 self.write(8, color_description.matrix_coefficients as u8)?;
425 srgb_triple = color_description.is_srgb_triple();
426 }
427
428 if monochrome || !srgb_triple {
429 self.write_bit(seq.pixel_range == PixelRange::Full)?; // color_range
430 }
431 if monochrome {
432 return Ok(());
433 } else if srgb_triple {
434 assert!(seq.pixel_range == PixelRange::Full);
435 assert!(seq.chroma_sampling == ChromaSampling::Cs444);
436 } else {
437 if seq.profile == 0 {
438 assert!(seq.chroma_sampling == ChromaSampling::Cs420);
439 } else if seq.profile == 1 {
440 assert!(seq.chroma_sampling == ChromaSampling::Cs444);
441 } else if seq.bit_depth == 12 {
442 let subsampling_x = seq.chroma_sampling != ChromaSampling::Cs444;
443 let subsampling_y = seq.chroma_sampling == ChromaSampling::Cs420;
444 self.write_bit(subsampling_x)?;
445 if subsampling_x {
446 self.write_bit(subsampling_y)?;
447 }
448 } else {
449 assert!(seq.chroma_sampling == ChromaSampling::Cs422);
450 }
451 if seq.chroma_sampling == ChromaSampling::Cs420 {
452 self.write(2, seq.chroma_sample_position as u32)?;
453 }
454 }
455 self.write_bit(true)?; // separate_uv_delta_q
456
457 Ok(())
458 }
459
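  // Writes the uncompressed frame header; the field order below follows the
  // spec's uncompressed_header() syntax, with unimplemented!() marking paths
  // rav1e does not emit yet.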
460 #[allow(unused)]
461 fn write_frame_header_obu<T: Pixel>(
462 &mut self, fi: &FrameInvariants<T>, fs: &FrameState<T>,
463 inter_cfg: &InterConfig,
464 ) -> io::Result<()> {
465 if fi.sequence.reduced_still_picture_hdr {
466 assert!(!fi.is_show_existing_frame());
467 assert!(fi.frame_type == FrameType::KEY);
468 assert!(fi.show_frame);
469 assert!(!fi.showable_frame);
470 } else {
471 self.write_bit(fi.is_show_existing_frame())?;
472
473 if fi.is_show_existing_frame() {
474 self.write(3, fi.frame_to_show_map_idx)?;
475
476 //TODO:
477 /* temporal_point_info();
478 if fi.sequence.decoder_model_info_present_flag &&
479 timing_info.equal_picture_interval == 0 {
480 // write frame_presentation_delay;
481 }
482 if fi.sequence.frame_id_numbers_present_flag {
483 // write display_frame_id;
484 }*/
485
486 self.write_bit(true)?; // trailing bit
487 self.byte_align()?;
488 return Ok(());
489 }
490
491 self.write(2, fi.frame_type as u32)?;
492 self.write_bit(fi.show_frame)?; // show frame
493
494 if fi.show_frame {
495 //TODO:
496 /* temporal_point_info();
497 if fi.sequence.decoder_model_info_present_flag &&
498 timing_info.equal_picture_interval == 0 {
499 // write frame_presentation_delay;
500 }*/
501 } else {
502 self.write_bit(fi.showable_frame)?;
503 }
504
505 if fi.error_resilient {
506 assert!(fi.primary_ref_frame == PRIMARY_REF_NONE);
507 }
508 if fi.frame_type == FrameType::SWITCH {
509 assert!(fi.error_resilient);
510 } else if !(fi.frame_type == FrameType::KEY && fi.show_frame) {
511 self.write_bit(fi.error_resilient)?; // error resilient
512 }
513 }
514
515 self.write_bit(fi.disable_cdf_update)?;
516
517 if fi.sequence.force_screen_content_tools == 2 {
518 self.write_bit(fi.allow_screen_content_tools != 0)?;
519 } else {
520 assert!(
521 fi.allow_screen_content_tools
522 == fi.sequence.force_screen_content_tools
523 );
524 }
525
526 if fi.allow_screen_content_tools > 0 {
527 if fi.sequence.force_integer_mv == 2 {
528 self.write_bit(fi.force_integer_mv != 0)?;
529 } else {
530 assert!(fi.force_integer_mv == fi.sequence.force_integer_mv);
531 }
532 }
533
534 assert!(
535 fi.force_integer_mv
536 == u32::from(fi.frame_type == FrameType::KEY || fi.intra_only)
537 );
538
539 if fi.sequence.frame_id_numbers_present_flag {
540 unimplemented!();
541
542 //TODO:
543 //let frame_id_len = fi.sequence.frame_id_length;
544 //self.write(frame_id_len, fi.current_frame_id);
545 }
546
547 if fi.frame_type != FrameType::SWITCH
548 && !fi.sequence.reduced_still_picture_hdr
549 {
      self.write_bit(fi.frame_size_override_flag)?; // frame_size_override_flag
551 }
552
553 if fi.sequence.enable_order_hint {
554 let n = fi.sequence.order_hint_bits_minus_1 + 1;
555 let mask = (1 << n) - 1;
556 self.write(n, fi.order_hint & mask)?;
557 }
558
559 if !fi.error_resilient && !fi.intra_only {
560 self.write(PRIMARY_REF_BITS, fi.primary_ref_frame)?;
561 }
562
563 if fi.sequence.decoder_model_info_present_flag {
564 unimplemented!();
565 }
566
567 if fi.frame_type == FrameType::KEY {
568 if !fi.show_frame {
569 // unshown keyframe (forward keyframe)
570 unimplemented!();
571 self.write(REF_FRAMES as u32, fi.refresh_frame_flags)?;
572 } else {
573 assert!(fi.refresh_frame_flags == ALL_REF_FRAMES_MASK);
574 }
575 } else if fi.frame_type == FrameType::SWITCH {
576 assert!(fi.refresh_frame_flags == ALL_REF_FRAMES_MASK);
577 } else {
578 // Inter frame info goes here
579 if fi.intra_only {
580 assert!(fi.refresh_frame_flags != ALL_REF_FRAMES_MASK);
581 } else {
582 // TODO: This should be set once inter mode is used
583 }
584 self.write(REF_FRAMES as u32, fi.refresh_frame_flags)?;
585 };
586
    if !fi.intra_only || fi.refresh_frame_flags != ALL_REF_FRAMES_MASK {
588 // Write all ref frame order hints if error_resilient_mode == 1
      if fi.error_resilient && fi.sequence.enable_order_hint {
590 for i in 0..REF_FRAMES {
591 let n = fi.sequence.order_hint_bits_minus_1 + 1;
592 let mask = (1 << n) - 1;
593 if let Some(ref rec) = fi.rec_buffer.frames[i] {
594 let ref_hint = rec.order_hint;
595 self.write(n, ref_hint & mask)?;
596 } else {
597 self.write(n, 0)?;
598 }
599 }
600 }
601 }
602
603 // if KEY or INTRA_ONLY frame
604 if fi.intra_only {
605 self.write_frame_size(fi)?;
606 self.write_render_size(fi)?;
607 if fi.allow_screen_content_tools != 0 {
608 // TODO: && UpscaledWidth == FrameWidth.
609 self.write_bit(fi.allow_intrabc)?;
610 }
611 }
612
613 let frame_refs_short_signaling = false;
614 if fi.frame_type == FrameType::KEY || fi.intra_only {
615 // Done by above
616 } else {
617 if fi.sequence.enable_order_hint {
618 self.write_bit(frame_refs_short_signaling)?;
619 if frame_refs_short_signaling {
620 unimplemented!();
621 }
622 }
623
624 for i in 0..INTER_REFS_PER_FRAME {
625 if !frame_refs_short_signaling {
626 self.write(REF_FRAMES_LOG2 as u32, fi.ref_frames[i])?;
627 }
628 if fi.sequence.frame_id_numbers_present_flag {
629 unimplemented!();
630 }
631 }
632
633 if !fi.error_resilient && fi.frame_size_override_flag {
634 self.write_frame_size_with_refs(fi)?;
635 } else {
636 self.write_frame_size(fi)?;
637 self.write_render_size(fi)?;
638 }
639
640 if fi.force_integer_mv == 0 {
        self.write_bit(fi.allow_high_precision_mv)?;
642 }
643
644 self.write_bit(fi.is_filter_switchable)?;
645 if !fi.is_filter_switchable {
646 self.write(2, fi.default_filter as u8)?;
647 }
648 self.write_bit(fi.is_motion_mode_switchable)?;
649
      if !fi.error_resilient && fi.sequence.enable_ref_frame_mvs {
651 self.write_bit(fi.use_ref_frame_mvs)?;
652 }
653 }
654
655 if fi.sequence.reduced_still_picture_hdr || fi.disable_cdf_update {
656 assert!(fi.disable_frame_end_update_cdf);
657 } else {
658 self.write_bit(fi.disable_frame_end_update_cdf)?;
659 }
660
661 // tile
662 // <https://aomediacodec.github.io/av1-spec/#tile-info-syntax>
663
664 // Can we use the uniform spacing tile syntax? 'Uniform spacing'
665 // is a slight misnomer; it's more constrained than just a uniform
666 // spacing.
667 let ti = &fi.sequence.tiling;
668
669 if fi.sb_width.align_power_of_two_and_shift(ti.tile_cols_log2)
670 == ti.tile_width_sb
671 && fi.sb_height.align_power_of_two_and_shift(ti.tile_rows_log2)
672 == ti.tile_height_sb
673 {
674 // yes; our actual tile width/height setting (which is always
675 // currently uniform) also matches the constrained width/height
676 // calculation implicit in the uniform spacing flag.
677
678 self.write_bit(true)?; // uniform_tile_spacing_flag
679
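      // tile_cols_log2/tile_rows_log2 are coded relative to their minima as a
      // run of '1' bits, terminated by a '0' unless already at the maximum.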
680 let cols_ones = ti.tile_cols_log2 - ti.min_tile_cols_log2;
681 for _ in 0..cols_ones {
        self.write_bit(true)?;
683 }
684 if ti.tile_cols_log2 < ti.max_tile_cols_log2 {
        self.write_bit(false)?;
686 }
687
688 let rows_ones = ti.tile_rows_log2 - ti.min_tile_rows_log2;
689 for _ in 0..rows_ones {
        self.write_bit(true)?;
691 }
692 if ti.tile_rows_log2 < ti.max_tile_rows_log2 {
        self.write_bit(false)?;
694 }
695 } else {
696 self.write_bit(false)?; // uniform_tile_spacing_flag
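      // Non-uniform spacing: each tile column/row size in superblocks is coded
      // with write_quniform (the spec's ns() code) against the space remaining.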
697 let mut sofar = 0;
698 let mut widest_tile_sb = 0;
699 for _ in 0..ti.cols {
700 let max = (MAX_TILE_WIDTH
701 >> if fi.sequence.use_128x128_superblock { 7 } else { 6 })
702 .min(fi.sb_width - sofar) as u16;
703 let this_sb_width = ti.tile_width_sb.min(fi.sb_width - sofar);
        self.write_quniform(max, (this_sb_width - 1) as u16)?;
705 sofar += this_sb_width;
706 widest_tile_sb = widest_tile_sb.max(this_sb_width);
707 }
708
709 let max_tile_area_sb = if ti.min_tiles_log2 > 0 {
710 (fi.sb_height * fi.sb_width) >> (ti.min_tiles_log2 + 1)
711 } else {
712 fi.sb_height * fi.sb_width
713 };
714
715 let max_tile_height_sb = (max_tile_area_sb / widest_tile_sb).max(1);
716
717 sofar = 0;
718 for i in 0..ti.rows {
719 let max = max_tile_height_sb.min(fi.sb_height - sofar) as u16;
720 let this_sb_height = ti.tile_height_sb.min(fi.sb_height - sofar);
721
        self.write_quniform(max, (this_sb_height - 1) as u16)?;
723 sofar += this_sb_height;
724 }
725 }
726
727 let tiles_log2 = ti.tile_cols_log2 + ti.tile_rows_log2;
728 if tiles_log2 > 0 {
729 // context_update_tile_id
730 // for now, always use the first tile CDF
731 self.write(tiles_log2 as u32, fs.context_update_tile_id as u32)?;
732
733 // tile_size_bytes_minus_1
      self.write(2, fs.max_tile_size_bytes - 1)?;
735 }
736
737 // quantization
738 assert!(fi.base_q_idx > 0);
739 self.write(8, fi.base_q_idx)?; // base_q_idx
740 self.write_delta_q(fi.dc_delta_q[0])?;
741 if fi.sequence.chroma_sampling != ChromaSampling::Cs400 {
742 assert!(fi.ac_delta_q[0] == 0);
743 let diff_uv_delta = fi.dc_delta_q[1] != fi.dc_delta_q[2]
744 || fi.ac_delta_q[1] != fi.ac_delta_q[2];
745 self.write_bit(diff_uv_delta)?;
746 self.write_delta_q(fi.dc_delta_q[1])?;
747 self.write_delta_q(fi.ac_delta_q[1])?;
748 if diff_uv_delta {
749 self.write_delta_q(fi.dc_delta_q[2])?;
750 self.write_delta_q(fi.ac_delta_q[2])?;
751 }
752 }
753 self.write_bit(false)?; // no qm
754
755 // segmentation
756 self.write_segment_data(fi, &fs.segmentation)?;
757
758 // delta_q
759 self.write_bit(false)?; // delta_q_present_flag: no delta q
760
761 // delta_lf_params in the spec
762 self.write_deblock_filter_a(fi, &fs.deblock)?;
763
764 // code for features not yet implemented....
765
766 // loop_filter_params in the spec
767 self.write_deblock_filter_b(fi, &fs.deblock)?;
768
769 // cdef
770 self.write_frame_cdef(fi)?;
771
772 // loop restoration
773 self.write_frame_lrf(fi, &fs.restoration)?;
774
775 self.write_bit(fi.tx_mode_select)?; // tx mode
776
777 let mut reference_select = false;
778 if !fi.intra_only {
779 reference_select = fi.reference_mode != ReferenceMode::SINGLE;
780 self.write_bit(reference_select)?;
781 }
782
783 let skip_mode_allowed =
784 fi.sequence.get_skip_mode_allowed(fi, inter_cfg, reference_select);
785 if skip_mode_allowed {
786 self.write_bit(false)?; // skip_mode_present
787 }
788
789 if fi.intra_only || fi.error_resilient || !fi.sequence.enable_warped_motion
790 {
791 } else {
792 self.write_bit(fi.allow_warped_motion)?; // allow_warped_motion
793 }
794
795 self.write_bit(fi.use_reduced_tx_set)?; // reduced tx
796
797 // global motion
798 if !fi.intra_only {
799 for i in 0..7 {
800 let mode = fi.globalmv_transformation_type[i];
801 self.write_bit(mode != GlobalMVMode::IDENTITY)?;
802 if mode != GlobalMVMode::IDENTITY {
803 self.write_bit(mode == GlobalMVMode::ROTZOOM)?;
804 if mode != GlobalMVMode::ROTZOOM {
805 self.write_bit(mode == GlobalMVMode::TRANSLATION)?;
806 }
807 }
808 match mode {
809 GlobalMVMode::IDENTITY => { /* Nothing to do */ }
810 GlobalMVMode::TRANSLATION => {
811 let mv_x = 0;
812 let mv_x_ref = 0;
813 let mv_y = 0;
814 let mv_y_ref = 0;
815 let bits = 12 - 6 + 3 - !fi.allow_high_precision_mv as u8;
816 let bits_diff = 12 - 3 + fi.allow_high_precision_mv as u8;
817 BCodeWriter::write_s_refsubexpfin(
818 self,
819 (1 << bits) + 1,
820 3,
821 mv_x_ref >> bits_diff,
822 mv_x >> bits_diff,
823 )?;
824 BCodeWriter::write_s_refsubexpfin(
825 self,
826 (1 << bits) + 1,
827 3,
828 mv_y_ref >> bits_diff,
829 mv_y >> bits_diff,
830 )?;
831 }
832 GlobalMVMode::ROTZOOM => unimplemented!(),
833 GlobalMVMode::AFFINE => unimplemented!(),
834 };
835 }
836 }
837
838 if fi.sequence.film_grain_params_present {
839 if let Some(grain_params) = fi.film_grain_params() {
840 // Apply grain
841 self.write_bit(true)?;
842 self.write(16, grain_params.random_seed)?;
843 if fi.frame_type == FrameType::INTER {
844 // For the purposes of photon noise,
845 // it's simpler to always update the params,
846 // and the output will be the same.
847 self.write_bit(true)?;
848 }
849
850 self.write(4, grain_params.scaling_points_y.len() as u8)?;
851 for point in &grain_params.scaling_points_y {
852 self.write(8, point[0])?;
853 self.write(8, point[1])?;
854 }
855
856 let chroma_scaling_from_luma =
857 if fi.sequence.chroma_sampling != ChromaSampling::Cs400 {
858 self.write_bit(grain_params.chroma_scaling_from_luma)?;
859 grain_params.chroma_scaling_from_luma
860 } else {
861 false
862 };
863 if !(fi.sequence.chroma_sampling == ChromaSampling::Cs400
864 || chroma_scaling_from_luma
865 || (fi.sequence.chroma_sampling == ChromaSampling::Cs420
866 && grain_params.scaling_points_y.is_empty()))
867 {
868 self.write(4, grain_params.scaling_points_cb.len() as u8)?;
869 for point in &grain_params.scaling_points_cb {
870 self.write(8, point[0])?;
871 self.write(8, point[1])?;
872 }
873 self.write(4, grain_params.scaling_points_cr.len() as u8)?;
874 for point in &grain_params.scaling_points_cr {
875 self.write(8, point[0])?;
876 self.write(8, point[1])?;
877 }
878 }
879
880 self.write(2, grain_params.scaling_shift - 8)?;
881 self.write(2, grain_params.ar_coeff_lag)?;
882
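        // An AR filter of lag L has 2*L*(L+1) luma coefficients; chroma gets
        // one extra (luma-driven) coefficient when luma scaling points exist.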
        let num_pos_luma =
884 (2 * grain_params.ar_coeff_lag * (grain_params.ar_coeff_lag + 1))
885 as usize;
        let num_pos_chroma;
887 if !grain_params.scaling_points_y.is_empty() {
888 num_pos_chroma = num_pos_luma + 1;
889 for i in 0..num_pos_luma {
890 self.write(8, grain_params.ar_coeffs_y[i] as i16 + 128)?;
891 }
892 } else {
893 num_pos_chroma = num_pos_luma;
894 }
895
896 if chroma_scaling_from_luma
897 || !grain_params.scaling_points_cb.is_empty()
898 {
899 for i in 0..num_pos_chroma {
900 self.write(8, grain_params.ar_coeffs_cb[i] as i16 + 128)?;
901 }
902 }
903 if chroma_scaling_from_luma
904 || !grain_params.scaling_points_cr.is_empty()
905 {
906 for i in 0..num_pos_chroma {
907 self.write(8, grain_params.ar_coeffs_cr[i] as i16 + 128)?;
908 }
909 }
910
911 self.write(2, grain_params.ar_coeff_shift - 6)?;
912 self.write(2, grain_params.grain_scale_shift)?;
913 if !grain_params.scaling_points_cb.is_empty() {
914 self.write(8, grain_params.cb_mult)?;
915 self.write(8, grain_params.cb_luma_mult)?;
916 self.write(9, grain_params.cb_offset)?;
917 }
918 if !grain_params.scaling_points_cr.is_empty() {
919 self.write(8, grain_params.cr_mult)?;
920 self.write(8, grain_params.cr_luma_mult)?;
921 self.write(9, grain_params.cr_offset)?;
922 }
923 self.write_bit(grain_params.overlap_flag)?;
924 self.write_bit(fi.sequence.pixel_range == PixelRange::Limited)?;
925 } else {
926 // No film grain for this frame
927 self.write_bit(false)?;
928 }
929 }
930
931 if fi.large_scale_tile {
932 unimplemented!();
933 }
934 self.byte_align()?;
935
936 Ok(())
937 }
938 // End of OBU Headers
939
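  // frame_width_bits_minus_1 / frame_height_bits_minus_1 (4 bits each),
  // followed by the maximum dimensions minus one in that many bits.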
940 fn write_max_frame_size<T: Pixel>(
941 &mut self, fi: &FrameInvariants<T>,
942 ) -> io::Result<()> {
943 // width_bits and height_bits will have to be moved to the sequence header OBU
944 // when we add support for it.
945 let width = fi.width - 1;
946 let height = fi.height - 1;
947 let width_bits = log_in_base_2(width as u32) as u32 + 1;
948 let height_bits = log_in_base_2(height as u32) as u32 + 1;
949 assert!(width_bits <= 16);
950 assert!(height_bits <= 16);
951 self.write(4, width_bits - 1)?;
952 self.write(4, height_bits - 1)?;
953 self.write(width_bits, width as u16)?;
954 self.write(height_bits, height as u16)?;
955 Ok(())
956 }
957
958 fn write_frame_size<T: Pixel>(
959 &mut self, fi: &FrameInvariants<T>,
960 ) -> io::Result<()> {
961 // width_bits and height_bits will have to be moved to the sequence header OBU
962 // when we add support for it.
963 if fi.frame_size_override_flag {
964 let width = fi.width - 1;
965 let height = fi.height - 1;
966 let width_bits = log_in_base_2(width as u32) as u32 + 1;
967 let height_bits = log_in_base_2(height as u32) as u32 + 1;
968 assert!(width_bits <= 16);
969 assert!(height_bits <= 16);
970 self.write(width_bits, width as u16)?;
971 self.write(height_bits, height as u16)?;
972 }
973 if fi.sequence.enable_superres {
974 unimplemented!();
975 }
976 Ok(())
977 }
978
979 fn write_render_size<T: Pixel>(
980 &mut self, fi: &FrameInvariants<T>,
981 ) -> io::Result<()> {
982 self.write_bit(fi.render_and_frame_size_different)?;
983 if fi.render_and_frame_size_different {
984 self.write(16, fi.render_width - 1)?;
985 self.write(16, fi.render_height - 1)?;
986 }
987 Ok(())
988 }
989
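  // frame_size_with_refs(): one found_ref flag per reference, stopping at the
  // first whose frame and render dimensions match; if none match, explicit
  // frame and render sizes are coded instead.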
990 fn write_frame_size_with_refs<T: Pixel>(
991 &mut self, fi: &FrameInvariants<T>,
992 ) -> io::Result<()> {
993 let mut found_ref = false;
994 for i in 0..INTER_REFS_PER_FRAME {
995 if let Some(ref rec) = fi.rec_buffer.frames[fi.ref_frames[i] as usize] {
996 if rec.width == fi.width as u32
997 && rec.height == fi.height as u32
998 && rec.render_width == fi.render_width
999 && rec.render_height == fi.render_height
1000 {
1001 self.write_bit(true)?;
1002 found_ref = true;
1003 break;
1004 } else {
1005 self.write_bit(false)?;
1006 }
1007 } else {
1008 self.write_bit(false)?;
1009 }
1010 }
1011 if !found_ref {
1012 self.write_frame_size(fi)?;
1013 self.write_render_size(fi)?;
1014 } else if fi.sequence.enable_superres {
1015 unimplemented!();
1016 }
1017 Ok(())
1018 }
1019
1020 fn write_deblock_filter_a<T: Pixel>(
1021 &mut self, fi: &FrameInvariants<T>, deblock: &DeblockState,
1022 ) -> io::Result<()> {
1023 if fi.delta_q_present {
1024 if !fi.allow_intrabc {
1025 self.write_bit(deblock.block_deltas_enabled)?;
1026 }
1027 if deblock.block_deltas_enabled {
1028 self.write(2, deblock.block_delta_shift)?;
1029 self.write_bit(deblock.block_delta_multi)?;
1030 }
1031 }
1032 Ok(())
1033 }
1034
1035 fn write_deblock_filter_b<T: Pixel>(
1036 &mut self, fi: &FrameInvariants<T>, deblock: &DeblockState,
1037 ) -> io::Result<()> {
1038 let planes = if fi.sequence.chroma_sampling == ChromaSampling::Cs400 {
1039 1
1040 } else {
1041 MAX_PLANES
1042 };
1043 assert!(deblock.levels[0] < 64);
1044 self.write(6, deblock.levels[0])?; // loop deblocking filter level 0
1045 assert!(deblock.levels[1] < 64);
1046 self.write(6, deblock.levels[1])?; // loop deblocking filter level 1
1047 if planes > 1 && (deblock.levels[0] > 0 || deblock.levels[1] > 0) {
1048 assert!(deblock.levels[2] < 64);
1049 self.write(6, deblock.levels[2])?; // loop deblocking filter level 2
1050 assert!(deblock.levels[3] < 64);
1051 self.write(6, deblock.levels[3])?; // loop deblocking filter level 3
1052 }
1053 self.write(3, deblock.sharpness)?; // deblocking filter sharpness
1054 self.write_bit(deblock.deltas_enabled)?; // loop deblocking filter deltas enabled
1055 if deblock.deltas_enabled {
1056 self.write_bit(deblock.delta_updates_enabled)?; // deltas updates enabled
1057 if deblock.delta_updates_enabled {
1058 // conditionally write ref delta updates
1059 let prev_ref_deltas = if fi.primary_ref_frame == PRIMARY_REF_NONE {
          [1, 0, 0, 0, -1, 0, -1, -1]
1061 } else {
1062 fi.rec_buffer.deblock
1063 [fi.ref_frames[fi.primary_ref_frame as usize] as usize]
1064 .ref_deltas
1065 };
1066 for i in 0..REF_FRAMES {
1067 let update = deblock.ref_deltas[i] != prev_ref_deltas[i];
1068 self.write_bit(update)?;
1069 if update {
1070 self.write_signed(7, deblock.ref_deltas[i])?;
1071 }
1072 }
1073 // conditionally write mode delta updates
1074 let prev_mode_deltas = if fi.primary_ref_frame == PRIMARY_REF_NONE {
1075 [0, 0]
1076 } else {
1077 fi.rec_buffer.deblock
1078 [fi.ref_frames[fi.primary_ref_frame as usize] as usize]
1079 .mode_deltas
1080 };
1081 for i in 0..2 {
1082 let update = deblock.mode_deltas[i] != prev_mode_deltas[i];
1083 self.write_bit(update)?;
1084 if update {
1085 self.write_signed(7, deblock.mode_deltas[i])?;
1086 }
1087 }
1088 }
1089 }
1090 Ok(())
1091 }
1092
1093 fn write_frame_cdef<T: Pixel>(
1094 &mut self, fi: &FrameInvariants<T>,
1095 ) -> io::Result<()> {
1096 if fi.sequence.enable_cdef && !fi.allow_intrabc {
1097 assert!(fi.cdef_damping >= 3);
1098 assert!(fi.cdef_damping <= 6);
1099 self.write(2, fi.cdef_damping - 3)?;
1100 assert!(fi.cdef_bits < 4);
1101 self.write(2, fi.cdef_bits)?; // cdef bits
1102 for i in 0..(1 << fi.cdef_bits) {
1103 assert!(fi.cdef_y_strengths[i] < 64);
1104 assert!(fi.cdef_uv_strengths[i] < 64);
1105 self.write(6, fi.cdef_y_strengths[i])?; // cdef y strength
1106 if fi.sequence.chroma_sampling != ChromaSampling::Cs400 {
1107 self.write(6, fi.cdef_uv_strengths[i])?; // cdef uv strength
1108 }
1109 }
1110 }
1111 Ok(())
1112 }
1113
1114 fn write_frame_lrf<T: Pixel>(
1115 &mut self, fi: &FrameInvariants<T>, rs: &RestorationState,
1116 ) -> io::Result<()> {
1117 if fi.sequence.enable_restoration && !fi.allow_intrabc {
1118 // && !self.lossless
1119 let planes = if fi.sequence.chroma_sampling == ChromaSampling::Cs400 {
1120 1
1121 } else {
1122 MAX_PLANES
1123 };
1124 let mut use_lrf = false;
1125 let mut use_chroma_lrf = false;
1126 for i in 0..planes {
1127 self.write(2, rs.planes[i].cfg.lrf_type)?; // filter type by plane
1128 if rs.planes[i].cfg.lrf_type != RESTORE_NONE {
1129 use_lrf = true;
1130 if i > 0 {
1131 use_chroma_lrf = true;
1132 }
1133 }
1134 }
1135 if use_lrf {
1136 // The Y shift value written here indicates shift up from superblock size
1137 if !fi.sequence.use_128x128_superblock {
1138 self.write(1, u8::from(rs.planes[0].cfg.unit_size > 64))?;
1139 }
1140
1141 if rs.planes[0].cfg.unit_size > 64 {
1142 self.write(1, u8::from(rs.planes[0].cfg.unit_size > 128))?;
1143 }
1144
1145 if use_chroma_lrf
1146 && fi.sequence.chroma_sampling == ChromaSampling::Cs420
1147 {
1148 self.write(
1149 1,
1150 u8::from(rs.planes[0].cfg.unit_size > rs.planes[1].cfg.unit_size),
1151 )?;
1152 }
1153 }
1154 }
1155 Ok(())
1156 }
1157
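  // segmentation_params(): when enabled and a primary reference exists,
  // update_map/update_data are signalled; feature values use the per-feature
  // bit widths (seg_feature_bits) and signedness (seg_feature_is_signed).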
1158 fn write_segment_data<T: Pixel>(
1159 &mut self, fi: &FrameInvariants<T>, segmentation: &SegmentationState,
1160 ) -> io::Result<()> {
1161 assert_eq!(fi.enable_segmentation, segmentation.enabled);
1162 self.write_bit(fi.enable_segmentation)?;
1163
1164 if segmentation.enabled {
1165 if fi.primary_ref_frame == PRIMARY_REF_NONE {
1166 assert!(segmentation.update_map);
1167 assert!(segmentation.update_data);
1168 } else {
1169 self.write_bit(segmentation.update_map)?;
1170 if segmentation.update_map {
1171 self.write_bit(false)?; /* Without using temporal prediction */
1172 }
1173 self.write_bit(segmentation.update_data)?;
1174 }
1175 if segmentation.update_data {
1176 for i in 0..8 {
1177 for j in 0..SegLvl::SEG_LVL_MAX as usize {
1178 self.write_bit(segmentation.features[i][j])?;
1179 if segmentation.features[i][j] {
1180 let bits = seg_feature_bits[j];
1181 let data = segmentation.data[i][j];
1182 if seg_feature_is_signed[j] {
1183 self.write_signed(bits + 1, data)?;
1184 } else {
1185 self.write(bits, data)?;
1186 }
1187 }
1188 }
1189 }
1190 }
1191 }
1192 Ok(())
1193 }
1194
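  // Matches the spec's delta_q coding: a presence bit, then the non-zero value
  // as a 7-bit signed field (su(1+6)).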
1195 fn write_delta_q(&mut self, delta_q: i8) -> io::Result<()> {
1196 self.write_bit(delta_q != 0)?;
1197 if delta_q != 0 {
1198 assert!((-63..=63).contains(&delta_q));
1199 self.write_signed(6 + 1, delta_q)?;
1200 }
1201 Ok(())
1202 }
1203}
1204
1205#[cfg(test)]
1206mod tests {
1207 use super::ULEB128Writer;
1208 use bitstream_io::{BigEndian, BitWriter};
1209 use nom::error::Error;
1210 use nom::IResult;
1211 use quickcheck::quickcheck;
1212
1213 fn leb128(mut input: &[u8]) -> IResult<&[u8], u64, Error<&[u8]>> {
1214 use nom::bytes::complete::take;
1215
1216 let mut value = 0u64;
1217 for i in 0..8u8 {
1218 let result = take(1usize)(input)?;
1219 input = result.0;
1220 let leb128_byte = result.1[0];
1221 value |= u64::from(leb128_byte & 0x7f) << (i * 7);
1222 if (leb128_byte & 0x80) == 0 {
1223 break;
1224 }
1225 }
1226 Ok((input, value))
1227 }
1228
1229 quickcheck! {
1230 fn validate_leb128_write(val: u32) -> bool {
1231 let mut buf1 = Vec::new();
1232 let mut bw1 = BitWriter::endian(&mut buf1, BigEndian);
1233 bw1.write_uleb128(val as u64).unwrap();
1234 let result = leb128(&buf1).unwrap();
1235 u64::from(val) == result.1 && result.0.is_empty()
1236 }
1237 }
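
  // Spot checks (beyond the quickcheck round-trip above) for the exact byte
  // sequences the helpers are expected to produce.
  #[test]
  fn uleb128_two_byte_example() {
    let mut buf = Vec::new();
    let mut bw = BitWriter::endian(&mut buf, BigEndian);
    bw.write_uleb128(128).unwrap();
    // 128 needs two LEB128 bytes: 0x80 (low 7 bits, more to come) then 0x01.
    assert_eq!(buf, vec![0x80u8, 0x01]);
  }

  #[test]
  fn write_le_emits_little_endian_bytes() {
    use super::LEWriter;
    let mut buf = Vec::new();
    let mut bw = BitWriter::endian(&mut buf, BigEndian);
    // A 2-byte field written through write_le comes out least significant
    // byte first, even though the outer writer is big-endian.
    bw.write_le(2, 0x0102).unwrap();
    assert_eq!(buf, vec![0x02u8, 0x01]);
  }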
1238}
1239