// Copyright (c) 2017-2022, The rav1e contributors. All rights reserved
//
// This source code is subject to the terms of the BSD 2 Clause License and
// the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
// was not distributed with this source code in the LICENSE file, you can
// obtain it at www.aomedia.org/license/software. If the Alliance for Open
// Media Patent License 1.0 was not distributed with this source code in the
// PATENTS file, you can obtain it at www.aomedia.org/license/patent.

use std::mem::MaybeUninit;

use super::*;

use crate::predict::PredictionMode;
use crate::util::cdf;

pub const MAX_PLANES: usize = 3;

pub const BLOCK_SIZE_GROUPS: usize = 4;
pub const MAX_ANGLE_DELTA: usize = 3;
pub const DIRECTIONAL_MODES: usize = 8;
pub const KF_MODE_CONTEXTS: usize = 5;

pub const INTRA_INTER_CONTEXTS: usize = 4;
pub const INTER_MODE_CONTEXTS: usize = 8;
pub const DRL_MODE_CONTEXTS: usize = 3;
pub const COMP_INTER_CONTEXTS: usize = 5;
pub const COMP_REF_TYPE_CONTEXTS: usize = 5;
pub const UNI_COMP_REF_CONTEXTS: usize = 3;

pub const PLANE_TYPES: usize = 2;
const REF_TYPES: usize = 2;

pub const COMP_INDEX_CONTEXTS: usize = 6;
pub const COMP_GROUP_IDX_CONTEXTS: usize = 6;

pub const COEFF_CONTEXT_MAX_WIDTH: usize = MAX_TILE_WIDTH / MI_SIZE;

/// Absolute offset in blocks, where a block is defined
/// to be an `N*N` square where `N == (1 << BLOCK_TO_PLANE_SHIFT)`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct BlockOffset {
  pub x: usize,
  pub y: usize,
}

/// Absolute offset in blocks inside a plane, where a block is defined
/// to be an `N*N` square where `N == (1 << BLOCK_TO_PLANE_SHIFT)`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct PlaneBlockOffset(pub BlockOffset);

/// Absolute offset in blocks inside a tile, where a block is defined
/// to be an `N*N` square where `N == (1 << BLOCK_TO_PLANE_SHIFT)`.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct TileBlockOffset(pub BlockOffset);

impl BlockOffset {
  /// Offset of the superblock in which this block is located.
  #[inline]
  const fn sb_offset(self) -> SuperBlockOffset {
    SuperBlockOffset {
      x: self.x >> SUPERBLOCK_TO_BLOCK_SHIFT,
      y: self.y >> SUPERBLOCK_TO_BLOCK_SHIFT,
    }
  }

  /// Offset of the top-left pixel of this block.
  #[inline]
  const fn plane_offset(self, plane: &PlaneConfig) -> PlaneOffset {
    PlaneOffset {
      x: (self.x >> plane.xdec << BLOCK_TO_PLANE_SHIFT) as isize,
      y: (self.y >> plane.ydec << BLOCK_TO_PLANE_SHIFT) as isize,
    }
  }

  /// Convert to plane offset without decimation.
  #[inline]
  const fn to_luma_plane_offset(self) -> PlaneOffset {
    PlaneOffset {
      x: (self.x as isize) << BLOCK_TO_PLANE_SHIFT,
      y: (self.y as isize) << BLOCK_TO_PLANE_SHIFT,
    }
  }

  #[inline]
  const fn y_in_sb(self) -> usize {
    self.y % MIB_SIZE
  }

  #[inline]
  fn with_offset(self, col_offset: isize, row_offset: isize) -> BlockOffset {
    let x = self.x as isize + col_offset;
    let y = self.y as isize + row_offset;
    debug_assert!(x >= 0);
    debug_assert!(y >= 0);

    BlockOffset { x: x as usize, y: y as usize }
  }
}
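// Worked example (illustrative; assumes BLOCK_TO_PLANE_SHIFT == 2 and
// SUPERBLOCK_TO_BLOCK_SHIFT == 4): BlockOffset { x: 18, y: 5 } lies in
// superblock (1, 0), its luma plane offset is (72, 20) pixels, and for a
// 4:2:0 chroma plane (xdec == ydec == 1) plane_offset() yields (36, 8).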

impl PlaneBlockOffset {
  /// Offset of the superblock in which this block is located.
  #[inline]
  pub const fn sb_offset(self) -> PlaneSuperBlockOffset {
    PlaneSuperBlockOffset(self.0.sb_offset())
  }

  /// Offset of the top-left pixel of this block.
  #[inline]
  pub const fn plane_offset(self, plane: &PlaneConfig) -> PlaneOffset {
    self.0.plane_offset(plane)
  }

  /// Convert to plane offset without decimation.
  #[inline]
  pub const fn to_luma_plane_offset(self) -> PlaneOffset {
    self.0.to_luma_plane_offset()
  }

  #[inline]
  pub const fn y_in_sb(self) -> usize {
    self.0.y_in_sb()
  }

  #[inline]
  pub fn with_offset(
    self, col_offset: isize, row_offset: isize,
  ) -> PlaneBlockOffset {
    Self(self.0.with_offset(col_offset, row_offset))
  }
}

impl TileBlockOffset {
  /// Offset of the superblock in which this block is located.
  #[inline]
  pub const fn sb_offset(self) -> TileSuperBlockOffset {
    TileSuperBlockOffset(self.0.sb_offset())
  }

  /// Offset of the top-left pixel of this block.
  #[inline]
  pub const fn plane_offset(self, plane: &PlaneConfig) -> PlaneOffset {
    self.0.plane_offset(plane)
  }

  /// Convert to plane offset without decimation.
  #[inline]
  pub const fn to_luma_plane_offset(self) -> PlaneOffset {
    self.0.to_luma_plane_offset()
  }

  #[inline]
  pub const fn y_in_sb(self) -> usize {
    self.0.y_in_sb()
  }

  #[inline]
  pub fn with_offset(
    self, col_offset: isize, row_offset: isize,
  ) -> TileBlockOffset {
    Self(self.0.with_offset(col_offset, row_offset))
  }
}

#[derive(Copy, Clone)]
pub struct Block {
  pub mode: PredictionMode,
  pub partition: PartitionType,
  pub skip: bool,
  pub ref_frames: [RefType; 2],
  pub mv: [MotionVector; 2],
  // note: indices are into the reference list, NOT the same as libaom
  pub neighbors_ref_counts: [u8; INTER_REFS_PER_FRAME],
  pub cdef_index: u8,
  pub bsize: BlockSize,
  pub n4_w: u8, /* block width in the unit of mode_info */
  pub n4_h: u8, /* block height in the unit of mode_info */
  pub txsize: TxSize,
  // The block-level deblock_deltas are left-shifted by
  // fi.deblock.block_delta_shift and added to the frame-configured
  // deltas
  pub deblock_deltas: [i8; FRAME_LF_COUNT],
  pub segmentation_idx: u8,
}

impl Block {
  pub fn is_inter(&self) -> bool {
    self.mode >= PredictionMode::NEARESTMV
  }
  pub fn has_second_ref(&self) -> bool {
    self.ref_frames[1] != INTRA_FRAME && self.ref_frames[1] != NONE_FRAME
  }
}
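// Note: `is_inter` relies on the ordering of the PredictionMode enum: every
// variant from NEARESTMV onward is an inter mode, so a single comparison
// replaces matching each inter variant individually.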

impl Default for Block {
  fn default() -> Block {
    Block {
      mode: PredictionMode::DC_PRED,
      partition: PartitionType::PARTITION_NONE,
      skip: false,
      ref_frames: [INTRA_FRAME; 2],
      mv: [MotionVector::default(); 2],
      neighbors_ref_counts: [0; INTER_REFS_PER_FRAME],
      cdef_index: 0,
      bsize: BLOCK_64X64,
      n4_w: BLOCK_64X64.width_mi() as u8,
      n4_h: BLOCK_64X64.height_mi() as u8,
      txsize: TX_64X64,
      deblock_deltas: [0, 0, 0, 0],
      segmentation_idx: 0,
    }
  }
}

#[derive(Clone)]
pub struct BlockContextCheckpoint {
  x: usize,
  chroma_sampling: ChromaSampling,
  cdef_coded: bool,
  above_partition_context: [u8; MIB_SIZE >> 1],
  // left context is also at 8x8 granularity
  left_partition_context: [u8; MIB_SIZE >> 1],
  above_tx_context: [u8; MIB_SIZE],
  left_tx_context: [u8; MIB_SIZE],
  above_coeff_context: [[u8; MIB_SIZE]; MAX_PLANES],
  left_coeff_context: [[u8; MIB_SIZE]; MAX_PLANES],
}

pub struct BlockContext<'a> {
  pub cdef_coded: bool,
  pub code_deltas: bool,
  pub update_seg: bool,
  pub preskip_segid: bool,
  pub above_partition_context: [u8; PARTITION_CONTEXT_MAX_WIDTH],
  pub left_partition_context: [u8; MIB_SIZE >> 1],
  pub above_tx_context: [u8; COEFF_CONTEXT_MAX_WIDTH],
  pub left_tx_context: [u8; MIB_SIZE],
  pub above_coeff_context: [[u8; COEFF_CONTEXT_MAX_WIDTH]; MAX_PLANES],
  pub left_coeff_context: [[u8; MIB_SIZE]; MAX_PLANES],
  pub blocks: &'a mut TileBlocksMut<'a>,
}

impl<'a> BlockContext<'a> {
  pub fn new(blocks: &'a mut TileBlocksMut<'a>) -> Self {
    BlockContext {
      cdef_coded: false,
      code_deltas: false,
      update_seg: false,
      preskip_segid: false,
      above_partition_context: [0; PARTITION_CONTEXT_MAX_WIDTH],
      left_partition_context: [0; MIB_SIZE >> 1],
      above_tx_context: [0; COEFF_CONTEXT_MAX_WIDTH],
      left_tx_context: [0; MIB_SIZE],
      above_coeff_context: [
        [0; COEFF_CONTEXT_MAX_WIDTH],
        [0; COEFF_CONTEXT_MAX_WIDTH],
        [0; COEFF_CONTEXT_MAX_WIDTH],
      ],
      left_coeff_context: [[0; MIB_SIZE]; MAX_PLANES],
      blocks,
    }
  }

  pub fn checkpoint(
    &self, tile_bo: &TileBlockOffset, chroma_sampling: ChromaSampling,
  ) -> BlockContextCheckpoint {
    let x = tile_bo.0.x & (COEFF_CONTEXT_MAX_WIDTH - MIB_SIZE);
    let mut checkpoint = BlockContextCheckpoint {
      x,
      chroma_sampling,
      cdef_coded: self.cdef_coded,
      above_partition_context: [0; MIB_SIZE >> 1],
      left_partition_context: self.left_partition_context,
      above_tx_context: [0; MIB_SIZE],
      left_tx_context: self.left_tx_context,
      above_coeff_context: [[0; MIB_SIZE]; MAX_PLANES],
      left_coeff_context: self.left_coeff_context,
    };
    checkpoint.above_partition_context.copy_from_slice(
      &self.above_partition_context[(x >> 1)..][..(MIB_SIZE >> 1)],
    );
    checkpoint
      .above_tx_context
      .copy_from_slice(&self.above_tx_context[x..][..MIB_SIZE]);
    let num_planes =
      if chroma_sampling == ChromaSampling::Cs400 { 1 } else { 3 };
    for (p, (dst, src)) in checkpoint
      .above_coeff_context
      .iter_mut()
      .zip(self.above_coeff_context.iter())
      .enumerate()
      .take(num_planes)
    {
      let xdec = (p > 0 && chroma_sampling != ChromaSampling::Cs444) as usize;
      dst.copy_from_slice(&src[(x >> xdec)..][..MIB_SIZE]);
    }
    checkpoint
  }

  pub fn rollback(&mut self, checkpoint: &BlockContextCheckpoint) {
    let x = checkpoint.x & (COEFF_CONTEXT_MAX_WIDTH - MIB_SIZE);
    self.cdef_coded = checkpoint.cdef_coded;
    self.above_partition_context[(x >> 1)..][..(MIB_SIZE >> 1)]
      .copy_from_slice(&checkpoint.above_partition_context);
    self.left_partition_context = checkpoint.left_partition_context;
    self.above_tx_context[x..][..MIB_SIZE]
      .copy_from_slice(&checkpoint.above_tx_context);
    self.left_tx_context = checkpoint.left_tx_context;
    let num_planes =
      if checkpoint.chroma_sampling == ChromaSampling::Cs400 { 1 } else { 3 };
    for (p, (dst, src)) in self
      .above_coeff_context
      .iter_mut()
      .zip(checkpoint.above_coeff_context.iter())
      .enumerate()
      .take(num_planes)
    {
      let xdec = (p > 0 && checkpoint.chroma_sampling != ChromaSampling::Cs444)
        as usize;
      dst[(x >> xdec)..][..MIB_SIZE].copy_from_slice(src);
    }
    self.left_coeff_context = checkpoint.left_coeff_context;
  }

  #[inline]
  pub fn set_dc_sign(cul_level: &mut u32, dc_val: i32) {
    if dc_val < 0 {
      *cul_level |= 1 << COEFF_CONTEXT_BITS;
    } else if dc_val > 0 {
      *cul_level += 2 << COEFF_CONTEXT_BITS;
    }
  }
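  // Illustrative note (assuming COEFF_CONTEXT_BITS == 6): set_dc_sign packs
  // the DC sign into the bits above the cumulative-level field, so a negative
  // DC adds 1 << 6 == 64 and a positive DC adds 2 << 6 == 128, while the low
  // six bits keep holding the clamped cumulative coefficient level.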

  pub fn set_coeff_context(
    &mut self, plane: usize, bo: TileBlockOffset, tx_size: TxSize,
    xdec: usize, ydec: usize, value: u8,
  ) {
    for above in &mut self.above_coeff_context[plane][(bo.0.x >> xdec)..]
      [..tx_size.width_mi()]
    {
      *above = value;
    }
    let bo_y = bo.y_in_sb();
    for left in &mut self.left_coeff_context[plane][(bo_y >> ydec)..]
      [..tx_size.height_mi()]
    {
      *left = value;
    }
  }

  fn reset_left_coeff_context(&mut self, plane: usize) {
    for c in &mut self.left_coeff_context[plane] {
      *c = 0;
    }
  }

  fn reset_left_partition_context(&mut self) {
    for c in &mut self.left_partition_context {
      *c = 0;
    }
  }

  pub fn update_tx_size_context(
    &mut self, bo: TileBlockOffset, bsize: BlockSize, tx_size: TxSize,
    skip: bool,
  ) {
    let n4_w = bsize.width_mi();
    let n4_h = bsize.height_mi();

    let (tx_w, tx_h) = if skip {
      ((n4_w * MI_SIZE) as u8, (n4_h * MI_SIZE) as u8)
    } else {
      (tx_size.width() as u8, tx_size.height() as u8)
    };

    let above_ctx = &mut self.above_tx_context[bo.0.x..bo.0.x + n4_w];
    let left_ctx =
      &mut self.left_tx_context[bo.y_in_sb()..bo.y_in_sb() + n4_h];

    for v in above_ctx[0..n4_w].iter_mut() {
      *v = tx_w;
    }

    for v in left_ctx[0..n4_h].iter_mut() {
      *v = tx_h;
    }
  }

  fn reset_left_tx_context(&mut self) {
    for c in &mut self.left_tx_context {
      *c = 0;
    }
  }

  pub fn reset_left_contexts(&mut self, planes: usize) {
    for p in 0..planes {
      BlockContext::reset_left_coeff_context(self, p);
    }
    BlockContext::reset_left_partition_context(self);

    BlockContext::reset_left_tx_context(self);
  }

  // The mode info data structure has a one element border above and to the
  // left of the entries corresponding to real macroblocks.
  // The prediction flags in these dummy entries are initialized to 0.
  // 0 - inter/inter, inter/--, --/inter, --/--
  // 1 - intra/inter, inter/intra
  // 2 - intra/--, --/intra
  // 3 - intra/intra
  pub fn intra_inter_context(&self, bo: TileBlockOffset) -> usize {
    let has_above = bo.0.y > 0;
    let has_left = bo.0.x > 0;

    match (has_above, has_left) {
      (true, true) => {
        let above_intra = !self.blocks.above_of(bo).is_inter();
        let left_intra = !self.blocks.left_of(bo).is_inter();
        if above_intra && left_intra {
          3
        } else {
          (above_intra || left_intra) as usize
        }
      }
      (true, false) => {
        if self.blocks.above_of(bo).is_inter() {
          0
        } else {
          2
        }
      }
      (false, true) => {
        if self.blocks.left_of(bo).is_inter() {
          0
        } else {
          2
        }
      }
      _ => 0,
    }
  }

  pub fn get_txb_ctx(
    &self, plane_bsize: BlockSize, tx_size: TxSize, plane: usize,
    bo: TileBlockOffset, xdec: usize, ydec: usize, frame_clipped_txw: usize,
    frame_clipped_txh: usize,
  ) -> TXB_CTX {
    let mut txb_ctx = TXB_CTX { txb_skip_ctx: 0, dc_sign_ctx: 0 };
    const MAX_TX_SIZE_UNIT: usize = 16;
    const signs: [i8; 3] = [0, -1, 1];
    const dc_sign_contexts: [usize; 4 * MAX_TX_SIZE_UNIT + 1] = [
      1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
      2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    ];
    let mut dc_sign: i16 = 0;

    let above_ctxs = &self.above_coeff_context[plane][(bo.0.x >> xdec)..]
      [..frame_clipped_txw >> 2];
    let left_ctxs = &self.left_coeff_context[plane][(bo.y_in_sb() >> ydec)..]
      [..frame_clipped_txh >> 2];

    // Decide txb_ctx.dc_sign_ctx
    for &ctx in above_ctxs {
      let sign = ctx >> COEFF_CONTEXT_BITS;
      dc_sign += signs[sign as usize] as i16;
    }

    for &ctx in left_ctxs {
      let sign = ctx >> COEFF_CONTEXT_BITS;
      dc_sign += signs[sign as usize] as i16;
    }

    txb_ctx.dc_sign_ctx =
      dc_sign_contexts[(dc_sign + 2 * MAX_TX_SIZE_UNIT as i16) as usize];

    // Decide txb_ctx.txb_skip_ctx
    if plane == 0 {
      if plane_bsize == tx_size.block_size() {
        txb_ctx.txb_skip_ctx = 0;
      } else {
        // This is the algorithm to generate table skip_contexts[min][max].
        // if (!max)
        //   txb_skip_ctx = 1;
        // else if (!min)
        //   txb_skip_ctx = 2 + (max > 3);
        // else if (max <= 3)
        //   txb_skip_ctx = 4;
        // else if (min <= 3)
        //   txb_skip_ctx = 5;
        // else
        //   txb_skip_ctx = 6;
        const skip_contexts: [[u8; 5]; 5] = [
          [1, 2, 2, 2, 3],
          [1, 4, 4, 4, 5],
          [1, 4, 4, 4, 5],
          [1, 4, 4, 4, 5],
          [1, 4, 4, 4, 6],
        ];

        let top: u8 = above_ctxs.iter().fold(0, |acc, ctx| acc | *ctx)
          & COEFF_CONTEXT_MASK as u8;

        let left: u8 = left_ctxs.iter().fold(0, |acc, ctx| acc | *ctx)
          & COEFF_CONTEXT_MASK as u8;

        let max = cmp::min(top | left, 4);
        let min = cmp::min(cmp::min(top, left), 4);
        txb_ctx.txb_skip_ctx =
          skip_contexts[min as usize][max as usize] as usize;
      }
    } else {
      let top: u8 = above_ctxs.iter().fold(0, |acc, ctx| acc | *ctx);
      let left: u8 = left_ctxs.iter().fold(0, |acc, ctx| acc | *ctx);
      let ctx_base = (top != 0) as usize + (left != 0) as usize;
      let ctx_offset = if num_pels_log2_lookup[plane_bsize as usize]
        > num_pels_log2_lookup[tx_size.block_size() as usize]
      {
        10
      } else {
        7
      };
      txb_ctx.txb_skip_ctx = ctx_base + ctx_offset;
    }

    txb_ctx
  }
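  // Worked example for the DC sign context above (illustrative): each packed
  // neighbour context contributes signs[ctx >> COEFF_CONTEXT_BITS] to dc_sign,
  // i.e. -1 for a negative DC and +1 for a positive one, so a net-negative
  // total maps to dc_sign_ctx 1, an exactly balanced total to 0, and a
  // net-positive total to 2.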
}

#[derive(Clone, Copy)]
#[repr(C)]
pub struct NMVComponent {
  pub sign_cdf: [u16; 2],
  pub class0_hp_cdf: [u16; 2],
  pub hp_cdf: [u16; 2],
  pub class0_cdf: [u16; CLASS0_SIZE],
  pub bits_cdf: [[u16; 2]; MV_OFFSET_BITS],

  pub class0_fp_cdf: [[u16; MV_FP_SIZE]; CLASS0_SIZE],
  pub fp_cdf: [u16; MV_FP_SIZE],

  pub classes_cdf: [u16; MV_CLASSES],
  // MV_CLASSES + 5 == 16; pad the last CDF for rollback.
  padding: [u16; 5],
}

#[derive(Clone, Copy)]
#[repr(C)]
pub struct NMVContext {
  pub joints_cdf: [u16; MV_JOINTS],
  // MV_JOINTS + 12 == 16; pad the last CDF for rollback.
  padding: [u16; 12],
  pub comps: [NMVComponent; 2],
}

// lv_map
pub static default_nmv_context: NMVContext = {
  NMVContext {
    joints_cdf: cdf([4096, 11264, 19328]),
    padding: [0; 12],
    comps: [
      NMVComponent {
        classes_cdf: cdf([
          28672, 30976, 31858, 32320, 32551, 32656, 32740, 32757, 32762, 32767,
        ]),
        class0_fp_cdf: cdf_2d([[16384, 24576, 26624], [12288, 21248, 24128]]),
        fp_cdf: cdf([8192, 17408, 21248]),
        sign_cdf: cdf([128 * 128]),
        class0_hp_cdf: cdf([160 * 128]),
        hp_cdf: cdf([128 * 128]),
        class0_cdf: cdf([216 * 128]),
        bits_cdf: cdf_2d([
          [128 * 136],
          [128 * 140],
          [128 * 148],
          [128 * 160],
          [128 * 176],
          [128 * 192],
          [128 * 224],
          [128 * 234],
          [128 * 234],
          [128 * 240],
        ]),
        padding: [0; 5],
      },
      NMVComponent {
        classes_cdf: cdf([
          28672, 30976, 31858, 32320, 32551, 32656, 32740, 32757, 32762, 32767,
        ]),
        class0_fp_cdf: cdf_2d([[16384, 24576, 26624], [12288, 21248, 24128]]),
        fp_cdf: cdf([8192, 17408, 21248]),
        sign_cdf: cdf([128 * 128]),
        class0_hp_cdf: cdf([160 * 128]),
        hp_cdf: cdf([128 * 128]),
        class0_cdf: cdf([216 * 128]),
        bits_cdf: cdf_2d([
          [128 * 136],
          [128 * 140],
          [128 * 148],
          [128 * 160],
          [128 * 176],
          [128 * 192],
          [128 * 224],
          [128 * 234],
          [128 * 234],
          [128 * 240],
        ]),
        padding: [0; 5],
      },
    ],
  }
};

#[derive(Clone)]
pub struct CandidateMV {
  pub this_mv: MotionVector,
  pub comp_mv: MotionVector,
  pub weight: u32,
}

#[derive(Clone)]
pub struct FrameBlocks {
  blocks: Box<[Block]>,
  pub cols: usize,
  pub rows: usize,
}

impl FrameBlocks {
  pub fn new(cols: usize, rows: usize) -> Self {
    Self {
      blocks: vec![Block::default(); cols * rows].into_boxed_slice(),
      cols,
      rows,
    }
  }

  #[inline(always)]
  pub fn as_tile_blocks(&self) -> TileBlocks<'_> {
    TileBlocks::new(self, 0, 0, self.cols, self.rows)
  }

  #[inline(always)]
  pub fn as_tile_blocks_mut(&mut self) -> TileBlocksMut<'_> {
    TileBlocksMut::new(self, 0, 0, self.cols, self.rows)
  }
}

impl Index<usize> for FrameBlocks {
  type Output = [Block];
  #[inline]
  fn index(&self, index: usize) -> &Self::Output {
    &self.blocks[index * self.cols..(index + 1) * self.cols]
  }
}

impl IndexMut<usize> for FrameBlocks {
  #[inline]
  fn index_mut(&mut self, index: usize) -> &mut Self::Output {
    &mut self.blocks[index * self.cols..(index + 1) * self.cols]
  }
}

// for convenience, also index by BlockOffset

impl Index<PlaneBlockOffset> for FrameBlocks {
  type Output = Block;
  #[inline]
  fn index(&self, bo: PlaneBlockOffset) -> &Self::Output {
    &self[bo.0.y][bo.0.x]
  }
}

impl IndexMut<PlaneBlockOffset> for FrameBlocks {
  #[inline]
  fn index_mut(&mut self, bo: PlaneBlockOffset) -> &mut Self::Output {
    &mut self[bo.0.y][bo.0.x]
  }
}
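// Convenience indexing sketch: `frame_blocks[PlaneBlockOffset(BlockOffset { x, y })]`
// is equivalent to `frame_blocks[y][x]`, i.e. a row-major lookup that goes
// through the row slice returned by the `Index<usize>` impl above.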

impl<'a> ContextWriter<'a> {
  pub fn get_cdf_intra_mode_kf(
    &self, bo: TileBlockOffset,
  ) -> &[u16; INTRA_MODES] {
    static intra_mode_context: [usize; INTRA_MODES] =
      [0, 1, 2, 3, 4, 4, 4, 4, 3, 0, 1, 2, 0];
    let above_mode = if bo.0.y > 0 {
      self.bc.blocks.above_of(bo).mode
    } else {
      PredictionMode::DC_PRED
    };
    let left_mode = if bo.0.x > 0 {
      self.bc.blocks.left_of(bo).mode
    } else {
      PredictionMode::DC_PRED
    };
    let above_ctx = intra_mode_context[above_mode as usize];
    let left_ctx = intra_mode_context[left_mode as usize];
    &self.fc.kf_y_cdf[above_ctx][left_ctx]
  }

  pub fn write_intra_mode_kf<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, mode: PredictionMode,
  ) {
    static intra_mode_context: [usize; INTRA_MODES] =
      [0, 1, 2, 3, 4, 4, 4, 4, 3, 0, 1, 2, 0];
    let above_mode = if bo.0.y > 0 {
      self.bc.blocks.above_of(bo).mode
    } else {
      PredictionMode::DC_PRED
    };
    let left_mode = if bo.0.x > 0 {
      self.bc.blocks.left_of(bo).mode
    } else {
      PredictionMode::DC_PRED
    };
    let above_ctx = intra_mode_context[above_mode as usize];
    let left_ctx = intra_mode_context[left_mode as usize];
    let cdf = &self.fc.kf_y_cdf[above_ctx][left_ctx];
    symbol_with_update!(self, w, mode as u32, cdf);
  }

  pub fn get_cdf_intra_mode(&self, bsize: BlockSize) -> &[u16; INTRA_MODES] {
    &self.fc.y_mode_cdf[size_group_lookup[bsize as usize] as usize]
  }

  #[inline]
  pub fn write_intra_mode<W: Writer>(
    &mut self, w: &mut W, bsize: BlockSize, mode: PredictionMode,
  ) {
    let cdf = &self.fc.y_mode_cdf[size_group_lookup[bsize as usize] as usize];
    symbol_with_update!(self, w, mode as u32, cdf);
  }

  #[inline]
  pub fn write_intra_uv_mode<W: Writer>(
    &mut self, w: &mut W, uv_mode: PredictionMode, y_mode: PredictionMode,
    bs: BlockSize,
  ) {
    if bs.cfl_allowed() {
      let cdf = &self.fc.uv_mode_cfl_cdf[y_mode as usize];
      symbol_with_update!(self, w, uv_mode as u32, cdf);
    } else {
      let cdf = &self.fc.uv_mode_cdf[y_mode as usize];
      symbol_with_update!(self, w, uv_mode as u32, cdf);
    }
  }

  #[inline]
  pub fn write_angle_delta<W: Writer>(
    &mut self, w: &mut W, angle: i8, mode: PredictionMode,
  ) {
    symbol_with_update!(
      self,
      w,
      (angle + MAX_ANGLE_DELTA as i8) as u32,
      &self.fc.angle_delta_cdf
        [mode as usize - PredictionMode::V_PRED as usize]
    );
  }

  pub fn write_use_filter_intra<W: Writer>(
    &mut self, w: &mut W, enable: bool, block_size: BlockSize,
  ) {
    let cdf = &self.fc.filter_intra_cdfs[block_size as usize];
    symbol_with_update!(self, w, enable as u32, cdf);
  }

  /// # Panics
  ///
  /// - If called with `enable: true` (not yet implemented)
  pub fn write_use_palette_mode<W: Writer>(
    &mut self, w: &mut W, enable: bool, bsize: BlockSize, bo: TileBlockOffset,
    luma_mode: PredictionMode, chroma_mode: PredictionMode, xdec: usize,
    ydec: usize, cs: ChromaSampling,
  ) {
    if enable {
      unimplemented!(); // TODO
    }

    let (ctx_luma, ctx_chroma) = (0, 0); // TODO: increase based on surrounding block info

    if luma_mode == PredictionMode::DC_PRED {
      let bsize_ctx = bsize.width_mi_log2() + bsize.height_mi_log2() - 2;
      let cdf = &self.fc.palette_y_mode_cdfs[bsize_ctx][ctx_luma];
      symbol_with_update!(self, w, enable as u32, cdf);
    }

    if has_chroma(bo, bsize, xdec, ydec, cs)
      && chroma_mode == PredictionMode::DC_PRED
    {
      let cdf = &self.fc.palette_uv_mode_cdfs[ctx_chroma];
      symbol_with_update!(self, w, enable as u32, cdf);
    }
  }

  fn find_valid_row_offs(
    row_offset: isize, mi_row: usize, mi_rows: usize,
  ) -> isize {
    cmp::min(
      cmp::max(row_offset, -(mi_row as isize)),
      (mi_rows - mi_row - 1) as isize,
    )
  }

  fn find_valid_col_offs(
    col_offset: isize, mi_col: usize, mi_cols: usize,
  ) -> isize {
    cmp::min(
      cmp::max(col_offset, -(mi_col as isize)),
      (mi_cols - mi_col - 1) as isize,
    )
  }

  fn find_matching_mv(
    mv: MotionVector, mv_stack: &mut ArrayVec<CandidateMV, 9>,
  ) -> bool {
    for mv_cand in mv_stack {
      if mv.row == mv_cand.this_mv.row && mv.col == mv_cand.this_mv.col {
        return true;
      }
    }
    false
  }

  fn find_matching_mv_and_update_weight(
    mv: MotionVector, mv_stack: &mut ArrayVec<CandidateMV, 9>, weight: u32,
  ) -> bool {
    for mv_cand in mv_stack {
      if mv.row == mv_cand.this_mv.row && mv.col == mv_cand.this_mv.col {
        mv_cand.weight += weight;
        return true;
      }
    }
    false
  }

  fn find_matching_comp_mv_and_update_weight(
    mvs: [MotionVector; 2], mv_stack: &mut ArrayVec<CandidateMV, 9>,
    weight: u32,
  ) -> bool {
    for mv_cand in mv_stack {
      if mvs[0].row == mv_cand.this_mv.row
        && mvs[0].col == mv_cand.this_mv.col
        && mvs[1].row == mv_cand.comp_mv.row
        && mvs[1].col == mv_cand.comp_mv.col
      {
        mv_cand.weight += weight;
        return true;
      }
    }
    false
  }

  fn add_ref_mv_candidate(
    ref_frames: [RefType; 2], blk: &Block,
    mv_stack: &mut ArrayVec<CandidateMV, 9>, weight: u32,
    newmv_count: &mut usize, is_compound: bool,
  ) -> bool {
    if !blk.is_inter() {
      /* For intrabc */
      false
    } else if is_compound {
      if blk.ref_frames[0] == ref_frames[0]
        && blk.ref_frames[1] == ref_frames[1]
      {
        let found_match = Self::find_matching_comp_mv_and_update_weight(
          blk.mv, mv_stack, weight,
        );

        if !found_match && mv_stack.len() < MAX_REF_MV_STACK_SIZE {
          let mv_cand =
            CandidateMV { this_mv: blk.mv[0], comp_mv: blk.mv[1], weight };

          mv_stack.push(mv_cand);
        }

        if blk.mode.has_newmv() {
          *newmv_count += 1;
        }

        true
      } else {
        false
      }
    } else {
      let mut found = false;
      for i in 0..2 {
        if blk.ref_frames[i] == ref_frames[0] {
          let found_match = Self::find_matching_mv_and_update_weight(
            blk.mv[i], mv_stack, weight,
          );

          if !found_match && mv_stack.len() < MAX_REF_MV_STACK_SIZE {
            let mv_cand = CandidateMV {
              this_mv: blk.mv[i],
              comp_mv: MotionVector::default(),
              weight,
            };

            mv_stack.push(mv_cand);
          }

          if blk.mode.has_newmv() {
            *newmv_count += 1;
          }

          found = true;
        }
      }
      found
    }
  }

  fn add_extra_mv_candidate<T: Pixel>(
    blk: &Block, ref_frames: [RefType; 2],
    mv_stack: &mut ArrayVec<CandidateMV, 9>, fi: &FrameInvariants<T>,
    is_compound: bool, ref_id_count: &mut [usize; 2],
    ref_id_mvs: &mut [[MotionVector; 2]; 2], ref_diff_count: &mut [usize; 2],
    ref_diff_mvs: &mut [[MotionVector; 2]; 2],
  ) {
    if is_compound {
      for cand_list in 0..2 {
        let cand_ref = blk.ref_frames[cand_list];
        if cand_ref != INTRA_FRAME && cand_ref != NONE_FRAME {
          for list in 0..2 {
            let mut cand_mv = blk.mv[cand_list];
            if cand_ref == ref_frames[list] && ref_id_count[list] < 2 {
              ref_id_mvs[list][ref_id_count[list]] = cand_mv;
              ref_id_count[list] += 1;
            } else if ref_diff_count[list] < 2 {
              if fi.ref_frame_sign_bias[cand_ref.to_index()]
                != fi.ref_frame_sign_bias[ref_frames[list].to_index()]
              {
                cand_mv.row = -cand_mv.row;
                cand_mv.col = -cand_mv.col;
              }
              ref_diff_mvs[list][ref_diff_count[list]] = cand_mv;
              ref_diff_count[list] += 1;
            }
          }
        }
      }
    } else {
      for cand_list in 0..2 {
        let cand_ref = blk.ref_frames[cand_list];
        if cand_ref != INTRA_FRAME && cand_ref != NONE_FRAME {
          let mut mv = blk.mv[cand_list];
          if fi.ref_frame_sign_bias[cand_ref.to_index()]
            != fi.ref_frame_sign_bias[ref_frames[0].to_index()]
          {
            mv.row = -mv.row;
            mv.col = -mv.col;
          }

          if !Self::find_matching_mv(mv, mv_stack) {
            let mv_cand = CandidateMV {
              this_mv: mv,
              comp_mv: MotionVector::default(),
              weight: 2,
            };
            mv_stack.push(mv_cand);
          }
        }
      }
    }
  }

  fn scan_row_mbmi(
    &self, bo: TileBlockOffset, row_offset: isize, max_row_offs: isize,
    processed_rows: &mut isize, ref_frames: [RefType; 2],
    mv_stack: &mut ArrayVec<CandidateMV, 9>, newmv_count: &mut usize,
    bsize: BlockSize, is_compound: bool,
  ) -> bool {
    let bc = &self.bc;
    let target_n4_w = bsize.width_mi();

    let end_mi = cmp::min(
      cmp::min(target_n4_w, bc.blocks.cols() - bo.0.x),
      BLOCK_64X64.width_mi(),
    );
    let n4_w_8 = BLOCK_8X8.width_mi();
    let n4_w_16 = BLOCK_16X16.width_mi();
    let mut col_offset = 0;

    if row_offset.abs() > 1 {
      col_offset = 1;
      if ((bo.0.x & 0x01) != 0) && (target_n4_w < n4_w_8) {
        col_offset -= 1;
      }
    }

    let use_step_16 = target_n4_w >= 16;

    let mut found_match = false;

    let mut i = 0;
    while i < end_mi {
      let cand =
        &bc.blocks[bo.with_offset(col_offset + i as isize, row_offset)];

      let n4_w = cand.n4_w as usize;
      let mut len = cmp::min(target_n4_w, n4_w);
      if use_step_16 {
        len = cmp::max(n4_w_16, len);
      } else if row_offset.abs() > 1 {
        len = cmp::max(len, n4_w_8);
      }

      let mut weight: u32 = 2;
      if target_n4_w >= n4_w_8 && target_n4_w <= n4_w {
        let inc = cmp::min(-max_row_offs + row_offset + 1, cand.n4_h as isize);
        assert!(inc >= 0);
        weight = cmp::max(weight, inc as u32);
        *processed_rows = inc - row_offset - 1;
      }

      if Self::add_ref_mv_candidate(
        ref_frames,
        cand,
        mv_stack,
        len as u32 * weight,
        newmv_count,
        is_compound,
      ) {
        found_match = true;
      }

      i += len;
    }

    found_match
  }

  fn scan_col_mbmi(
    &self, bo: TileBlockOffset, col_offset: isize, max_col_offs: isize,
    processed_cols: &mut isize, ref_frames: [RefType; 2],
    mv_stack: &mut ArrayVec<CandidateMV, 9>, newmv_count: &mut usize,
    bsize: BlockSize, is_compound: bool,
  ) -> bool {
    let bc = &self.bc;

    let target_n4_h = bsize.height_mi();

    let end_mi = cmp::min(
      cmp::min(target_n4_h, bc.blocks.rows() - bo.0.y),
      BLOCK_64X64.height_mi(),
    );
    let n4_h_8 = BLOCK_8X8.height_mi();
    let n4_h_16 = BLOCK_16X16.height_mi();
    let mut row_offset = 0;

    if col_offset.abs() > 1 {
      row_offset = 1;
      if ((bo.0.y & 0x01) != 0) && (target_n4_h < n4_h_8) {
        row_offset -= 1;
      }
    }

    let use_step_16 = target_n4_h >= 16;

    let mut found_match = false;

    let mut i = 0;
    while i < end_mi {
      let cand =
        &bc.blocks[bo.with_offset(col_offset, row_offset + i as isize)];
      let n4_h = cand.n4_h as usize;
      let mut len = cmp::min(target_n4_h, n4_h);
      if use_step_16 {
        len = cmp::max(n4_h_16, len);
      } else if col_offset.abs() > 1 {
        len = cmp::max(len, n4_h_8);
      }

      let mut weight: u32 = 2;
      if target_n4_h >= n4_h_8 && target_n4_h <= n4_h {
        let inc = cmp::min(-max_col_offs + col_offset + 1, cand.n4_w as isize);
        assert!(inc >= 0);
        weight = cmp::max(weight, inc as u32);
        *processed_cols = inc - col_offset - 1;
      }

      if Self::add_ref_mv_candidate(
        ref_frames,
        cand,
        mv_stack,
        len as u32 * weight,
        newmv_count,
        is_compound,
      ) {
        found_match = true;
      }

      i += len;
    }

    found_match
  }

  fn scan_blk_mbmi(
    &self, bo: TileBlockOffset, ref_frames: [RefType; 2],
    mv_stack: &mut ArrayVec<CandidateMV, 9>, newmv_count: &mut usize,
    is_compound: bool,
  ) -> bool {
    if bo.0.x >= self.bc.blocks.cols() || bo.0.y >= self.bc.blocks.rows() {
      return false;
    }

    let weight = 2 * BLOCK_8X8.width_mi() as u32;
    /* Always assume it's within a tile, probably wrong */
    Self::add_ref_mv_candidate(
      ref_frames,
      &self.bc.blocks[bo],
      mv_stack,
      weight,
      newmv_count,
      is_compound,
    )
  }

  fn add_offset(mv_stack: &mut ArrayVec<CandidateMV, 9>) {
    for cand_mv in mv_stack {
      cand_mv.weight += REF_CAT_LEVEL;
    }
  }

  #[profiling::function]
  fn setup_mvref_list<T: Pixel>(
    &self, bo: TileBlockOffset, ref_frames: [RefType; 2],
    mv_stack: &mut ArrayVec<CandidateMV, 9>, bsize: BlockSize,
    fi: &FrameInvariants<T>, is_compound: bool,
  ) -> usize {
    let (_rf, _rf_num) = (INTRA_FRAME, 1);

    let target_n4_h = bsize.height_mi();
    let target_n4_w = bsize.width_mi();

    let mut max_row_offs: isize = 0;
    let row_adj =
      (target_n4_h < BLOCK_8X8.height_mi()) && (bo.0.y & 0x01) != 0x0;

    let mut max_col_offs: isize = 0;
    let col_adj =
      (target_n4_w < BLOCK_8X8.width_mi()) && (bo.0.x & 0x01) != 0x0;

    let mut processed_rows: isize = 0;
    let mut processed_cols: isize = 0;

    let up_avail = bo.0.y > 0;
    let left_avail = bo.0.x > 0;

    if up_avail {
      max_row_offs = -2 * MVREF_ROW_COLS as isize + row_adj as isize;

      // limit max offset for small blocks
      if target_n4_h < BLOCK_8X8.height_mi() {
        max_row_offs = -2 * 2 + row_adj as isize;
      }

      let rows = self.bc.blocks.rows();
      max_row_offs = Self::find_valid_row_offs(max_row_offs, bo.0.y, rows);
    }

    if left_avail {
      max_col_offs = -2 * MVREF_ROW_COLS as isize + col_adj as isize;

      // limit max offset for small blocks
      if target_n4_w < BLOCK_8X8.width_mi() {
        max_col_offs = -2 * 2 + col_adj as isize;
      }

      let cols = self.bc.blocks.cols();
      max_col_offs = Self::find_valid_col_offs(max_col_offs, bo.0.x, cols);
    }

    let mut row_match = false;
    let mut col_match = false;
    let mut newmv_count: usize = 0;

    if max_row_offs.abs() >= 1 {
      let found_match = self.scan_row_mbmi(
        bo,
        -1,
        max_row_offs,
        &mut processed_rows,
        ref_frames,
        mv_stack,
        &mut newmv_count,
        bsize,
        is_compound,
      );
      row_match |= found_match;
    }
    if max_col_offs.abs() >= 1 {
      let found_match = self.scan_col_mbmi(
        bo,
        -1,
        max_col_offs,
        &mut processed_cols,
        ref_frames,
        mv_stack,
        &mut newmv_count,
        bsize,
        is_compound,
      );
      col_match |= found_match;
    }
    if has_tr(bo, bsize) && bo.0.y > 0 {
      let found_match = self.scan_blk_mbmi(
        bo.with_offset(target_n4_w as isize, -1),
        ref_frames,
        mv_stack,
        &mut newmv_count,
        is_compound,
      );
      row_match |= found_match;
    }

    let nearest_match = usize::from(row_match) + usize::from(col_match);

    Self::add_offset(mv_stack);

    /* Scan the second outer area. */
    let mut far_newmv_count: usize = 0; // won't be used

    let found_match = bo.0.x > 0
      && bo.0.y > 0
      && self.scan_blk_mbmi(
        bo.with_offset(-1, -1),
        ref_frames,
        mv_stack,
        &mut far_newmv_count,
        is_compound,
      );
    row_match |= found_match;

    for idx in 2..=MVREF_ROW_COLS {
      let row_offset = -2 * idx as isize + 1 + row_adj as isize;
      let col_offset = -2 * idx as isize + 1 + col_adj as isize;

      if row_offset.abs() <= max_row_offs.abs()
        && row_offset.abs() > processed_rows
      {
        let found_match = self.scan_row_mbmi(
          bo,
          row_offset,
          max_row_offs,
          &mut processed_rows,
          ref_frames,
          mv_stack,
          &mut far_newmv_count,
          bsize,
          is_compound,
        );
        row_match |= found_match;
      }

      if col_offset.abs() <= max_col_offs.abs()
        && col_offset.abs() > processed_cols
      {
        let found_match = self.scan_col_mbmi(
          bo,
          col_offset,
          max_col_offs,
          &mut processed_cols,
          ref_frames,
          mv_stack,
          &mut far_newmv_count,
          bsize,
          is_compound,
        );
        col_match |= found_match;
      }
    }

    let total_match = usize::from(row_match) + usize::from(col_match);

    assert!(total_match >= nearest_match);

    // mode_context contains both newmv_context and refmv_context, where
    // newmv_context lies in the REFMV_OFFSET least significant bits
    let mode_context = match nearest_match {
      0 => cmp::min(total_match, 1) + (total_match << REFMV_OFFSET),
      1 => 3 - cmp::min(newmv_count, 1) + ((2 + total_match) << REFMV_OFFSET),
      _ => 5 - cmp::min(newmv_count, 1) + (5 << REFMV_OFFSET),
    };
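    // Unpacking sketch (mirroring write_inter_mode / write_compound_mode
    // below): the newmv context is recovered as `ctx & NEWMV_CTX_MASK` and the
    // refmv context as `(ctx >> REFMV_OFFSET) & REFMV_CTX_MASK`; e.g. a single
    // nearest match with a NEWMV neighbour packs newmv context 2 into the low
    // bits and refmv context 2 + total_match above them.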

    /* TODO: Find nearest match and assign nearest and near mvs */

    // 7.10.2.11 Sort MV stack according to weight
    mv_stack.sort_by(|a, b| b.weight.cmp(&a.weight));

    if mv_stack.len() < 2 {
      // 7.10.2.12 Extra search process

      let w4 = bsize.width_mi().min(16).min(self.bc.blocks.cols() - bo.0.x);
      let h4 = bsize.height_mi().min(16).min(self.bc.blocks.rows() - bo.0.y);
      let num4x4 = w4.min(h4);

      let passes = i32::from(!up_avail)..=i32::from(left_avail);

      let mut ref_id_count: [usize; 2] = [0; 2];
      let mut ref_diff_count: [usize; 2] = [0; 2];
      let mut ref_id_mvs = [[MotionVector::default(); 2]; 2];
      let mut ref_diff_mvs = [[MotionVector::default(); 2]; 2];

      for pass in passes {
        let mut idx = 0;
        while idx < num4x4 && mv_stack.len() < 2 {
          let rbo = if pass == 0 {
            bo.with_offset(idx as isize, -1)
          } else {
            bo.with_offset(-1, idx as isize)
          };

          let blk = &self.bc.blocks[rbo];
          Self::add_extra_mv_candidate(
            blk,
            ref_frames,
            mv_stack,
            fi,
            is_compound,
            &mut ref_id_count,
            &mut ref_id_mvs,
            &mut ref_diff_count,
            &mut ref_diff_mvs,
          );

          idx += if pass == 0 { blk.n4_w } else { blk.n4_h } as usize;
        }
      }

      if is_compound {
        let mut combined_mvs = [[MotionVector::default(); 2]; 2];

        for list in 0..2 {
          let mut comp_count = 0;
          for idx in 0..ref_id_count[list] {
            combined_mvs[comp_count][list] = ref_id_mvs[list][idx];
            comp_count += 1;
          }
          for idx in 0..ref_diff_count[list] {
            if comp_count < 2 {
              combined_mvs[comp_count][list] = ref_diff_mvs[list][idx];
              comp_count += 1;
            }
          }
        }

        if mv_stack.len() == 1 {
          let mv_cand = if combined_mvs[0][0].row == mv_stack[0].this_mv.row
            && combined_mvs[0][0].col == mv_stack[0].this_mv.col
            && combined_mvs[0][1].row == mv_stack[0].comp_mv.row
            && combined_mvs[0][1].col == mv_stack[0].comp_mv.col
          {
            CandidateMV {
              this_mv: combined_mvs[1][0],
              comp_mv: combined_mvs[1][1],
              weight: 2,
            }
          } else {
            CandidateMV {
              this_mv: combined_mvs[0][0],
              comp_mv: combined_mvs[0][1],
              weight: 2,
            }
          };
          mv_stack.push(mv_cand);
        } else {
          for idx in 0..2 {
            let mv_cand = CandidateMV {
              this_mv: combined_mvs[idx][0],
              comp_mv: combined_mvs[idx][1],
              weight: 2,
            };
            mv_stack.push(mv_cand);
          }
        }

        assert!(mv_stack.len() == 2);
      }
    }

    /* TODO: Handle single reference frame extension */

    let frame_bo = PlaneBlockOffset(BlockOffset {
      x: self.bc.blocks.x() + bo.0.x,
      y: self.bc.blocks.y() + bo.0.y,
    });
    // clamp mvs
    for mv in mv_stack {
      let blk_w = bsize.width();
      let blk_h = bsize.height();
      let border_w = 128 + blk_w as isize * 8;
      let border_h = 128 + blk_h as isize * 8;
      let mvx_min =
        -(frame_bo.0.x as isize) * (8 * MI_SIZE) as isize - border_w;
      let mvx_max = ((self.bc.blocks.frame_cols() - frame_bo.0.x) as isize
        - (blk_w / MI_SIZE) as isize)
        * (8 * MI_SIZE) as isize
        + border_w;
      let mvy_min =
        -(frame_bo.0.y as isize) * (8 * MI_SIZE) as isize - border_h;
      let mvy_max = ((self.bc.blocks.frame_rows() - frame_bo.0.y) as isize
        - (blk_h / MI_SIZE) as isize)
        * (8 * MI_SIZE) as isize
        + border_h;
      mv.this_mv.row =
        (mv.this_mv.row as isize).clamp(mvy_min, mvy_max) as i16;
      mv.this_mv.col =
        (mv.this_mv.col as isize).clamp(mvx_min, mvx_max) as i16;
      mv.comp_mv.row =
        (mv.comp_mv.row as isize).clamp(mvy_min, mvy_max) as i16;
      mv.comp_mv.col =
        (mv.comp_mv.col as isize).clamp(mvx_min, mvx_max) as i16;
    }

    mode_context
  }

  /// # Panics
  ///
  /// - If the first ref frame is not set (`NONE_FRAME`)
  pub fn find_mvrefs<T: Pixel>(
    &self, bo: TileBlockOffset, ref_frames: [RefType; 2],
    mv_stack: &mut ArrayVec<CandidateMV, 9>, bsize: BlockSize,
    fi: &FrameInvariants<T>, is_compound: bool,
  ) -> usize {
    assert!(ref_frames[0] != NONE_FRAME);
    if ref_frames[0] != NONE_FRAME {
      // TODO: If ref_frames[0] != INTRA_FRAME, convert global mv to an mv;
      // otherwise, set the global mv ref to invalid.
    }

    if ref_frames[0] != INTRA_FRAME {
      /* TODO: Set zeromv ref to the converted global motion vector */
    } else {
      /* TODO: Set the zeromv ref to 0 */
      return 0;
    }

    self.setup_mvref_list(bo, ref_frames, mv_stack, bsize, fi, is_compound)
  }

  pub fn fill_neighbours_ref_counts(&mut self, bo: TileBlockOffset) {
    let mut ref_counts = [0; INTER_REFS_PER_FRAME];

    if bo.0.y > 0 {
      let above_b = self.bc.blocks.above_of(bo);
      if above_b.is_inter() {
        ref_counts[above_b.ref_frames[0].to_index()] += 1;
        if above_b.has_second_ref() {
          ref_counts[above_b.ref_frames[1].to_index()] += 1;
        }
      }
    }

    if bo.0.x > 0 {
      let left_b = self.bc.blocks.left_of(bo);
      if left_b.is_inter() {
        ref_counts[left_b.ref_frames[0].to_index()] += 1;
        if left_b.has_second_ref() {
          ref_counts[left_b.ref_frames[1].to_index()] += 1;
        }
      }
    }
    self.bc.blocks[bo].neighbors_ref_counts = ref_counts;
  }

  #[inline]
  pub const fn ref_count_ctx(counts0: u8, counts1: u8) -> usize {
    if counts0 < counts1 {
      0
    } else if counts0 == counts1 {
      1
    } else {
      2
    }
  }

  #[inline]
  pub fn get_pred_ctx_brfarf2_or_arf(&self, bo: TileBlockOffset) -> usize {
    let ref_counts = self.bc.blocks[bo].neighbors_ref_counts;

    let brfarf2_count = ref_counts[BWDREF_FRAME.to_index()]
      + ref_counts[ALTREF2_FRAME.to_index()];
    let arf_count = ref_counts[ALTREF_FRAME.to_index()];

    ContextWriter::ref_count_ctx(brfarf2_count, arf_count)
  }

  #[inline]
  pub fn get_pred_ctx_ll2_or_l3gld(&self, bo: TileBlockOffset) -> usize {
    let ref_counts = self.bc.blocks[bo].neighbors_ref_counts;

    let l_l2_count =
      ref_counts[LAST_FRAME.to_index()] + ref_counts[LAST2_FRAME.to_index()];
    let l3_gold_count =
      ref_counts[LAST3_FRAME.to_index()] + ref_counts[GOLDEN_FRAME.to_index()];

    ContextWriter::ref_count_ctx(l_l2_count, l3_gold_count)
  }

  #[inline]
  pub fn get_pred_ctx_last_or_last2(&self, bo: TileBlockOffset) -> usize {
    let ref_counts = self.bc.blocks[bo].neighbors_ref_counts;

    let l_count = ref_counts[LAST_FRAME.to_index()];
    let l2_count = ref_counts[LAST2_FRAME.to_index()];

    ContextWriter::ref_count_ctx(l_count, l2_count)
  }

  #[inline]
  pub fn get_pred_ctx_last3_or_gold(&self, bo: TileBlockOffset) -> usize {
    let ref_counts = self.bc.blocks[bo].neighbors_ref_counts;

    let l3_count = ref_counts[LAST3_FRAME.to_index()];
    let gold_count = ref_counts[GOLDEN_FRAME.to_index()];

    ContextWriter::ref_count_ctx(l3_count, gold_count)
  }

  #[inline]
  pub fn get_pred_ctx_brf_or_arf2(&self, bo: TileBlockOffset) -> usize {
    let ref_counts = self.bc.blocks[bo].neighbors_ref_counts;

    let brf_count = ref_counts[BWDREF_FRAME.to_index()];
    let arf2_count = ref_counts[ALTREF2_FRAME.to_index()];

    ContextWriter::ref_count_ctx(brf_count, arf2_count)
  }

  pub fn get_comp_mode_ctx(&self, bo: TileBlockOffset) -> usize {
    let avail_left = bo.0.x > 0;
    let avail_up = bo.0.y > 0;
    let (left0, left1) = if avail_left {
      let bo_left = bo.with_offset(-1, 0);
      let ref_frames = &self.bc.blocks[bo_left].ref_frames;
      (ref_frames[0], ref_frames[1])
    } else {
      (INTRA_FRAME, NONE_FRAME)
    };
    let (above0, above1) = if avail_up {
      let bo_up = bo.with_offset(0, -1);
      let ref_frames = &self.bc.blocks[bo_up].ref_frames;
      (ref_frames[0], ref_frames[1])
    } else {
      (INTRA_FRAME, NONE_FRAME)
    };
    let left_single = left1 == NONE_FRAME;
    let above_single = above1 == NONE_FRAME;
    let left_intra = left0 == INTRA_FRAME;
    let above_intra = above0 == INTRA_FRAME;
    let left_backward = left0.is_bwd_ref();
    let above_backward = above0.is_bwd_ref();

    if avail_left && avail_up {
      if above_single && left_single {
        (above_backward ^ left_backward) as usize
      } else if above_single {
        2 + (above_backward || above_intra) as usize
      } else if left_single {
        2 + (left_backward || left_intra) as usize
      } else {
        4
      }
    } else if avail_up {
      if above_single {
        above_backward as usize
      } else {
        3
      }
    } else if avail_left {
      if left_single {
        left_backward as usize
      } else {
        3
      }
    } else {
      1
    }
  }

  pub fn get_comp_ref_type_ctx(&self, bo: TileBlockOffset) -> usize {
    fn is_samedir_ref_pair(ref0: RefType, ref1: RefType) -> bool {
      (ref0.is_bwd_ref() && ref0 != NONE_FRAME)
        == (ref1.is_bwd_ref() && ref1 != NONE_FRAME)
    }

    let avail_left = bo.0.x > 0;
    let avail_up = bo.0.y > 0;
    let (left0, left1) = if avail_left {
      let bo_left = bo.with_offset(-1, 0);
      let ref_frames = &self.bc.blocks[bo_left].ref_frames;
      (ref_frames[0], ref_frames[1])
    } else {
      (INTRA_FRAME, NONE_FRAME)
    };
    let (above0, above1) = if avail_up {
      let bo_up = bo.with_offset(0, -1);
      let ref_frames = &self.bc.blocks[bo_up].ref_frames;
      (ref_frames[0], ref_frames[1])
    } else {
      (INTRA_FRAME, NONE_FRAME)
    };
    let left_single = left1 == NONE_FRAME;
    let above_single = above1 == NONE_FRAME;
    let left_intra = left0 == INTRA_FRAME;
    let above_intra = above0 == INTRA_FRAME;
    let above_comp_inter = avail_up && !above_intra && !above_single;
    let left_comp_inter = avail_left && !left_intra && !left_single;
    let above_uni_comp =
      above_comp_inter && is_samedir_ref_pair(above0, above1);
    let left_uni_comp = left_comp_inter && is_samedir_ref_pair(left0, left1);

    if avail_up && !above_intra && avail_left && !left_intra {
      let samedir = is_samedir_ref_pair(above0, left0) as usize;

      if !above_comp_inter && !left_comp_inter {
        1 + 2 * samedir
      } else if !above_comp_inter {
        if !left_uni_comp {
          1
        } else {
          3 + samedir
        }
      } else if !left_comp_inter {
        if !above_uni_comp {
          1
        } else {
          3 + samedir
        }
      } else if !above_uni_comp && !left_uni_comp {
        0
      } else if !above_uni_comp || !left_uni_comp {
        2
      } else {
        3 + ((above0 == BWDREF_FRAME) == (left0 == BWDREF_FRAME)) as usize
      }
    } else if avail_up && avail_left {
      if above_comp_inter {
        1 + 2 * above_uni_comp as usize
      } else if left_comp_inter {
        1 + 2 * left_uni_comp as usize
      } else {
        2
      }
    } else if above_comp_inter {
      4 * above_uni_comp as usize
    } else if left_comp_inter {
      4 * left_uni_comp as usize
    } else {
      2
    }
  }

  /// # Panics
  ///
  /// - If `mode` is not an inter mode
  pub fn write_compound_mode<W: Writer>(
    &mut self, w: &mut W, mode: PredictionMode, ctx: usize,
  ) {
    let newmv_ctx = ctx & NEWMV_CTX_MASK;
    let refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;

    let ctx = if refmv_ctx < 2 {
      newmv_ctx.min(1)
    } else if refmv_ctx < 4 {
      (newmv_ctx + 1).min(4)
    } else {
      (newmv_ctx.max(1) + 3).min(7)
    };

    assert!(mode >= PredictionMode::NEAREST_NEARESTMV);
    let val = match mode {
      PredictionMode::NEAREST_NEARESTMV => 0,
      PredictionMode::NEAR_NEAR0MV
      | PredictionMode::NEAR_NEAR1MV
      | PredictionMode::NEAR_NEAR2MV => 1,
      PredictionMode::NEAREST_NEWMV => 2,
      PredictionMode::NEW_NEARESTMV => 3,
      PredictionMode::NEAR_NEW0MV
      | PredictionMode::NEAR_NEW1MV
      | PredictionMode::NEAR_NEW2MV => 4,
      PredictionMode::NEW_NEAR0MV
      | PredictionMode::NEW_NEAR1MV
      | PredictionMode::NEW_NEAR2MV => 5,
      PredictionMode::GLOBAL_GLOBALMV => 6,
      PredictionMode::NEW_NEWMV => 7,
      _ => unreachable!(),
    };
    symbol_with_update!(self, w, val, &self.fc.compound_mode_cdf[ctx]);
  }

  pub fn write_inter_mode<W: Writer>(
    &mut self, w: &mut W, mode: PredictionMode, ctx: usize,
  ) {
    use PredictionMode::{GLOBALMV, NEARESTMV, NEWMV};
    let newmv_ctx = ctx & NEWMV_CTX_MASK;
    let cdf = &self.fc.newmv_cdf[newmv_ctx];
    symbol_with_update!(self, w, (mode != NEWMV) as u32, cdf);
    if mode != NEWMV {
      let zeromv_ctx = (ctx >> GLOBALMV_OFFSET) & GLOBALMV_CTX_MASK;
      let cdf = &self.fc.zeromv_cdf[zeromv_ctx];
      symbol_with_update!(self, w, (mode != GLOBALMV) as u32, cdf);
      if mode != GLOBALMV {
        let refmv_ctx = (ctx >> REFMV_OFFSET) & REFMV_CTX_MASK;
        let cdf = &self.fc.refmv_cdf[refmv_ctx];
        symbol_with_update!(self, w, (mode != NEARESTMV) as u32, cdf);
      }
    }
  }

  #[inline]
  pub fn write_drl_mode<W: Writer>(
    &mut self, w: &mut W, drl_mode: bool, ctx: usize,
  ) {
    let cdf = &self.fc.drl_cdfs[ctx];
    symbol_with_update!(self, w, drl_mode as u32, cdf);
  }

  /// # Panics
  ///
  /// - If the MV is invalid
  pub fn write_mv<W: Writer>(
    &mut self, w: &mut W, mv: MotionVector, ref_mv: MotionVector,
    mv_precision: MvSubpelPrecision,
  ) {
    // <https://aomediacodec.github.io/av1-spec/#assign-mv-semantics>
    assert!(mv.is_valid());

    let diff =
      MotionVector { row: mv.row - ref_mv.row, col: mv.col - ref_mv.col };
    let j: MvJointType = av1_get_mv_joint(diff);

    let cdf = &self.fc.nmv_context.joints_cdf;
    symbol_with_update!(self, w, j as u32, cdf);

    if mv_joint_vertical(j) {
      self.encode_mv_component(w, diff.row as i32, 0, mv_precision);
    }
    if mv_joint_horizontal(j) {
      self.encode_mv_component(w, diff.col as i32, 1, mv_precision);
    }
  }

  pub fn write_block_deblock_deltas<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, multi: bool, planes: usize,
  ) {
    let block = &self.bc.blocks[bo];
    let deltas_count = if multi { FRAME_LF_COUNT + planes - 3 } else { 1 };
    let deltas = &block.deblock_deltas[..deltas_count];

    for (i, &delta) in deltas.iter().enumerate() {
      let abs = delta.unsigned_abs() as u32;
      let cdf = if multi {
        &self.fc.deblock_delta_multi_cdf[i]
      } else {
        &self.fc.deblock_delta_cdf
      };

      symbol_with_update!(self, w, cmp::min(abs, DELTA_LF_SMALL), cdf);

      if abs >= DELTA_LF_SMALL {
        let bits = msb(abs as i32 - 1) as u32;
        w.literal(3, bits - 1);
        w.literal(bits as u8, abs - (1 << bits) - 1);
      }
      if abs > 0 {
        w.bool(delta < 0, 16384);
      }
    }
  }
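  // Coding sketch for the loop above (illustrative; assumes DELTA_LF_SMALL ==
  // 3): an absolute delta of 5 is sent as the escape symbol 3, then
  // bits - 1 == 1 in three literal bits (bits == msb(5 - 1) == 2), then the
  // remainder 5 - (1 << 2) - 1 == 0 in two bits, and finally a sign bit;
  // magnitudes below 3 need only the first symbol, plus a sign bit when
  // non-zero.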

  pub fn write_is_inter<W: Writer>(
    &mut self, w: &mut W, bo: TileBlockOffset, is_inter: bool,
  ) {
    let ctx = self.bc.intra_inter_context(bo);
    let cdf = &self.fc.intra_inter_cdfs[ctx];
    symbol_with_update!(self, w, is_inter as u32, cdf);
  }

  pub fn write_coeffs_lv_map<T: Coefficient, W: Writer>(
    &mut self, w: &mut W, plane: usize, bo: TileBlockOffset, coeffs_in: &[T],
    eob: u16, pred_mode: PredictionMode, tx_size: TxSize, tx_type: TxType,
    plane_bsize: BlockSize, xdec: usize, ydec: usize,
    use_reduced_tx_set: bool, frame_clipped_txw: usize,
    frame_clipped_txh: usize,
  ) -> bool {
    debug_assert!(frame_clipped_txw != 0);
    debug_assert!(frame_clipped_txh != 0);

    let is_inter = pred_mode >= PredictionMode::NEARESTMV;

    // Note: Both intra and inter modes use the inter scan order. Surprised?
    let scan: &[u16] = &av1_scan_orders[tx_size as usize][tx_type as usize]
      .scan[..usize::from(eob)];
    let height = av1_get_coded_tx_size(tx_size).height();

    // Create a slice with coeffs in scan order
    let mut coeffs_storage: Aligned<ArrayVec<T, { 32 * 32 }>> =
      Aligned::new(ArrayVec::new());
    let coeffs = &mut coeffs_storage.data;
    coeffs.extend(scan.iter().map(|&scan_idx| coeffs_in[scan_idx as usize]));

    let cul_level: u32 = coeffs.iter().map(|c| u32::cast_from(c.abs())).sum();

    let txs_ctx = Self::get_txsize_entropy_ctx(tx_size);
    let txb_ctx = self.bc.get_txb_ctx(
      plane_bsize,
      tx_size,
      plane,
      bo,
      xdec,
      ydec,
      frame_clipped_txw,
      frame_clipped_txh,
    );

    {
      let cdf = &self.fc.txb_skip_cdf[txs_ctx][txb_ctx.txb_skip_ctx];
      symbol_with_update!(self, w, (eob == 0) as u32, cdf);
    }

    if eob == 0 {
      self.bc.set_coeff_context(plane, bo, tx_size, xdec, ydec, 0);
      return false;
    }

    let mut levels_buf = [0u8; TX_PAD_2D];
    let levels: &mut [u8] =
      &mut levels_buf[TX_PAD_TOP * (height + TX_PAD_HOR)..];

    self.txb_init_levels(coeffs_in, height, levels, height + TX_PAD_HOR);

    let tx_class = tx_type_to_class[tx_type as usize];
    let plane_type = usize::from(plane != 0);

    // Signal tx_type for luma plane only
    if plane == 0 {
      self.write_tx_type(
        w,
        tx_size,
        tx_type,
        pred_mode,
        is_inter,
        use_reduced_tx_set,
      );
    }

    self.encode_eob(eob, tx_size, tx_class, txs_ctx, plane_type, w);
    self.encode_coeffs(
      coeffs, levels, scan, eob, tx_size, tx_class, txs_ctx, plane_type, w,
    );
    let cul_level =
      self.encode_coeff_signs(coeffs, w, plane_type, txb_ctx, cul_level);
    self.bc.set_coeff_context(plane, bo, tx_size, xdec, ydec, cul_level as u8);
    true
  }

  fn encode_eob<W: Writer>(
    &mut self, eob: u16, tx_size: TxSize, tx_class: TxClass, txs_ctx: usize,
    plane_type: usize, w: &mut W,
  ) {
    let (eob_pt, eob_extra) = Self::get_eob_pos_token(eob);
    let eob_multi_size: usize = tx_size.area_log2() - 4;
    let eob_multi_ctx: usize = usize::from(tx_class != TX_CLASS_2D);

    match eob_multi_size {
      0 => {
        let cdf = &self.fc.eob_flag_cdf16[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
      1 => {
        let cdf = &self.fc.eob_flag_cdf32[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
      2 => {
        let cdf = &self.fc.eob_flag_cdf64[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
      3 => {
        let cdf = &self.fc.eob_flag_cdf128[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
      4 => {
        let cdf = &self.fc.eob_flag_cdf256[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
      5 => {
        let cdf = &self.fc.eob_flag_cdf512[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
      _ => {
        let cdf = &self.fc.eob_flag_cdf1024[plane_type][eob_multi_ctx];
        symbol_with_update!(self, w, eob_pt - 1, cdf);
      }
    }

    let eob_offset_bits = k_eob_offset_bits[eob_pt as usize];

    if eob_offset_bits > 0 {
      let mut eob_shift = eob_offset_bits - 1;
      let mut bit: u32 = u32::from((eob_extra & (1 << eob_shift)) != 0);
      let cdf =
        &self.fc.eob_extra_cdf[txs_ctx][plane_type][(eob_pt - 3) as usize];
      symbol_with_update!(self, w, bit, cdf);
      for i in 1..eob_offset_bits {
        eob_shift = eob_offset_bits - 1 - i;
        bit = u32::from((eob_extra & (1 << eob_shift)) != 0);
        w.bit(bit as u16);
      }
    }
  }
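  // Illustrative example (assuming the AV1 EOB grouping 1, 2, 3-4, 5-8,
  // 9-16, ...): an eob of 10 falls in the group starting at 9, so the group
  // index is coded with the tx-size-dependent multi-symbol CDF above and the
  // remainder 10 - 9 == 1 is sent as offset bits, the most significant one
  // CDF-coded and the rest as raw bits.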

  fn encode_coeffs<T: Coefficient, W: Writer>(
    &mut self, coeffs: &[T], levels: &mut [u8], scan: &[u16], eob: u16,
    tx_size: TxSize, tx_class: TxClass, txs_ctx: usize, plane_type: usize,
    w: &mut W,
  ) {
    let mut coeff_contexts =
      Aligned::<[MaybeUninit<i8>; MAX_CODED_TX_SQUARE]>::uninit_array();

    // get_nz_map_contexts sets coeff_contexts contiguously as a parallel
    // array for scan, not in scan order
    let coeff_contexts = self.get_nz_map_contexts(
      levels,
      scan,
      eob,
      tx_size,
      tx_class,
      &mut coeff_contexts.data,
    );

    let bhl = Self::get_txb_bhl(tx_size);

    let scan_with_ctx =
      scan.iter().copied().zip(coeff_contexts.iter().copied());
    for (c, ((pos, coeff_ctx), v)) in
      scan_with_ctx.zip(coeffs.iter().copied()).enumerate().rev()
    {
      let pos = pos as usize;
      let coeff_ctx = coeff_ctx as usize;
      let level = v.abs();

      if c == usize::from(eob) - 1 {
        symbol_with_update!(
          self,
          w,
          cmp::min(u32::cast_from(level), 3) - 1,
          &self.fc.coeff_base_eob_cdf[txs_ctx][plane_type][coeff_ctx]
        );
      } else {
        symbol_with_update!(
          self,
          w,
          cmp::min(u32::cast_from(level), 3),
          &self.fc.coeff_base_cdf[txs_ctx][plane_type][coeff_ctx]
        );
      }

      if level > T::cast_from(NUM_BASE_LEVELS) {
        let base_range = level - T::cast_from(1 + NUM_BASE_LEVELS);
        let br_ctx = Self::get_br_ctx(levels, pos, bhl, tx_class);
        let mut idx: T = T::cast_from(0);

        loop {
          if idx >= T::cast_from(COEFF_BASE_RANGE) {
            break;
          }
          let k = cmp::min(base_range - idx, T::cast_from(BR_CDF_SIZE - 1));
          let cdf = &self.fc.coeff_br_cdf
            [txs_ctx.min(TxSize::TX_32X32 as usize)][plane_type][br_ctx];
          symbol_with_update!(self, w, u32::cast_from(k), cdf);
          if k < T::cast_from(BR_CDF_SIZE - 1) {
            break;
          }
          idx += T::cast_from(BR_CDF_SIZE - 1);
        }
      }
    }
  }

  fn encode_coeff_signs<T: Coefficient, W: Writer>(
    &mut self, coeffs: &[T], w: &mut W, plane_type: usize, txb_ctx: TXB_CTX,
    orig_cul_level: u32,
  ) -> u32 {
    // Loop to code all signs in the transform block,
    // starting with the sign of DC (if applicable)
    for (c, &v) in coeffs.iter().enumerate() {
      if v == T::cast_from(0) {
        continue;
      }

      let level = v.abs();
      let sign = u32::from(v < T::cast_from(0));
      if c == 0 {
        let cdf = &self.fc.dc_sign_cdf[plane_type][txb_ctx.dc_sign_ctx];
        symbol_with_update!(self, w, sign, cdf);
      } else {
        w.bit(sign as u16);
      }
      // save extra golomb codes for separate loop
      if level > T::cast_from(COEFF_BASE_RANGE + NUM_BASE_LEVELS) {
        w.write_golomb(u32::cast_from(
          level - T::cast_from(COEFF_BASE_RANGE + NUM_BASE_LEVELS + 1),
        ));
      }
    }

    let mut new_cul_level =
      cmp::min(COEFF_CONTEXT_MASK as u32, orig_cul_level);

    BlockContext::set_dc_sign(&mut new_cul_level, i32::cast_from(coeffs[0]));

    new_cul_level
  }
}