// SPDX-License-Identifier: GPL-2.0
/*
 * Hantro VPU HEVC codec driver
 *
 * Copyright (C) 2020 Safran Passenger Innovations LLC
 */

#include "hantro_hw.h"
#include "hantro_g2_regs.h"

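/*
 * Fill the tile size buffer read by the G2 core: one u16 (width, height)
 * pair in CTB units per tile, written row by row. When tiles are disabled
 * a single entry covering the whole picture is emitted.
 */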
static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
{
	struct hantro_dev *vpu = ctx->dev;
	const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
	const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
	const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
	u16 *p = (u16 *)((u8 *)ctx->hevc_dec.tile_sizes.cpu);
	unsigned int num_tile_rows = pps->num_tile_rows_minus1 + 1;
	unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
	unsigned int pic_width_in_ctbs, pic_height_in_ctbs;
	unsigned int max_log2_ctb_size, ctb_size;
	bool tiles_enabled, uniform_spacing;
	u32 no_chroma = 0;

	tiles_enabled = !!(pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED);
	uniform_spacing = !!(pps->flags & V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING);

	hantro_reg_write(vpu, &g2_tile_e, tiles_enabled);

	max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 +
			    sps->log2_diff_max_min_luma_coding_block_size;
	pic_width_in_ctbs = (sps->pic_width_in_luma_samples +
			     (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size;
	pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1)
			     >> max_log2_ctb_size;
	ctb_size = 1 << max_log2_ctb_size;

	vpu_debug(1, "Preparing tile sizes buffer for %dx%d CTBs (CTB size %d)\n",
		  pic_width_in_ctbs, pic_height_in_ctbs, ctb_size);

	if (tiles_enabled) {
		unsigned int i, j, h;

		vpu_debug(1, "Tiles enabled! %dx%d\n", num_tile_cols, num_tile_rows);

		hantro_reg_write(vpu, &g2_num_tile_rows, num_tile_rows);
		hantro_reg_write(vpu, &g2_num_tile_cols, num_tile_cols);

		/* write width + height for each tile in pic */
		if (!uniform_spacing) {
			u32 tmp_w = 0, tmp_h = 0;

			for (i = 0; i < num_tile_rows; i++) {
				if (i == num_tile_rows - 1)
					h = pic_height_in_ctbs - tmp_h;
				else
					h = pps->row_height_minus1[i] + 1;
				tmp_h += h;
				if (i == 0 && h == 1 && ctb_size == 16)
					no_chroma = 1;
				for (j = 0, tmp_w = 0; j < num_tile_cols - 1; j++) {
					tmp_w += pps->column_width_minus1[j] + 1;
					*p++ = pps->column_width_minus1[j] + 1;
					*p++ = h;
					if (i == 0 && h == 1 && ctb_size == 16)
						no_chroma = 1;
				}
				/* last column */
				*p++ = pic_width_in_ctbs - tmp_w;
				*p++ = h;
			}
		} else { /* uniform spacing */
			u32 tmp, prev_h, prev_w;

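			/*
			 * Uniform spacing: tile boundary i sits at
			 * (i + 1) * picture size in CTBs / number of tiles,
			 * so each tile gets the difference to the previous
			 * boundary. This mirrors the uniform_spacing_flag
			 * derivation in the HEVC specification.
			 */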
			for (i = 0, prev_h = 0; i < num_tile_rows; i++) {
				tmp = (i + 1) * pic_height_in_ctbs / num_tile_rows;
				h = tmp - prev_h;
				prev_h = tmp;
				if (i == 0 && h == 1 && ctb_size == 16)
					no_chroma = 1;
				for (j = 0, prev_w = 0; j < num_tile_cols; j++) {
					tmp = (j + 1) * pic_width_in_ctbs / num_tile_cols;
					*p++ = tmp - prev_w;
					*p++ = h;
					if (j == 0 &&
					    (pps->column_width_minus1[0] + 1) == 1 &&
					    ctb_size == 16)
						no_chroma = 1;
					prev_w = tmp;
				}
			}
		}
	} else {
		hantro_reg_write(vpu, &g2_num_tile_rows, 1);
		hantro_reg_write(vpu, &g2_num_tile_cols, 1);

		/* There's one tile, with dimensions equal to pic size. */
		p[0] = pic_width_in_ctbs;
		p[1] = pic_height_in_ctbs;
	}

	if (no_chroma)
		vpu_debug(1, "%s: no chroma!\n", __func__);
}

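/*
 * Count the slice-header bits (pic_output_flag, colour_plane_id,
 * pic_order_cnt_lsb and the reference picture set syntax) that the
 * hardware skips rather than parses: the driver programs the POC and
 * reference information through registers instead.
 */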
static int compute_header_skip_length(struct hantro_ctx *ctx)
{
	const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
	const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
	const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
	const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
	int skip = 0;

	if (pps->flags & V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT)
		/* size of pic_output_flag */
		skip++;

	if (sps->flags & V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE)
		/* size of colour_plane_id */
		skip += 2;

	if (!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC)) {
		/* size of pic_order_cnt_lsb */
		skip += sps->log2_max_pic_order_cnt_lsb_minus4 + 4;

		/* size of short_term_ref_pic_set_sps_flag */
		skip++;

		if (decode_params->short_term_ref_pic_set_size)
			/* size of st_ref_pic_set( num_short_term_ref_pic_sets ) */
			skip += decode_params->short_term_ref_pic_set_size;
		else if (sps->num_short_term_ref_pic_sets > 1)
			/* size of short_term_ref_pic_set_idx */
			skip += fls(sps->num_short_term_ref_pic_sets - 1);

		/* size of the long-term reference picture set */
		skip += decode_params->long_term_ref_pic_set_size;
	}

	return skip;
}

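/*
 * Program the SPS/PPS and decode-parameter derived picture parameters
 * into the G2 decoder registers.
 */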
static void set_params(struct hantro_ctx *ctx)
{
	const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
	const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
	const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
	const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
	struct hantro_dev *vpu = ctx->dev;
	u32 min_log2_cb_size, max_log2_ctb_size, min_cb_size, max_ctb_size;
	u32 pic_width_in_min_cbs, pic_height_in_min_cbs;
	u32 pic_width_aligned, pic_height_aligned;
	u32 partial_ctb_x, partial_ctb_y;

	hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8);
	hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8);

	hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx));

	min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3;
	max_log2_ctb_size = min_log2_cb_size + sps->log2_diff_max_min_luma_coding_block_size;

	hantro_reg_write(vpu, &g2_min_cb_size, min_log2_cb_size);
	hantro_reg_write(vpu, &g2_max_cb_size, max_log2_ctb_size);

	min_cb_size = 1 << min_log2_cb_size;
	max_ctb_size = 1 << max_log2_ctb_size;

	pic_width_in_min_cbs = sps->pic_width_in_luma_samples / min_cb_size;
	pic_height_in_min_cbs = sps->pic_height_in_luma_samples / min_cb_size;
	pic_width_aligned = ALIGN(sps->pic_width_in_luma_samples, max_ctb_size);
	pic_height_aligned = ALIGN(sps->pic_height_in_luma_samples, max_ctb_size);

	partial_ctb_x = !!(sps->pic_width_in_luma_samples != pic_width_aligned);
	partial_ctb_y = !!(sps->pic_height_in_luma_samples != pic_height_aligned);

	hantro_reg_write(vpu, &g2_partial_ctb_x, partial_ctb_x);
	hantro_reg_write(vpu, &g2_partial_ctb_y, partial_ctb_y);

	hantro_reg_write(vpu, &g2_pic_width_in_cbs, pic_width_in_min_cbs);
	hantro_reg_write(vpu, &g2_pic_height_in_cbs, pic_height_in_min_cbs);

	hantro_reg_write(vpu, &g2_pic_width_4x4,
			 (pic_width_in_min_cbs * min_cb_size) / 4);
	hantro_reg_write(vpu, &g2_pic_height_4x4,
			 (pic_height_in_min_cbs * min_cb_size) / 4);

	hantro_reg_write(vpu, &hevc_max_inter_hierdepth,
			 sps->max_transform_hierarchy_depth_inter);
	hantro_reg_write(vpu, &hevc_max_intra_hierdepth,
			 sps->max_transform_hierarchy_depth_intra);
	hantro_reg_write(vpu, &hevc_min_trb_size,
			 sps->log2_min_luma_transform_block_size_minus2 + 2);
	hantro_reg_write(vpu, &hevc_max_trb_size,
			 sps->log2_min_luma_transform_block_size_minus2 + 2 +
			 sps->log2_diff_max_min_luma_transform_block_size);

	hantro_reg_write(vpu, &g2_tempor_mvp_e,
			 !!(sps->flags & V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED) &&
			 !(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC));
	hantro_reg_write(vpu, &g2_strong_smooth_e,
			 !!(sps->flags & V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED));
	hantro_reg_write(vpu, &g2_asym_pred_e,
			 !!(sps->flags & V4L2_HEVC_SPS_FLAG_AMP_ENABLED));
	hantro_reg_write(vpu, &g2_sao_e,
			 !!(sps->flags & V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET));
	hantro_reg_write(vpu, &g2_sign_data_hide,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED));

	if (pps->flags & V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED) {
		hantro_reg_write(vpu, &g2_cu_qpd_e, 1);
		hantro_reg_write(vpu, &g2_max_cu_qpd_depth, pps->diff_cu_qp_delta_depth);
	} else {
		hantro_reg_write(vpu, &g2_cu_qpd_e, 0);
		hantro_reg_write(vpu, &g2_max_cu_qpd_depth, 0);
	}

	hantro_reg_write(vpu, &g2_cb_qp_offset, pps->pps_cb_qp_offset);
	hantro_reg_write(vpu, &g2_cr_qp_offset, pps->pps_cr_qp_offset);

	hantro_reg_write(vpu, &g2_filt_offset_beta, pps->pps_beta_offset_div2);
	hantro_reg_write(vpu, &g2_filt_offset_tc, pps->pps_tc_offset_div2);
	hantro_reg_write(vpu, &g2_slice_hdr_ext_e,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT));
	hantro_reg_write(vpu, &g2_slice_hdr_ext_bits, pps->num_extra_slice_header_bits);
	hantro_reg_write(vpu, &g2_slice_chqp_present,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT));
	hantro_reg_write(vpu, &g2_weight_bipr_idc,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED));
	hantro_reg_write(vpu, &g2_transq_bypass,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED));
	hantro_reg_write(vpu, &g2_list_mod_e,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT));
	hantro_reg_write(vpu, &g2_entropy_sync_e,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED));
	hantro_reg_write(vpu, &g2_cabac_init_present,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
	hantro_reg_write(vpu, &g2_idr_pic_e,
			 !!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC));
	hantro_reg_write(vpu, &hevc_parallel_merge,
			 pps->log2_parallel_merge_level_minus2 + 2);
	hantro_reg_write(vpu, &g2_pcm_filt_d,
			 !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED));
	hantro_reg_write(vpu, &g2_pcm_e,
			 !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED));
	if (sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED) {
		hantro_reg_write(vpu, &g2_max_pcm_size,
				 sps->log2_diff_max_min_pcm_luma_coding_block_size +
				 sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
		hantro_reg_write(vpu, &g2_min_pcm_size,
				 sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
		hantro_reg_write(vpu, &g2_bit_depth_pcm_y,
				 sps->pcm_sample_bit_depth_luma_minus1 + 1);
		hantro_reg_write(vpu, &g2_bit_depth_pcm_c,
				 sps->pcm_sample_bit_depth_chroma_minus1 + 1);
	} else {
		hantro_reg_write(vpu, &g2_max_pcm_size, 0);
		hantro_reg_write(vpu, &g2_min_pcm_size, 0);
		hantro_reg_write(vpu, &g2_bit_depth_pcm_y, 0);
		hantro_reg_write(vpu, &g2_bit_depth_pcm_c, 0);
	}

	hantro_reg_write(vpu, &g2_start_code_e, 1);
	hantro_reg_write(vpu, &g2_init_qp, pps->init_qp_minus26 + 26);
	hantro_reg_write(vpu, &g2_weight_pred_e,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED));
	hantro_reg_write(vpu, &g2_cabac_init_present,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
	hantro_reg_write(vpu, &g2_const_intra_e,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED));
	hantro_reg_write(vpu, &g2_transform_skip,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED));
	hantro_reg_write(vpu, &g2_out_filtering_dis,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER));
	hantro_reg_write(vpu, &g2_filt_ctrl_pres,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT));
	hantro_reg_write(vpu, &g2_dependent_slice,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED));
	hantro_reg_write(vpu, &g2_filter_override,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED));
	hantro_reg_write(vpu, &g2_refidx0_active,
			 pps->num_ref_idx_l0_default_active_minus1 + 1);
	hantro_reg_write(vpu, &g2_refidx1_active,
			 pps->num_ref_idx_l1_default_active_minus1 + 1);
	hantro_reg_write(vpu, &g2_apf_threshold, 8);
}

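/*
 * Build the initial reference picture lists: list 0 is short-term before,
 * short-term after, then long-term references; list 1 uses the opposite
 * short-term order. Remaining register slots are filled by wrapping
 * around the valid entries.
 */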
static void set_ref_pic_list(struct hantro_ctx *ctx)
{
	const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
	struct hantro_dev *vpu = ctx->dev;
	const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
	u32 list0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
	u32 list1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
	static const struct hantro_reg ref_pic_regs0[] = {
		hevc_rlist_f0,
		hevc_rlist_f1,
		hevc_rlist_f2,
		hevc_rlist_f3,
		hevc_rlist_f4,
		hevc_rlist_f5,
		hevc_rlist_f6,
		hevc_rlist_f7,
		hevc_rlist_f8,
		hevc_rlist_f9,
		hevc_rlist_f10,
		hevc_rlist_f11,
		hevc_rlist_f12,
		hevc_rlist_f13,
		hevc_rlist_f14,
		hevc_rlist_f15,
	};
	static const struct hantro_reg ref_pic_regs1[] = {
		hevc_rlist_b0,
		hevc_rlist_b1,
		hevc_rlist_b2,
		hevc_rlist_b3,
		hevc_rlist_b4,
		hevc_rlist_b5,
		hevc_rlist_b6,
		hevc_rlist_b7,
		hevc_rlist_b8,
		hevc_rlist_b9,
		hevc_rlist_b10,
		hevc_rlist_b11,
		hevc_rlist_b12,
		hevc_rlist_b13,
		hevc_rlist_b14,
		hevc_rlist_b15,
	};
	unsigned int i, j;

	/* List 0 contains: short term before, short term after and long term */
	j = 0;
	for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list0); i++)
		list0[j++] = decode_params->poc_st_curr_before[i];
	for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list0); i++)
		list0[j++] = decode_params->poc_st_curr_after[i];
	for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list0); i++)
		list0[j++] = decode_params->poc_lt_curr[i];

	/* Fill the remaining entries by wrapping around the valid ones */
	i = 0;
	while (j < ARRAY_SIZE(list0))
		list0[j++] = list0[i++];

	/* List 1 contains: short term after, short term before and long term */
	j = 0;
	for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list1); i++)
		list1[j++] = decode_params->poc_st_curr_after[i];
	for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list1); i++)
		list1[j++] = decode_params->poc_st_curr_before[i];
	for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list1); i++)
		list1[j++] = decode_params->poc_lt_curr[i];

	/* Fill the remaining entries by wrapping around the valid ones */
	i = 0;
	while (j < ARRAY_SIZE(list1))
		list1[j++] = list1[i++];

	for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
		hantro_reg_write(vpu, &ref_pic_regs0[i], list0[i]);
		hantro_reg_write(vpu, &ref_pic_regs1[i], list1[i]);
	}
}

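/*
 * Program the per-reference POC differences, the reference lists and the
 * luma/chroma/motion-vector addresses of every DPB entry, followed by the
 * addresses used for the current (output) picture.
 */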
static int set_ref(struct hantro_ctx *ctx)
{
	const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
	const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
	const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
	const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
	dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
	struct hantro_dev *vpu = ctx->dev;
	struct vb2_v4l2_buffer *vb2_dst;
	struct hantro_decoded_buffer *dst;
	size_t cr_offset = hantro_g2_chroma_offset(ctx);
	size_t mv_offset = hantro_g2_motion_vectors_offset(ctx);
	u32 max_ref_frames;
	u16 dpb_longterm_e;
	static const struct hantro_reg cur_poc[] = {
		hevc_cur_poc_00,
		hevc_cur_poc_01,
		hevc_cur_poc_02,
		hevc_cur_poc_03,
		hevc_cur_poc_04,
		hevc_cur_poc_05,
		hevc_cur_poc_06,
		hevc_cur_poc_07,
		hevc_cur_poc_08,
		hevc_cur_poc_09,
		hevc_cur_poc_10,
		hevc_cur_poc_11,
		hevc_cur_poc_12,
		hevc_cur_poc_13,
		hevc_cur_poc_14,
		hevc_cur_poc_15,
	};
	unsigned int i;

	max_ref_frames = decode_params->num_poc_lt_curr +
			 decode_params->num_poc_st_curr_before +
			 decode_params->num_poc_st_curr_after;
	/*
	 * Set max_ref_frames to non-zero to avoid HW hang when decoding
	 * badly marked I-frames.
	 */
	max_ref_frames = max_ref_frames ? max_ref_frames : 1;
	hantro_reg_write(vpu, &g2_num_ref_frames, max_ref_frames);
	hantro_reg_write(vpu, &g2_filter_over_slices,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED));
	hantro_reg_write(vpu, &g2_filter_over_tiles,
			 !!(pps->flags & V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED));

	/*
	 * Write the POC difference between each reference picture and the
	 * current picture.
	 */
	for (i = 0; i < decode_params->num_active_dpb_entries && i < ARRAY_SIZE(cur_poc); i++) {
		char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt_val;

		hantro_reg_write(vpu, &cur_poc[i], poc_diff);
	}

	if (i < ARRAY_SIZE(cur_poc)) {
		/*
		 * After the references, fill one entry pointing to itself,
		 * i.e. difference is zero.
		 */
		hantro_reg_write(vpu, &cur_poc[i], 0);
		i++;
	}

	/* Fill the rest with the current picture */
	for (; i < ARRAY_SIZE(cur_poc); i++)
		hantro_reg_write(vpu, &cur_poc[i], decode_params->pic_order_cnt_val);

	set_ref_pic_list(ctx);

	/* We will only keep the reference pictures that are still used */
	hantro_hevc_ref_init(ctx);

	/* Set up addresses of DPB buffers */
	dpb_longterm_e = 0;
	for (i = 0; i < decode_params->num_active_dpb_entries &&
	     i < (V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1); i++) {
		luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt_val);
		if (!luma_addr)
			return -ENOMEM;

		chroma_addr = luma_addr + cr_offset;
		mv_addr = luma_addr + mv_offset;

		/* One bit per DPB slot, MSB first */
		if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
			dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);

		hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
		hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
		hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
	}

	vb2_dst = hantro_get_dst_buf(ctx);
	dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
	luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
	if (!luma_addr)
		return -ENOMEM;

	if (hantro_hevc_add_ref_buf(ctx, decode_params->pic_order_cnt_val, luma_addr))
		return -EINVAL;

	chroma_addr = luma_addr + cr_offset;
	mv_addr = luma_addr + mv_offset;

	/* The current picture also occupies the next reference slot */
	hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
	hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
	hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr);

	hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);
	hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr);
	hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr);

	/* Zero the unused DPB address slots */
	for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
		hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0);
		hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0);
		hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0);
	}

	hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);

	return 0;
}

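/*
 * Point the hardware at the compressed bitstream and at the auxiliary
 * buffers (tile sizes, tile filter, SAO and BSD control data).
 */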
static void set_buffers(struct hantro_ctx *ctx)
{
	struct vb2_v4l2_buffer *src_buf;
	struct hantro_dev *vpu = ctx->dev;
	dma_addr_t src_dma;
	u32 src_len, src_buf_len;

	src_buf = hantro_get_src_buf(ctx);

	/* Source (stream) buffer. */
	src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
	src_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
	src_buf_len = vb2_plane_size(&src_buf->vb2_buf, 0);

	hantro_write_addr(vpu, G2_STREAM_ADDR, src_dma);
	hantro_reg_write(vpu, &g2_stream_len, src_len);
	hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
	hantro_reg_write(vpu, &g2_strm_start_offset, 0);
	hantro_reg_write(vpu, &g2_write_mvs_e, 1);

	hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
	hantro_write_addr(vpu, G2_TILE_FILTER_ADDR, ctx->hevc_dec.tile_filter.dma);
	hantro_write_addr(vpu, G2_TILE_SAO_ADDR, ctx->hevc_dec.tile_sao.dma);
	hantro_write_addr(vpu, G2_TILE_BSD_ADDR, ctx->hevc_dec.tile_bsd.dma);
}

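/*
 * Copy the scaling matrices into the layout consumed by the G2 core:
 * the 16x16 and 32x32 DC coefficients first, padded to a 128-bit
 * boundary, then the 4x4/8x8/16x16/32x32 lists written column by column.
 */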
static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
{
	struct hantro_dev *vpu = ctx->dev;
	const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
	const struct v4l2_ctrl_hevc_scaling_matrix *sc = ctrls->scaling;
	const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
	u8 *p = ((u8 *)ctx->hevc_dec.scaling_lists.cpu);
	unsigned int scaling_list_enabled;
	unsigned int i, j, k;

	scaling_list_enabled = !!(sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED);
	hantro_reg_write(vpu, &g2_scaling_list_e, scaling_list_enabled);

	if (!scaling_list_enabled)
		return;

	for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_16x16); i++)
		*p++ = sc->scaling_list_dc_coef_16x16[i];

	for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_32x32); i++)
		*p++ = sc->scaling_list_dc_coef_32x32[i];

	/* 128-bit boundary */
	p += 8;

	/* write scaling lists column by column */

	for (i = 0; i < 6; i++)
		for (j = 0; j < 4; j++)
			for (k = 0; k < 4; k++)
				*p++ = sc->scaling_list_4x4[i][4 * k + j];

	for (i = 0; i < 6; i++)
		for (j = 0; j < 8; j++)
			for (k = 0; k < 8; k++)
				*p++ = sc->scaling_list_8x8[i][8 * k + j];

	for (i = 0; i < 6; i++)
		for (j = 0; j < 8; j++)
			for (k = 0; k < 8; k++)
				*p++ = sc->scaling_list_16x16[i][8 * k + j];

	for (i = 0; i < 2; i++)
		for (j = 0; j < 8; j++)
			for (k = 0; k < 8; k++)
				*p++ = sc->scaling_list_32x32[i][8 * k + j];

	hantro_write_addr(vpu, G2_HEVC_SCALING_LIST_ADDR, ctx->hevc_dec.scaling_lists.dma);
}

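/*
 * Per-frame entry point: program the decoder registers for the current
 * request and start the hardware.
 */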
int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
{
	struct hantro_dev *vpu = ctx->dev;
	int ret;

	hantro_g2_check_idle(vpu);

	/* Prepare HEVC decoder context. */
	ret = hantro_hevc_dec_prepare_run(ctx);
	if (ret)
		return ret;

	/* Configure hardware registers. */
	set_params(ctx);

	/* Set reference pictures. */
	ret = set_ref(ctx);
	if (ret)
		return ret;

	set_buffers(ctx);
	prepare_tile_info_buffer(ctx);

	prepare_scaling_list_buffer(ctx);

	hantro_end_prepare_run(ctx);

	hantro_reg_write(vpu, &g2_mode, HEVC_DEC_MODE);
	hantro_reg_write(vpu, &g2_clk_gate_e, 1);

	/* Don't disable output */
	hantro_reg_write(vpu, &g2_out_dis, 0);

	/* Don't compress buffers */
	hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);

	/* Bus width and max burst */
	hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
	hantro_reg_write(vpu, &g2_max_burst, 16);

	/* Swap */
	hantro_reg_write(vpu, &g2_strm_swap, 0xf);
	hantro_reg_write(vpu, &g2_dirmv_swap, 0xf);
	hantro_reg_write(vpu, &g2_compress_swap, 0xf);

	/* Start decoding! */
	vdpu_write(vpu, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);

	return 0;
}