1// SPDX-License-Identifier: MIT
2/*
3 * Copyright 2022 Advanced Micro Devices, Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: AMD
24 *
25 */
26
27#include <drm/drm_atomic_helper.h>
28#include <drm/drm_blend.h>
29#include <drm/drm_gem_atomic_helper.h>
30#include <drm/drm_plane_helper.h>
31#include <drm/drm_fourcc.h>
32
33#include "amdgpu.h"
34#include "dal_asic_id.h"
35#include "amdgpu_display.h"
36#include "amdgpu_dm_trace.h"
37#include "amdgpu_dm_plane.h"
38#include "gc/gc_11_0_0_offset.h"
39#include "gc/gc_11_0_0_sh_mask.h"
40
/*
 * TODO: these are currently initialized to RGB formats only.
 * For future use cases we should either initialize them dynamically based on
 * plane capabilities, or initialize this array to all formats, so the internal
 * DRM check will succeed, and let DC implement the proper check.
 */
47static const uint32_t rgb_formats[] = {
48 DRM_FORMAT_XRGB8888,
49 DRM_FORMAT_ARGB8888,
50 DRM_FORMAT_RGBA8888,
51 DRM_FORMAT_XRGB2101010,
52 DRM_FORMAT_XBGR2101010,
53 DRM_FORMAT_ARGB2101010,
54 DRM_FORMAT_ABGR2101010,
55 DRM_FORMAT_XRGB16161616,
56 DRM_FORMAT_XBGR16161616,
57 DRM_FORMAT_ARGB16161616,
58 DRM_FORMAT_ABGR16161616,
59 DRM_FORMAT_XBGR8888,
60 DRM_FORMAT_ABGR8888,
61 DRM_FORMAT_RGB565,
62};
63
64static const uint32_t overlay_formats[] = {
65 DRM_FORMAT_XRGB8888,
66 DRM_FORMAT_ARGB8888,
67 DRM_FORMAT_RGBA8888,
68 DRM_FORMAT_XBGR8888,
69 DRM_FORMAT_ABGR8888,
70 DRM_FORMAT_RGB565,
71 DRM_FORMAT_NV21,
72 DRM_FORMAT_NV12,
73 DRM_FORMAT_P010
74};
75
76static const uint32_t video_formats[] = {
77 DRM_FORMAT_NV21,
78 DRM_FORMAT_NV12,
79 DRM_FORMAT_P010
80};
81
82static const u32 cursor_formats[] = {
83 DRM_FORMAT_ARGB8888
84};
85
86enum dm_micro_swizzle {
87 MICRO_SWIZZLE_Z = 0,
88 MICRO_SWIZZLE_S = 1,
89 MICRO_SWIZZLE_D = 2,
90 MICRO_SWIZZLE_R = 3
91};
92
const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
{
	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
}
97
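/*
 * Derive DC blending parameters from the DRM plane state: whether the format
 * carries per-pixel alpha, whether that alpha is premultiplied
 * (DRM_MODE_BLEND_PREMULTI vs DRM_MODE_BLEND_COVERAGE), and the global
 * plane-wide alpha value scaled from 16 bits down to 8 bits. Only overlay
 * planes are considered; other plane types keep the opaque defaults.
 */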
98void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
99 bool *per_pixel_alpha, bool *pre_multiplied_alpha,
100 bool *global_alpha, int *global_alpha_value)
101{
102 *per_pixel_alpha = false;
103 *pre_multiplied_alpha = true;
104 *global_alpha = false;
105 *global_alpha_value = 0xff;
106
107 if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
108 return;
109
110 if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI ||
111 plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) {
112 static const uint32_t alpha_formats[] = {
113 DRM_FORMAT_ARGB8888,
114 DRM_FORMAT_RGBA8888,
115 DRM_FORMAT_ABGR8888,
116 DRM_FORMAT_ARGB2101010,
117 DRM_FORMAT_ABGR2101010,
118 DRM_FORMAT_ARGB16161616,
119 DRM_FORMAT_ABGR16161616,
120 DRM_FORMAT_ARGB16161616F,
121 };
122 uint32_t format = plane_state->fb->format->format;
123 unsigned int i;
124
125 for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
126 if (format == alpha_formats[i]) {
127 *per_pixel_alpha = true;
128 break;
129 }
130 }
131
132 if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)
133 *pre_multiplied_alpha = false;
134 }
135
136 if (plane_state->alpha < 0xffff) {
137 *global_alpha = true;
138 *global_alpha_value = plane_state->alpha >> 8;
139 }
140}
141
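/*
 * Append a modifier to a heap-allocated, growable array. The array doubles in
 * capacity when full; if reallocation fails, the old list is freed and *mods
 * is set to NULL so the caller can detect and report -ENOMEM.
 */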
static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
{
	if (!*mods)
		return;

	if (*cap - *size < 1) {
		uint64_t new_cap = *cap * 2;
		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);

		if (!new_mods) {
			kfree(*mods);
			*mods = NULL;
			return;
		}

		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
		kfree(*mods);
		*mods = new_mods;
		*cap = new_cap;
	}

	(*mods)[*size] = mod;
	*size += 1;
}
166
167static bool amdgpu_dm_plane_modifier_has_dcc(uint64_t modifier)
168{
169 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
170}
171
172static unsigned int amdgpu_dm_plane_modifier_gfx9_swizzle_mode(uint64_t modifier)
173{
174 if (modifier == DRM_FORMAT_MOD_LINEAR)
175 return 0;
176
177 return AMD_FMT_MOD_GET(TILE, modifier);
178}
179
180static void amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
181 uint64_t tiling_flags)
182{
183 /* Fill GFX8 params */
184 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
185 unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
186
187 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
188 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
189 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
190 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
191 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
192
193 /* XXX fix me for VI */
194 tiling_info->gfx8.num_banks = num_banks;
195 tiling_info->gfx8.array_mode =
196 DC_ARRAY_2D_TILED_THIN1;
197 tiling_info->gfx8.tile_split = tile_split;
198 tiling_info->gfx8.bank_width = bankw;
199 tiling_info->gfx8.bank_height = bankh;
200 tiling_info->gfx8.tile_aspect = mtaspect;
201 tiling_info->gfx8.tile_mode =
202 DC_ADDR_SURF_MICRO_TILING_DISPLAY;
203 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
204 == DC_ARRAY_1D_TILED_THIN1) {
205 tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
206 }
207
208 tiling_info->gfx8.pipe_config =
209 AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
210}
211
212static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
213 union dc_tiling_info *tiling_info)
214{
215 /* Fill GFX9 params */
216 tiling_info->gfx9.num_pipes =
217 adev->gfx.config.gb_addr_config_fields.num_pipes;
218 tiling_info->gfx9.num_banks =
219 adev->gfx.config.gb_addr_config_fields.num_banks;
220 tiling_info->gfx9.pipe_interleave =
221 adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
222 tiling_info->gfx9.num_shader_engines =
223 adev->gfx.config.gb_addr_config_fields.num_se;
224 tiling_info->gfx9.max_compressed_frags =
225 adev->gfx.config.gb_addr_config_fields.max_compress_frags;
226 tiling_info->gfx9.num_rb_per_se =
227 adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
228 tiling_info->gfx9.shaderEnable = 1;
229 if (amdgpu_ip_version(adev, ip: GC_HWIP, inst: 0) >= IP_VERSION(10, 3, 0))
230 tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
231}
232
233static void amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
234 union dc_tiling_info *tiling_info,
235 uint64_t modifier)
236{
237 unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
238 unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
239 unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
240 unsigned int pipes_log2;
241
242 pipes_log2 = min(5u, mod_pipe_xor_bits);
243
244 amdgpu_dm_plane_fill_gfx9_tiling_info_from_device(adev, tiling_info);
245
246 if (!IS_AMD_FMT_MOD(modifier))
247 return;
248
249 tiling_info->gfx9.num_pipes = 1u << pipes_log2;
250 tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
251
252 if (adev->family >= AMDGPU_FAMILY_NV) {
253 tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
254 } else {
255 tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
256
257 /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
258 }
259}
260
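/*
 * Ask DC whether the requested DCC configuration can actually be decompressed
 * by the display hardware for this format, size and swizzle. Surfaces with
 * DCC disabled trivially pass; video formats, incapable surfaces and
 * mismatched independent-64B-block settings are rejected.
 */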
261static int amdgpu_dm_plane_validate_dcc(struct amdgpu_device *adev,
262 const enum surface_pixel_format format,
263 const enum dc_rotation_angle rotation,
264 const union dc_tiling_info *tiling_info,
265 const struct dc_plane_dcc_param *dcc,
266 const struct dc_plane_address *address,
267 const struct plane_size *plane_size)
268{
269 struct dc *dc = adev->dm.dc;
270 struct dc_dcc_surface_param input;
271 struct dc_surface_dcc_cap output;
272
273 memset(&input, 0, sizeof(input));
274 memset(&output, 0, sizeof(output));
275
276 if (!dcc->enable)
277 return 0;
278
279 if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
280 !dc->cap_funcs.get_dcc_compression_cap)
281 return -EINVAL;
282
283 input.format = format;
284 input.surface_size.width = plane_size->surface_size.width;
285 input.surface_size.height = plane_size->surface_size.height;
286 input.swizzle_mode = tiling_info->gfx9.swizzle;
287
288 if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
289 input.scan = SCAN_DIRECTION_HORIZONTAL;
290 else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
291 input.scan = SCAN_DIRECTION_VERTICAL;
292
293 if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
294 return -EINVAL;
295
296 if (!output.capable)
297 return -EINVAL;
298
299 if (dcc->independent_64b_blks == 0 &&
300 output.grph.rgb.independent_64b_blks != 0)
301 return -EINVAL;
302
303 return 0;
304}
305
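/*
 * Translate the framebuffer's AMD format modifier into DC tiling and DCC
 * parameters. When the modifier carries DCC and it is not force-disabled,
 * the DCC metadata plane (fb plane 1) supplies the meta address and pitch,
 * and the independent 64B/128B block flags select the HUBP independent-block
 * mode. The result is then validated against the DC compression caps.
 */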
306static int amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
307 const struct amdgpu_framebuffer *afb,
308 const enum surface_pixel_format format,
309 const enum dc_rotation_angle rotation,
310 const struct plane_size *plane_size,
311 union dc_tiling_info *tiling_info,
312 struct dc_plane_dcc_param *dcc,
313 struct dc_plane_address *address,
314 const bool force_disable_dcc)
315{
316 const uint64_t modifier = afb->base.modifier;
317 int ret = 0;
318
319 amdgpu_dm_plane_fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
320 tiling_info->gfx9.swizzle = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier);
321
322 if (amdgpu_dm_plane_modifier_has_dcc(modifier) && !force_disable_dcc) {
323 uint64_t dcc_address = afb->address + afb->base.offsets[1];
324 bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
325 bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
326
327 dcc->enable = 1;
328 dcc->meta_pitch = afb->base.pitches[1];
329 dcc->independent_64b_blks = independent_64b_blks;
330 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
331 if (independent_64b_blks && independent_128b_blks)
332 dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
333 else if (independent_128b_blks)
334 dcc->dcc_ind_blk = hubp_ind_block_128b;
335 else if (independent_64b_blks && !independent_128b_blks)
336 dcc->dcc_ind_blk = hubp_ind_block_64b;
337 else
338 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
339 } else {
340 if (independent_64b_blks)
341 dcc->dcc_ind_blk = hubp_ind_block_64b;
342 else
343 dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
344 }
345
346 address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
347 address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
348 }
349
350 ret = amdgpu_dm_plane_validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
351 if (ret)
352 drm_dbg_kms(adev_to_drm(adev), "amdgpu_dm_plane_validate_dcc: returned error: %d\n", ret);
353
354 return ret;
355}
356
357static void amdgpu_dm_plane_add_gfx10_1_modifiers(const struct amdgpu_device *adev,
358 uint64_t **mods,
359 uint64_t *size,
360 uint64_t *capacity)
361{
362 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
363
364 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
365 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
366 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
367 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
368 AMD_FMT_MOD_SET(DCC, 1) |
369 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
370 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
371 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
372
373 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
374 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
375 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
376 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
377 AMD_FMT_MOD_SET(DCC, 1) |
378 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
379 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
380 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
381 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
382
383 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
384 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
385 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
386 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
387
388 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
389 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
390 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
391 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
392
393
394 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
395 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
396 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
397 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
398
399 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
400 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
401 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
402}
403
404static void amdgpu_dm_plane_add_gfx9_modifiers(const struct amdgpu_device *adev,
405 uint64_t **mods,
406 uint64_t *size,
407 uint64_t *capacity)
408{
409 int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
410 int pipe_xor_bits = min(8, pipes +
411 ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
412 int bank_xor_bits = min(8 - pipe_xor_bits,
413 ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
414 int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
415 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
416
417
418 if (adev->family == AMDGPU_FAMILY_RV) {
419 /* Raven2 and later */
420 bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
421
422 /*
423 * No _D DCC swizzles yet because we only allow 32bpp, which
424 * doesn't support _D on DCN
425 */
426
427 if (has_constant_encode) {
428 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
429 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
430 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
431 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
432 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
433 AMD_FMT_MOD_SET(DCC, 1) |
434 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
435 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
436 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
437 }
438
439 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
440 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
441 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
442 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
443 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
444 AMD_FMT_MOD_SET(DCC, 1) |
445 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
446 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
447 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
448
449 if (has_constant_encode) {
450 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
451 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
452 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
453 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
454 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
455 AMD_FMT_MOD_SET(DCC, 1) |
456 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
457 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
458 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
459 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
460 AMD_FMT_MOD_SET(RB, rb) |
461 AMD_FMT_MOD_SET(PIPE, pipes));
462 }
463
464 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
465 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
466 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
467 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
468 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
469 AMD_FMT_MOD_SET(DCC, 1) |
470 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
471 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
472 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
473 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
474 AMD_FMT_MOD_SET(RB, rb) |
475 AMD_FMT_MOD_SET(PIPE, pipes));
476 }
477
478 /*
479 * Only supported for 64bpp on Raven, will be filtered on format in
480 * amdgpu_dm_plane_format_mod_supported.
481 */
482 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
483 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
484 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
485 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
486 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
487
488 if (adev->family == AMDGPU_FAMILY_RV) {
489 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
490 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
491 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
492 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
493 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
494 }
495
496 /*
497 * Only supported for 64bpp on Raven, will be filtered on format in
498 * amdgpu_dm_plane_format_mod_supported.
499 */
500 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
501 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
502 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
503
504 if (adev->family == AMDGPU_FAMILY_RV) {
505 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
506 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
507 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
508 }
509}
510
511static void amdgpu_dm_plane_add_gfx10_3_modifiers(const struct amdgpu_device *adev,
512 uint64_t **mods,
513 uint64_t *size,
514 uint64_t *capacity)
515{
516 int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
517 int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
518
519 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
520 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
521 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
522 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
523 AMD_FMT_MOD_SET(PACKERS, pkrs) |
524 AMD_FMT_MOD_SET(DCC, 1) |
525 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
526 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
527 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
528 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
529
530 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
531 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
532 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
533 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
534 AMD_FMT_MOD_SET(PACKERS, pkrs) |
535 AMD_FMT_MOD_SET(DCC, 1) |
536 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
537 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
538 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
539
540 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
541 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
542 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
543 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
544 AMD_FMT_MOD_SET(PACKERS, pkrs) |
545 AMD_FMT_MOD_SET(DCC, 1) |
546 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
547 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
548 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
549 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
550 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
551
552 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
553 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
554 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
555 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
556 AMD_FMT_MOD_SET(PACKERS, pkrs) |
557 AMD_FMT_MOD_SET(DCC, 1) |
558 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
559 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
560 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
561 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
562
563 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
564 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
565 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
566 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
567 AMD_FMT_MOD_SET(PACKERS, pkrs));
568
569 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
570 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
571 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
572 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
573 AMD_FMT_MOD_SET(PACKERS, pkrs));
574
575 /* Only supported for 64bpp, will be filtered in amdgpu_dm_plane_format_mod_supported */
576 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
577 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
578 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
579
580 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
581 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
582 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
583}
584
585static void amdgpu_dm_plane_add_gfx11_modifiers(struct amdgpu_device *adev,
586 uint64_t **mods, uint64_t *size, uint64_t *capacity)
587{
588 int num_pipes = 0;
589 int pipe_xor_bits = 0;
590 int num_pkrs = 0;
591 int pkrs = 0;
592 u32 gb_addr_config;
593 u8 i = 0;
594 unsigned int swizzle_r_x;
595 uint64_t modifier_r_x;
596 uint64_t modifier_dcc_best;
597 uint64_t modifier_dcc_4k;
598
	/* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
	 * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes}
	 */
602 gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
603 ASSERT(gb_addr_config != 0);
604
605 num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
606 pkrs = ilog2(num_pkrs);
607 num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
608 pipe_xor_bits = ilog2(num_pipes);
609
610 for (i = 0; i < 2; i++) {
611 /* Insert the best one first. */
612 /* R_X swizzle modes are the best for rendering and DCC requires them. */
613 if (num_pipes > 16)
614 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
615 else
616 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
617
618 modifier_r_x = AMD_FMT_MOD |
619 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
620 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
621 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
622 AMD_FMT_MOD_SET(PACKERS, pkrs);
623
624 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
625 modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
626 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
627 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
628 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
629
630 /* DCC settings for 4K and greater resolutions. (required by display hw) */
631 modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
632 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
633 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
634 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
635
636 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, mod: modifier_dcc_best);
637 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, mod: modifier_dcc_4k);
638
639 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, mod: modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
640 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, mod: modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
641
642 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, mod: modifier_r_x);
643 }
644
645 amdgpu_dm_plane_add_modifier(mods, size, cap: capacity, AMD_FMT_MOD |
646 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
647 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
648}
649
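/*
 * Build the format-modifier list advertised for a plane. Families without
 * modifier support (pre-GFX9) leave *mods NULL; cursor planes only get
 * LINEAR. The list is ordered roughly by preference and is always terminated
 * with DRM_FORMAT_MOD_INVALID. Returns -ENOMEM if any allocation failed.
 */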
650static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
651{
652 uint64_t size = 0, capacity = 128;
653 *mods = NULL;
654
655 /* We have not hooked up any pre-GFX9 modifiers. */
656 if (adev->family < AMDGPU_FAMILY_AI)
657 return 0;
658
659 *mods = kmalloc(size: capacity * sizeof(uint64_t), GFP_KERNEL);
660
661 if (plane_type == DRM_PLANE_TYPE_CURSOR) {
662 amdgpu_dm_plane_add_modifier(mods, size: &size, cap: &capacity, DRM_FORMAT_MOD_LINEAR);
663 amdgpu_dm_plane_add_modifier(mods, size: &size, cap: &capacity, DRM_FORMAT_MOD_INVALID);
664 return *mods ? 0 : -ENOMEM;
665 }
666
667 switch (adev->family) {
668 case AMDGPU_FAMILY_AI:
669 case AMDGPU_FAMILY_RV:
670 amdgpu_dm_plane_add_gfx9_modifiers(adev, mods, size: &size, capacity: &capacity);
671 break;
672 case AMDGPU_FAMILY_NV:
673 case AMDGPU_FAMILY_VGH:
674 case AMDGPU_FAMILY_YC:
675 case AMDGPU_FAMILY_GC_10_3_6:
676 case AMDGPU_FAMILY_GC_10_3_7:
677 if (amdgpu_ip_version(adev, ip: GC_HWIP, inst: 0) >= IP_VERSION(10, 3, 0))
678 amdgpu_dm_plane_add_gfx10_3_modifiers(adev, mods, size: &size, capacity: &capacity);
679 else
680 amdgpu_dm_plane_add_gfx10_1_modifiers(adev, mods, size: &size, capacity: &capacity);
681 break;
682 case AMDGPU_FAMILY_GC_11_0_0:
683 case AMDGPU_FAMILY_GC_11_0_1:
684 case AMDGPU_FAMILY_GC_11_5_0:
685 amdgpu_dm_plane_add_gfx11_modifiers(adev, mods, size: &size, capacity: &capacity);
686 break;
687 }
688
689 amdgpu_dm_plane_add_modifier(mods, size: &size, cap: &capacity, DRM_FORMAT_MOD_LINEAR);
690
691 /* INVALID marks the end of the list. */
692 amdgpu_dm_plane_add_modifier(mods, size: &size, cap: &capacity, DRM_FORMAT_MOD_INVALID);
693
694 if (!*mods)
695 return -ENOMEM;
696
697 return 0;
698}
699
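/*
 * Fill @formats with the DRM fourcc codes supported by this plane type,
 * bounded by @max_formats, and return the count. Primaries and universal DCN
 * planes use the RGB table plus whatever NV12/P010/FP16 support the DC plane
 * caps report; overlay and cursor planes use their dedicated tables.
 */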
700static int amdgpu_dm_plane_get_plane_formats(const struct drm_plane *plane,
701 const struct dc_plane_cap *plane_cap,
702 uint32_t *formats, int max_formats)
703{
704 int i, num_formats = 0;
705
706 /*
707 * TODO: Query support for each group of formats directly from
708 * DC plane caps. This will require adding more formats to the
709 * caps list.
710 */
711
712 if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
713 (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
714 for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
715 if (num_formats >= max_formats)
716 break;
717
718 formats[num_formats++] = rgb_formats[i];
719 }
720
721 if (plane_cap && plane_cap->pixel_format_support.nv12)
722 formats[num_formats++] = DRM_FORMAT_NV12;
723 if (plane_cap && plane_cap->pixel_format_support.p010)
724 formats[num_formats++] = DRM_FORMAT_P010;
725 if (plane_cap && plane_cap->pixel_format_support.fp16) {
726 formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
727 formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
728 formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
729 formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
730 }
731 } else {
732 switch (plane->type) {
733 case DRM_PLANE_TYPE_OVERLAY:
734 for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
735 if (num_formats >= max_formats)
736 break;
737
738 formats[num_formats++] = overlay_formats[i];
739 }
740 break;
741
742 case DRM_PLANE_TYPE_CURSOR:
743 for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
744 if (num_formats >= max_formats)
745 break;
746
747 formats[num_formats++] = cursor_formats[i];
748 }
749 break;
750
751 default:
752 break;
753 }
754 }
755
756 return num_formats;
757}
758
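/*
 * Fill DC plane size, tiling, DCC and address information from an amdgpu
 * framebuffer. RGB surfaces use a single graphics address; video surfaces
 * get separate luma/chroma addresses and sizes. GFX9 and newer derive tiling
 * from the format modifier, older ASICs from the legacy GFX8 tiling flags.
 */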
759int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev,
760 const struct amdgpu_framebuffer *afb,
761 const enum surface_pixel_format format,
762 const enum dc_rotation_angle rotation,
763 const uint64_t tiling_flags,
764 union dc_tiling_info *tiling_info,
765 struct plane_size *plane_size,
766 struct dc_plane_dcc_param *dcc,
767 struct dc_plane_address *address,
768 bool tmz_surface,
769 bool force_disable_dcc)
770{
771 const struct drm_framebuffer *fb = &afb->base;
772 int ret;
773
774 memset(tiling_info, 0, sizeof(*tiling_info));
775 memset(plane_size, 0, sizeof(*plane_size));
776 memset(dcc, 0, sizeof(*dcc));
777 memset(address, 0, sizeof(*address));
778
779 address->tmz_surface = tmz_surface;
780
781 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
782 uint64_t addr = afb->address + fb->offsets[0];
783
784 plane_size->surface_size.x = 0;
785 plane_size->surface_size.y = 0;
786 plane_size->surface_size.width = fb->width;
787 plane_size->surface_size.height = fb->height;
788 plane_size->surface_pitch =
789 fb->pitches[0] / fb->format->cpp[0];
790
791 address->type = PLN_ADDR_TYPE_GRAPHICS;
792 address->grph.addr.low_part = lower_32_bits(addr);
793 address->grph.addr.high_part = upper_32_bits(addr);
794 } else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
795 uint64_t luma_addr = afb->address + fb->offsets[0];
796 uint64_t chroma_addr = afb->address + fb->offsets[1];
797
798 plane_size->surface_size.x = 0;
799 plane_size->surface_size.y = 0;
800 plane_size->surface_size.width = fb->width;
801 plane_size->surface_size.height = fb->height;
802 plane_size->surface_pitch =
803 fb->pitches[0] / fb->format->cpp[0];
804
805 plane_size->chroma_size.x = 0;
806 plane_size->chroma_size.y = 0;
807 /* TODO: set these based on surface format */
808 plane_size->chroma_size.width = fb->width / 2;
809 plane_size->chroma_size.height = fb->height / 2;
810
811 plane_size->chroma_pitch =
812 fb->pitches[1] / fb->format->cpp[1];
813
814 address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
815 address->video_progressive.luma_addr.low_part =
816 lower_32_bits(luma_addr);
817 address->video_progressive.luma_addr.high_part =
818 upper_32_bits(luma_addr);
819 address->video_progressive.chroma_addr.low_part =
820 lower_32_bits(chroma_addr);
821 address->video_progressive.chroma_addr.high_part =
822 upper_32_bits(chroma_addr);
823 }
824
825 if (adev->family >= AMDGPU_FAMILY_AI) {
826 ret = amdgpu_dm_plane_fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
827 rotation, plane_size,
828 tiling_info, dcc,
829 address,
830 force_disable_dcc);
831 if (ret)
832 return ret;
833 } else {
834 amdgpu_dm_plane_fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
835 }
836
837 return 0;
838}
839
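/*
 * .prepare_fb hook: pin the framebuffer BO in a scanout-capable domain (VRAM
 * for cursors), make sure it has a GART binding, and stash the GPU address in
 * the amdgpu framebuffer. For freshly created plane states the DC buffer
 * attributes are filled in here as well, since the address is not known at
 * atomic check time.
 */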
840static int amdgpu_dm_plane_helper_prepare_fb(struct drm_plane *plane,
841 struct drm_plane_state *new_state)
842{
843 struct amdgpu_framebuffer *afb;
844 struct drm_gem_object *obj;
845 struct amdgpu_device *adev;
846 struct amdgpu_bo *rbo;
847 struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
848 uint32_t domain;
849 int r;
850
851 if (!new_state->fb) {
852 DRM_DEBUG_KMS("No FB bound\n");
853 return 0;
854 }
855
856 afb = to_amdgpu_framebuffer(new_state->fb);
857 obj = new_state->fb->obj[0];
858 rbo = gem_to_amdgpu_bo(obj);
859 adev = amdgpu_ttm_adev(bdev: rbo->tbo.bdev);
860
861 r = amdgpu_bo_reserve(bo: rbo, no_intr: true);
862 if (r) {
863 dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
864 return r;
865 }
866
867 r = dma_resv_reserve_fences(obj: rbo->tbo.base.resv, num_fences: 1);
868 if (r) {
869 dev_err(adev->dev, "reserving fence slot failed (%d)\n", r);
870 goto error_unlock;
871 }
872
873 if (plane->type != DRM_PLANE_TYPE_CURSOR)
874 domain = amdgpu_display_supported_domains(adev, bo_flags: rbo->flags);
875 else
876 domain = AMDGPU_GEM_DOMAIN_VRAM;
877
878 r = amdgpu_bo_pin(bo: rbo, domain);
879 if (unlikely(r != 0)) {
880 if (r != -ERESTARTSYS)
881 DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
882 goto error_unlock;
883 }
884
885 r = amdgpu_ttm_alloc_gart(bo: &rbo->tbo);
886 if (unlikely(r != 0)) {
887 DRM_ERROR("%p bind failed\n", rbo);
888 goto error_unpin;
889 }
890
891 r = drm_gem_plane_helper_prepare_fb(plane, state: new_state);
892 if (unlikely(r != 0))
893 goto error_unpin;
894
895 amdgpu_bo_unreserve(bo: rbo);
896
897 afb->address = amdgpu_bo_gpu_offset(bo: rbo);
898
899 amdgpu_bo_ref(bo: rbo);
900
	/*
	 * We don't do surface updates on planes that have been newly created,
	 * but we also don't have the afb->address during atomic check.
	 *
	 * Fill in buffer attributes depending on the address here, but only on
	 * newly created planes since they're not being used by DC yet and this
	 * won't modify global state.
	 */
909 dm_plane_state_old = to_dm_plane_state(plane->state);
910 dm_plane_state_new = to_dm_plane_state(new_state);
911
912 if (dm_plane_state_new->dc_state &&
913 dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
914 struct dc_plane_state *plane_state =
915 dm_plane_state_new->dc_state;
916 bool force_disable_dcc = !plane_state->dcc.enable;
917
918 amdgpu_dm_plane_fill_plane_buffer_attributes(
919 adev, afb, format: plane_state->format, rotation: plane_state->rotation,
920 tiling_flags: afb->tiling_flags,
921 tiling_info: &plane_state->tiling_info, plane_size: &plane_state->plane_size,
922 dcc: &plane_state->dcc, address: &plane_state->address,
923 tmz_surface: afb->tmz_surface, force_disable_dcc);
924 }
925
926 return 0;
927
928error_unpin:
929 amdgpu_bo_unpin(bo: rbo);
930
931error_unlock:
932 amdgpu_bo_unreserve(bo: rbo);
933 return r;
934}
935
936static void amdgpu_dm_plane_helper_cleanup_fb(struct drm_plane *plane,
937 struct drm_plane_state *old_state)
938{
939 struct amdgpu_bo *rbo;
940 int r;
941
942 if (!old_state->fb)
943 return;
944
945 rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
946 r = amdgpu_bo_reserve(bo: rbo, no_intr: false);
947 if (unlikely(r)) {
948 DRM_ERROR("failed to reserve rbo before unpin\n");
949 return;
950 }
951
952 amdgpu_bo_unpin(bo: rbo);
953 amdgpu_bo_unreserve(bo: rbo);
954 amdgpu_bo_unref(bo: &rbo);
955}
956
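/*
 * Look up the per-format scaling limits from the DC plane caps, in units of
 * 1/1000 (1000 == 1.0). A cap of 1 means "no scaling" and is normalized to
 * 1000 here.
 */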
957static void amdgpu_dm_plane_get_min_max_dc_plane_scaling(struct drm_device *dev,
958 struct drm_framebuffer *fb,
959 int *min_downscale, int *max_upscale)
960{
961 struct amdgpu_device *adev = drm_to_adev(ddev: dev);
962 struct dc *dc = adev->dm.dc;
963 /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
964 struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
965
966 switch (fb->format->format) {
967 case DRM_FORMAT_P010:
968 case DRM_FORMAT_NV12:
969 case DRM_FORMAT_NV21:
970 *max_upscale = plane_cap->max_upscale_factor.nv12;
971 *min_downscale = plane_cap->max_downscale_factor.nv12;
972 break;
973
974 case DRM_FORMAT_XRGB16161616F:
975 case DRM_FORMAT_ARGB16161616F:
976 case DRM_FORMAT_XBGR16161616F:
977 case DRM_FORMAT_ABGR16161616F:
978 *max_upscale = plane_cap->max_upscale_factor.fp16;
979 *min_downscale = plane_cap->max_downscale_factor.fp16;
980 break;
981
982 default:
983 *max_upscale = plane_cap->max_upscale_factor.argb8888;
984 *min_downscale = plane_cap->max_downscale_factor.argb8888;
985 break;
986 }
987
	/*
	 * A factor of 1 in the plane_cap means scaling is not allowed, i.e. use
	 * a scaling factor of 1.0, which is 1000 in these units.
	 */
992 if (*max_upscale == 1)
993 *max_upscale = 1000;
994
995 if (*min_downscale == 1)
996 *min_downscale = 1000;
997}
998
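/*
 * Validate the plane state against the CRTC: the visible viewport must not be
 * degenerate, and the scaling factors must fit within the DC caps. DC
 * expresses scaling in 1/1000 units while DRM uses 16.16 fixed point with
 * src/dst semantics, so for example a max upscale cap of 16000 (16x) becomes
 * min_scale = (1000 << 16) / 16000 = 4096 (1/16 in 16.16), and a min
 * downscale cap of 250 (1/4) becomes max_scale = (1000 << 16) / 250 = 262144
 * (4.0 in 16.16).
 */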
999int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state,
1000 struct drm_crtc_state *new_crtc_state)
1001{
1002 struct drm_framebuffer *fb = state->fb;
1003 int min_downscale, max_upscale;
1004 int min_scale = 0;
1005 int max_scale = INT_MAX;
1006
1007 /* Plane enabled? Validate viewport and get scaling factors from plane caps. */
1008 if (fb && state->crtc) {
1009 /* Validate viewport to cover the case when only the position changes */
1010 if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
1011 int viewport_width = state->crtc_w;
1012 int viewport_height = state->crtc_h;
1013
1014 if (state->crtc_x < 0)
1015 viewport_width += state->crtc_x;
1016 else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
1017 viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
1018
1019 if (state->crtc_y < 0)
1020 viewport_height += state->crtc_y;
1021 else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
1022 viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
1023
1024 if (viewport_width < 0 || viewport_height < 0) {
1025 DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
1026 return -EINVAL;
1027 } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
1028 DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
1029 return -EINVAL;
1030 } else if (viewport_height < MIN_VIEWPORT_SIZE) {
1031 DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
1032 return -EINVAL;
1033 }
1034
1035 }
1036
1037 /* Get min/max allowed scaling factors from plane caps. */
1038 amdgpu_dm_plane_get_min_max_dc_plane_scaling(dev: state->crtc->dev, fb,
1039 min_downscale: &min_downscale, max_upscale: &max_upscale);
1040 /*
1041 * Convert to drm convention: 16.16 fixed point, instead of dc's
1042 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
1043 * dst/src, so min_scale = 1.0 / max_upscale, etc.
1044 */
1045 min_scale = (1000 << 16) / max_upscale;
1046 max_scale = (1000 << 16) / min_downscale;
1047 }
1048
1049 return drm_atomic_helper_check_plane_state(
1050 plane_state: state, crtc_state: new_crtc_state, min_scale, max_scale, can_position: true, can_update_disabled: true);
1051}
1052
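/*
 * Convert the DRM plane state (16.16 fixed-point source rect, integer CRTC
 * destination rect) into a dc_scaling_info, rejecting zero-sized rects and
 * scale factors outside the per-format DC caps. The NV12 src_x/src_y
 * restriction for DCN 1.x is handled here as well.
 */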
1053int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev,
1054 const struct drm_plane_state *state,
1055 struct dc_scaling_info *scaling_info)
1056{
1057 int scale_w, scale_h, min_downscale, max_upscale;
1058
1059 memset(scaling_info, 0, sizeof(*scaling_info));
1060
1061 /* Source is fixed 16.16 but we ignore mantissa for now... */
1062 scaling_info->src_rect.x = state->src_x >> 16;
1063 scaling_info->src_rect.y = state->src_y >> 16;
1064
	/*
	 * For reasons we don't (yet) fully understand, a non-zero
	 * src_y coordinate into an NV12 buffer can cause a
	 * system hang on DCN1x.
	 * To avoid hangs (and maybe be overly cautious),
	 * let's reject both non-zero src_x and src_y.
	 *
	 * We currently know of only one use case that reproduces a
	 * scenario with non-zero src_x and src_y for NV12, which
	 * is to gesture the YouTube Android app into full screen
	 * on ChromeOS.
	 */
1077 if (((amdgpu_ip_version(adev, ip: DCE_HWIP, inst: 0) == IP_VERSION(1, 0, 0)) ||
1078 (amdgpu_ip_version(adev, ip: DCE_HWIP, inst: 0) == IP_VERSION(1, 0, 1))) &&
1079 (state->fb && state->fb->format->format == DRM_FORMAT_NV12 &&
1080 (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0)))
1081 return -EINVAL;
1082
1083 scaling_info->src_rect.width = state->src_w >> 16;
1084 if (scaling_info->src_rect.width == 0)
1085 return -EINVAL;
1086
1087 scaling_info->src_rect.height = state->src_h >> 16;
1088 if (scaling_info->src_rect.height == 0)
1089 return -EINVAL;
1090
1091 scaling_info->dst_rect.x = state->crtc_x;
1092 scaling_info->dst_rect.y = state->crtc_y;
1093
1094 if (state->crtc_w == 0)
1095 return -EINVAL;
1096
1097 scaling_info->dst_rect.width = state->crtc_w;
1098
1099 if (state->crtc_h == 0)
1100 return -EINVAL;
1101
1102 scaling_info->dst_rect.height = state->crtc_h;
1103
1104 /* DRM doesn't specify clipping on destination output. */
1105 scaling_info->clip_rect = scaling_info->dst_rect;
1106
1107 /* Validate scaling per-format with DC plane caps */
1108 if (state->plane && state->plane->dev && state->fb) {
1109 amdgpu_dm_plane_get_min_max_dc_plane_scaling(dev: state->plane->dev, fb: state->fb,
1110 min_downscale: &min_downscale, max_upscale: &max_upscale);
1111 } else {
1112 min_downscale = 250;
1113 max_upscale = 16000;
1114 }
1115
1116 scale_w = scaling_info->dst_rect.width * 1000 /
1117 scaling_info->src_rect.width;
1118
1119 if (scale_w < min_downscale || scale_w > max_upscale)
1120 return -EINVAL;
1121
1122 scale_h = scaling_info->dst_rect.height * 1000 /
1123 scaling_info->src_rect.height;
1124
1125 if (scale_h < min_downscale || scale_h > max_upscale)
1126 return -EINVAL;
1127
1128 /*
1129 * The "scaling_quality" can be ignored for now, quality = 0 has DC
1130 * assume reasonable defaults based on the format.
1131 */
1132
1133 return 0;
1134}
1135
1136static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
1137 struct drm_atomic_state *state)
1138{
1139 struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
1140 plane);
1141 struct amdgpu_device *adev = drm_to_adev(ddev: plane->dev);
1142 struct dc *dc = adev->dm.dc;
1143 struct dm_plane_state *dm_plane_state;
1144 struct dc_scaling_info scaling_info;
1145 struct drm_crtc_state *new_crtc_state;
1146 int ret;
1147
1148 trace_amdgpu_dm_plane_atomic_check(state: new_plane_state);
1149
1150 dm_plane_state = to_dm_plane_state(new_plane_state);
1151
1152 if (!dm_plane_state->dc_state)
1153 return 0;
1154
1155 new_crtc_state =
1156 drm_atomic_get_new_crtc_state(state,
1157 crtc: new_plane_state->crtc);
1158 if (!new_crtc_state)
1159 return -EINVAL;
1160
1161 ret = amdgpu_dm_plane_helper_check_state(state: new_plane_state, new_crtc_state);
1162 if (ret)
1163 return ret;
1164
1165 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, state: new_plane_state, scaling_info: &scaling_info);
1166 if (ret)
1167 return ret;
1168
1169 if (dc_validate_plane(dc, plane_state: dm_plane_state->dc_state) == DC_OK)
1170 return 0;
1171
1172 return -EINVAL;
1173}
1174
1175static int amdgpu_dm_plane_atomic_async_check(struct drm_plane *plane,
1176 struct drm_atomic_state *state)
1177{
1178 /* Only support async updates on cursor planes. */
1179 if (plane->type != DRM_PLANE_TYPE_CURSOR)
1180 return -EINVAL;
1181
1182 return 0;
1183}
1184
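/*
 * Compute the DC cursor position from the cursor plane state. Negative CRTC
 * coordinates are clamped to zero and folded into the hotspot so the cursor
 * can slide off the top/left edge of the screen.
 */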
1185static int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
1186 struct dc_cursor_position *position)
1187{
1188 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1189 int x, y;
1190 int xorigin = 0, yorigin = 0;
1191
1192 if (!crtc || !plane->state->fb)
1193 return 0;
1194
1195 if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
1196 (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
1197 DRM_ERROR("%s: bad cursor width or height %d x %d\n",
1198 __func__,
1199 plane->state->crtc_w,
1200 plane->state->crtc_h);
1201 return -EINVAL;
1202 }
1203
1204 x = plane->state->crtc_x;
1205 y = plane->state->crtc_y;
1206
1207 if (x <= -amdgpu_crtc->max_cursor_width ||
1208 y <= -amdgpu_crtc->max_cursor_height)
1209 return 0;
1210
1211 if (x < 0) {
1212 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
1213 x = 0;
1214 }
1215 if (y < 0) {
1216 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
1217 y = 0;
1218 }
1219 position->enable = true;
1220 position->translate_by_source = true;
1221 position->x = x;
1222 position->y = y;
1223 position->x_hotspot = xorigin;
1224 position->y_hotspot = yorigin;
1225
1226 return 0;
1227}
1228
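/*
 * Program the DC cursor for a cursor plane update: size and address come from
 * the new plane state, position from amdgpu_dm_plane_get_cursor_position().
 * If the cursor ends up fully off screen (position.enable == false) it is
 * simply turned off on the stream.
 */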
1229void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
1230 struct drm_plane_state *old_plane_state)
1231{
1232 struct amdgpu_device *adev = drm_to_adev(ddev: plane->dev);
1233 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
1234 struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
1235 struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
1236 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1237 uint64_t address = afb ? afb->address : 0;
1238 struct dc_cursor_position position = {0};
1239 struct dc_cursor_attributes attributes;
1240 int ret;
1241
1242 if (!plane->state->fb && !old_plane_state->fb)
1243 return;
1244
1245 drm_dbg_atomic(plane->dev, "crtc_id=%d with size %d to %d\n",
1246 amdgpu_crtc->crtc_id, plane->state->crtc_w,
1247 plane->state->crtc_h);
1248
1249 ret = amdgpu_dm_plane_get_cursor_position(plane, crtc, position: &position);
1250 if (ret)
1251 return;
1252
1253 if (!position.enable) {
1254 /* turn off cursor */
1255 if (crtc_state && crtc_state->stream) {
1256 mutex_lock(&adev->dm.dc_lock);
1257 dc_stream_set_cursor_position(stream: crtc_state->stream,
1258 position: &position);
1259 mutex_unlock(lock: &adev->dm.dc_lock);
1260 }
1261 return;
1262 }
1263
1264 amdgpu_crtc->cursor_width = plane->state->crtc_w;
1265 amdgpu_crtc->cursor_height = plane->state->crtc_h;
1266
1267 memset(&attributes, 0, sizeof(attributes));
1268 attributes.address.high_part = upper_32_bits(address);
1269 attributes.address.low_part = lower_32_bits(address);
1270 attributes.width = plane->state->crtc_w;
1271 attributes.height = plane->state->crtc_h;
1272 attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
1273 attributes.rotation_angle = 0;
1274 attributes.attribute_flags.value = 0;
1275
1276 /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
1277 * legacy gamma setup.
1278 */
1279 if (crtc_state->cm_is_degamma_srgb &&
1280 adev->dm.dc->caps.color.dpp.gamma_corr)
1281 attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
1282
1283 attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
1284
1285 if (crtc_state->stream) {
1286 mutex_lock(&adev->dm.dc_lock);
1287 if (!dc_stream_set_cursor_attributes(stream: crtc_state->stream,
1288 attributes: &attributes))
1289 DRM_ERROR("DC failed to set cursor attributes\n");
1290
1291 if (!dc_stream_set_cursor_position(stream: crtc_state->stream,
1292 position: &position))
1293 DRM_ERROR("DC failed to set cursor position\n");
1294 mutex_unlock(lock: &adev->dm.dc_lock);
1295 }
1296}
1297
1298static void amdgpu_dm_plane_atomic_async_update(struct drm_plane *plane,
1299 struct drm_atomic_state *state)
1300{
1301 struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
1302 plane);
1303 struct drm_plane_state *old_state =
1304 drm_atomic_get_old_plane_state(state, plane);
1305
1306 trace_amdgpu_dm_atomic_update_cursor(state: new_state);
1307
1308 swap(plane->state->fb, new_state->fb);
1309
1310 plane->state->src_x = new_state->src_x;
1311 plane->state->src_y = new_state->src_y;
1312 plane->state->src_w = new_state->src_w;
1313 plane->state->src_h = new_state->src_h;
1314 plane->state->crtc_x = new_state->crtc_x;
1315 plane->state->crtc_y = new_state->crtc_y;
1316 plane->state->crtc_w = new_state->crtc_w;
1317 plane->state->crtc_h = new_state->crtc_h;
1318
1319 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state: old_state);
1320}
1321
1322static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
1323 .prepare_fb = amdgpu_dm_plane_helper_prepare_fb,
1324 .cleanup_fb = amdgpu_dm_plane_helper_cleanup_fb,
1325 .atomic_check = amdgpu_dm_plane_atomic_check,
1326 .atomic_async_check = amdgpu_dm_plane_atomic_async_check,
1327 .atomic_async_update = amdgpu_dm_plane_atomic_async_update
1328};
1329
1330static void amdgpu_dm_plane_drm_plane_reset(struct drm_plane *plane)
1331{
1332 struct dm_plane_state *amdgpu_state = NULL;
1333
1334 if (plane->state)
1335 plane->funcs->atomic_destroy_state(plane, plane->state);
1336
1337 amdgpu_state = kzalloc(size: sizeof(*amdgpu_state), GFP_KERNEL);
1338 WARN_ON(amdgpu_state == NULL);
1339
1340 if (amdgpu_state)
1341 __drm_atomic_helper_plane_reset(plane, state: &amdgpu_state->base);
1342}
1343
1344static struct drm_plane_state *amdgpu_dm_plane_drm_plane_duplicate_state(struct drm_plane *plane)
1345{
1346 struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
1347
1348 old_dm_plane_state = to_dm_plane_state(plane->state);
1349 dm_plane_state = kzalloc(size: sizeof(*dm_plane_state), GFP_KERNEL);
1350 if (!dm_plane_state)
1351 return NULL;
1352
1353 __drm_atomic_helper_plane_duplicate_state(plane, state: &dm_plane_state->base);
1354
1355 if (old_dm_plane_state->dc_state) {
1356 dm_plane_state->dc_state = old_dm_plane_state->dc_state;
1357 dc_plane_state_retain(plane_state: dm_plane_state->dc_state);
1358 }
1359
1360 return &dm_plane_state->base;
1361}
1362
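/*
 * .format_mod_supported hook. LINEAR and INVALID are always accepted; any
 * other modifier must be present in the plane's modifier list and must also
 * satisfy the bpp restrictions for _D micro-tiling and for DCC (32bpp,
 * single-plane formats only).
 */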
1363static bool amdgpu_dm_plane_format_mod_supported(struct drm_plane *plane,
1364 uint32_t format,
1365 uint64_t modifier)
1366{
1367 struct amdgpu_device *adev = drm_to_adev(ddev: plane->dev);
1368 const struct drm_format_info *info = drm_format_info(format);
1369 int i;
1370
1371 enum dm_micro_swizzle microtile = amdgpu_dm_plane_modifier_gfx9_swizzle_mode(modifier) & 3;
1372
1373 if (!info)
1374 return false;
1375
1376 /*
1377 * We always have to allow these modifiers:
1378 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
1379 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
1380 */
1381 if (modifier == DRM_FORMAT_MOD_LINEAR ||
1382 modifier == DRM_FORMAT_MOD_INVALID) {
1383 return true;
1384 }
1385
1386 /* Check that the modifier is on the list of the plane's supported modifiers. */
1387 for (i = 0; i < plane->modifier_count; i++) {
1388 if (modifier == plane->modifiers[i])
1389 break;
1390 }
1391 if (i == plane->modifier_count)
1392 return false;
1393
1394 /*
1395 * For D swizzle the canonical modifier depends on the bpp, so check
1396 * it here.
1397 */
1398 if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
1399 adev->family >= AMDGPU_FAMILY_NV) {
1400 if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
1401 return false;
1402 }
1403
1404 if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
1405 info->cpp[0] < 8)
1406 return false;
1407
1408 if (amdgpu_dm_plane_modifier_has_dcc(modifier)) {
1409 /* Per radeonsi comments 16/64 bpp are more complicated. */
1410 if (info->cpp[0] != 4)
1411 return false;
1412 /* We support multi-planar formats, but not when combined with
1413 * additional DCC metadata planes.
1414 */
1415 if (info->num_planes > 1)
1416 return false;
1417 }
1418
1419 return true;
1420}
1421
1422static void amdgpu_dm_plane_drm_plane_destroy_state(struct drm_plane *plane,
1423 struct drm_plane_state *state)
1424{
1425 struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
1426
1427 if (dm_plane_state->dc_state)
1428 dc_plane_state_release(plane_state: dm_plane_state->dc_state);
1429
1430 drm_atomic_helper_plane_destroy_state(plane, state);
1431}
1432
1433static const struct drm_plane_funcs dm_plane_funcs = {
1434 .update_plane = drm_atomic_helper_update_plane,
1435 .disable_plane = drm_atomic_helper_disable_plane,
1436 .destroy = drm_plane_helper_destroy,
1437 .reset = amdgpu_dm_plane_drm_plane_reset,
1438 .atomic_duplicate_state = amdgpu_dm_plane_drm_plane_duplicate_state,
1439 .atomic_destroy_state = amdgpu_dm_plane_drm_plane_destroy_state,
1440 .format_mod_supported = amdgpu_dm_plane_format_mod_supported,
1441};
1442
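/*
 * Create and register a DRM plane for DC. Supported formats and modifiers are
 * derived from the DC plane caps and GPU family, after which the relevant
 * properties (blend mode/alpha, zpos, YUV color encoding/range, rotation,
 * damage clips) are attached depending on the plane type.
 */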
1443int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
1444 struct drm_plane *plane,
1445 unsigned long possible_crtcs,
1446 const struct dc_plane_cap *plane_cap)
1447{
1448 uint32_t formats[32];
1449 int num_formats;
1450 int res = -EPERM;
1451 unsigned int supported_rotations;
1452 uint64_t *modifiers = NULL;
1453
1454 num_formats = amdgpu_dm_plane_get_plane_formats(plane, plane_cap, formats,
1455 ARRAY_SIZE(formats));
1456
1457 res = amdgpu_dm_plane_get_plane_modifiers(adev: dm->adev, plane_type: plane->type, mods: &modifiers);
1458 if (res)
1459 return res;
1460
1461 if (modifiers == NULL)
1462 adev_to_drm(adev: dm->adev)->mode_config.fb_modifiers_not_supported = true;
1463
1464 res = drm_universal_plane_init(dev: adev_to_drm(adev: dm->adev), plane, possible_crtcs,
1465 funcs: &dm_plane_funcs, formats, format_count: num_formats,
1466 format_modifiers: modifiers, type: plane->type, NULL);
1467 kfree(objp: modifiers);
1468 if (res)
1469 return res;
1470
1471 if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
1472 plane_cap && plane_cap->per_pixel_alpha) {
1473 unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
1474 BIT(DRM_MODE_BLEND_PREMULTI) |
1475 BIT(DRM_MODE_BLEND_COVERAGE);
1476
1477 drm_plane_create_alpha_property(plane);
1478 drm_plane_create_blend_mode_property(plane, supported_modes: blend_caps);
1479 }
1480
1481 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
1482 drm_plane_create_zpos_immutable_property(plane, zpos: 0);
1483 } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
1484 unsigned int zpos = 1 + drm_plane_index(plane);
1485 drm_plane_create_zpos_property(plane, zpos, min: 1, max: 254);
1486 } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
1487 drm_plane_create_zpos_immutable_property(plane, zpos: 255);
1488 }
1489
1490 if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
1491 plane_cap &&
1492 (plane_cap->pixel_format_support.nv12 ||
1493 plane_cap->pixel_format_support.p010)) {
1494 /* This only affects YUV formats. */
1495 drm_plane_create_color_properties(
1496 plane,
1497 BIT(DRM_COLOR_YCBCR_BT601) |
1498 BIT(DRM_COLOR_YCBCR_BT709) |
1499 BIT(DRM_COLOR_YCBCR_BT2020),
1500 BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
1501 BIT(DRM_COLOR_YCBCR_FULL_RANGE),
1502 default_encoding: DRM_COLOR_YCBCR_BT709, default_range: DRM_COLOR_YCBCR_LIMITED_RANGE);
1503 }
1504
1505 supported_rotations =
1506 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
1507 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
1508
1509 if (dm->adev->asic_type >= CHIP_BONAIRE &&
1510 plane->type != DRM_PLANE_TYPE_CURSOR)
1511 drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
1512 supported_rotations);
1513
1514 if (amdgpu_ip_version(adev: dm->adev, ip: DCE_HWIP, inst: 0) > IP_VERSION(3, 0, 1) &&
1515 plane->type != DRM_PLANE_TYPE_CURSOR)
1516 drm_plane_enable_fb_damage_clips(plane);
1517
1518 drm_plane_helper_add(plane, funcs: &dm_plane_helper_funcs);
1519
1520 /* Create (reset) the plane state */
1521 if (plane->funcs->reset)
1522 plane->funcs->reset(plane);
1523
1524 return 0;
1525}
1526
1527bool amdgpu_dm_plane_is_video_format(uint32_t format)
1528{
1529 int i;
1530
1531 for (i = 0; i < ARRAY_SIZE(video_formats); i++)
1532 if (format == video_formats[i])
1533 return true;
1534
1535 return false;
1536}