/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))
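
/*
 * Packed to match the layout of adev->uvd.fw_version (major version in
 * bits 31-24, minor in bits 23-16, plus one more byte below that), so the
 * comparison in uvd_v6_0_enc_support() below works on the raw values;
 * encode support is refused on anything older than 1.130.16.
 */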

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
		(adev->asic_type <= CHIP_VEGAM) &&
		(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}
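
/*
 * UVD 6.x rings use 32-bit ring pointers (support_64bit_ptrs is false in
 * the ring funcs at the bottom of this file), which is why only the lower
 * 32 bits of wptr are ever committed to the hardware above.
 */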

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 *
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

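	/* the engine is alive if it consumes the END command and bumps rptr */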
	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

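	/*
	 * Each ENC command packet starts with its size in bytes followed by
	 * a command id: 0x00000001 carries session info (the handle plus the
	 * test BO address), 0x00000002 carries task info, and the 0x080000xx
	 * ids are the stream ops (initialize here, close session in the
	 * destroy msg below).
	 */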
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: amdgpu object for which we query the offset
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
				     AMDGPU_IB_POOL_DIRECT, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
	long r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			sprintf(ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->uvd.inst->irq, 0,
					     AMDGPU_RING_PRIO_DEFAULT, NULL);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

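	/* program the semaphore wait/signal timeouts to their maximum */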
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 * - cancel the delayed idle work
	 * - enable powergating
	 * - enable clockgating
	 * - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->uvd.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
	}

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
	       lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
	       upper_32_bits(adev->uvd.inst->gpu_addr));

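	/* the VCPU cache offsets below are programmed in 8-byte units (>> 3) */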
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__VCPU_MASK |
			UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
			 UVD_SUVD_CGC_GATE__SIT_MASK |
			 UVD_SUVD_CGC_GATE__SMP_MASK |
			 UVD_SUVD_CGC_GATE__SCM_MASK |
			 UVD_SUVD_CGC_GATE__SDB_MASK |
			 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__VCPU_MASK |
			  UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
			   UVD_SUVD_CGC_GATE__SIT_MASK |
			   UVD_SUVD_CGC_GATE__SMP_MASK |
			   UVD_SUVD_CGC_GATE__SCM_MASK |
			   UVD_SUVD_CGC_GATE__SDB_MASK |
			   UVD_SUVD_CGC_GATE__SRE_H264_MASK |
			   UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
			   UVD_SUVD_CGC_GATE__SIT_H264_MASK |
			   UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
			   UVD_SUVD_CGC_GATE__SCM_H264_MASK |
			   UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
			   UVD_SUVD_CGC_GATE__SDB_H264_MASK |
			   UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

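	/*
	 * Give the VCPU up to ten reset cycles to come up; each attempt
	 * polls UVD_STATUS for up to a second for the ready bit (bit 1).
	 */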
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		 (UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK),
		 ~(UVD_MASTINT_EN__VCPU_EN_MASK | UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

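	/*
	 * First group: sequence number in UVD_CONTEXT_ID, target address in
	 * GPCOM DATA0/1, command 0 emits the fence write.  Second group:
	 * command 2 raises the trap interrupt.
	 */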
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job to retrieve vmid from
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
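	/*
	 * Indirect register write through the VCPU: DATA0 carries the byte
	 * address (reg << 2), DATA1 the value, and command 0x8 executes it.
	 */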
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

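	/*
	 * Wait for the invalidate to finish: command 0xC appears to poll
	 * VM_INVALIDATE_REQUEST, masked by 1 << vmid (GP_SCRATCH8), until
	 * the hardware clears our bit.
	 */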
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

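	/*
	 * Command 0xE appears to be a memory poll: DATA0/1 hold the fence
	 * address, GP_SCRATCH8 the mask and GP_SCRATCH9 the reference, so
	 * the ring stalls until the fence value reaches sync_seq.
	 */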
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

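	/* NOPs go out as two-dword PACKET0 pairs, hence the even count */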
	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

#define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			  UVD_CGC_GATE__UDEC_MASK |
			  UVD_CGC_GATE__MPEG2_MASK |
			  UVD_CGC_GATE__RBC_MASK |
			  UVD_CGC_GATE__LMI_MC_MASK |
			  UVD_CGC_GATE__LMI_UMC_MASK |
			  UVD_CGC_GATE__IDCT_MASK |
			  UVD_CGC_GATE__MPRD_MASK |
			  UVD_CGC_GATE__MPC_MASK |
			  UVD_CGC_GATE__LBSI_MASK |
			  UVD_CGC_GATE__LRBBM_MASK |
			  UVD_CGC_GATE__UDEC_RE_MASK |
			  UVD_CGC_GATE__UDEC_CM_MASK |
			  UVD_CGC_GATE__UDEC_IT_MASK |
			  UVD_CGC_GATE__UDEC_DB_MASK |
			  UVD_CGC_GATE__UDEC_MP_MASK |
			  UVD_CGC_GATE__WCB_MASK |
			  UVD_CGC_GATE__JPEG_MASK |
			  UVD_CGC_GATE__SCPU_MASK |
			  UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable Sw gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};