1 | /* |
2 | * Copyright 2013 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Christian König <christian.koenig@amd.com> |
23 | */ |
24 | |
25 | #include <linux/firmware.h> |
26 | |
27 | #include "amdgpu.h" |
28 | #include "amdgpu_uvd.h" |
29 | #include "cikd.h" |
30 | |
31 | #include "uvd/uvd_4_2_d.h" |
32 | #include "uvd/uvd_4_2_sh_mask.h" |
33 | |
34 | #include "oss/oss_2_0_d.h" |
35 | #include "oss/oss_2_0_sh_mask.h" |
36 | |
37 | #include "bif/bif_4_1_d.h" |
38 | |
39 | #include "smu/smu_7_0_1_d.h" |
40 | #include "smu/smu_7_0_1_sh_mask.h" |
41 | |
42 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); |
43 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); |
44 | static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev); |
45 | static int uvd_v4_2_start(struct amdgpu_device *adev); |
46 | static void uvd_v4_2_stop(struct amdgpu_device *adev); |
47 | static int uvd_v4_2_set_clockgating_state(void *handle, |
48 | enum amd_clockgating_state state); |
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
			     bool sw_mode);
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

51 | /** |
52 | * uvd_v4_2_ring_get_rptr - get read pointer |
53 | * |
54 | * @ring: amdgpu_ring pointer |
55 | * |
56 | * Returns the current hardware read pointer |
57 | */ |
58 | static uint64_t uvd_v4_2_ring_get_rptr(struct amdgpu_ring *ring) |
59 | { |
60 | struct amdgpu_device *adev = ring->adev; |
61 | |
62 | return RREG32(mmUVD_RBC_RB_RPTR); |
63 | } |
64 | |
65 | /** |
66 | * uvd_v4_2_ring_get_wptr - get write pointer |
67 | * |
68 | * @ring: amdgpu_ring pointer |
69 | * |
70 | * Returns the current hardware write pointer |
71 | */ |
72 | static uint64_t uvd_v4_2_ring_get_wptr(struct amdgpu_ring *ring) |
73 | { |
74 | struct amdgpu_device *adev = ring->adev; |
75 | |
76 | return RREG32(mmUVD_RBC_RB_WPTR); |
77 | } |
78 | |
79 | /** |
80 | * uvd_v4_2_ring_set_wptr - set write pointer |
81 | * |
82 | * @ring: amdgpu_ring pointer |
83 | * |
84 | * Commits the write pointer to the hardware |
85 | */ |
86 | static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) |
87 | { |
88 | struct amdgpu_device *adev = ring->adev; |
89 | |
90 | WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
91 | } |
92 | |
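/**
 * uvd_v4_2_early_init - early hardware-independent init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Record that there is a single UVD instance and install the
 * ring and interrupt function tables.
 */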
93 | static int uvd_v4_2_early_init(void *handle) |
94 | { |
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->uvd.num_uvd_inst = 1;
97 | |
98 | uvd_v4_2_set_ring_funcs(adev); |
99 | uvd_v4_2_set_irq_funcs(adev); |
100 | |
101 | return 0; |
102 | } |
103 | |
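/**
 * uvd_v4_2_sw_init - software state init
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Register the UVD trap interrupt (legacy src id 124), set up the
 * common UVD software state, create the UVD ring and resume the
 * firmware.
 */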
104 | static int uvd_v4_2_sw_init(void *handle) |
105 | { |
106 | struct amdgpu_ring *ring; |
107 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
108 | int r; |
109 | |
110 | /* UVD TRAP */ |
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
112 | if (r) |
113 | return r; |
114 | |
115 | r = amdgpu_uvd_sw_init(adev); |
116 | if (r) |
117 | return r; |
118 | |
119 | ring = &adev->uvd.inst->ring; |
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
123 | if (r) |
124 | return r; |
125 | |
	return amdgpu_uvd_resume(adev);
131 | } |
132 | |
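/**
 * uvd_v4_2_sw_fini - software state teardown
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Suspend the UVD block and free the common UVD software state.
 */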
133 | static int uvd_v4_2_sw_fini(void *handle) |
134 | { |
135 | int r; |
136 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
137 | |
138 | r = amdgpu_uvd_suspend(adev); |
139 | if (r) |
140 | return r; |
141 | |
142 | return amdgpu_uvd_sw_fini(adev); |
143 | } |
144 | |
147 | /** |
148 | * uvd_v4_2_hw_init - start and test UVD block |
149 | * |
150 | * @handle: handle used to pass amdgpu_device pointer |
151 | * |
152 | * Initialize the hardware, boot up the VCPU and do some testing |
153 | */ |
154 | static int uvd_v4_2_hw_init(void *handle) |
155 | { |
156 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
157 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
158 | uint32_t tmp; |
159 | int r; |
160 | |
	uvd_v4_2_enable_mgcg(adev, true);
162 | amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); |
163 | |
164 | r = amdgpu_ring_test_helper(ring); |
165 | if (r) |
166 | goto done; |
167 | |
	r = amdgpu_ring_alloc(ring, 10);
169 | if (r) { |
170 | DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n" , r); |
171 | goto done; |
172 | } |
173 | |
	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);
192 | |
193 | amdgpu_ring_commit(ring); |
194 | |
195 | done: |
196 | if (!r) |
		DRM_INFO("UVD initialized successfully.\n");
198 | |
199 | return r; |
200 | } |
201 | |
202 | /** |
203 | * uvd_v4_2_hw_fini - stop the hardware block |
204 | * |
205 | * @handle: handle used to pass amdgpu_device pointer |
206 | * |
 * Stop the UVD block and mark the ring as no longer ready
208 | */ |
209 | static int uvd_v4_2_hw_fini(void *handle) |
210 | { |
211 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
212 | |
	cancel_delayed_work_sync(&adev->uvd.idle_work);
214 | |
215 | if (RREG32(mmUVD_STATUS) != 0) |
216 | uvd_v4_2_stop(adev); |
217 | |
218 | return 0; |
219 | } |
220 | |
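/**
 * uvd_v4_2_prepare_suspend - prepare for suspend
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Hand off to the common UVD suspend preparation code.
 */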
221 | static int uvd_v4_2_prepare_suspend(void *handle) |
222 | { |
223 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
224 | |
225 | return amdgpu_uvd_prepare_suspend(adev); |
226 | } |
227 | |
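/**
 * uvd_v4_2_suspend - suspend the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Cancel the idle work, gate clocks and power (directly or through
 * dpm), stop the hardware and save the UVD state.
 */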
228 | static int uvd_v4_2_suspend(void *handle) |
229 | { |
230 | int r; |
231 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
232 | |
233 | /* |
234 | * Proper cleanups before halting the HW engine: |
235 | * - cancel the delayed idle work |
236 | * - enable powergating |
237 | * - enable clockgating |
238 | * - disable dpm |
239 | * |
240 | * TODO: to align with the VCN implementation, move the |
241 | * jobs for clockgating/powergating/dpm setting to |
242 | * ->set_powergating_state(). |
243 | */ |
	cancel_delayed_work_sync(&adev->uvd.idle_work);
245 | |
	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_uvd(adev, false);
	} else {
		amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		/* shutdown the UVD block */
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						       AMD_CG_STATE_GATE);
255 | } |
256 | |
	r = uvd_v4_2_hw_fini(adev);
258 | if (r) |
259 | return r; |
260 | |
261 | return amdgpu_uvd_suspend(adev); |
262 | } |
263 | |
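/**
 * uvd_v4_2_resume - resume the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Restore the UVD state and re-run hardware init.
 */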
264 | static int uvd_v4_2_resume(void *handle) |
265 | { |
266 | int r; |
267 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
268 | |
269 | r = amdgpu_uvd_resume(adev); |
270 | if (r) |
271 | return r; |
272 | |
	return uvd_v4_2_hw_init(adev);
274 | } |
275 | |
276 | /** |
277 | * uvd_v4_2_start - start UVD block |
278 | * |
279 | * @adev: amdgpu_device pointer |
280 | * |
281 | * Setup and start the UVD block |
282 | */ |
283 | static int uvd_v4_2_start(struct amdgpu_device *adev) |
284 | { |
285 | struct amdgpu_ring *ring = &adev->uvd.inst->ring; |
286 | uint32_t rb_bufsz; |
287 | int i, j, r; |
288 | u32 tmp; |
289 | /* disable byte swapping */ |
290 | u32 lmi_swap_cntl = 0; |
291 | u32 mp_swap_cntl = 0; |
292 | |
293 | /* set uvd busy */ |
294 | WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2)); |
295 | |
	uvd_v4_2_set_dcm(adev, true);
297 | WREG32(mmUVD_CGC_GATE, 0); |
298 | |
299 | /* take UVD block out of reset */ |
300 | WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); |
301 | mdelay(5); |
302 | |
303 | /* enable VCPU clock */ |
304 | WREG32(mmUVD_VCPU_CNTL, 1 << 9); |
305 | |
	/* disable interrupts */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
308 | |
309 | #ifdef __BIG_ENDIAN |
310 | /* swap (8 in 32) RB and IB */ |
311 | lmi_swap_cntl = 0xa; |
312 | mp_swap_cntl = 0; |
313 | #endif |
314 | WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); |
315 | WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl); |
316 | /* initialize UVD memory controller */ |
317 | WREG32(mmUVD_LMI_CTRL, 0x203108); |
318 | |
319 | tmp = RREG32(mmUVD_MPC_CNTL); |
320 | WREG32(mmUVD_MPC_CNTL, tmp | 0x10); |
321 | |
322 | WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040); |
323 | WREG32(mmUVD_MPC_SET_MUXA1, 0x0); |
324 | WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040); |
325 | WREG32(mmUVD_MPC_SET_MUXB1, 0x0); |
326 | WREG32(mmUVD_MPC_SET_ALU, 0); |
327 | WREG32(mmUVD_MPC_SET_MUX, 0x88); |
328 | |
329 | uvd_v4_2_mc_resume(adev); |
330 | |
331 | tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL); |
332 | WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10)); |
333 | |
334 | /* enable UMC */ |
335 | WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); |
336 | |
337 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); |
338 | |
339 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); |
340 | |
341 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
342 | |
343 | mdelay(10); |
344 | |
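	/* wait for the VCPU to come up, resetting it between attempts */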
345 | for (i = 0; i < 10; ++i) { |
346 | uint32_t status; |
347 | for (j = 0; j < 100; ++j) { |
348 | status = RREG32(mmUVD_STATUS); |
349 | if (status & 2) |
350 | break; |
351 | mdelay(10); |
352 | } |
353 | r = 0; |
354 | if (status & 2) |
355 | break; |
356 | |
357 | DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n" ); |
358 | WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, |
359 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
360 | mdelay(10); |
361 | WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
362 | mdelay(10); |
		r = -ETIMEDOUT;
364 | } |
365 | |
366 | if (r) { |
367 | DRM_ERROR("UVD not responding, giving up!!!\n" ); |
368 | return r; |
369 | } |
370 | |
	/* enable interrupts */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));
373 | |
374 | WREG32_P(mmUVD_STATUS, 0, ~(1<<2)); |
375 | |
376 | /* force RBC into idle state */ |
377 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); |
378 | |
379 | /* Set the write pointer delay */ |
380 | WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0); |
381 | |
382 | /* program the 4GB memory segment for rptr and ring buffer */ |
383 | WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) | |
384 | (0x7 << 16) | (0x1 << 31)); |
385 | |
386 | /* Initialize the ring buffer's read and write pointers */ |
387 | WREG32(mmUVD_RBC_RB_RPTR, 0x0); |
388 | |
389 | ring->wptr = RREG32(mmUVD_RBC_RB_RPTR); |
390 | WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
391 | |
392 | /* set the ring address */ |
393 | WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr); |
394 | |
395 | /* Set ring buffer size */ |
396 | rb_bufsz = order_base_2(ring->ring_size); |
397 | rb_bufsz = (0x1 << 8) | rb_bufsz; |
398 | WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f); |
399 | |
400 | return 0; |
401 | } |
402 | |
403 | /** |
404 | * uvd_v4_2_stop - stop UVD block |
405 | * |
406 | * @adev: amdgpu_device pointer |
407 | * |
408 | * stop the UVD block |
409 | */ |
410 | static void uvd_v4_2_stop(struct amdgpu_device *adev) |
411 | { |
412 | uint32_t i, j; |
413 | uint32_t status; |
414 | |
415 | WREG32(mmUVD_RBC_RB_CNTL, 0x11010101); |
416 | |
417 | for (i = 0; i < 10; ++i) { |
418 | for (j = 0; j < 100; ++j) { |
419 | status = RREG32(mmUVD_STATUS); |
420 | if (status & 2) |
421 | break; |
422 | mdelay(1); |
423 | } |
424 | if (status & 2) |
425 | break; |
426 | } |
427 | |
428 | for (i = 0; i < 10; ++i) { |
429 | for (j = 0; j < 100; ++j) { |
430 | status = RREG32(mmUVD_LMI_STATUS); |
431 | if (status & 0xf) |
432 | break; |
433 | mdelay(1); |
434 | } |
435 | if (status & 0xf) |
436 | break; |
437 | } |
438 | |
439 | /* Stall UMC and register bus before resetting VCPU */ |
440 | WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); |
441 | |
442 | for (i = 0; i < 10; ++i) { |
443 | for (j = 0; j < 100; ++j) { |
444 | status = RREG32(mmUVD_LMI_STATUS); |
445 | if (status & 0x240) |
446 | break; |
447 | mdelay(1); |
448 | } |
449 | if (status & 0x240) |
450 | break; |
451 | } |
452 | |
453 | WREG32_P(0x3D49, 0, ~(1 << 2)); |
454 | |
455 | WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9)); |
456 | |
457 | /* put LMI, VCPU, RBC etc... into reset */ |
458 | WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | |
459 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | |
460 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); |
461 | |
462 | WREG32(mmUVD_STATUS, 0); |
463 | |
	uvd_v4_2_set_dcm(adev, false);
465 | } |
466 | |
467 | /** |
 * uvd_v4_2_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
473 | * @flags: fence related flags |
474 | * |
475 | * Write a fence and a trap command to the ring. |
476 | */ |
477 | static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
478 | unsigned flags) |
479 | { |
480 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); |
481 | |
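	/* write the fence: sequence number, 40-bit address, fence command */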
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);
490 | |
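	/* emit a trap command to raise the UVD interrupt */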
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
497 | } |
498 | |
499 | /** |
500 | * uvd_v4_2_ring_test_ring - register write test |
501 | * |
502 | * @ring: amdgpu_ring pointer |
503 | * |
504 | * Test if we can successfully write to the context register |
505 | */ |
506 | static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring) |
507 | { |
508 | struct amdgpu_device *adev = ring->adev; |
509 | uint32_t tmp = 0; |
510 | unsigned i; |
511 | int r; |
512 | |
513 | WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
	r = amdgpu_ring_alloc(ring, 3);
515 | if (r) |
516 | return r; |
517 | |
518 | amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0)); |
	amdgpu_ring_write(ring, 0xDEADBEEF);
520 | amdgpu_ring_commit(ring); |
521 | for (i = 0; i < adev->usec_timeout; i++) { |
522 | tmp = RREG32(mmUVD_CONTEXT_ID); |
523 | if (tmp == 0xDEADBEEF) |
524 | break; |
525 | udelay(1); |
526 | } |
527 | |
528 | if (i >= adev->usec_timeout) |
529 | r = -ETIMEDOUT; |
530 | |
531 | return r; |
532 | } |
533 | |
534 | /** |
535 | * uvd_v4_2_ring_emit_ib - execute indirect buffer |
536 | * |
537 | * @ring: amdgpu_ring pointer |
 * @job: job associated with the indirect buffer
539 | * @ib: indirect buffer to execute |
540 | * @flags: flags associated with the indirect buffer |
541 | * |
542 | * Write ring commands to execute the indirect buffer |
543 | */ |
544 | static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, |
545 | struct amdgpu_job *job, |
546 | struct amdgpu_ib *ib, |
547 | uint32_t flags) |
548 | { |
549 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); |
	amdgpu_ring_write(ring, ib->gpu_addr);
551 | amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0)); |
	amdgpu_ring_write(ring, ib->length_dw);
553 | } |
554 | |
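/**
 * uvd_v4_2_ring_insert_nop - insert NOP packets
 *
 * @ring: amdgpu_ring pointer
 * @count: number of dwords of padding to write
 *
 * Pad the ring with NOP packets; each NOP takes two dwords, so
 * @count must be even.
 */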
555 | static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
556 | { |
557 | int i; |
558 | |
559 | WARN_ON(ring->wptr % 2 || count % 2); |
560 | |
561 | for (i = 0; i < count / 2; i++) { |
562 | amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); |
		amdgpu_ring_write(ring, 0);
564 | } |
565 | } |
566 | |
567 | /** |
568 | * uvd_v4_2_mc_resume - memory controller programming |
569 | * |
570 | * @adev: amdgpu_device pointer |
571 | * |
 * Let the UVD memory controller know its offsets
573 | */ |
574 | static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) |
575 | { |
576 | uint64_t addr; |
577 | uint32_t size; |
578 | |
579 | /* program the VCPU memory controller bits 0-27 */ |
580 | addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; |
581 | size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; |
582 | WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); |
583 | WREG32(mmUVD_VCPU_CACHE_SIZE0, size); |
584 | |
585 | addr += size; |
586 | size = AMDGPU_UVD_HEAP_SIZE >> 3; |
587 | WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr); |
588 | WREG32(mmUVD_VCPU_CACHE_SIZE1, size); |
589 | |
590 | addr += size; |
591 | size = (AMDGPU_UVD_STACK_SIZE + |
592 | (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3; |
593 | WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr); |
594 | WREG32(mmUVD_VCPU_CACHE_SIZE2, size); |
595 | |
596 | /* bits 28-31 */ |
597 | addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF; |
598 | WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); |
599 | |
600 | /* bits 32-39 */ |
601 | addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF; |
602 | WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); |
603 | |
604 | WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); |
605 | WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config); |
606 | WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); |
607 | } |
608 | |
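/**
 * uvd_v4_2_enable_mgcg - medium grain clockgating control
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable clockgating
 *
 * Toggle the memory and dynamic clock gating bits, honoring
 * the AMD_CG_SUPPORT_UVD_MGCG flag on enable.
 */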
609 | static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, |
610 | bool enable) |
611 | { |
612 | u32 orig, data; |
613 | |
614 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) { |
615 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); |
616 | data |= 0xfff; |
617 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); |
618 | |
619 | orig = data = RREG32(mmUVD_CGC_CTRL); |
620 | data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; |
621 | if (orig != data) |
622 | WREG32(mmUVD_CGC_CTRL, data); |
623 | } else { |
624 | data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); |
625 | data &= ~0xfff; |
626 | WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); |
627 | |
628 | orig = data = RREG32(mmUVD_CGC_CTRL); |
629 | data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; |
630 | if (orig != data) |
631 | WREG32(mmUVD_CGC_CTRL, data); |
632 | } |
633 | } |
634 | |
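/**
 * uvd_v4_2_set_dcm - dynamic clocking mode control
 *
 * @adev: amdgpu_device pointer
 * @sw_mode: enable dynamic (software controlled) clock mode
 *
 * Program UVD_CGC_CTRL and UVD_CGC_CTRL2 for either dynamic or
 * static clocking.
 */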
635 | static void uvd_v4_2_set_dcm(struct amdgpu_device *adev, |
636 | bool sw_mode) |
637 | { |
638 | u32 tmp, tmp2; |
639 | |
640 | WREG32_FIELD(UVD_CGC_GATE, REGS, 0); |
641 | |
642 | tmp = RREG32(mmUVD_CGC_CTRL); |
643 | tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); |
644 | tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK | |
645 | (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) | |
646 | (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT); |
647 | |
648 | if (sw_mode) { |
649 | tmp &= ~0x7ffff800; |
650 | tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK | |
651 | UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK | |
652 | (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT); |
653 | } else { |
654 | tmp |= 0x7ffff800; |
655 | tmp2 = 0; |
656 | } |
657 | |
658 | WREG32(mmUVD_CGC_CTRL, tmp); |
659 | WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2); |
660 | } |
661 | |
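/**
 * uvd_v4_2_is_idle - check UVD idle status
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Returns true if SRBM no longer reports the UVD block as busy.
 */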
662 | static bool uvd_v4_2_is_idle(void *handle) |
663 | { |
664 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
665 | |
666 | return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); |
667 | } |
668 | |
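/**
 * uvd_v4_2_wait_for_idle - poll for idle
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Poll SRBM_STATUS until the UVD busy bit clears, or return
 * -ETIMEDOUT after adev->usec_timeout iterations.
 */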
669 | static int uvd_v4_2_wait_for_idle(void *handle) |
670 | { |
671 | unsigned i; |
672 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
673 | |
674 | for (i = 0; i < adev->usec_timeout; i++) { |
675 | if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK)) |
676 | return 0; |
677 | } |
678 | return -ETIMEDOUT; |
679 | } |
680 | |
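/**
 * uvd_v4_2_soft_reset - soft reset the UVD block
 *
 * @handle: handle used to pass amdgpu_device pointer
 *
 * Stop UVD, pulse the SRBM soft reset line and restart the block.
 */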
681 | static int uvd_v4_2_soft_reset(void *handle) |
682 | { |
683 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
684 | |
685 | uvd_v4_2_stop(adev); |
686 | |
687 | WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK, |
688 | ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK); |
689 | mdelay(5); |
690 | |
691 | return uvd_v4_2_start(adev); |
692 | } |
693 | |
694 | static int uvd_v4_2_set_interrupt_state(struct amdgpu_device *adev, |
695 | struct amdgpu_irq_src *source, |
696 | unsigned type, |
697 | enum amdgpu_interrupt_state state) |
698 | { |
	/* TODO */
700 | return 0; |
701 | } |
702 | |
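/**
 * uvd_v4_2_process_interrupt - process a UVD trap interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Process completed fences on the UVD ring.
 */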
703 | static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, |
704 | struct amdgpu_irq_src *source, |
705 | struct amdgpu_iv_entry *entry) |
706 | { |
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.inst->ring);
709 | return 0; |
710 | } |
711 | |
712 | static int uvd_v4_2_set_clockgating_state(void *handle, |
713 | enum amd_clockgating_state state) |
714 | { |
715 | return 0; |
716 | } |
717 | |
718 | static int uvd_v4_2_set_powergating_state(void *handle, |
719 | enum amd_powergating_state state) |
720 | { |
721 | /* This doesn't actually powergate the UVD block. |
722 | * That's done in the dpm code via the SMC. This |
723 | * just re-inits the block as necessary. The actual |
724 | * gating still happens in the dpm code. We should |
725 | * revisit this when there is a cleaner line between |
726 | * the smc and the hw blocks |
727 | */ |
728 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
729 | |
730 | if (state == AMD_PG_STATE_GATE) { |
731 | uvd_v4_2_stop(adev); |
732 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) { |
733 | if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & |
734 | CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { |
735 | WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | |
736 | UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_DOWN_MASK | |
737 | UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); |
738 | mdelay(20); |
739 | } |
740 | } |
741 | return 0; |
742 | } else { |
743 | if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) { |
744 | if (RREG32_SMC(ixCURRENT_PG_STATUS) & |
745 | CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { |
746 | WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | |
747 | UVD_PGFSM_CONFIG__UVD_PGFSM_POWER_UP_MASK | |
748 | UVD_PGFSM_CONFIG__UVD_PGFSM_P1_SELECT_MASK)); |
749 | mdelay(30); |
750 | } |
751 | } |
752 | return uvd_v4_2_start(adev); |
753 | } |
754 | } |
755 | |
756 | static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { |
	.name = "uvd_v4_2",
758 | .early_init = uvd_v4_2_early_init, |
759 | .late_init = NULL, |
760 | .sw_init = uvd_v4_2_sw_init, |
761 | .sw_fini = uvd_v4_2_sw_fini, |
762 | .hw_init = uvd_v4_2_hw_init, |
763 | .hw_fini = uvd_v4_2_hw_fini, |
764 | .prepare_suspend = uvd_v4_2_prepare_suspend, |
765 | .suspend = uvd_v4_2_suspend, |
766 | .resume = uvd_v4_2_resume, |
767 | .is_idle = uvd_v4_2_is_idle, |
768 | .wait_for_idle = uvd_v4_2_wait_for_idle, |
769 | .soft_reset = uvd_v4_2_soft_reset, |
770 | .set_clockgating_state = uvd_v4_2_set_clockgating_state, |
771 | .set_powergating_state = uvd_v4_2_set_powergating_state, |
772 | }; |
773 | |
774 | static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { |
775 | .type = AMDGPU_RING_TYPE_UVD, |
776 | .align_mask = 0xf, |
777 | .support_64bit_ptrs = false, |
778 | .no_user_fence = true, |
779 | .get_rptr = uvd_v4_2_ring_get_rptr, |
780 | .get_wptr = uvd_v4_2_ring_get_wptr, |
781 | .set_wptr = uvd_v4_2_ring_set_wptr, |
782 | .parse_cs = amdgpu_uvd_ring_parse_cs, |
783 | .emit_frame_size = |
784 | 14, /* uvd_v4_2_ring_emit_fence x1 no user fence */ |
785 | .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */ |
786 | .emit_ib = uvd_v4_2_ring_emit_ib, |
787 | .emit_fence = uvd_v4_2_ring_emit_fence, |
788 | .test_ring = uvd_v4_2_ring_test_ring, |
789 | .test_ib = amdgpu_uvd_ring_test_ib, |
790 | .insert_nop = uvd_v4_2_ring_insert_nop, |
791 | .pad_ib = amdgpu_ring_generic_pad_ib, |
792 | .begin_use = amdgpu_uvd_ring_begin_use, |
793 | .end_use = amdgpu_uvd_ring_end_use, |
794 | }; |
795 | |
796 | static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) |
797 | { |
798 | adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs; |
799 | } |
800 | |
801 | static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { |
802 | .set = uvd_v4_2_set_interrupt_state, |
803 | .process = uvd_v4_2_process_interrupt, |
804 | }; |
805 | |
806 | static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) |
807 | { |
808 | adev->uvd.inst->irq.num_types = 1; |
809 | adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs; |
810 | } |
811 | |
const struct amdgpu_ip_block_version uvd_v4_2_ip_block = {
814 | .type = AMD_IP_BLOCK_TYPE_UVD, |
815 | .major = 4, |
816 | .minor = 2, |
817 | .rev = 0, |
818 | .funcs = &uvd_v4_2_ip_funcs, |
819 | }; |
820 | |