1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include <linux/firmware.h> |
25 | |
26 | #include "amdgpu.h" |
27 | #include "amdgpu_uvd.h" |
28 | #include "amdgpu_cs.h" |
29 | #include "soc15.h" |
30 | #include "soc15d.h" |
31 | #include "soc15_common.h" |
32 | #include "mmsch_v1_0.h" |
33 | |
34 | #include "uvd/uvd_7_0_offset.h" |
35 | #include "uvd/uvd_7_0_sh_mask.h" |
36 | #include "vce/vce_4_0_offset.h" |
37 | #include "vce/vce_4_0_default.h" |
38 | #include "vce/vce_4_0_sh_mask.h" |
39 | #include "nbif/nbif_6_1_offset.h" |
40 | #include "mmhub/mmhub_1_0_offset.h" |
41 | #include "mmhub/mmhub_1_0_sh_mask.h" |
42 | #include "ivsrcid/uvd/irqsrcs_uvd_7_0.h" |
43 | |
44 | #define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7 |
45 | #define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1 |
46 | //UVD_PG0_CC_UVD_HARVESTING |
47 | #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1 |
48 | #define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L |
49 | |
50 | #define UVD7_MAX_HW_INSTANCES_VEGA20 2 |
51 | |
52 | static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev); |
53 | static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev); |
54 | static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev); |
55 | static int uvd_v7_0_start(struct amdgpu_device *adev); |
56 | static void uvd_v7_0_stop(struct amdgpu_device *adev); |
57 | static int uvd_v7_0_sriov_start(struct amdgpu_device *adev); |
58 | |
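/* IH client IDs indexed by UVD instance; Vega20 exposes a second UVD
 * instance with its own interrupt client.
 */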
59 | static int amdgpu_ih_clientid_uvds[] = { |
60 | SOC15_IH_CLIENTID_UVD, |
61 | SOC15_IH_CLIENTID_UVD1 |
62 | }; |
63 | |
64 | /** |
65 | * uvd_v7_0_ring_get_rptr - get read pointer |
66 | * |
67 | * @ring: amdgpu_ring pointer |
68 | * |
69 | * Returns the current hardware read pointer |
70 | */ |
71 | static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring) |
72 | { |
73 | struct amdgpu_device *adev = ring->adev; |
74 | |
75 | return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); |
76 | } |
77 | |
78 | /** |
79 | * uvd_v7_0_enc_ring_get_rptr - get enc read pointer |
80 | * |
81 | * @ring: amdgpu_ring pointer |
82 | * |
83 | * Returns the current hardware enc read pointer |
84 | */ |
85 | static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) |
86 | { |
87 | struct amdgpu_device *adev = ring->adev; |
88 | |
89 | if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) |
90 | return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); |
91 | else |
92 | return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); |
93 | } |
94 | |
95 | /** |
96 | * uvd_v7_0_ring_get_wptr - get write pointer |
97 | * |
98 | * @ring: amdgpu_ring pointer |
99 | * |
100 | * Returns the current hardware write pointer |
101 | */ |
102 | static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring) |
103 | { |
104 | struct amdgpu_device *adev = ring->adev; |
105 | |
106 | return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); |
107 | } |
108 | |
109 | /** |
110 | * uvd_v7_0_enc_ring_get_wptr - get enc write pointer |
111 | * |
112 | * @ring: amdgpu_ring pointer |
113 | * |
114 | * Returns the current hardware enc write pointer |
115 | */ |
116 | static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) |
117 | { |
118 | struct amdgpu_device *adev = ring->adev; |
119 | |
120 | if (ring->use_doorbell) |
121 | return *ring->wptr_cpu_addr; |
122 | |
123 | if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) |
124 | return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); |
125 | else |
126 | return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); |
127 | } |
128 | |
129 | /** |
130 | * uvd_v7_0_ring_set_wptr - set write pointer |
131 | * |
132 | * @ring: amdgpu_ring pointer |
133 | * |
134 | * Commits the write pointer to the hardware |
135 | */ |
136 | static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring) |
137 | { |
138 | struct amdgpu_device *adev = ring->adev; |
139 | |
140 | WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
141 | } |
142 | |
143 | /** |
144 | * uvd_v7_0_enc_ring_set_wptr - set enc write pointer |
145 | * |
146 | * @ring: amdgpu_ring pointer |
147 | * |
148 | * Commits the enc write pointer to the hardware |
149 | */ |
150 | static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) |
151 | { |
152 | struct amdgpu_device *adev = ring->adev; |
153 | |
154 | if (ring->use_doorbell) { |
155 | /* XXX check if swapping is necessary on BE */ |
156 | *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); |
157 | WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); |
158 | return; |
159 | } |
160 | |
161 | if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) |
162 | WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, |
163 | lower_32_bits(ring->wptr)); |
164 | else |
165 | WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, |
166 | lower_32_bits(ring->wptr)); |
167 | } |
168 | |
169 | /** |
170 | * uvd_v7_0_enc_ring_test_ring - test if UVD ENC ring is working |
171 | * |
172 | * @ring: the engine to test on |
173 | * |
174 | */ |
175 | static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) |
176 | { |
177 | struct amdgpu_device *adev = ring->adev; |
178 | uint32_t rptr; |
179 | unsigned i; |
180 | int r; |
181 | |
182 | if (amdgpu_sriov_vf(adev)) |
183 | return 0; |
184 | |
r = amdgpu_ring_alloc(ring, 16);
186 | if (r) |
187 | return r; |
188 | |
189 | rptr = amdgpu_ring_get_rptr(ring); |
190 | |
191 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); |
192 | amdgpu_ring_commit(ring); |
193 | |
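/* A bare END command is enough here: once the engine consumes it the
 * hardware read pointer advances past it, which is what the poll below
 * detects.
 */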
194 | for (i = 0; i < adev->usec_timeout; i++) { |
195 | if (amdgpu_ring_get_rptr(ring) != rptr) |
196 | break; |
197 | udelay(1); |
198 | } |
199 | |
200 | if (i >= adev->usec_timeout) |
201 | r = -ETIMEDOUT; |
202 | |
203 | return r; |
204 | } |
205 | |
206 | /** |
207 | * uvd_v7_0_enc_get_create_msg - generate a UVD ENC create msg |
208 | * |
209 | * @ring: ring we should submit the msg to |
210 | * @handle: session handle to use |
211 | * @bo: amdgpu object for which we query the offset |
212 | * @fence: optional fence to return |
213 | * |
214 | * Open up a stream for HW test |
215 | */ |
216 | static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, |
217 | struct amdgpu_bo *bo, |
218 | struct dma_fence **fence) |
219 | { |
220 | const unsigned ib_size_dw = 16; |
221 | struct amdgpu_job *job; |
222 | struct amdgpu_ib *ib; |
223 | struct dma_fence *f = NULL; |
224 | uint64_t addr; |
225 | int i, r; |
226 | |
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
229 | if (r) |
230 | return r; |
231 | |
232 | ib = &job->ibs[0]; |
233 | addr = amdgpu_bo_gpu_offset(bo); |
234 | |
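/* Minimal session-create message: each packet begins with its size in
 * bytes followed by a type dword (0x1 session info, 0x2 task info,
 * 0x08000001 the initialize op).
 */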
235 | ib->length_dw = 0; |
236 | ib->ptr[ib->length_dw++] = 0x00000018; |
237 | ib->ptr[ib->length_dw++] = 0x00000001; /* session info */ |
238 | ib->ptr[ib->length_dw++] = handle; |
239 | ib->ptr[ib->length_dw++] = 0x00000000; |
240 | ib->ptr[ib->length_dw++] = upper_32_bits(addr); |
241 | ib->ptr[ib->length_dw++] = addr; |
242 | |
243 | ib->ptr[ib->length_dw++] = 0x00000014; |
244 | ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ |
245 | ib->ptr[ib->length_dw++] = 0x0000001c; |
246 | ib->ptr[ib->length_dw++] = 0x00000000; |
247 | ib->ptr[ib->length_dw++] = 0x00000000; |
248 | |
249 | ib->ptr[ib->length_dw++] = 0x00000008; |
250 | ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */ |
251 | |
252 | for (i = ib->length_dw; i < ib_size_dw; ++i) |
253 | ib->ptr[i] = 0x0; |
254 | |
r = amdgpu_job_submit_direct(job, ring, &f);
256 | if (r) |
257 | goto err; |
258 | |
259 | if (fence) |
*fence = dma_fence_get(f);
dma_fence_put(f);
262 | return 0; |
263 | |
264 | err: |
265 | amdgpu_job_free(job); |
266 | return r; |
267 | } |
268 | |
269 | /** |
270 | * uvd_v7_0_enc_get_destroy_msg - generate a UVD ENC destroy msg |
271 | * |
272 | * @ring: ring we should submit the msg to |
273 | * @handle: session handle to use |
274 | * @bo: amdgpu object for which we query the offset |
275 | * @fence: optional fence to return |
276 | * |
277 | * Close up a stream for HW test or if userspace failed to do so |
278 | */ |
279 | static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, |
280 | struct amdgpu_bo *bo, |
281 | struct dma_fence **fence) |
282 | { |
283 | const unsigned ib_size_dw = 16; |
284 | struct amdgpu_job *job; |
285 | struct amdgpu_ib *ib; |
286 | struct dma_fence *f = NULL; |
287 | uint64_t addr; |
288 | int i, r; |
289 | |
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
AMDGPU_IB_POOL_DIRECT, &job);
292 | if (r) |
293 | return r; |
294 | |
295 | ib = &job->ibs[0]; |
296 | addr = amdgpu_bo_gpu_offset(bo); |
297 | |
298 | ib->length_dw = 0; |
299 | ib->ptr[ib->length_dw++] = 0x00000018; |
300 | ib->ptr[ib->length_dw++] = 0x00000001; |
301 | ib->ptr[ib->length_dw++] = handle; |
302 | ib->ptr[ib->length_dw++] = 0x00000000; |
303 | ib->ptr[ib->length_dw++] = upper_32_bits(addr); |
304 | ib->ptr[ib->length_dw++] = addr; |
305 | |
306 | ib->ptr[ib->length_dw++] = 0x00000014; |
307 | ib->ptr[ib->length_dw++] = 0x00000002; |
308 | ib->ptr[ib->length_dw++] = 0x0000001c; |
309 | ib->ptr[ib->length_dw++] = 0x00000000; |
310 | ib->ptr[ib->length_dw++] = 0x00000000; |
311 | |
312 | ib->ptr[ib->length_dw++] = 0x00000008; |
313 | ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */ |
314 | |
315 | for (i = ib->length_dw; i < ib_size_dw; ++i) |
316 | ib->ptr[i] = 0x0; |
317 | |
r = amdgpu_job_submit_direct(job, ring, &f);
319 | if (r) |
320 | goto err; |
321 | |
322 | if (fence) |
*fence = dma_fence_get(f);
dma_fence_put(f);
325 | return 0; |
326 | |
327 | err: |
328 | amdgpu_job_free(job); |
329 | return r; |
330 | } |
331 | |
332 | /** |
333 | * uvd_v7_0_enc_ring_test_ib - test if UVD ENC IBs are working |
334 | * |
335 | * @ring: the engine to test on |
336 | * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT |
337 | * |
338 | */ |
339 | static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
340 | { |
341 | struct dma_fence *fence = NULL; |
342 | struct amdgpu_bo *bo = ring->adev->uvd.ib_bo; |
343 | long r; |
344 | |
r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
346 | if (r) |
347 | goto error; |
348 | |
r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
350 | if (r) |
351 | goto error; |
352 | |
r = dma_fence_wait_timeout(fence, false, timeout);
354 | if (r == 0) |
355 | r = -ETIMEDOUT; |
356 | else if (r > 0) |
357 | r = 0; |
358 | |
359 | error: |
360 | dma_fence_put(fence); |
361 | return r; |
362 | } |
363 | |
364 | static int uvd_v7_0_early_init(void *handle) |
365 | { |
366 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
367 | |
368 | if (adev->asic_type == CHIP_VEGA20) { |
369 | u32 harvest; |
370 | int i; |
371 | |
372 | adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20; |
373 | for (i = 0; i < adev->uvd.num_uvd_inst; i++) { |
374 | harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING); |
375 | if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) { |
376 | adev->uvd.harvest_config |= 1 << i; |
377 | } |
378 | } |
379 | if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 | |
380 | AMDGPU_UVD_HARVEST_UVD1)) |
381 | /* both instances are harvested, disable the block */ |
382 | return -ENOENT; |
383 | } else { |
384 | adev->uvd.num_uvd_inst = 1; |
385 | } |
386 | |
387 | if (amdgpu_sriov_vf(adev)) |
388 | adev->uvd.num_enc_rings = 1; |
389 | else |
390 | adev->uvd.num_enc_rings = 2; |
391 | uvd_v7_0_set_ring_funcs(adev); |
392 | uvd_v7_0_set_enc_ring_funcs(adev); |
393 | uvd_v7_0_set_irq_funcs(adev); |
394 | |
395 | return 0; |
396 | } |
397 | |
398 | static int uvd_v7_0_sw_init(void *handle) |
399 | { |
400 | struct amdgpu_ring *ring; |
401 | |
402 | int i, j, r; |
403 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
404 | |
405 | for (j = 0; j < adev->uvd.num_uvd_inst; j++) { |
406 | if (adev->uvd.harvest_config & (1 << j)) |
407 | continue; |
408 | /* UVD TRAP */ |
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
410 | if (r) |
411 | return r; |
412 | |
413 | /* UVD ENC TRAP */ |
414 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + UVD_7_0__SRCID__UVD_ENC_GEN_PURP, &adev->uvd.inst[j].irq);
416 | if (r) |
417 | return r; |
418 | } |
419 | } |
420 | |
421 | r = amdgpu_uvd_sw_init(adev); |
422 | if (r) |
423 | return r; |
424 | |
425 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
426 | const struct common_firmware_header *hdr; |
427 | hdr = (const struct common_firmware_header *)adev->uvd.fw->data; |
428 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].ucode_id = AMDGPU_UCODE_ID_UVD; |
429 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].fw = adev->uvd.fw; |
430 | adev->firmware.fw_size += |
431 | ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); |
432 | |
433 | if (adev->uvd.num_uvd_inst == UVD7_MAX_HW_INSTANCES_VEGA20) { |
434 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].ucode_id = AMDGPU_UCODE_ID_UVD1; |
435 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].fw = adev->uvd.fw; |
436 | adev->firmware.fw_size += |
437 | ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE); |
438 | } |
DRM_INFO("PSP loading UVD firmware\n");
440 | } |
441 | |
442 | for (j = 0; j < adev->uvd.num_uvd_inst; j++) { |
443 | if (adev->uvd.harvest_config & (1 << j)) |
444 | continue; |
445 | if (!amdgpu_sriov_vf(adev)) { |
446 | ring = &adev->uvd.inst[j].ring; |
447 | ring->vm_hub = AMDGPU_MMHUB0(0); |
sprintf(ring->name, "uvd_%d", ring->me);
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
452 | if (r) |
453 | return r; |
454 | } |
455 | |
456 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
457 | ring = &adev->uvd.inst[j].ring_enc[i]; |
458 | ring->vm_hub = AMDGPU_MMHUB0(0); |
sprintf(ring->name, "uvd_enc_%d.%d", ring->me, i);
460 | if (amdgpu_sriov_vf(adev)) { |
461 | ring->use_doorbell = true; |
462 | |
/* currently only the first encoding ring is used for
* sriov, so set unused locations for the other rings.
*/
466 | if (i == 0) |
467 | ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2; |
468 | else |
469 | ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1; |
470 | } |
r = amdgpu_ring_init(adev, ring, 512,
&adev->uvd.inst[j].irq, 0,
AMDGPU_RING_PRIO_DEFAULT, NULL);
474 | if (r) |
475 | return r; |
476 | } |
477 | } |
478 | |
479 | r = amdgpu_uvd_resume(adev); |
480 | if (r) |
481 | return r; |
482 | |
483 | r = amdgpu_uvd_entity_init(adev); |
484 | if (r) |
485 | return r; |
486 | |
487 | r = amdgpu_virt_alloc_mm_table(adev); |
488 | if (r) |
489 | return r; |
490 | |
491 | return r; |
492 | } |
493 | |
494 | static int uvd_v7_0_sw_fini(void *handle) |
495 | { |
496 | int i, j, r; |
497 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
498 | |
499 | amdgpu_virt_free_mm_table(adev); |
500 | |
501 | r = amdgpu_uvd_suspend(adev); |
502 | if (r) |
503 | return r; |
504 | |
505 | for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { |
506 | if (adev->uvd.harvest_config & (1 << j)) |
507 | continue; |
508 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) |
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
510 | } |
511 | return amdgpu_uvd_sw_fini(adev); |
512 | } |
513 | |
514 | /** |
515 | * uvd_v7_0_hw_init - start and test UVD block |
516 | * |
517 | * @handle: handle used to pass amdgpu_device pointer |
518 | * |
519 | * Initialize the hardware, boot up the VCPU and do some testing |
520 | */ |
521 | static int uvd_v7_0_hw_init(void *handle) |
522 | { |
523 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
524 | struct amdgpu_ring *ring; |
525 | uint32_t tmp; |
526 | int i, j, r; |
527 | |
528 | if (amdgpu_sriov_vf(adev)) |
529 | r = uvd_v7_0_sriov_start(adev); |
530 | else |
531 | r = uvd_v7_0_start(adev); |
532 | if (r) |
533 | goto done; |
534 | |
535 | for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { |
536 | if (adev->uvd.harvest_config & (1 << j)) |
537 | continue; |
538 | ring = &adev->uvd.inst[j].ring; |
539 | |
540 | if (!amdgpu_sriov_vf(adev)) { |
541 | r = amdgpu_ring_test_helper(ring); |
542 | if (r) |
543 | goto done; |
544 | |
r = amdgpu_ring_alloc(ring, 10);
546 | if (r) { |
DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r);
548 | goto done; |
549 | } |
550 | |
551 | tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, |
552 | mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); |
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
555 | |
556 | tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, |
557 | mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); |
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
560 | |
561 | tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, |
562 | mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); |
amdgpu_ring_write(ring, tmp);
amdgpu_ring_write(ring, 0xFFFFF);
565 | |
566 | /* Clear timeout status bits */ |
567 | amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, |
568 | mmUVD_SEMA_TIMEOUT_STATUS), 0)); |
amdgpu_ring_write(ring, 0x8);
570 | |
571 | amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, |
572 | mmUVD_SEMA_CNTL), 0)); |
amdgpu_ring_write(ring, 3);
574 | |
575 | amdgpu_ring_commit(ring); |
576 | } |
577 | |
578 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
579 | ring = &adev->uvd.inst[j].ring_enc[i]; |
580 | r = amdgpu_ring_test_helper(ring); |
581 | if (r) |
582 | goto done; |
583 | } |
584 | } |
585 | done: |
586 | if (!r) |
DRM_INFO("UVD and UVD ENC initialized successfully.\n");
588 | |
589 | return r; |
590 | } |
591 | |
592 | /** |
593 | * uvd_v7_0_hw_fini - stop the hardware block |
594 | * |
595 | * @handle: handle used to pass amdgpu_device pointer |
596 | * |
597 | * Stop the UVD block, mark ring as not ready any more |
598 | */ |
599 | static int uvd_v7_0_hw_fini(void *handle) |
600 | { |
601 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
602 | |
cancel_delayed_work_sync(&adev->uvd.idle_work);
604 | |
605 | if (!amdgpu_sriov_vf(adev)) |
606 | uvd_v7_0_stop(adev); |
607 | else { |
608 | /* full access mode, so don't touch any UVD register */ |
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
610 | } |
611 | |
612 | return 0; |
613 | } |
614 | |
615 | static int uvd_v7_0_prepare_suspend(void *handle) |
616 | { |
617 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
618 | |
619 | return amdgpu_uvd_prepare_suspend(adev); |
620 | } |
621 | |
622 | static int uvd_v7_0_suspend(void *handle) |
623 | { |
624 | int r; |
625 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
626 | |
627 | /* |
628 | * Proper cleanups before halting the HW engine: |
629 | * - cancel the delayed idle work |
630 | * - enable powergating |
631 | * - enable clockgating |
632 | * - disable dpm |
633 | * |
634 | * TODO: to align with the VCN implementation, move the |
635 | * jobs for clockgating/powergating/dpm setting to |
636 | * ->set_powergating_state(). |
637 | */ |
cancel_delayed_work_sync(&adev->uvd.idle_work);
639 | |
640 | if (adev->pm.dpm_enabled) { |
amdgpu_dpm_enable_uvd(adev, false);
642 | } else { |
643 | amdgpu_asic_set_uvd_clocks(adev, 0, 0); |
644 | /* shutdown the UVD block */ |
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
649 | } |
650 | |
r = uvd_v7_0_hw_fini(adev);
652 | if (r) |
653 | return r; |
654 | |
655 | return amdgpu_uvd_suspend(adev); |
656 | } |
657 | |
658 | static int uvd_v7_0_resume(void *handle) |
659 | { |
660 | int r; |
661 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
662 | |
663 | r = amdgpu_uvd_resume(adev); |
664 | if (r) |
665 | return r; |
666 | |
return uvd_v7_0_hw_init(adev);
668 | } |
669 | |
670 | /** |
671 | * uvd_v7_0_mc_resume - memory controller programming |
672 | * |
673 | * @adev: amdgpu_device pointer |
674 | * |
* Let the UVD memory controller know its offsets
676 | */ |
677 | static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) |
678 | { |
679 | uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev); |
680 | uint32_t offset; |
681 | int i; |
682 | |
683 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { |
684 | if (adev->uvd.harvest_config & (1 << i)) |
685 | continue; |
686 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
687 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
688 | i == 0 ? |
689 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo : |
690 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_lo); |
691 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
692 | i == 0 ? |
693 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi : |
694 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD1].tmr_mc_addr_hi); |
695 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0); |
696 | offset = 0; |
697 | } else { |
698 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
699 | lower_32_bits(adev->uvd.inst[i].gpu_addr)); |
700 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
701 | upper_32_bits(adev->uvd.inst[i].gpu_addr)); |
702 | offset = size; |
703 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, |
704 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3); |
705 | } |
706 | |
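/* Three VCPU cache windows follow: window 0 maps the firmware image,
 * window 1 the heap, window 2 the stack plus per-session buffers.
 */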
707 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); |
708 | |
709 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, |
710 | lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); |
711 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, |
712 | upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); |
713 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); |
714 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); |
715 | |
716 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, |
717 | lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
718 | WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, |
719 | upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
720 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); |
721 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, |
722 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); |
723 | |
724 | WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG, |
725 | adev->gfx.config.gb_addr_config); |
726 | WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG, |
727 | adev->gfx.config.gb_addr_config); |
728 | WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG, |
729 | adev->gfx.config.gb_addr_config); |
730 | |
731 | WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); |
732 | } |
733 | } |
734 | |
735 | static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, |
736 | struct amdgpu_mm_table *table) |
737 | { |
738 | uint32_t data = 0, loop; |
739 | uint64_t addr = table->gpu_addr; |
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr;
741 | uint32_t size; |
742 | int i; |
743 | |
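/* The descriptor sizes are stored in dwords; see the header setup in
 * uvd_v7_0_sriov_start() where header_size is sizeof(header) >> 2.
 */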
744 | size = header->header_size + header->vce_table_size + header->uvd_table_size; |
745 | |
746 | /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of memory descriptor location */ |
747 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); |
748 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); |
749 | |
750 | /* 2, update vmid of descriptor */ |
751 | data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID); |
752 | data &= ~VCE_MMSCH_VF_VMID__VF_CTX_VMID_MASK; |
753 | data |= (0 << VCE_MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); /* use domain0 for MM scheduler */ |
754 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_VMID, data); |
755 | |
756 | /* 3, notify mmsch about the size of this descriptor */ |
757 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_CTX_SIZE, size); |
758 | |
759 | /* 4, set resp to zero */ |
760 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); |
761 | |
762 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { |
763 | if (adev->uvd.harvest_config & (1 << i)) |
764 | continue; |
765 | WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); |
766 | *adev->uvd.inst[i].ring_enc[0].wptr_cpu_addr = 0; |
767 | adev->uvd.inst[i].ring_enc[0].wptr = 0; |
768 | adev->uvd.inst[i].ring_enc[0].wptr_old = 0; |
769 | } |
770 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ |
771 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); |
772 | |
773 | data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP); |
774 | loop = 1000; |
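/* Poll the response register for the 0x10000002 ack; 1000 iterations
 * of 10us gives MMSCH roughly 10ms to finish initialization.
 */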
775 | while ((data & 0x10000002) != 0x10000002) { |
776 | udelay(10); |
777 | data = RREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP); |
778 | loop--; |
779 | if (!loop) |
780 | break; |
781 | } |
782 | |
783 | if (!loop) { |
dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data);
785 | return -EBUSY; |
786 | } |
787 | |
788 | return 0; |
789 | } |
790 | |
791 | static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) |
792 | { |
793 | struct amdgpu_ring *ring; |
794 | uint32_t offset, size, tmp; |
795 | uint32_t table_size = 0; |
796 | struct mmsch_v1_0_cmd_direct_write direct_wt = { {0} }; |
797 | struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} }; |
798 | struct mmsch_v1_0_cmd_direct_polling direct_poll = { {0} }; |
799 | struct mmsch_v1_0_cmd_end end = { {0} }; |
800 | uint32_t *init_table = adev->virt.mm_table.cpu_addr; |
struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table;
802 | uint8_t i = 0; |
803 | |
804 | direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; |
805 | direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; |
806 | direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING; |
807 | end.cmd_header.command_type = MMSCH_COMMAND__END; |
808 | |
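/* Build the UVD init table only once; if VCE has already populated the
 * shared descriptor, the UVD table is appended behind the VCE table.
 */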
809 | if (header->uvd_table_offset == 0 && header->uvd_table_size == 0) { |
810 | header->version = MMSCH_VERSION; |
811 | header->header_size = sizeof(struct mmsch_v1_0_init_header) >> 2; |
812 | |
813 | if (header->vce_table_offset == 0 && header->vce_table_size == 0) |
814 | header->uvd_table_offset = header->header_size; |
815 | else |
816 | header->uvd_table_offset = header->vce_table_size + header->vce_table_offset; |
817 | |
818 | init_table += header->uvd_table_offset; |
819 | |
820 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { |
821 | if (adev->uvd.harvest_config & (1 << i)) |
822 | continue; |
823 | ring = &adev->uvd.inst[i].ring; |
824 | ring->wptr = 0; |
825 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); |
826 | |
827 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), |
828 | 0xFFFFFFFF, 0x00000004); |
/* mc resume */
830 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
831 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, |
832 | mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
833 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_lo); |
834 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, |
835 | mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
836 | adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].tmr_mc_addr_hi); |
837 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0); |
838 | offset = 0; |
839 | } else { |
840 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
841 | lower_32_bits(adev->uvd.inst[i].gpu_addr)); |
842 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
843 | upper_32_bits(adev->uvd.inst[i].gpu_addr)); |
844 | offset = size; |
845 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), |
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
849 | |
850 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size); |
851 | |
852 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), |
853 | lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); |
854 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), |
855 | upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); |
856 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); |
857 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); |
858 | |
859 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), |
860 | lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
861 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), |
862 | upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); |
863 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); |
864 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), |
865 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); |
866 | |
867 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); |
/* mc resume end */
869 | |
870 | /* disable clock gating */ |
871 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL), |
872 | ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); |
873 | |
/* disable interrupt */
875 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), |
876 | ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); |
877 | |
878 | /* stall UMC and register bus before resetting VCPU */ |
879 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), |
880 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, |
881 | UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
882 | |
883 | /* put LMI, VCPU, RBC etc... into reset */ |
884 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), |
885 | (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | |
886 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | |
887 | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | |
888 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | |
889 | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | |
890 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | |
891 | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | |
892 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); |
893 | |
894 | /* initialize UVD memory controller */ |
895 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL), |
896 | (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | |
897 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
898 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
899 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | |
900 | UVD_LMI_CTRL__REQ_MODE_MASK | |
901 | 0x00100000L)); |
902 | |
903 | /* take all subblocks out of reset, except VCPU */ |
904 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), |
905 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
906 | |
907 | /* enable VCPU clock */ |
908 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), |
909 | UVD_VCPU_CNTL__CLK_EN_MASK); |
910 | |
911 | /* enable master interrupt */ |
912 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), |
913 | ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), |
914 | (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); |
915 | |
916 | /* clear the bit 4 of UVD_STATUS */ |
917 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), |
918 | ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); |
919 | |
920 | /* force RBC into idle state */ |
921 | size = order_base_2(ring->ring_size); |
922 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); |
923 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
924 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); |
925 | |
926 | ring = &adev->uvd.inst[i].ring_enc[0]; |
927 | ring->wptr = 0; |
928 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr); |
929 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); |
930 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4); |
931 | |
932 | /* boot up the VCPU */ |
933 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0); |
934 | |
935 | /* enable UMC */ |
936 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), |
937 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); |
938 | |
939 | MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02); |
940 | } |
941 | /* add end packet */ |
942 | memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); |
943 | table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; |
944 | header->uvd_table_size = table_size; |
945 | |
946 | } |
return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table);
948 | } |
949 | |
950 | /** |
951 | * uvd_v7_0_start - start UVD block |
952 | * |
953 | * @adev: amdgpu_device pointer |
954 | * |
955 | * Setup and start the UVD block |
956 | */ |
957 | static int uvd_v7_0_start(struct amdgpu_device *adev) |
958 | { |
959 | struct amdgpu_ring *ring; |
960 | uint32_t rb_bufsz, tmp; |
961 | uint32_t lmi_swap_cntl; |
962 | uint32_t mp_swap_cntl; |
963 | int i, j, k, r; |
964 | |
965 | for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { |
966 | if (adev->uvd.harvest_config & (1 << k)) |
967 | continue; |
968 | /* disable DPG */ |
969 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0, |
970 | ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); |
971 | } |
972 | |
973 | /* disable byte swapping */ |
974 | lmi_swap_cntl = 0; |
975 | mp_swap_cntl = 0; |
976 | |
977 | uvd_v7_0_mc_resume(adev); |
978 | |
979 | for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { |
980 | if (adev->uvd.harvest_config & (1 << k)) |
981 | continue; |
982 | ring = &adev->uvd.inst[k].ring; |
983 | /* disable clock gating */ |
984 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0, |
985 | ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); |
986 | |
/* disable interrupt */
988 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0, |
989 | ~UVD_MASTINT_EN__VCPU_EN_MASK); |
990 | |
991 | /* stall UMC and register bus before resetting VCPU */ |
992 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), |
993 | UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, |
994 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
995 | mdelay(1); |
996 | |
997 | /* put LMI, VCPU, RBC etc... into reset */ |
998 | WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, |
999 | UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | |
1000 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | |
1001 | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | |
1002 | UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | |
1003 | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | |
1004 | UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | |
1005 | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | |
1006 | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); |
1007 | mdelay(5); |
1008 | |
1009 | /* initialize UVD memory controller */ |
1010 | WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL, |
1011 | (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | |
1012 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
1013 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
1014 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | |
1015 | UVD_LMI_CTRL__REQ_MODE_MASK | |
1016 | 0x00100000L); |
1017 | |
1018 | #ifdef __BIG_ENDIAN |
1019 | /* swap (8 in 32) RB and IB */ |
1020 | lmi_swap_cntl = 0xa; |
1021 | mp_swap_cntl = 0; |
1022 | #endif |
1023 | WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); |
1024 | WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); |
1025 | |
1026 | WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040); |
1027 | WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0); |
1028 | WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040); |
1029 | WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0); |
1030 | WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0); |
1031 | WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88); |
1032 | |
1033 | /* take all subblocks out of reset, except VCPU */ |
1034 | WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, |
1035 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
1036 | mdelay(5); |
1037 | |
1038 | /* enable VCPU clock */ |
1039 | WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL, |
1040 | UVD_VCPU_CNTL__CLK_EN_MASK); |
1041 | |
1042 | /* enable UMC */ |
1043 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0, |
1044 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
1045 | |
1046 | /* boot up the VCPU */ |
1047 | WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0); |
1048 | mdelay(10); |
1049 | |
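/* Give the VCPU up to ten attempts to come up: poll UVD_STATUS until
 * the VCPU reports ready (bit 1), pulsing the VCPU soft reset between
 * attempts.
 */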
1050 | for (i = 0; i < 10; ++i) { |
1051 | uint32_t status; |
1052 | |
1053 | for (j = 0; j < 100; ++j) { |
1054 | status = RREG32_SOC15(UVD, k, mmUVD_STATUS); |
1055 | if (status & 2) |
1056 | break; |
1057 | mdelay(10); |
1058 | } |
1059 | r = 0; |
1060 | if (status & 2) |
1061 | break; |
1062 | |
DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k);
1064 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), |
1065 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, |
1066 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
1067 | mdelay(10); |
1068 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0, |
1069 | ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
1070 | mdelay(10); |
1071 | r = -1; |
1072 | } |
1073 | |
1074 | if (r) { |
DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k);
1076 | return r; |
1077 | } |
1078 | /* enable master interrupt */ |
1079 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), |
1080 | (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), |
1081 | ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); |
1082 | |
1083 | /* clear the bit 4 of UVD_STATUS */ |
1084 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0, |
1085 | ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); |
1086 | |
1087 | /* force RBC into idle state */ |
1088 | rb_bufsz = order_base_2(ring->ring_size); |
1089 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); |
1090 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
1091 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
1092 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); |
1093 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); |
1094 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); |
1095 | WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp); |
1096 | |
1097 | /* set the write pointer delay */ |
1098 | WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0); |
1099 | |
1100 | /* set the wb address */ |
1101 | WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR, |
1102 | (upper_32_bits(ring->gpu_addr) >> 2)); |
1103 | |
1104 | /* program the RB_BASE for ring buffer */ |
1105 | WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, |
1106 | lower_32_bits(ring->gpu_addr)); |
1107 | WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, |
1108 | upper_32_bits(ring->gpu_addr)); |
1109 | |
1110 | /* Initialize the ring buffer's read and write pointers */ |
1111 | WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0); |
1112 | |
1113 | ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR); |
1114 | WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR, |
1115 | lower_32_bits(ring->wptr)); |
1116 | |
1117 | WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0, |
1118 | ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); |
1119 | |
1120 | ring = &adev->uvd.inst[k].ring_enc[0]; |
1121 | WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
1122 | WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
1123 | WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr); |
1124 | WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); |
1125 | WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4); |
1126 | |
1127 | ring = &adev->uvd.inst[k].ring_enc[1]; |
1128 | WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
1129 | WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
1130 | WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr); |
1131 | WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); |
1132 | WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4); |
1133 | } |
1134 | return 0; |
1135 | } |
1136 | |
1137 | /** |
1138 | * uvd_v7_0_stop - stop UVD block |
1139 | * |
1140 | * @adev: amdgpu_device pointer |
1141 | * |
1142 | * stop the UVD block |
1143 | */ |
1144 | static void uvd_v7_0_stop(struct amdgpu_device *adev) |
1145 | { |
1146 | uint8_t i = 0; |
1147 | |
1148 | for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { |
1149 | if (adev->uvd.harvest_config & (1 << i)) |
1150 | continue; |
1151 | /* force RBC into idle state */ |
1152 | WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101); |
1153 | |
1154 | /* Stall UMC and register bus before resetting VCPU */ |
1155 | WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), |
1156 | UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, |
1157 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
1158 | mdelay(1); |
1159 | |
1160 | /* put VCPU into reset */ |
1161 | WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET, |
1162 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
1163 | mdelay(5); |
1164 | |
1165 | /* disable VCPU clock */ |
1166 | WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0); |
1167 | |
1168 | /* Unstall UMC and register bus */ |
1169 | WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, |
1170 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
1171 | } |
1172 | } |
1173 | |
1174 | /** |
* uvd_v7_0_ring_emit_fence - emit a fence & trap command
1176 | * |
1177 | * @ring: amdgpu_ring pointer |
1178 | * @addr: address |
1179 | * @seq: sequence number |
1180 | * @flags: fence related flags |
1181 | * |
1182 | * Write a fence and a trap command to the ring. |
1183 | */ |
1184 | static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
1185 | unsigned flags) |
1186 | { |
1187 | struct amdgpu_device *adev = ring->adev; |
1188 | |
1189 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); |
1190 | |
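/* The fence is emitted through the GPCOM mailbox: the sequence number
 * goes into UVD_CONTEXT_ID, DATA0/DATA1 carry the address, command 0
 * triggers the fence write and command 2 the trap.
 */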
1191 | amdgpu_ring_write(ring, |
1192 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); |
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, addr & 0xffffffff);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 0);

amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 2);
1213 | } |
1214 | |
1215 | /** |
1216 | * uvd_v7_0_enc_ring_emit_fence - emit an enc fence & trap command |
1217 | * |
1218 | * @ring: amdgpu_ring pointer |
1219 | * @addr: address |
1220 | * @seq: sequence number |
1221 | * @flags: fence related flags |
1222 | * |
* Write an enc fence and a trap command to the ring.
1224 | */ |
1225 | static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, |
1226 | u64 seq, unsigned flags) |
1227 | { |
1228 | |
1229 | WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); |
1230 | |
1231 | amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE); |
amdgpu_ring_write(ring, addr);
amdgpu_ring_write(ring, upper_32_bits(addr));
amdgpu_ring_write(ring, seq);
1235 | amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP); |
1236 | } |
1237 | |
1238 | /** |
1239 | * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing |
1240 | * |
1241 | * @ring: amdgpu_ring pointer |
1242 | */ |
1243 | static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
1244 | { |
1245 | /* The firmware doesn't seem to like touching registers at this point. */ |
1246 | } |
1247 | |
1248 | /** |
1249 | * uvd_v7_0_ring_test_ring - register write test |
1250 | * |
1251 | * @ring: amdgpu_ring pointer |
1252 | * |
1253 | * Test if we can successfully write to the context register |
1254 | */ |
1255 | static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) |
1256 | { |
1257 | struct amdgpu_device *adev = ring->adev; |
1258 | uint32_t tmp = 0; |
1259 | unsigned i; |
1260 | int r; |
1261 | |
1262 | WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); |
r = amdgpu_ring_alloc(ring, 3);
1264 | if (r) |
1265 | return r; |
1266 | |
1267 | amdgpu_ring_write(ring, |
1268 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); |
amdgpu_ring_write(ring, 0xDEADBEEF);
1270 | amdgpu_ring_commit(ring); |
1271 | for (i = 0; i < adev->usec_timeout; i++) { |
1272 | tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID); |
1273 | if (tmp == 0xDEADBEEF) |
1274 | break; |
1275 | udelay(1); |
1276 | } |
1277 | |
1278 | if (i >= adev->usec_timeout) |
1279 | r = -ETIMEDOUT; |
1280 | |
1281 | return r; |
1282 | } |
1283 | |
1284 | /** |
1285 | * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission. |
1286 | * |
1287 | * @p: the CS parser with the IBs |
1288 | * @job: which job this ib is in |
1289 | * @ib: which IB to patch |
1290 | * |
1291 | */ |
1292 | static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, |
1293 | struct amdgpu_job *job, |
1294 | struct amdgpu_ib *ib) |
1295 | { |
1296 | struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); |
1297 | unsigned i; |
1298 | |
1299 | /* No patching necessary for the first instance */ |
1300 | if (!ring->me) |
1301 | return 0; |
1302 | |
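/* Decode IBs are sequences of register/value pairs written against
 * instance 0; rebase each register offset onto this instance's aperture.
 */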
1303 | for (i = 0; i < ib->length_dw; i += 2) { |
uint32_t reg = amdgpu_ib_get_value(ib, i);
1305 | |
1306 | reg -= p->adev->reg_offset[UVD_HWIP][0][1]; |
1307 | reg += p->adev->reg_offset[UVD_HWIP][1][1]; |
1308 | |
amdgpu_ib_set_value(ib, i, reg);
1310 | } |
1311 | return 0; |
1312 | } |
1313 | |
1314 | /** |
1315 | * uvd_v7_0_ring_emit_ib - execute indirect buffer |
1316 | * |
1317 | * @ring: amdgpu_ring pointer |
1318 | * @job: job to retrieve vmid from |
1319 | * @ib: indirect buffer to execute |
1320 | * @flags: unused |
1321 | * |
1322 | * Write ring commands to execute the indirect buffer |
1323 | */ |
1324 | static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, |
1325 | struct amdgpu_job *job, |
1326 | struct amdgpu_ib *ib, |
1327 | uint32_t flags) |
1328 | { |
1329 | struct amdgpu_device *adev = ring->adev; |
1330 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
1331 | |
1332 | amdgpu_ring_write(ring, |
1333 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); |
amdgpu_ring_write(ring, vmid);
1335 | |
1336 | amdgpu_ring_write(ring, |
1337 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); |
1338 | amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); |
1339 | amdgpu_ring_write(ring, |
1340 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); |
1341 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); |
1342 | amdgpu_ring_write(ring, |
1343 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0)); |
amdgpu_ring_write(ring, ib->length_dw);
1345 | } |
1346 | |
1347 | /** |
1348 | * uvd_v7_0_enc_ring_emit_ib - enc execute indirect buffer |
1349 | * |
1350 | * @ring: amdgpu_ring pointer |
* @job: job to retrieve vmid from
1352 | * @ib: indirect buffer to execute |
1353 | * @flags: unused |
1354 | * |
1355 | * Write enc ring commands to execute the indirect buffer |
1356 | */ |
1357 | static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, |
1358 | struct amdgpu_job *job, |
1359 | struct amdgpu_ib *ib, |
1360 | uint32_t flags) |
1361 | { |
1362 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
1363 | |
1364 | amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM); |
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
amdgpu_ring_write(ring, ib->length_dw);
1369 | } |
1370 | |
1371 | static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, |
1372 | uint32_t reg, uint32_t val) |
1373 | { |
1374 | struct amdgpu_device *adev = ring->adev; |
1375 | |
1376 | amdgpu_ring_write(ring, |
1377 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); |
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 8);
1385 | } |
1386 | |
1387 | static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, |
1388 | uint32_t val, uint32_t mask) |
1389 | { |
1390 | struct amdgpu_device *adev = ring->adev; |
1391 | |
1392 | amdgpu_ring_write(ring, |
1393 | PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); |
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0));
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 12);
1404 | } |
1405 | |
1406 | static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1407 | unsigned vmid, uint64_t pd_addr) |
1408 | { |
1409 | struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; |
1410 | uint32_t data0, data1, mask; |
1411 | |
1412 | pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
1413 | |
1414 | /* wait for reg writes */ |
1415 | data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance; |
1416 | data1 = lower_32_bits(pd_addr); |
1417 | mask = 0xffffffff; |
uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
1419 | } |
1420 | |
1421 | static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) |
1422 | { |
1423 | struct amdgpu_device *adev = ring->adev; |
1424 | int i; |
1425 | |
1426 | WARN_ON(ring->wptr % 2 || count % 2); |
1427 | |
1428 | for (i = 0; i < count / 2; i++) { |
1429 | amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0)); |
amdgpu_ring_write(ring, 0);
1431 | } |
1432 | } |
1433 | |
1434 | static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) |
1435 | { |
1436 | amdgpu_ring_write(ring, HEVC_ENC_CMD_END); |
1437 | } |
1438 | |
1439 | static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, |
1440 | uint32_t reg, uint32_t val, |
1441 | uint32_t mask) |
1442 | { |
1443 | amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT); |
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, val);
1447 | } |
1448 | |
1449 | static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, |
1450 | unsigned int vmid, uint64_t pd_addr) |
1451 | { |
1452 | struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->vm_hub]; |
1453 | |
1454 | pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
1455 | |
1456 | /* wait for reg writes */ |
uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
vmid * hub->ctx_addr_distance,
lower_32_bits(pd_addr), 0xffffffff);
1460 | } |
1461 | |
1462 | static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, |
1463 | uint32_t reg, uint32_t val) |
1464 | { |
1465 | amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); |
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, val);
1468 | } |
1469 | |
1470 | #if 0 |
1471 | static bool uvd_v7_0_is_idle(void *handle) |
1472 | { |
1473 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1474 | |
1475 | return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK); |
1476 | } |
1477 | |
1478 | static int uvd_v7_0_wait_for_idle(void *handle) |
1479 | { |
1480 | unsigned i; |
1481 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1482 | |
1483 | for (i = 0; i < adev->usec_timeout; i++) { |
1484 | if (uvd_v7_0_is_idle(handle)) |
1485 | return 0; |
1486 | } |
1487 | return -ETIMEDOUT; |
1488 | } |
1489 | |
1490 | #define AMDGPU_UVD_STATUS_BUSY_MASK 0xfd |
1491 | static bool uvd_v7_0_check_soft_reset(void *handle) |
1492 | { |
1493 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1494 | u32 srbm_soft_reset = 0; |
1495 | u32 tmp = RREG32(mmSRBM_STATUS); |
1496 | |
1497 | if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || |
1498 | REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || |
1499 | (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & |
1500 | AMDGPU_UVD_STATUS_BUSY_MASK)) |
1501 | srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, |
1502 | SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); |
1503 | |
1504 | if (srbm_soft_reset) { |
1505 | adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; |
1506 | return true; |
1507 | } else { |
1508 | adev->uvd.inst[ring->me].srbm_soft_reset = 0; |
1509 | return false; |
1510 | } |
1511 | } |
1512 | |
1513 | static int uvd_v7_0_pre_soft_reset(void *handle) |
1514 | { |
1515 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1516 | |
1517 | if (!adev->uvd.inst[ring->me].srbm_soft_reset) |
1518 | return 0; |
1519 | |
1520 | uvd_v7_0_stop(adev); |
1521 | return 0; |
1522 | } |
1523 | |
1524 | static int uvd_v7_0_soft_reset(void *handle) |
1525 | { |
1526 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1527 | u32 srbm_soft_reset; |
1528 | |
1529 | if (!adev->uvd.inst[ring->me].srbm_soft_reset) |
1530 | return 0; |
1531 | srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; |
1532 | |
1533 | if (srbm_soft_reset) { |
1534 | u32 tmp; |
1535 | |
1536 | tmp = RREG32(mmSRBM_SOFT_RESET); |
1537 | tmp |= srbm_soft_reset; |
dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1539 | WREG32(mmSRBM_SOFT_RESET, tmp); |
1540 | tmp = RREG32(mmSRBM_SOFT_RESET); |
1541 | |
1542 | udelay(50); |
1543 | |
1544 | tmp &= ~srbm_soft_reset; |
1545 | WREG32(mmSRBM_SOFT_RESET, tmp); |
1546 | tmp = RREG32(mmSRBM_SOFT_RESET); |
1547 | |
1548 | /* Wait a little for things to settle down */ |
1549 | udelay(50); |
1550 | } |
1551 | |
1552 | return 0; |
1553 | } |
1554 | |
1555 | static int uvd_v7_0_post_soft_reset(void *handle) |
1556 | { |
1557 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1558 | |
1559 | if (!adev->uvd.inst[ring->me].srbm_soft_reset) |
1560 | return 0; |
1561 | |
1562 | mdelay(5); |
1563 | |
1564 | return uvd_v7_0_start(adev); |
1565 | } |
1566 | #endif |
1567 | |
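/**
 * uvd_v7_0_set_interrupt_state - set UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: interrupt state to program
 *
 * Not implemented yet; kept as a stub so the IRQ framework has a
 * valid callback.
 */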
static int uvd_v7_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	/* TODO */
	return 0;
}

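/**
 * uvd_v7_0_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Maps the IH client id to a UVD instance and signals fence
 * processing on the ring that raised the interrupt.
 */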
static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_UVD:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_UVD1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:	/* decode ring */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring);
		break;
	case 119:	/* first encode ring */
		amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]);
		break;
	case 120:	/* second encode ring, unused under SR-IOV */
		if (!amdgpu_sriov_vf(adev))
			amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

#if 0
static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, data2, suvd_flags;

	/* NOTE: this disabled code predates multi-instance UVD;
	 * instance 0 is assumed throughout.
	 */
	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL);
	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);
	data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		  UVD_CGC_CTRL__SYS_MODE_MASK |
		  UVD_CGC_CTRL__UDEC_MODE_MASK |
		  UVD_CGC_CTRL__MPEG2_MODE_MASK |
		  UVD_CGC_CTRL__REGS_MODE_MASK |
		  UVD_CGC_CTRL__RBC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		  UVD_CGC_CTRL__IDCT_MODE_MASK |
		  UVD_CGC_CTRL__MPRD_MODE_MASK |
		  UVD_CGC_CTRL__MPC_MODE_MASK |
		  UVD_CGC_CTRL__LBSI_MODE_MASK |
		  UVD_CGC_CTRL__LRBBM_MODE_MASK |
		  UVD_CGC_CTRL__WCB_MODE_MASK |
		  UVD_CGC_CTRL__VCPU_MODE_MASK |
		  UVD_CGC_CTRL__JPEG_MODE_MASK |
		  UVD_CGC_CTRL__JPEG2_MODE_MASK |
		  UVD_CGC_CTRL__SCPU_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
		   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data);
	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0);
	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2);
}

static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	/* instance 0 assumed, see the note above */
	data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE);
	data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		    UVD_CGC_GATE__UDEC_MASK |
		    UVD_CGC_GATE__MPEG2_MASK |
		    UVD_CGC_GATE__RBC_MASK |
		    UVD_CGC_GATE__LMI_MC_MASK |
		    UVD_CGC_GATE__IDCT_MASK |
		    UVD_CGC_GATE__MPRD_MASK |
		    UVD_CGC_GATE__MPC_MASK |
		    UVD_CGC_GATE__LBSI_MASK |
		    UVD_CGC_GATE__LRBBM_MASK |
		    UVD_CGC_GATE__UDEC_RE_MASK |
		    UVD_CGC_GATE__UDEC_CM_MASK |
		    UVD_CGC_GATE__UDEC_IT_MASK |
		    UVD_CGC_GATE__UDEC_DB_MASK |
		    UVD_CGC_GATE__UDEC_MP_MASK |
		    UVD_CGC_GATE__WCB_MASK |
		    UVD_CGC_GATE__VCPU_MASK |
		    UVD_CGC_GATE__SCPU_MASK |
		    UVD_CGC_GATE__JPEG_MASK |
		    UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
		     UVD_SUVD_CGC_GATE__SIT_MASK |
		     UVD_SUVD_CGC_GATE__SMP_MASK |
		     UVD_SUVD_CGC_GATE__SCM_MASK |
		     UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data);
	WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1);
}

static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);

	if (enable)
		tmp |= (GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);
	else
		tmp &= ~(GCK_DFS_BYPASS_CNTL__BYPASSDCLK_MASK |
			 GCK_DFS_BYPASS_CNTL__BYPASSVCLK_MASK);

	WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
}

static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	uvd_v7_0_set_bypass_mode(adev, enable);

	if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		/* disable HW gating and enable SW gating */
		uvd_v7_0_set_sw_clock_gating(adev);
	} else {
		/* wait for STATUS to clear */
		if (uvd_v7_0_wait_for_idle(handle))
			return -EBUSY;

		/* enable HW gates because UVD is idle */
		/* uvd_v7_0_set_hw_clock_gating(adev); */
	}

	return 0;
}

static int uvd_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks.
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
		return 0;

	/* instance 0 assumed, see the note above */
	WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v7_0_stop(adev);
		return 0;
	} else {
		return uvd_v7_0_start(adev);
	}
}
#endif

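/**
 * uvd_v7_0_set_clockgating_state - set UVD clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state
 *
 * Intentionally a no-op; it exists only so the core can call a valid
 * callback during driver unload.
 */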
static int uvd_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	/* needed for driver unload */
	return 0;
}

const struct amd_ip_funcs uvd_v7_0_ip_funcs = {
	.name = "uvd_v7_0",
	.early_init = uvd_v7_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v7_0_sw_init,
	.sw_fini = uvd_v7_0_sw_fini,
	.hw_init = uvd_v7_0_hw_init,
	.hw_fini = uvd_v7_0_hw_fini,
	.prepare_suspend = uvd_v7_0_prepare_suspend,
	.suspend = uvd_v7_0_suspend,
	.resume = uvd_v7_0_resume,
	.is_idle = NULL /* uvd_v7_0_is_idle */,
	.wait_for_idle = NULL /* uvd_v7_0_wait_for_idle */,
	.check_soft_reset = NULL /* uvd_v7_0_check_soft_reset */,
	.pre_soft_reset = NULL /* uvd_v7_0_pre_soft_reset */,
	.soft_reset = NULL /* uvd_v7_0_soft_reset */,
	.post_soft_reset = NULL /* uvd_v7_0_post_soft_reset */,
	.set_clockgating_state = uvd_v7_0_set_clockgating_state,
	.set_powergating_state = NULL /* uvd_v7_0_set_powergating_state */,
};

static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_ring_get_rptr,
	.get_wptr = uvd_v7_0_ring_get_wptr,
	.set_wptr = uvd_v7_0_ring_set_wptr,
	.patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
	.emit_frame_size =
		6 + /* hdp invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* uvd_v7_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
	.emit_ib = uvd_v7_0_ring_emit_ib,
	.emit_fence = uvd_v7_0_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
	.test_ring = uvd_v7_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v7_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v7_0_enc_ring_get_rptr,
	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v7_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
	.emit_ib = uvd_v7_0_enc_ring_emit_ib,
	.emit_fence = uvd_v7_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v7_0_enc_ring_emit_vm_flush,
	.test_ring = uvd_v7_0_enc_ring_test_ring,
	.test_ib = uvd_v7_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v7_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

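/**
 * uvd_v7_0_set_ring_funcs - set the decode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the VM-mode decode ring functions for every
 * non-harvested UVD instance.
 */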
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
		adev->uvd.inst[i].ring.me = i;
		DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
	}
}

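/**
 * uvd_v7_0_set_enc_ring_funcs - set the encode ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hooks up the VM-mode encode ring functions for every
 * non-harvested UVD instance.
 */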
static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
		if (adev->uvd.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
			adev->uvd.inst[j].ring_enc[i].me = j;
		}

		DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j);
	}
}

static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = {
	.set = uvd_v7_0_set_interrupt_state,
	.process = uvd_v7_0_process_interrupt,
};

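/**
 * uvd_v7_0_set_irq_funcs - set UVD interrupt functions
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the interrupt handlers for every non-harvested UVD
 * instance; one interrupt type per ring (decode plus encode rings).
 */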
static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
		if (adev->uvd.harvest_config & (1 << i))
			continue;
		adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
		adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
	}
}

const struct amdgpu_ip_block_version uvd_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v7_0_ip_funcs,
};