/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "amdgpu_cs.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_hw_ip.h"
#include "vcn_v2_0.h"
#include "mmsch_v4_0.h"
#include "vcn_v4_0_5.h"

#include "vcn/vcn_4_0_5_offset.h"
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#include <drm/drm_drv.h>

#define mmUVD_DPG_LMA_CTL		regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX	regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA		regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX	regUVD_DPG_LMA_DATA_BASE_IDX

#define VCN_VID_SOC_ADDRESS_2_0		0x1fb00
#define VCN1_VID_SOC_ADDRESS_3_0	(0x48300 + 0x38000)

#define VCN_HARVEST_MMSCH		0

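/* Decode message ids checked by vcn_v4_0_5_dec_msg() when parsing decode IBs */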
#define RDECODE_MSG_CREATE		0x00000000
#define RDECODE_MESSAGE_CREATE		0x00000001

static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v4_0_5_set_powergating_state(void *handle,
		enum amd_powergating_state state);
static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev,
		int inst_idx, struct dpg_pause_state *new_state);
static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring);

/**
 * vcn_v4_0_5_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v4_0_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* re-use enc ring as unified ring */
	adev->vcn.num_enc_rings = 1;
	vcn_v4_0_5_set_unified_ring_funcs(adev);
	vcn_v4_0_5_set_irq_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v4_0_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v4_0_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r;

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		volatile struct amdgpu_vcn4_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << i))
			continue;

		atomic_set(&adev->vcn.inst[i].sched_score, 0);

		/* VCN UNIFIED TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
				VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
		if (r)
			return r;

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->use_doorbell = true;
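		/*
		 * Doorbell layout (inferred from the strides below): SR-IOV
		 * packs one doorbell per ring per instance, while bare metal
		 * reserves a block of 8 doorbells per VCN instance.
		 */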
		if (amdgpu_sriov_vf(adev))
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						i * (adev->vcn.num_enc_rings + 1) + 1;
		else
			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
						2 + 8 * i;
		ring->vm_hub = AMDGPU_MMHUB0(0);
		sprintf(ring->name, "vcn_unified_%d", i);

		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
				AMDGPU_RING_PRIO_0, &adev->vcn.inst[i].sched_score);
		if (r)
			return r;

		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
		fw_shared->sq.is_enabled = 1;

		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG);
		fw_shared->smu_dpm_interface.smu_interface_type = (adev->flags & AMD_IS_APU) ?
			AMDGPU_VCN_SMU_DPM_INTERFACE_APU : AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU;

		if (amdgpu_sriov_vf(adev))
			fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v4_0_5_pause_dpg_mode;

	return 0;
}

/**
 * vcn_v4_0_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v4_0_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r, idx;

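	/* drm_dev_enter() guards the CPU writes below against device hot-unplug */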
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			volatile struct amdgpu_vcn4_fw_shared *fw_shared;

			if (adev->vcn.harvest_config & (1 << i))
				continue;

			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
			fw_shared->sq.is_enabled = 0;
		}

		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v4_0_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v4_0_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, r;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		ring = &adev->vcn.inst[i].ring_enc[0];

		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i), i);

		r = amdgpu_ring_test_helper(ring);
		if (r)
			goto done;
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully(under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v4_0_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v4_0_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (!amdgpu_sriov_vf(adev)) {
			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
			    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
			     RREG32_SOC15(VCN, i, regUVD_STATUS))) {
				vcn_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
			}
		}
	}

	return 0;
}

/**
 * vcn_v4_0_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v4_0_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v4_0_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v4_0_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v4_0_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v4_0_5_hw_init(adev);

	return r;
}

/**
 * vcn_v4_0_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi));
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, 0);
		offset = 0;
	} else {
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[inst].gpu_addr));
		WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[inst].gpu_addr));
		offset = size;
		WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET0, AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
	}
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE0, size);

	/* cache window 1: stack */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET1, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

	/* cache window 2: context */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_OFFSET2, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

	/* non-cache window */
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_OFFSET0, 0);
	WREG32_SOC15(VCN, inst, regUVD_VCPU_NONCACHE_SIZE0,
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)));
}

/**
 * vcn_v4_0_5_mc_resume_dpg_mode - memory controller programming for dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Let the VCN memory controller know its offsets with dpg mode
 */
static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t offset, size;
	const struct common_firmware_header *hdr;

	hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
	size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo),
				0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi),
				0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
		0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
		adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v4_0_5_disable_static_power_gating - disable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable static power gating for VCN block
 */
static void vcn_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

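	/*
	 * Each UVD_IPX_DLDO_CONFIG write below switches one ONO power island
	 * (ONO2..ONO5); the paired UVD_IPX_DLDO_STATUS poll waits for that
	 * island to report the requested power state before moving on.
	 */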
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	} else {
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			1 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
	}

	data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
	data &= ~0x103;
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
		data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON |
			UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);
}

/**
 * vcn_v4_0_5_enable_static_power_gating - enable VCN static power gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable static power gating for VCN block
 */
static void vcn_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		/* Before power off, this indicator has to be turned on */
		data = RREG32_SOC15(VCN, inst, regUVD_POWER_STATUS);
		data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK;
		data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
		WREG32_SOC15(VCN, inst, regUVD_POWER_STATUS, data);

		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO5_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO5_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO4_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO4_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO3_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO3_PWR_STATUS_MASK);
		WREG32_SOC15(VCN, inst, regUVD_IPX_DLDO_CONFIG,
			2 << UVD_IPX_DLDO_CONFIG__ONO2_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(VCN, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO2_PWR_STATUS_MASK);
	}
}

/**
 * vcn_v4_0_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Disable clock gating for VCN block
 */
static void vcn_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* VCN disable CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__SYS_MASK
		| UVD_CGC_GATE__UDEC_MASK
		| UVD_CGC_GATE__MPEG2_MASK
		| UVD_CGC_GATE__REGS_MASK
		| UVD_CGC_GATE__RBC_MASK
		| UVD_CGC_GATE__LMI_MC_MASK
		| UVD_CGC_GATE__LMI_UMC_MASK
		| UVD_CGC_GATE__IDCT_MASK
		| UVD_CGC_GATE__MPRD_MASK
		| UVD_CGC_GATE__MPC_MASK
		| UVD_CGC_GATE__LBSI_MASK
		| UVD_CGC_GATE__LRBBM_MASK
		| UVD_CGC_GATE__UDEC_RE_MASK
		| UVD_CGC_GATE__UDEC_CM_MASK
		| UVD_CGC_GATE__UDEC_IT_MASK
		| UVD_CGC_GATE__UDEC_DB_MASK
		| UVD_CGC_GATE__UDEC_MP_MASK
		| UVD_CGC_GATE__WCB_MASK
		| UVD_CGC_GATE__VCPU_MASK
		| UVD_CGC_GATE__MMSCH_MASK);

	WREG32_SOC15(VCN, inst, regUVD_CGC_GATE, data);
	SOC15_WAIT_ON_RREG(VCN, inst, regUVD_CGC_GATE, 0, 0xFFFFFFFF);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE);
	data |= (UVD_SUVD_CGC_GATE__SRE_MASK
		| UVD_SUVD_CGC_GATE__SIT_MASK
		| UVD_SUVD_CGC_GATE__SMP_MASK
		| UVD_SUVD_CGC_GATE__SCM_MASK
		| UVD_SUVD_CGC_GATE__SDB_MASK
		| UVD_SUVD_CGC_GATE__SRE_H264_MASK
		| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SIT_H264_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCM_H264_MASK
		| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SDB_H264_MASK
		| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
		| UVD_SUVD_CGC_GATE__SCLR_MASK
		| UVD_SUVD_CGC_GATE__UVD_SC_MASK
		| UVD_SUVD_CGC_GATE__ENT_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
		| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
		| UVD_SUVD_CGC_GATE__SITE_MASK
		| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
		| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
		| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
		| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
		| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_GATE, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_5_disable_clock_gating_dpg_mode - disable VCN clock gating dpg mode
 *
 * @adev: amdgpu_device pointer
 * @sram_sel: sram select
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Disable clock gating for VCN block with dpg mode
 */
static void vcn_v4_0_5_disable_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel,
		int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable sw clock gating control */
	reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		UVD_CGC_CTRL__SYS_MODE_MASK |
		UVD_CGC_CTRL__UDEC_MODE_MASK |
		UVD_CGC_CTRL__MPEG2_MODE_MASK |
		UVD_CGC_CTRL__REGS_MODE_MASK |
		UVD_CGC_CTRL__RBC_MODE_MASK |
		UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		UVD_CGC_CTRL__IDCT_MODE_MASK |
		UVD_CGC_CTRL__MPRD_MODE_MASK |
		UVD_CGC_CTRL__MPC_MODE_MASK |
		UVD_CGC_CTRL__LBSI_MODE_MASK |
		UVD_CGC_CTRL__LRBBM_MODE_MASK |
		UVD_CGC_CTRL__WCB_MODE_MASK |
		UVD_CGC_CTRL__VCPU_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v4_0_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: instance number
 *
 * Enable clock gating for VCN block
 */
static void vcn_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data;

	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		return;

	/* enable VCN CGC */
	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_CGC_CTRL);
	data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
		| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
		| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
		| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
		| UVD_CGC_CTRL__SYS_MODE_MASK
		| UVD_CGC_CTRL__UDEC_MODE_MASK
		| UVD_CGC_CTRL__MPEG2_MODE_MASK
		| UVD_CGC_CTRL__REGS_MODE_MASK
		| UVD_CGC_CTRL__RBC_MODE_MASK
		| UVD_CGC_CTRL__LMI_MC_MODE_MASK
		| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
		| UVD_CGC_CTRL__IDCT_MODE_MASK
		| UVD_CGC_CTRL__MPRD_MODE_MASK
		| UVD_CGC_CTRL__MPC_MODE_MASK
		| UVD_CGC_CTRL__LBSI_MODE_MASK
		| UVD_CGC_CTRL__LRBBM_MODE_MASK
		| UVD_CGC_CTRL__WCB_MODE_MASK
		| UVD_CGC_CTRL__VCPU_MODE_MASK
		| UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_CGC_CTRL, data);

	data = RREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL);
	data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
		| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
		| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
		| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
		| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
	WREG32_SOC15(VCN, inst, regUVD_SUVD_CGC_CTRL, data);
}

/**
 * vcn_v4_0_5_start_dpg_mode - VCN start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start VCN block with dpg mode
 */
static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regUVD_POWER_STATUS, tmp);

	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

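	/*
	 * With indirect programming, the register writes below are staged in
	 * the DPG scratch SRAM buffer and committed to the hardware later via
	 * amdgpu_vcn_psp_update_sram() instead of hitting MMIO directly.
	 */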
	/* enable clock gating */
	vcn_v4_0_5_disable_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN), 0, 0, indirect);

	/* setup regUVD_LMI_CTRL */
	tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v4_0_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable LMI MC and UMC channels */
	tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_LMI_CTRL2), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, inst_idx, regUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

	ring = &adev->vcn.inst[inst_idx].ring_enc[0];

	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_LO, ring->gpu_addr);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_SIZE, ring->ring_size / 4);

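	/*
	 * Ring-buffer reset handshake with firmware: disable RB1, flag
	 * FW_QUEUE_RING_RESET in shared memory, zero both ring pointers,
	 * then re-enable RB1 and clear the reset/hold-off flags.
	 */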
	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR, 0);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, 0);

	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR, tmp);
	ring->wptr = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);

	tmp = RREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE);
	tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, regVCN_RB_ENABLE, tmp);
	fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);

	WREG32_SOC15(VCN, inst_idx, regVCN_RB1_DB_CTRL,
		ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
		VCN_RB1_DB_CTRL__EN_MASK);

	return 0;
}


/**
 * vcn_v4_0_5_start - VCN start
 *
 * @adev: amdgpu_device pointer
 *
 * Start VCN block
 */
static int vcn_v4_0_5_start(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	struct amdgpu_ring *ring;
	uint32_t tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable VCN power gating */
		vcn_v4_0_5_disable_static_power_gating(adev, i);

		/* set VCN status busy */
		tmp = RREG32_SOC15(VCN, i, regUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, regUVD_STATUS, tmp);

		/* SW clock gating */
		vcn_v4_0_5_disable_clock_gating(adev, i);

		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* setup regUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL);
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL, tmp |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup regUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, regUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, regUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup UVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, regUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));

		vcn_v4_0_5_mc_resume(adev, i);

		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, regUVD_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* release VCPU reset to boot */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

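		/* Poll for firmware boot: up to 10 attempts, each waiting on
		 * UVD_STATUS before forcing a VCPU soft reset and retrying.
		 */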
		for (j = 0; j < 10; ++j) {
			uint32_t status;

			for (k = 0; k < 100; ++k) {
				status = RREG32_SOC15(VCN, i, regUVD_STATUS);
				if (status & 2)
					break;
				mdelay(10);
				if (amdgpu_emu_mode == 1)
					msleep(1);
			}

			if (amdgpu_emu_mode == 1) {
				r = -1;
				if (status & 2) {
					r = 0;
					break;
				}
			} else {
				r = 0;
				if (status & 2)
					break;

				dev_err(adev->dev,
					"VCN[%d] is not responding, trying to reset VCPU!!!\n", i);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
					UVD_VCPU_CNTL__BLK_RST_MASK,
					~UVD_VCPU_CNTL__BLK_RST_MASK);
				mdelay(10);
				WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
					~UVD_VCPU_CNTL__BLK_RST_MASK);

				mdelay(10);
				r = -1;
			}
		}

		if (r) {
			dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i);
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, regVCN_RB1_DB_CTRL,
			ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT |
			VCN_RB1_DB_CTRL__EN_MASK);

		WREG32_SOC15(VCN, i, regUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, regUVD_RB_SIZE, ring->ring_size / 4);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK);
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
		WREG32_SOC15(VCN, i, regUVD_RB_RPTR, 0);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, 0);

		tmp = RREG32_SOC15(VCN, i, regUVD_RB_RPTR);
		WREG32_SOC15(VCN, i, regUVD_RB_WPTR, tmp);
		ring->wptr = RREG32_SOC15(VCN, i, regUVD_RB_WPTR);

		tmp = RREG32_SOC15(VCN, i, regVCN_RB_ENABLE);
		tmp |= VCN_RB_ENABLE__RB1_EN_MASK;
		WREG32_SOC15(VCN, i, regVCN_RB_ENABLE, tmp);
		fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF);
	}

	return 0;
}

/**
 * vcn_v4_0_5_stop_dpg_mode - VCN stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop VCN block with dpg mode
 */
static void vcn_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, regUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
}

/**
 * vcn_v4_0_5_stop - VCN stop
 *
 * @adev: amdgpu_device pointer
 *
 * Stop VCN block
 */
static int vcn_v4_0_5_stop(struct amdgpu_device *adev)
{
	volatile struct amdgpu_vcn4_fw_shared *fw_shared;
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			vcn_v4_0_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* disable LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, regUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, regUVD_LMI_CTRL2, tmp);
		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, regUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, regUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* apply soft reset */
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(VCN, i, regUVD_SOFT_RESET);
		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
		WREG32_SOC15(VCN, i, regUVD_SOFT_RESET, tmp);

		/* clear status */
		WREG32_SOC15(VCN, i, regUVD_STATUS, 0);

		/* apply HW clock gating */
		vcn_v4_0_5_enable_clock_gating(adev, i);

		/* enable VCN power gating */
		vcn_v4_0_5_enable_static_power_gating(adev, i);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

/**
 * vcn_v4_0_5_pause_dpg_mode - VCN pause with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @new_state: pause state
 *
 * Pause dpg mode for VCN block
 */
static int vcn_v4_0_5_pause_dpg_mode(struct amdgpu_device *adev, int inst_idx,
		struct dpg_pause_state *new_state)
{
	uint32_t reg_data = 0;
	int ret_code;

	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_DPG_PAUSE,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, regUVD_POWER_STATUS,
					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			/* unpause dpg, no need to wait */
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, regUVD_DPG_PAUSE, reg_data);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v4_0_5_unified_ring_get_rptr - get unified read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified read pointer
 */
static uint64_t vcn_v4_0_5_unified_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	return RREG32_SOC15(VCN, ring->me, regUVD_RB_RPTR);
}

/**
 * vcn_v4_0_5_unified_ring_get_wptr - get unified write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware unified write pointer
 */
static uint64_t vcn_v4_0_5_unified_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR);
}

/**
 * vcn_v4_0_5_unified_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring != &adev->vcn.inst[ring->me].ring_enc[0])
		DRM_ERROR("wrong ring id is identified in %s", __func__);

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, regUVD_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static int vcn_v4_0_5_limit_sched(struct amdgpu_cs_parser *p,
				  struct amdgpu_job *job)
{
	struct drm_gpu_scheduler **scheds;

	/* The create msg must be in the first IB submitted */
	if (atomic_read(&job->base.entity->fence_seq))
		return -EINVAL;

	/* if VCN0 is harvested, we can't support AV1 */
	if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
		return -EINVAL;

	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
		[AMDGPU_RING_PRIO_0].sched;
	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
	return 0;
}

static int vcn_v4_0_5_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
			      uint64_t addr)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *map;
	uint32_t *msg, num_buffers;
	struct amdgpu_bo *bo;
	uint64_t start, end;
	unsigned int i;
	void *ptr;
	int r;

	addr &= AMDGPU_GMC_HOLE_MASK;
	r = amdgpu_cs_find_mapping(p, addr, &bo, &map);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
		return r;
	}

	start = map->start * AMDGPU_GPU_PAGE_SIZE;
	end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE;
	if (addr & 0x7) {
		DRM_ERROR("VCN messages must be 8 byte aligned!\n");
		return -EINVAL;
	}

	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (r) {
		DRM_ERROR("Failed validating the VCN message BO (%d)!\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the VCN message (%d)!\n", r);
		return r;
	}

	msg = ptr + addr - start;

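	/*
	 * Header layout as consumed here: msg[1] = total message size in
	 * bytes, msg[2] = number of buffer descriptors, msg[3] = message
	 * type; descriptors start at msg[6], 4 dwords each.
	 */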
	/* Check length */
	if (msg[1] > end - addr) {
		r = -EINVAL;
		goto out;
	}

	if (msg[3] != RDECODE_MSG_CREATE)
		goto out;

	num_buffers = msg[2];
	for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) {
		uint32_t offset, size, *create;

		if (msg[0] != RDECODE_MESSAGE_CREATE)
			continue;

		offset = msg[1];
		size = msg[2];

		if (offset + size > end) {
			r = -EINVAL;
			goto out;
		}

		create = ptr + addr + offset - start;

		/* H264, HEVC and VP9 can run on any instance */
		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
			continue;

		r = vcn_v4_0_5_limit_sched(p, job);
		if (r)
			goto out;
	}

out:
	amdgpu_bo_kunmap(bo);
	return r;
}

#define RADEON_VCN_ENGINE_TYPE_ENCODE			(0x00000002)
#define RADEON_VCN_ENGINE_TYPE_DECODE			(0x00000003)

#define RADEON_VCN_ENGINE_INFO				(0x30000001)
#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET		16

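/* Session-init parameters used below to spot AV1 encode jobs, which must be
 * scheduled on the first VCN instance.
 */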
#define RENCODE_ENCODE_STANDARD_AV1			2
#define RENCODE_IB_PARAM_SESSION_INIT			0x00000003
#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET	64

/* return the offset in ib if id is found, -1 otherwise
 * to speed up the searching we only search up to max_offset
 */
static int vcn_v4_0_5_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
{
	int i;

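	/* Each IB package begins with its size in bytes (dword 0) followed by
	 * its id (dword 1), so advance by size/4 dwords per iteration.
	 */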
1467 | for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) { |
1468 | if (ib->ptr[i + 1] == id) |
1469 | return i; |
1470 | } |
1471 | return -1; |
1472 | } |
1473 | |
1474 | static int vcn_v4_0_5_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, |
1475 | struct amdgpu_job *job, |
1476 | struct amdgpu_ib *ib) |
1477 | { |
1478 | struct amdgpu_ring *ring = amdgpu_job_ring(job); |
1479 | struct amdgpu_vcn_decode_buffer *decode_buffer; |
1480 | uint64_t addr; |
1481 | uint32_t val; |
1482 | int idx; |
1483 | |
1484 | /* The first instance can decode anything */ |
1485 | if (!ring->me) |
1486 | return 0; |
1487 | |
1488 | /* RADEON_VCN_ENGINE_INFO is at the top of ib block */ |
1489 | idx = vcn_v4_0_5_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO, |
1490 | RADEON_VCN_ENGINE_INFO_MAX_OFFSET); |
1491 | if (idx < 0) /* engine info is missing */ |
1492 | return 0; |
1493 | |
	val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
1495 | if (val == RADEON_VCN_ENGINE_TYPE_DECODE) { |
1496 | decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6]; |
1497 | |
1498 | if (!(decode_buffer->valid_buf_flag & 0x1)) |
1499 | return 0; |
1500 | |
1501 | addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 | |
1502 | decode_buffer->msg_buffer_address_lo; |
1503 | return vcn_v4_0_5_dec_msg(p, job, addr); |
1504 | } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) { |
1505 | idx = vcn_v4_0_5_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT, |
1506 | RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET); |
1507 | if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1) |
1508 | return vcn_v4_0_5_limit_sched(p, job); |
1509 | } |
1510 | return 0; |
1511 | } |
1512 | |
1513 | static const struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { |
1514 | .type = AMDGPU_RING_TYPE_VCN_ENC, |
1515 | .align_mask = 0x3f, |
1516 | .nop = VCN_ENC_CMD_NO_OP, |
1517 | .get_rptr = vcn_v4_0_5_unified_ring_get_rptr, |
1518 | .get_wptr = vcn_v4_0_5_unified_ring_get_wptr, |
1519 | .set_wptr = vcn_v4_0_5_unified_ring_set_wptr, |
1520 | .patch_cs_in_place = vcn_v4_0_5_ring_patch_cs_in_place, |
1521 | .emit_frame_size = |
1522 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + |
1523 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + |
1524 | 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ |
1525 | 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ |
1526 | 1, /* vcn_v2_0_enc_ring_insert_end */ |
1527 | .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ |
1528 | .emit_ib = vcn_v2_0_enc_ring_emit_ib, |
1529 | .emit_fence = vcn_v2_0_enc_ring_emit_fence, |
1530 | .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, |
1531 | .test_ring = amdgpu_vcn_enc_ring_test_ring, |
1532 | .test_ib = amdgpu_vcn_unified_ring_test_ib, |
1533 | .insert_nop = amdgpu_ring_insert_nop, |
1534 | .insert_end = vcn_v2_0_enc_ring_insert_end, |
1535 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1536 | .begin_use = amdgpu_vcn_ring_begin_use, |
1537 | .end_use = amdgpu_vcn_ring_end_use, |
1538 | .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, |
1539 | .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, |
1540 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, |
1541 | }; |
1542 | |
1543 | /** |
1544 | * vcn_v4_0_5_set_unified_ring_funcs - set unified ring functions |
1545 | * |
1546 | * @adev: amdgpu_device pointer |
1547 | * |
1548 | * Set unified ring functions |
1549 | */ |
1550 | static void vcn_v4_0_5_set_unified_ring_funcs(struct amdgpu_device *adev) |
1551 | { |
1552 | int i; |
1553 | |
1554 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
1555 | if (adev->vcn.harvest_config & (1 << i)) |
1556 | continue; |
1557 | |
1558 | adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v4_0_5_unified_ring_vm_funcs; |
1559 | adev->vcn.inst[i].ring_enc[0].me = i; |
1560 | |
1561 | DRM_INFO("VCN(%d) encode/decode are enabled in VM mode\n" , i); |
1562 | } |
1563 | } |
1564 | |
1565 | /** |
 * vcn_v4_0_5_is_idle - check whether VCN block is idle
1567 | * |
1568 | * @handle: amdgpu_device pointer |
1569 | * |
1570 | * Check whether VCN block is idle |
1571 | */ |
1572 | static bool vcn_v4_0_5_is_idle(void *handle) |
1573 | { |
1574 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1575 | int i, ret = 1; |
1576 | |
1577 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
1578 | if (adev->vcn.harvest_config & (1 << i)) |
1579 | continue; |
1580 | |
1581 | ret &= (RREG32_SOC15(VCN, i, regUVD_STATUS) == UVD_STATUS__IDLE); |
1582 | } |
1583 | |
1584 | return ret; |
1585 | } |
1586 | |
1587 | /** |
1588 | * vcn_v4_0_5_wait_for_idle - wait for VCN block idle |
1589 | * |
1590 | * @handle: amdgpu_device pointer |
1591 | * |
1592 | * Wait for VCN block idle |
1593 | */ |
1594 | static int vcn_v4_0_5_wait_for_idle(void *handle) |
1595 | { |
1596 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1597 | int i, ret = 0; |
1598 | |
1599 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
1600 | if (adev->vcn.harvest_config & (1 << i)) |
1601 | continue; |
1602 | |
1603 | ret = SOC15_WAIT_ON_RREG(VCN, i, regUVD_STATUS, UVD_STATUS__IDLE, |
1604 | UVD_STATUS__IDLE); |
1605 | if (ret) |
1606 | return ret; |
1607 | } |
1608 | |
1609 | return ret; |
1610 | } |
1611 | |
1612 | /** |
1613 | * vcn_v4_0_5_set_clockgating_state - set VCN block clockgating state |
1614 | * |
1615 | * @handle: amdgpu_device pointer |
1616 | * @state: clock gating state |
1617 | * |
1618 | * Set VCN block clockgating state |
1619 | */ |
1620 | static int vcn_v4_0_5_set_clockgating_state(void *handle, enum amd_clockgating_state state) |
1621 | { |
1622 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
	bool enable = (state == AMD_CG_STATE_GATE);
1624 | int i; |
1625 | |
1626 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
1627 | if (adev->vcn.harvest_config & (1 << i)) |
1628 | continue; |
1629 | |
1630 | if (enable) { |
1631 | if (RREG32_SOC15(VCN, i, regUVD_STATUS) != UVD_STATUS__IDLE) |
1632 | return -EBUSY; |
			vcn_v4_0_5_enable_clock_gating(adev, i);
1634 | } else { |
			vcn_v4_0_5_disable_clock_gating(adev, i);
1636 | } |
1637 | } |
1638 | |
1639 | return 0; |
1640 | } |
1641 | |
1642 | /** |
1643 | * vcn_v4_0_5_set_powergating_state - set VCN block powergating state |
1644 | * |
1645 | * @handle: amdgpu_device pointer |
1646 | * @state: power gating state |
1647 | * |
1648 | * Set VCN block powergating state |
1649 | */ |
1650 | static int vcn_v4_0_5_set_powergating_state(void *handle, enum amd_powergating_state state) |
1651 | { |
1652 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1653 | int ret; |
1654 | |
1655 | if (state == adev->vcn.cur_state) |
1656 | return 0; |
1657 | |
1658 | if (state == AMD_PG_STATE_GATE) |
1659 | ret = vcn_v4_0_5_stop(adev); |
1660 | else |
1661 | ret = vcn_v4_0_5_start(adev); |
1662 | |
1663 | if (!ret) |
1664 | adev->vcn.cur_state = state; |
1665 | |
1666 | return ret; |
1667 | } |
1668 | |
1669 | /** |
1670 | * vcn_v4_0_5_process_interrupt - process VCN block interrupt |
1671 | * |
1672 | * @adev: amdgpu_device pointer |
1673 | * @source: interrupt sources |
1674 | * @entry: interrupt entry from clients and sources |
1675 | * |
1676 | * Process VCN block interrupt |
1677 | */ |
1678 | static int vcn_v4_0_5_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, |
1679 | struct amdgpu_iv_entry *entry) |
1680 | { |
1681 | uint32_t ip_instance; |
1682 | |
1683 | switch (entry->client_id) { |
1684 | case SOC15_IH_CLIENTID_VCN: |
1685 | ip_instance = 0; |
1686 | break; |
1687 | case SOC15_IH_CLIENTID_VCN1: |
1688 | ip_instance = 1; |
1689 | break; |
1690 | default: |
1691 | DRM_ERROR("Unhandled client id: %d\n" , entry->client_id); |
1692 | return 0; |
1693 | } |
1694 | |
1695 | DRM_DEBUG("IH: VCN TRAP\n" ); |
1696 | |
1697 | switch (entry->src_id) { |
1698 | case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE: |
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1700 | break; |
1701 | case VCN_4_0__SRCID_UVD_POISON: |
1702 | amdgpu_vcn_process_poison_irq(adev, source, entry); |
1703 | break; |
1704 | default: |
1705 | DRM_ERROR("Unhandled interrupt: %d %d\n" , |
1706 | entry->src_id, entry->src_data[0]); |
1707 | break; |
1708 | } |
1709 | |
1710 | return 0; |
1711 | } |
1712 | |
1713 | static const struct amdgpu_irq_src_funcs vcn_v4_0_5_irq_funcs = { |
1714 | .process = vcn_v4_0_5_process_interrupt, |
1715 | }; |
1716 | |
1717 | /** |
1718 | * vcn_v4_0_5_set_irq_funcs - set VCN block interrupt irq functions |
1719 | * |
1720 | * @adev: amdgpu_device pointer |
1721 | * |
1722 | * Set VCN block interrupt irq functions |
1723 | */ |
1724 | static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev) |
1725 | { |
1726 | int i; |
1727 | |
1728 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
1729 | if (adev->vcn.harvest_config & (1 << i)) |
1730 | continue; |
1731 | |
1732 | adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1; |
1733 | adev->vcn.inst[i].irq.funcs = &vcn_v4_0_5_irq_funcs; |
1734 | } |
1735 | } |
1736 | |
1737 | static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = { |
1738 | .name = "vcn_v4_0_5" , |
1739 | .early_init = vcn_v4_0_5_early_init, |
1740 | .late_init = NULL, |
1741 | .sw_init = vcn_v4_0_5_sw_init, |
1742 | .sw_fini = vcn_v4_0_5_sw_fini, |
1743 | .hw_init = vcn_v4_0_5_hw_init, |
1744 | .hw_fini = vcn_v4_0_5_hw_fini, |
1745 | .suspend = vcn_v4_0_5_suspend, |
1746 | .resume = vcn_v4_0_5_resume, |
1747 | .is_idle = vcn_v4_0_5_is_idle, |
1748 | .wait_for_idle = vcn_v4_0_5_wait_for_idle, |
1749 | .check_soft_reset = NULL, |
1750 | .pre_soft_reset = NULL, |
1751 | .soft_reset = NULL, |
1752 | .post_soft_reset = NULL, |
1753 | .set_clockgating_state = vcn_v4_0_5_set_clockgating_state, |
1754 | .set_powergating_state = vcn_v4_0_5_set_powergating_state, |
1755 | }; |
1756 | |
1757 | const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = { |
1758 | .type = AMD_IP_BLOCK_TYPE_VCN, |
1759 | .major = 4, |
1760 | .minor = 0, |
1761 | .rev = 5, |
1762 | .funcs = &vcn_v4_0_5_ip_funcs, |
1763 | }; |
1764 | |