1 | /* |
2 | * Copyright 2019 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | |
24 | #include <linux/firmware.h> |
25 | #include "amdgpu.h" |
26 | #include "amdgpu_vcn.h" |
27 | #include "amdgpu_pm.h" |
28 | #include "amdgpu_cs.h" |
29 | #include "soc15.h" |
30 | #include "soc15d.h" |
31 | #include "vcn_v2_0.h" |
32 | #include "mmsch_v3_0.h" |
33 | #include "vcn_sw_ring.h" |
34 | |
35 | #include "vcn/vcn_3_0_0_offset.h" |
36 | #include "vcn/vcn_3_0_0_sh_mask.h" |
37 | #include "ivsrcid/vcn/irqsrcs_vcn_2_0.h" |
38 | |
39 | #include <drm/drm_drv.h> |
40 | |
41 | #define VCN_VID_SOC_ADDRESS_2_0 0x1fa00 |
42 | #define VCN1_VID_SOC_ADDRESS_3_0 0x48200 |
43 | #define VCN1_AON_SOC_ADDRESS_3_0 0x48000 |
44 | |
45 | #define mmUVD_CONTEXT_ID_INTERNAL_OFFSET 0x27 |
46 | #define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET 0x0f |
47 | #define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET 0x10 |
48 | #define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET 0x11 |
49 | #define mmUVD_NO_OP_INTERNAL_OFFSET 0x29 |
50 | #define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET 0x66 |
51 | #define mmUVD_SCRATCH9_INTERNAL_OFFSET 0xc01d |
52 | |
53 | #define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET 0x431 |
54 | #define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x3b4 |
55 | #define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x3b5 |
56 | #define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET 0x25c |
57 | |
58 | #define VCN_INSTANCES_SIENNA_CICHLID 2 |
59 | #define DEC_SW_RING_ENABLED FALSE |
60 | |
61 | #define RDECODE_MSG_CREATE 0x00000000 |
62 | #define RDECODE_MESSAGE_CREATE 0x00000001 |
63 | |
64 | static const struct amdgpu_hwip_reg_entry vcn_reg_list_3_0[] = { |
65 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS), |
66 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS), |
67 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID), |
68 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_CONTEXT_ID2), |
69 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA0), |
70 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_DATA1), |
71 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_GPCOM_VCPU_CMD), |
72 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI), |
73 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO), |
74 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI2), |
75 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO2), |
76 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI3), |
77 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO3), |
78 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_HI4), |
79 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_BASE_LO4), |
80 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR), |
81 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR), |
82 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR2), |
83 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR2), |
84 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR3), |
85 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR3), |
86 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_RPTR4), |
87 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_WPTR4), |
88 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE), |
89 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE2), |
90 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE3), |
91 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_RB_SIZE4), |
92 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_CONFIG), |
93 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_PGFSM_STATUS), |
94 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_CTL), |
95 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_DATA), |
96 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_LMA_MASK), |
97 | SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_DPG_PAUSE) |
98 | }; |
99 | |
100 | static int amdgpu_ih_clientid_vcns[] = { |
101 | SOC15_IH_CLIENTID_VCN, |
102 | SOC15_IH_CLIENTID_VCN1 |
103 | }; |
104 | |
105 | static int vcn_v3_0_start_sriov(struct amdgpu_device *adev); |
106 | static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev); |
107 | static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev); |
108 | static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev); |
109 | static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst, |
110 | enum amd_powergating_state state); |
111 | static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, |
112 | struct dpg_pause_state *new_state); |
113 | |
114 | static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring); |
115 | static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); |
116 | |
117 | /** |
118 | * vcn_v3_0_early_init - set function pointers and load microcode |
119 | * |
120 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
121 | * |
122 | * Set ring and irq function pointers |
123 | * Load microcode from filesystem |
124 | */ |
125 | static int vcn_v3_0_early_init(struct amdgpu_ip_block *ip_block) |
126 | { |
127 | struct amdgpu_device *adev = ip_block->adev; |
128 | int i, r; |
129 | |
130 | if (amdgpu_sriov_vf(adev)) { |
131 | adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID; |
132 | adev->vcn.harvest_config = 0; |
133 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) |
134 | adev->vcn.inst[i].num_enc_rings = 1; |
135 | |
136 | } else { |
137 | if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 | |
138 | AMDGPU_VCN_HARVEST_VCN1)) |
139 | /* both instances are harvested, disable the block */ |
140 | return -ENOENT; |
141 | |
142 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
143 | if (amdgpu_ip_version(adev, UVD_HWIP, 0) == |
144 | IP_VERSION(3, 0, 33)) |
145 | adev->vcn.inst[i].num_enc_rings = 0; |
146 | else |
147 | adev->vcn.inst[i].num_enc_rings = 2; |
148 | } |
149 | } |
150 | |
151 | vcn_v3_0_set_dec_ring_funcs(adev); |
152 | vcn_v3_0_set_enc_ring_funcs(adev); |
153 | vcn_v3_0_set_irq_funcs(adev); |
154 | |
155 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
156 | adev->vcn.inst[i].set_pg_state = vcn_v3_0_set_pg_state; |
157 | |
158 | r = amdgpu_vcn_early_init(adev, i); |
159 | if (r) |
160 | return r; |
161 | } |
162 | return 0; |
163 | } |
164 | |
165 | /** |
166 | * vcn_v3_0_sw_init - sw init for VCN block |
167 | * |
168 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
169 | * |
170 | * Load firmware and sw initialization |
171 | */ |
172 | static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) |
173 | { |
174 | struct amdgpu_ring *ring; |
175 | int i, j, r; |
176 | int vcn_doorbell_index = 0; |
177 | uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); |
178 | uint32_t *ptr; |
179 | struct amdgpu_device *adev = ip_block->adev; |
180 | |
181 | /* |
182 | * Note: doorbell assignment is fixed for SRIOV multiple VCN engines |
183 | * Formula: |
184 | * vcn_db_base = adev->doorbell_index.vcn.vcn_ring0_1 << 1; |
185 | * dec_ring_i = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) |
186 | * enc_ring_i,j = vcn_db_base + i * (adev->vcn.num_enc_rings + 1) + 1 + j |
187 | */ |
188 | if (amdgpu_sriov_vf(adev)) { |
189 | vcn_doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1; |
190 | /* get DWORD offset */ |
191 | vcn_doorbell_index = vcn_doorbell_index << 1; |
192 | } |
193 | |
194 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
195 | volatile struct amdgpu_fw_shared *fw_shared; |
196 | |
197 | if (adev->vcn.harvest_config & (1 << i)) |
198 | continue; |
199 | |
200 | r = amdgpu_vcn_sw_init(adev, i); |
201 | if (r) |
202 | return r; |
203 | |
204 | amdgpu_vcn_setup_ucode(adev, i); |
205 | |
206 | r = amdgpu_vcn_resume(adev, i); |
207 | if (r) |
208 | return r; |
209 | |
210 | adev->vcn.inst[i].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET; |
211 | adev->vcn.inst[i].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET; |
212 | adev->vcn.inst[i].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET; |
213 | adev->vcn.inst[i].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET; |
214 | adev->vcn.inst[i].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET; |
215 | adev->vcn.inst[i].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET; |
216 | |
217 | adev->vcn.inst[i].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET; |
218 | adev->vcn.inst[i].external.scratch9 = SOC15_REG_OFFSET(VCN, i, mmUVD_SCRATCH9); |
219 | adev->vcn.inst[i].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET; |
220 | adev->vcn.inst[i].external.data0 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA0); |
221 | adev->vcn.inst[i].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET; |
222 | adev->vcn.inst[i].external.data1 = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_DATA1); |
223 | adev->vcn.inst[i].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET; |
224 | adev->vcn.inst[i].external.cmd = SOC15_REG_OFFSET(VCN, i, mmUVD_GPCOM_VCPU_CMD); |
225 | adev->vcn.inst[i].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET; |
226 | adev->vcn.inst[i].external.nop = SOC15_REG_OFFSET(VCN, i, mmUVD_NO_OP); |
227 | |
228 | /* VCN DEC TRAP */ |
229 | r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], |
230 | VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[i].irq); |
231 | if (r) |
232 | return r; |
233 | |
234 | atomic_set(&adev->vcn.inst[i].sched_score, 0); |
235 | |
236 | ring = &adev->vcn.inst[i].ring_dec; |
237 | ring->use_doorbell = true; |
238 | if (amdgpu_sriov_vf(adev)) { |
239 | ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1); |
240 | } else { |
241 | ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i; |
242 | } |
243 | ring->vm_hub = AMDGPU_MMHUB0(0); |
244 | sprintf(ring->name, "vcn_dec_%d", i); |
245 | r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, |
246 | AMDGPU_RING_PRIO_DEFAULT, |
247 | &adev->vcn.inst[i].sched_score); |
248 | if (r) |
249 | return r; |
250 | |
251 | for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { |
252 | enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j); |
253 | |
254 | /* VCN ENC TRAP */ |
255 | r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i], |
256 | j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq); |
257 | if (r) |
258 | return r; |
259 | |
260 | ring = &adev->vcn.inst[i].ring_enc[j]; |
261 | ring->use_doorbell = true; |
262 | if (amdgpu_sriov_vf(adev)) { |
263 | ring->doorbell_index = vcn_doorbell_index + i * (adev->vcn.inst[i].num_enc_rings + 1) + 1 + j; |
264 | } else { |
265 | ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + j + 8 * i; |
266 | } |
267 | ring->vm_hub = AMDGPU_MMHUB0(0); |
268 | sprintf(ring->name, "vcn_enc_%d.%d", i, j); |
269 | r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, |
270 | hw_prio, &adev->vcn.inst[i].sched_score); |
271 | if (r) |
272 | return r; |
273 | } |
274 | |
275 | fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; |
276 | fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) | |
277 | cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) | |
278 | cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB); |
279 | fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED); |
280 | fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG; |
281 | if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 1, 2)) |
282 | fw_shared->smu_interface_info.smu_interface_type = 2; |
283 | else if (amdgpu_ip_version(adev, UVD_HWIP, 0) == |
284 | IP_VERSION(3, 1, 1)) |
285 | fw_shared->smu_interface_info.smu_interface_type = 1; |
286 | |
287 | if (amdgpu_vcnfw_log) |
288 | amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]); |
289 | |
290 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) |
291 | adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode; |
292 | } |
293 | |
294 | if (amdgpu_sriov_vf(adev)) { |
295 | r = amdgpu_virt_alloc_mm_table(adev); |
296 | if (r) |
297 | return r; |
298 | } |
299 | |
300 | /* Allocate memory for VCN IP Dump buffer */ |
301 | ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL); |
302 | if (ptr == NULL) { |
303 | DRM_ERROR("Failed to allocate memory for VCN IP Dump\n"); |
304 | adev->vcn.ip_dump = NULL; |
305 | } else { |
306 | adev->vcn.ip_dump = ptr; |
307 | } |
308 | |
309 | return 0; |
310 | } |
311 | |
312 | /** |
313 | * vcn_v3_0_sw_fini - sw fini for VCN block |
314 | * |
315 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
316 | * |
317 | * VCN suspend and free up sw allocation |
318 | */ |
319 | static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) |
320 | { |
321 | struct amdgpu_device *adev = ip_block->adev; |
322 | int i, r, idx; |
323 | |
324 | if (drm_dev_enter(adev_to_drm(adev), &idx)) { |
325 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
326 | volatile struct amdgpu_fw_shared *fw_shared; |
327 | |
328 | if (adev->vcn.harvest_config & (1 << i)) |
329 | continue; |
330 | fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; |
331 | fw_shared->present_flag_0 = 0; |
332 | fw_shared->sw_ring.is_enabled = false; |
333 | } |
334 | |
335 | drm_dev_exit(idx); |
336 | } |
337 | |
338 | if (amdgpu_sriov_vf(adev)) |
339 | amdgpu_virt_free_mm_table(adev); |
340 | |
341 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
342 | r = amdgpu_vcn_suspend(adev, i); |
343 | if (r) |
344 | return r; |
345 | |
346 | r = amdgpu_vcn_sw_fini(adev, i); |
347 | if (r) |
348 | return r; |
349 | } |
350 | |
351 | kfree(adev->vcn.ip_dump); |
352 | return 0; |
353 | } |
354 | |
355 | /** |
356 | * vcn_v3_0_hw_init - start and test VCN block |
357 | * |
358 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
359 | * |
360 | * Initialize the hardware, boot up the VCPU and do some testing |
361 | */ |
362 | static int vcn_v3_0_hw_init(struct amdgpu_ip_block *ip_block) |
363 | { |
364 | struct amdgpu_device *adev = ip_block->adev; |
365 | struct amdgpu_ring *ring; |
366 | int i, j, r; |
367 | |
368 | if (amdgpu_sriov_vf(adev)) { |
369 | r = vcn_v3_0_start_sriov(adev); |
370 | if (r) |
371 | return r; |
372 | |
373 | /* initialize VCN dec and enc ring buffers */ |
374 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
375 | if (adev->vcn.harvest_config & (1 << i)) |
376 | continue; |
377 | |
378 | ring = &adev->vcn.inst[i].ring_dec; |
379 | if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) { |
380 | ring->sched.ready = false; |
381 | ring->no_scheduler = true; |
382 | dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name); |
383 | } else { |
384 | ring->wptr = 0; |
385 | ring->wptr_old = 0; |
386 | vcn_v3_0_dec_ring_set_wptr(ring); |
387 | ring->sched.ready = true; |
388 | } |
389 | |
390 | for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { |
391 | ring = &adev->vcn.inst[i].ring_enc[j]; |
392 | if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) { |
393 | ring->sched.ready = false; |
394 | ring->no_scheduler = true; |
395 | dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name); |
396 | } else { |
397 | ring->wptr = 0; |
398 | ring->wptr_old = 0; |
399 | vcn_v3_0_enc_ring_set_wptr(ring); |
400 | ring->sched.ready = true; |
401 | } |
402 | } |
403 | } |
404 | } else { |
405 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
406 | if (adev->vcn.harvest_config & (1 << i)) |
407 | continue; |
408 | |
409 | ring = &adev->vcn.inst[i].ring_dec; |
410 | |
411 | adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, |
412 | ring->doorbell_index, i); |
413 | |
414 | r = amdgpu_ring_test_helper(ring); |
415 | if (r) |
416 | return r; |
417 | |
418 | for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { |
419 | ring = &adev->vcn.inst[i].ring_enc[j]; |
420 | r = amdgpu_ring_test_helper(ring); |
421 | if (r) |
422 | return r; |
423 | } |
424 | } |
425 | } |
426 | |
427 | return 0; |
428 | } |
429 | |
430 | /** |
431 | * vcn_v3_0_hw_fini - stop the hardware block |
432 | * |
433 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
434 | * |
435 | * Stop the VCN block, mark ring as not ready any more |
436 | */ |
437 | static int vcn_v3_0_hw_fini(struct amdgpu_ip_block *ip_block) |
438 | { |
439 | struct amdgpu_device *adev = ip_block->adev; |
440 | int i; |
441 | |
442 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
443 | struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; |
444 | |
445 | if (adev->vcn.harvest_config & (1 << i)) |
446 | continue; |
447 | |
448 | cancel_delayed_work_sync(&vinst->idle_work); |
449 | |
450 | if (!amdgpu_sriov_vf(adev)) { |
451 | if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || |
452 | (vinst->cur_state != AMD_PG_STATE_GATE && |
453 | RREG32_SOC15(VCN, i, mmUVD_STATUS))) { |
454 | vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); |
455 | } |
456 | } |
457 | } |
458 | |
459 | return 0; |
460 | } |
461 | |
462 | /** |
463 | * vcn_v3_0_suspend - suspend VCN block |
464 | * |
465 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
466 | * |
467 | * HW fini and suspend VCN block |
468 | */ |
469 | static int vcn_v3_0_suspend(struct amdgpu_ip_block *ip_block) |
470 | { |
471 | struct amdgpu_device *adev = ip_block->adev; |
472 | int r, i; |
473 | |
474 | r = vcn_v3_0_hw_fini(ip_block); |
475 | if (r) |
476 | return r; |
477 | |
478 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
479 | r = amdgpu_vcn_suspend(ip_block->adev, i); |
480 | if (r) |
481 | return r; |
482 | } |
483 | |
484 | return 0; |
485 | } |
486 | |
487 | /** |
488 | * vcn_v3_0_resume - resume VCN block |
489 | * |
490 | * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. |
491 | * |
492 | * Resume firmware and hw init VCN block |
493 | */ |
494 | static int vcn_v3_0_resume(struct amdgpu_ip_block *ip_block) |
495 | { |
496 | struct amdgpu_device *adev = ip_block->adev; |
497 | int r, i; |
498 | |
499 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
500 | r = amdgpu_vcn_resume(ip_block->adev, i); |
501 | if (r) |
502 | return r; |
503 | } |
504 | |
505 | r = vcn_v3_0_hw_init(ip_block); |
506 | |
507 | return r; |
508 | } |
509 | |
510 | /** |
511 | * vcn_v3_0_mc_resume - memory controller programming |
512 | * |
513 | * @vinst: VCN instance |
514 | * |
515 | * Let the VCN memory controller know its offsets |
516 | */ |
517 | static void vcn_v3_0_mc_resume(struct amdgpu_vcn_inst *vinst) |
518 | { |
519 | struct amdgpu_device *adev = vinst->adev; |
520 | int inst = vinst->inst; |
521 | uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst].fw->size + 4); |
522 | uint32_t offset; |
523 | |
524 | /* cache window 0: fw */ |
525 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
526 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
527 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); |
528 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
529 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); |
530 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, 0); |
531 | offset = 0; |
532 | } else { |
533 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, |
534 | lower_32_bits(adev->vcn.inst[inst].gpu_addr)); |
535 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, |
536 | upper_32_bits(adev->vcn.inst[inst].gpu_addr)); |
537 | offset = size; |
538 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET0, |
539 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3); |
540 | } |
541 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE0, size); |
542 | |
543 | /* cache window 1: stack */ |
544 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, |
545 | lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); |
546 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, |
547 | upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); |
548 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET1, 0); |
549 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); |
550 | |
551 | /* cache window 2: context */ |
552 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, |
553 | lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); |
554 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, |
555 | upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); |
556 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_OFFSET2, 0); |
557 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); |
558 | |
559 | /* non-cache window */ |
560 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, |
561 | lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); |
562 | WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, |
563 | upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); |
564 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0); |
565 | WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0, |
566 | AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared))); |
567 | } |
568 | |
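/**
 * vcn_v3_0_mc_resume_dpg_mode - memory controller programming in DPG mode
 *
 * @vinst: Pointer to the VCN instance structure
 * @indirect: When true, queue the writes through the DPG indirect SRAM
 *
 * Program the firmware, stack, context and non-cache windows for an
 * instance running in dynamic power gating mode
 */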
569 | static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst, |
570 | bool indirect) |
571 | { |
572 | struct amdgpu_device *adev = vinst->adev; |
573 | int inst_idx = vinst->inst; |
574 | uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4); |
575 | uint32_t offset; |
576 | |
577 | /* cache window 0: fw */ |
578 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
579 | if (!indirect) { |
580 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
581 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
582 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); |
583 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
584 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
585 | (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); |
586 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
587 | VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); |
588 | } else { |
589 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
590 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); |
591 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
592 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); |
593 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
594 | VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); |
595 | } |
596 | offset = 0; |
597 | } else { |
598 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
599 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
600 | lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); |
601 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
602 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
603 | upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); |
604 | offset = size; |
605 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
606 | VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), |
607 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); |
608 | } |
609 | |
610 | if (!indirect) |
611 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
612 | VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); |
613 | else |
614 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
615 | VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); |
616 | |
617 | /* cache window 1: stack */ |
618 | if (!indirect) { |
619 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
620 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), |
621 | lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); |
622 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
623 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), |
624 | upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); |
625 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
626 | VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); |
627 | } else { |
628 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
629 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); |
630 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
631 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); |
632 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
633 | VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); |
634 | } |
635 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
636 | VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); |
637 | |
638 | /* cache window 2: context */ |
639 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
640 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), |
641 | lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); |
642 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
643 | VCN, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), |
644 | upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); |
645 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
646 | VCN, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); |
647 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
648 | VCN, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); |
649 | |
650 | /* non-cache window */ |
651 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
652 | VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), |
653 | lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); |
654 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
655 | VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), |
656 | upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); |
657 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
658 | VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); |
659 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
660 | VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), |
661 | AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect); |
662 | |
663 | /* VCN global tiling registers */ |
664 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
665 | UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); |
666 | } |
667 | |
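/**
 * vcn_v3_0_disable_static_power_gating - power up the VCN instance
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Program the PGFSM to power up the VCN tiles and clear the power
 * gating bits in UVD_POWER_STATUS
 */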
668 | static void vcn_v3_0_disable_static_power_gating(struct amdgpu_vcn_inst *vinst) |
669 | { |
670 | struct amdgpu_device *adev = vinst->adev; |
671 | int inst = vinst->inst; |
672 | uint32_t data = 0; |
673 | |
674 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { |
675 | data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT |
676 | | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT |
677 | | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT |
678 | | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT |
679 | | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT |
680 | | 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT |
681 | | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT |
682 | | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT |
683 | | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT |
684 | | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT |
685 | | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT |
686 | | 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT |
687 | | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT |
688 | | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); |
689 | |
690 | WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data); |
691 | SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, |
692 | UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0, 0x3F3FFFFF); |
693 | } else { |
694 | data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT |
695 | | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT |
696 | | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT |
697 | | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT |
698 | | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT |
699 | | 1 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT |
700 | | 1 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT |
701 | | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT |
702 | | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT |
703 | | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT |
704 | | 1 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT |
705 | | 1 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT |
706 | | 1 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT |
707 | | 1 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); |
708 | WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data); |
709 | SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, 0, 0x3F3FFFFF); |
710 | } |
711 | |
712 | data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS); |
713 | data &= ~0x103; |
714 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN) |
715 | data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | |
716 | UVD_POWER_STATUS__UVD_PG_EN_MASK; |
717 | |
718 | WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data); |
719 | } |
720 | |
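/**
 * vcn_v3_0_enable_static_power_gating - power gate the VCN instance
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Mark the tiles as powered off in UVD_POWER_STATUS and program the
 * PGFSM to power the VCN instance down
 */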
721 | static void vcn_v3_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst) |
722 | { |
723 | struct amdgpu_device *adev = vinst->adev; |
724 | int inst = vinst->inst; |
725 | uint32_t data; |
726 | |
727 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { |
728 | /* Before power off, this indicator has to be turned on */ |
729 | data = RREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS); |
730 | data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; |
731 | data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; |
732 | WREG32_SOC15(VCN, inst, mmUVD_POWER_STATUS, data); |
733 | |
734 | data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT |
735 | | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT |
736 | | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT |
737 | | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT |
738 | | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT |
739 | | 2 << UVD_PGFSM_CONFIG__UVDIRL_PWR_CONFIG__SHIFT |
740 | | 2 << UVD_PGFSM_CONFIG__UVDLM_PWR_CONFIG__SHIFT |
741 | | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT |
742 | | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT |
743 | | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT |
744 | | 2 << UVD_PGFSM_CONFIG__UVDAB_PWR_CONFIG__SHIFT |
745 | | 2 << UVD_PGFSM_CONFIG__UVDATD_PWR_CONFIG__SHIFT |
746 | | 2 << UVD_PGFSM_CONFIG__UVDNA_PWR_CONFIG__SHIFT |
747 | | 2 << UVD_PGFSM_CONFIG__UVDNB_PWR_CONFIG__SHIFT); |
748 | WREG32_SOC15(VCN, inst, mmUVD_PGFSM_CONFIG, data); |
749 | |
750 | data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT |
751 | | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT |
752 | | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT |
753 | | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT |
754 | | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT |
755 | | 2 << UVD_PGFSM_STATUS__UVDIRL_PWR_STATUS__SHIFT |
756 | | 2 << UVD_PGFSM_STATUS__UVDLM_PWR_STATUS__SHIFT |
757 | | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT |
758 | | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT |
759 | | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT |
760 | | 2 << UVD_PGFSM_STATUS__UVDAB_PWR_STATUS__SHIFT |
761 | | 2 << UVD_PGFSM_STATUS__UVDATD_PWR_STATUS__SHIFT |
762 | | 2 << UVD_PGFSM_STATUS__UVDNA_PWR_STATUS__SHIFT |
763 | | 2 << UVD_PGFSM_STATUS__UVDNB_PWR_STATUS__SHIFT); |
764 | SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_PGFSM_STATUS, data, 0x3F3FFFFF); |
765 | } |
766 | } |
767 | |
768 | /** |
769 | * vcn_v3_0_disable_clock_gating - disable VCN clock gating |
770 | * |
771 | * @vinst: Pointer to the VCN instance structure |
772 | * |
773 | * Disable clock gating for VCN block |
774 | */ |
775 | static void vcn_v3_0_disable_clock_gating(struct amdgpu_vcn_inst *vinst) |
776 | { |
777 | struct amdgpu_device *adev = vinst->adev; |
778 | int inst = vinst->inst; |
779 | uint32_t data; |
780 | |
781 | /* VCN disable CGC */ |
782 | data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); |
783 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
784 | data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
785 | else |
786 | data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; |
787 | data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
788 | data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
789 | WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); |
790 | |
791 | data = RREG32_SOC15(VCN, inst, mmUVD_CGC_GATE); |
792 | data &= ~(UVD_CGC_GATE__SYS_MASK |
793 | | UVD_CGC_GATE__UDEC_MASK |
794 | | UVD_CGC_GATE__MPEG2_MASK |
795 | | UVD_CGC_GATE__REGS_MASK |
796 | | UVD_CGC_GATE__RBC_MASK |
797 | | UVD_CGC_GATE__LMI_MC_MASK |
798 | | UVD_CGC_GATE__LMI_UMC_MASK |
799 | | UVD_CGC_GATE__IDCT_MASK |
800 | | UVD_CGC_GATE__MPRD_MASK |
801 | | UVD_CGC_GATE__MPC_MASK |
802 | | UVD_CGC_GATE__LBSI_MASK |
803 | | UVD_CGC_GATE__LRBBM_MASK |
804 | | UVD_CGC_GATE__UDEC_RE_MASK |
805 | | UVD_CGC_GATE__UDEC_CM_MASK |
806 | | UVD_CGC_GATE__UDEC_IT_MASK |
807 | | UVD_CGC_GATE__UDEC_DB_MASK |
808 | | UVD_CGC_GATE__UDEC_MP_MASK |
809 | | UVD_CGC_GATE__WCB_MASK |
810 | | UVD_CGC_GATE__VCPU_MASK |
811 | | UVD_CGC_GATE__MMSCH_MASK); |
812 | |
813 | WREG32_SOC15(VCN, inst, mmUVD_CGC_GATE, data); |
814 | |
815 | SOC15_WAIT_ON_RREG(VCN, inst, mmUVD_CGC_GATE, 0, 0xFFFFFFFF); |
816 | |
817 | data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); |
818 | data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
819 | | UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
820 | | UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
821 | | UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
822 | | UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
823 | | UVD_CGC_CTRL__SYS_MODE_MASK |
824 | | UVD_CGC_CTRL__UDEC_MODE_MASK |
825 | | UVD_CGC_CTRL__MPEG2_MODE_MASK |
826 | | UVD_CGC_CTRL__REGS_MODE_MASK |
827 | | UVD_CGC_CTRL__RBC_MODE_MASK |
828 | | UVD_CGC_CTRL__LMI_MC_MODE_MASK |
829 | | UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
830 | | UVD_CGC_CTRL__IDCT_MODE_MASK |
831 | | UVD_CGC_CTRL__MPRD_MODE_MASK |
832 | | UVD_CGC_CTRL__MPC_MODE_MASK |
833 | | UVD_CGC_CTRL__LBSI_MODE_MASK |
834 | | UVD_CGC_CTRL__LRBBM_MODE_MASK |
835 | | UVD_CGC_CTRL__WCB_MODE_MASK |
836 | | UVD_CGC_CTRL__VCPU_MODE_MASK |
837 | | UVD_CGC_CTRL__MMSCH_MODE_MASK); |
838 | WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); |
839 | |
840 | data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE); |
841 | data |= (UVD_SUVD_CGC_GATE__SRE_MASK |
842 | | UVD_SUVD_CGC_GATE__SIT_MASK |
843 | | UVD_SUVD_CGC_GATE__SMP_MASK |
844 | | UVD_SUVD_CGC_GATE__SCM_MASK |
845 | | UVD_SUVD_CGC_GATE__SDB_MASK |
846 | | UVD_SUVD_CGC_GATE__SRE_H264_MASK |
847 | | UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
848 | | UVD_SUVD_CGC_GATE__SIT_H264_MASK |
849 | | UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
850 | | UVD_SUVD_CGC_GATE__SCM_H264_MASK |
851 | | UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
852 | | UVD_SUVD_CGC_GATE__SDB_H264_MASK |
853 | | UVD_SUVD_CGC_GATE__SDB_HEVC_MASK |
854 | | UVD_SUVD_CGC_GATE__SCLR_MASK |
855 | | UVD_SUVD_CGC_GATE__ENT_MASK |
856 | | UVD_SUVD_CGC_GATE__IME_MASK |
857 | | UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK |
858 | | UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK |
859 | | UVD_SUVD_CGC_GATE__SITE_MASK |
860 | | UVD_SUVD_CGC_GATE__SRE_VP9_MASK |
861 | | UVD_SUVD_CGC_GATE__SCM_VP9_MASK |
862 | | UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK |
863 | | UVD_SUVD_CGC_GATE__SDB_VP9_MASK |
864 | | UVD_SUVD_CGC_GATE__IME_HEVC_MASK |
865 | | UVD_SUVD_CGC_GATE__EFC_MASK |
866 | | UVD_SUVD_CGC_GATE__SAOE_MASK |
867 | | UVD_SUVD_CGC_GATE__SRE_AV1_MASK |
868 | | UVD_SUVD_CGC_GATE__FBC_PCLK_MASK |
869 | | UVD_SUVD_CGC_GATE__FBC_CCLK_MASK |
870 | | UVD_SUVD_CGC_GATE__SCM_AV1_MASK |
871 | | UVD_SUVD_CGC_GATE__SMPA_MASK); |
872 | WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data); |
873 | |
874 | data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2); |
875 | data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK |
876 | | UVD_SUVD_CGC_GATE2__MPBE1_MASK |
877 | | UVD_SUVD_CGC_GATE2__SIT_AV1_MASK |
878 | | UVD_SUVD_CGC_GATE2__SDB_AV1_MASK |
879 | | UVD_SUVD_CGC_GATE2__MPC1_MASK); |
880 | WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data); |
881 | |
882 | data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL); |
883 | data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
884 | | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
885 | | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
886 | | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
887 | | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK |
888 | | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK |
889 | | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK |
890 | | UVD_SUVD_CGC_CTRL__IME_MODE_MASK |
891 | | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK |
892 | | UVD_SUVD_CGC_CTRL__EFC_MODE_MASK |
893 | | UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK |
894 | | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK |
895 | | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK |
896 | | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK |
897 | | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK |
898 | | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK |
899 | | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK |
900 | | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK |
901 | | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK); |
902 | WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data); |
903 | } |
904 | |
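/**
 * vcn_v3_0_clock_gating_dpg_mode - clock gating setup in DPG mode
 *
 * @vinst: Pointer to the VCN instance structure
 * @sram_sel: SRAM selector forwarded to the DPG register write helper
 * @indirect: When true, queue the writes through the DPG indirect SRAM
 *
 * Configure UVD_CGC_CTRL, UVD_CGC_GATE and the SUVD gating registers
 * through the DPG register write path
 */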
905 | static void vcn_v3_0_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst, |
906 | uint8_t sram_sel, |
907 | uint8_t indirect) |
908 | { |
909 | struct amdgpu_device *adev = vinst->adev; |
910 | int inst_idx = vinst->inst; |
911 | uint32_t reg_data = 0; |
912 | |
913 | /* enable sw clock gating control */ |
914 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
915 | reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
916 | else |
917 | reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
918 | reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
919 | reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
920 | reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | |
921 | UVD_CGC_CTRL__UDEC_CM_MODE_MASK | |
922 | UVD_CGC_CTRL__UDEC_IT_MODE_MASK | |
923 | UVD_CGC_CTRL__UDEC_DB_MODE_MASK | |
924 | UVD_CGC_CTRL__UDEC_MP_MODE_MASK | |
925 | UVD_CGC_CTRL__SYS_MODE_MASK | |
926 | UVD_CGC_CTRL__UDEC_MODE_MASK | |
927 | UVD_CGC_CTRL__MPEG2_MODE_MASK | |
928 | UVD_CGC_CTRL__REGS_MODE_MASK | |
929 | UVD_CGC_CTRL__RBC_MODE_MASK | |
930 | UVD_CGC_CTRL__LMI_MC_MODE_MASK | |
931 | UVD_CGC_CTRL__LMI_UMC_MODE_MASK | |
932 | UVD_CGC_CTRL__IDCT_MODE_MASK | |
933 | UVD_CGC_CTRL__MPRD_MODE_MASK | |
934 | UVD_CGC_CTRL__MPC_MODE_MASK | |
935 | UVD_CGC_CTRL__LBSI_MODE_MASK | |
936 | UVD_CGC_CTRL__LRBBM_MODE_MASK | |
937 | UVD_CGC_CTRL__WCB_MODE_MASK | |
938 | UVD_CGC_CTRL__VCPU_MODE_MASK | |
939 | UVD_CGC_CTRL__MMSCH_MODE_MASK); |
940 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
941 | VCN, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); |
942 | |
943 | /* turn off clock gating */ |
944 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
945 | VCN, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect); |
946 | |
947 | /* turn on SUVD clock gating */ |
948 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
949 | VCN, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); |
950 | |
951 | /* turn on sw mode in UVD_SUVD_CGC_CTRL */ |
952 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
953 | VCN, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); |
954 | } |
955 | |
956 | /** |
957 | * vcn_v3_0_enable_clock_gating - enable VCN clock gating |
958 | * |
959 | * @vinst: Pointer to the VCN instance structure |
960 | * |
961 | * Enable clock gating for VCN block |
962 | */ |
963 | static void vcn_v3_0_enable_clock_gating(struct amdgpu_vcn_inst *vinst) |
964 | { |
965 | struct amdgpu_device *adev = vinst->adev; |
966 | int inst = vinst->inst; |
967 | uint32_t data; |
968 | |
969 | /* enable VCN CGC */ |
970 | data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); |
971 | if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) |
972 | data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
973 | else |
974 | data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; |
975 | data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; |
976 | data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; |
977 | WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); |
978 | |
979 | data = RREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL); |
980 | data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
981 | | UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
982 | | UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
983 | | UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
984 | | UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
985 | | UVD_CGC_CTRL__SYS_MODE_MASK |
986 | | UVD_CGC_CTRL__UDEC_MODE_MASK |
987 | | UVD_CGC_CTRL__MPEG2_MODE_MASK |
988 | | UVD_CGC_CTRL__REGS_MODE_MASK |
989 | | UVD_CGC_CTRL__RBC_MODE_MASK |
990 | | UVD_CGC_CTRL__LMI_MC_MODE_MASK |
991 | | UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
992 | | UVD_CGC_CTRL__IDCT_MODE_MASK |
993 | | UVD_CGC_CTRL__MPRD_MODE_MASK |
994 | | UVD_CGC_CTRL__MPC_MODE_MASK |
995 | | UVD_CGC_CTRL__LBSI_MODE_MASK |
996 | | UVD_CGC_CTRL__LRBBM_MODE_MASK |
997 | | UVD_CGC_CTRL__WCB_MODE_MASK |
998 | | UVD_CGC_CTRL__VCPU_MODE_MASK |
999 | | UVD_CGC_CTRL__MMSCH_MODE_MASK); |
1000 | WREG32_SOC15(VCN, inst, mmUVD_CGC_CTRL, data); |
1001 | |
1002 | data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL); |
1003 | data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
1004 | | UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
1005 | | UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
1006 | | UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
1007 | | UVD_SUVD_CGC_CTRL__SDB_MODE_MASK |
1008 | | UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK |
1009 | | UVD_SUVD_CGC_CTRL__ENT_MODE_MASK |
1010 | | UVD_SUVD_CGC_CTRL__IME_MODE_MASK |
1011 | | UVD_SUVD_CGC_CTRL__SITE_MODE_MASK |
1012 | | UVD_SUVD_CGC_CTRL__EFC_MODE_MASK |
1013 | | UVD_SUVD_CGC_CTRL__SAOE_MODE_MASK |
1014 | | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK |
1015 | | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK |
1016 | | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK |
1017 | | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK |
1018 | | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK |
1019 | | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK |
1020 | | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK |
1021 | | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK); |
1022 | WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_CTRL, data); |
1023 | } |
1024 | |
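/**
 * vcn_v3_0_start_dpg_mode - start VCN in dynamic power gating mode
 *
 * @vinst: Pointer to the VCN instance structure
 * @indirect: When true, program the registers through the DPG indirect SRAM
 *
 * Enable dynamic power gating, bring up the VCPU and initialize the
 * decode ring buffer for this instance
 */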
1025 | static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) |
1026 | { |
1027 | struct amdgpu_device *adev = vinst->adev; |
1028 | int inst_idx = vinst->inst; |
1029 | volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; |
1030 | struct amdgpu_ring *ring; |
1031 | uint32_t rb_bufsz, tmp; |
1032 | |
1033 | /* disable register anti-hang mechanism */ |
1034 | WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1, |
1035 | ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); |
1036 | /* enable dynamic power gating mode */ |
1037 | tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS); |
1038 | tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; |
1039 | tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; |
1040 | WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp); |
1041 | |
1042 | if (indirect) |
1043 | adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; |
1044 | |
1045 | /* enable clock gating */ |
1046 | vcn_v3_0_clock_gating_dpg_mode(vinst, 0, indirect); |
1047 | |
1048 | /* enable VCPU clock */ |
1049 | tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); |
1050 | tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; |
1051 | tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; |
1052 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1053 | VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); |
1054 | |
1055 | /* disable master interrupt */ |
1056 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1057 | VCN, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect); |
1058 | |
1059 | /* setup mmUVD_LMI_CTRL */ |
1060 | tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
1061 | UVD_LMI_CTRL__REQ_MODE_MASK | |
1062 | UVD_LMI_CTRL__CRC_RESET_MASK | |
1063 | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | |
1064 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
1065 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | |
1066 | (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | |
1067 | 0x00100000L); |
1068 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1069 | VCN, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect); |
1070 | |
1071 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1072 | VCN, inst_idx, mmUVD_MPC_CNTL), |
1073 | 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); |
1074 | |
1075 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1076 | VCN, inst_idx, mmUVD_MPC_SET_MUXA0), |
1077 | ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | |
1078 | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | |
1079 | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | |
1080 | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); |
1081 | |
1082 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1083 | VCN, inst_idx, mmUVD_MPC_SET_MUXB0), |
1084 | ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | |
1085 | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | |
1086 | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | |
1087 | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); |
1088 | |
1089 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1090 | VCN, inst_idx, mmUVD_MPC_SET_MUX), |
1091 | ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | |
1092 | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | |
1093 | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); |
1094 | |
1095 | vcn_v3_0_mc_resume_dpg_mode(vinst, indirect); |
1096 | |
1097 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1098 | VCN, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect); |
1099 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1100 | VCN, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); |
1101 | |
1102 | /* enable LMI MC and UMC channels */ |
1103 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1104 | VCN, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect); |
1105 | |
1106 | /* unblock VCPU register access */ |
1107 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1108 | VCN, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect); |
1109 | |
1110 | tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); |
1111 | tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; |
1112 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1113 | VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); |
1114 | |
1115 | /* enable master interrupt */ |
1116 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1117 | VCN, inst_idx, mmUVD_MASTINT_EN), |
1118 | UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); |
1119 | |
1120 | /* add nop to workaround PSP size check */ |
1121 | WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET( |
1122 | VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); |
1123 | |
1124 | if (indirect) |
1125 | amdgpu_vcn_psp_update_sram(adev, inst_idx, 0); |
1126 | |
1127 | ring = &adev->vcn.inst[inst_idx].ring_dec; |
1128 | /* force RBC into idle state */ |
1129 | rb_bufsz = order_base_2(ring->ring_size); |
1130 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); |
1131 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
1132 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
1133 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); |
1134 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); |
1135 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp); |
1136 | |
1137 | /* Stall DPG before WPTR/RPTR reset */ |
1138 | WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), |
1139 | UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, |
1140 | ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); |
1141 | fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); |
1142 | |
1143 | /* set the write pointer delay */ |
1144 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0); |
1145 | |
1146 | /* set the wb address */ |
1147 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR, |
1148 | (upper_32_bits(ring->gpu_addr) >> 2)); |
1149 | |
1150 | /* program the RB_BASE for the ring buffer */ |
1151 | WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, |
1152 | lower_32_bits(ring->gpu_addr)); |
1153 | WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, |
1154 | upper_32_bits(ring->gpu_addr)); |
1155 | |
1156 | /* Initialize the ring buffer's read and write pointers */ |
1157 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0); |
1158 | |
1159 | WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0); |
1160 | |
1161 | ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR); |
1162 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, |
1163 | lower_32_bits(ring->wptr)); |
1164 | |
1165 | /* Reset FW shared memory RBC WPTR/RPTR */ |
1166 | fw_shared->rb.rptr = 0; |
1167 | fw_shared->rb.wptr = lower_32_bits(ring->wptr); |
1168 | |
1169 | /* resetting done, fw can check RB ring */ |
1170 | fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); |
1171 | |
1172 | /* Unstall DPG */ |
1173 | WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), |
1174 | 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); |
1175 | |
1176 | /* Keeping one read-back to ensure all register writes are done, |
1177 | * otherwise it may introduce race conditions. |
1178 | */ |
1179 | RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS); |
1180 | |
1181 | return 0; |
1182 | } |
1183 | |
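/**
 * vcn_v3_0_start - start the VCN instance
 *
 * @vinst: Pointer to the VCN instance structure
 *
 * Power up the instance, disable clock gating, boot the VCPU and
 * program the decode and encode ring buffers
 */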
1184 | static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst) |
1185 | { |
1186 | struct amdgpu_device *adev = vinst->adev; |
1187 | int i = vinst->inst; |
1188 | volatile struct amdgpu_fw_shared *fw_shared; |
1189 | struct amdgpu_ring *ring; |
1190 | uint32_t rb_bufsz, tmp; |
1191 | int j, k, r; |
1192 | |
1193 | if (adev->vcn.harvest_config & (1 << i)) |
1194 | return 0; |
1195 | |
1196 | if (adev->pm.dpm_enabled) |
1197 | amdgpu_dpm_enable_vcn(adev, true, i); |
1198 | |
1199 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) |
1200 | return vcn_v3_0_start_dpg_mode(vinst, vinst->indirect_sram); |
1201 | |
1202 | /* disable VCN power gating */ |
1203 | vcn_v3_0_disable_static_power_gating(vinst); |
1204 | |
1205 | /* set VCN status busy */ |
1206 | tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; |
1207 | WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp); |
1208 | |
1209 | /* SW clock gating */ |
1210 | vcn_v3_0_disable_clock_gating(vinst); |
1211 | |
1212 | /* enable VCPU clock */ |
1213 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), |
1214 | UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); |
1215 | |
1216 | /* disable master interrupt */ |
1217 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0, |
1218 | ~UVD_MASTINT_EN__VCPU_EN_MASK); |
1219 | |
1220 | /* enable LMI MC and UMC channels */ |
1221 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0, |
1222 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); |
1223 | |
1224 | tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET); |
1225 | tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; |
1226 | tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; |
1227 | WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp); |
1228 | |
1229 | /* setup mmUVD_LMI_CTRL */ |
1230 | tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL); |
1231 | WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | |
1232 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | |
1233 | UVD_LMI_CTRL__MASK_MC_URGENT_MASK | |
1234 | UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | |
1235 | UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); |
1236 | |
1237 | /* setup mmUVD_MPC_CNTL */ |
1238 | tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL); |
1239 | tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; |
1240 | tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; |
1241 | WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp); |
1242 | |
1243 | /* setup UVD_MPC_SET_MUXA0 */ |
1244 | WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0, |
1245 | ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | |
1246 | (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | |
1247 | (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | |
1248 | (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); |
1249 | |
1250 | /* setup UVD_MPC_SET_MUXB0 */ |
1251 | WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0, |
1252 | ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | |
1253 | (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | |
1254 | (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | |
1255 | (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); |
1256 | |
1257 | /* setup mmUVD_MPC_SET_MUX */ |
1258 | WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX, |
1259 | ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | |
1260 | (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | |
1261 | (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); |
1262 | |
1263 | vcn_v3_0_mc_resume(vinst); |
1264 | |
1265 | /* VCN global tiling registers */ |
1266 | WREG32_SOC15(VCN, i, mmUVD_GFX10_ADDR_CONFIG, |
1267 | adev->gfx.config.gb_addr_config); |
1268 | |
1269 | /* unblock VCPU register access */ |
1270 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0, |
1271 | ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); |
1272 | |
1273 | /* release VCPU reset to boot */ |
1274 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, |
1275 | ~UVD_VCPU_CNTL__BLK_RST_MASK); |
1276 | |
1277 | for (j = 0; j < 10; ++j) { |
1278 | uint32_t status; |
1279 | |
1280 | for (k = 0; k < 100; ++k) { |
1281 | status = RREG32_SOC15(VCN, i, mmUVD_STATUS); |
1282 | if (status & 2) |
1283 | break; |
1284 | mdelay(10); |
1285 | } |
1286 | r = 0; |
1287 | if (status & 2) |
1288 | break; |
1289 | |
1290 | DRM_ERROR("VCN[%d] decode not responding, trying to reset the VCPU!!!\n", i); |
1291 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), |
1292 | UVD_VCPU_CNTL__BLK_RST_MASK, |
1293 | ~UVD_VCPU_CNTL__BLK_RST_MASK); |
1294 | mdelay(10); |
1295 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, |
1296 | ~UVD_VCPU_CNTL__BLK_RST_MASK); |
1297 | |
1298 | mdelay(10); |
1299 | r = -1; |
1300 | } |
1301 | |
1302 | if (r) { |
1303 | DRM_ERROR("VCN[%d] decode not responding, giving up!!!\n", i); |
1304 | return r; |
1305 | } |
1306 | |
1307 | /* enable master interrupt */ |
1308 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), |
1309 | UVD_MASTINT_EN__VCPU_EN_MASK, |
1310 | ~UVD_MASTINT_EN__VCPU_EN_MASK); |
1311 | |
1312 | /* clear the busy bit of VCN_STATUS */ |
1313 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0, |
1314 | ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); |
1315 | |
1316 | WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0); |
1317 | |
1318 | ring = &adev->vcn.inst[i].ring_dec; |
1319 | /* force RBC into idle state */ |
1320 | rb_bufsz = order_base_2(ring->ring_size); |
1321 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); |
1322 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
1323 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
1324 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); |
1325 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); |
1326 | WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp); |
1327 | |
1328 | fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; |
1329 | fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); |
1330 | |
1331 | /* program the RB_BASE for the ring buffer */ |
1332 | WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, |
1333 | lower_32_bits(ring->gpu_addr)); |
1334 | WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, |
1335 | upper_32_bits(ring->gpu_addr)); |
1336 | |
1337 | /* Initialize the ring buffer's read and write pointers */ |
1338 | WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0); |
1339 | |
1340 | WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0); |
1341 | ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR); |
1342 | WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR, |
1343 | lower_32_bits(ring->wptr)); |
1344 | fw_shared->rb.wptr = lower_32_bits(ring->wptr); |
1345 | fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); |
1346 | |
1347 | if (amdgpu_ip_version(adev, UVD_HWIP, 0) != |
1348 | IP_VERSION(3, 0, 33)) { |
1349 | fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); |
1350 | ring = &adev->vcn.inst[i].ring_enc[0]; |
1351 | WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
1352 | WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
1353 | WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr); |
1354 | WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); |
1355 | WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4); |
1356 | fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); |
1357 | |
1358 | fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); |
1359 | ring = &adev->vcn.inst[i].ring_enc[1]; |
1360 | WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
1361 | WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
1362 | WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr); |
1363 | WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); |
1364 | WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4); |
1365 | fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); |
1366 | } |
1367 | |
1368 | /* Keeping one read-back to ensure all register writes are done, |
1369 | * otherwise it may introduce race conditions. |
1370 | */ |
1371 | RREG32_SOC15(VCN, i, mmUVD_STATUS); |
1372 | |
1373 | return 0; |
1374 | } |
1375 | |
1376 | static int vcn_v3_0_start_sriov(struct amdgpu_device *adev) |
1377 | { |
1378 | int i, j; |
1379 | struct amdgpu_ring *ring; |
1380 | uint64_t cache_addr; |
1381 | uint64_t rb_addr; |
1382 | uint64_t ctx_addr; |
1383 | uint32_t param, resp, expected; |
1384 | uint32_t offset, cache_size; |
1385 | uint32_t tmp, timeout; |
1386 | |
1387 | struct amdgpu_mm_table *table = &adev->virt.mm_table; |
1388 | uint32_t *table_loc; |
1389 | uint32_t table_size; |
1390 | uint32_t size, size_dw; |
1391 | |
1392 | struct mmsch_v3_0_cmd_direct_write |
1393 | direct_wt = { {0} }; |
1394 | struct mmsch_v3_0_cmd_direct_read_modify_write |
1395 | direct_rd_mod_wt = { {0} }; |
1396 | struct mmsch_v3_0_cmd_end end = { {0} }; |
1397 | struct mmsch_v3_0_init_header header; |
1398 | |
1399 | direct_wt.cmd_header.command_type = |
1400 | MMSCH_COMMAND__DIRECT_REG_WRITE; |
1401 | direct_rd_mod_wt.cmd_header.command_type = |
1402 | MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; |
1403 | end.cmd_header.command_type = |
1404 | MMSCH_COMMAND__END; |
1405 | |
1406 | header.version = MMSCH_VERSION; |
1407 | header.total_size = sizeof(struct mmsch_v3_0_init_header) >> 2; |
1408 | for (i = 0; i < MMSCH_V3_0_VCN_INSTANCES; i++) { |
1409 | header.inst[i].init_status = 0; |
1410 | header.inst[i].table_offset = 0; |
1411 | header.inst[i].table_size = 0; |
1412 | } |
1413 | |
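/* the per-instance command tables are laid out immediately after the header */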
1414 | table_loc = (uint32_t *)table->cpu_addr; |
1415 | table_loc += header.total_size; |
1416 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
1417 | if (adev->vcn.harvest_config & (1 << i)) |
1418 | continue; |
1419 | |
1420 | table_size = 0; |
1421 | |
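/* build this instance's init table: flag UVD_STATUS busy, then program
 * the firmware cache windows, the stack/context offsets and the ring
 * registers through MMSCH direct register writes
 */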
1422 | MMSCH_V3_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, i, |
1423 | mmUVD_STATUS), |
1424 | ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); |
1425 | |
1426 | cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); |
1427 | |
1428 | if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
1429 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1430 | mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
1431 | adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); |
1432 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1433 | mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
1434 | adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); |
1435 | offset = 0; |
1436 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1437 | mmUVD_VCPU_CACHE_OFFSET0), |
1438 | 0); |
1439 | } else { |
1440 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1441 | mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), |
1442 | lower_32_bits(adev->vcn.inst[i].gpu_addr)); |
1443 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1444 | mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), |
1445 | upper_32_bits(adev->vcn.inst[i].gpu_addr)); |
1446 | offset = cache_size; |
1447 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1448 | mmUVD_VCPU_CACHE_OFFSET0), |
1449 | AMDGPU_UVD_FIRMWARE_OFFSET >> 3); |
1450 | } |
1451 | |
1452 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1453 | mmUVD_VCPU_CACHE_SIZE0), |
1454 | cache_size); |
1455 | |
1456 | cache_addr = adev->vcn.inst[i].gpu_addr + offset; |
1457 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1458 | mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), |
1459 | lower_32_bits(cache_addr)); |
1460 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1461 | mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), |
1462 | upper_32_bits(cache_addr)); |
1463 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1464 | mmUVD_VCPU_CACHE_OFFSET1), |
1465 | 0); |
1466 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1467 | mmUVD_VCPU_CACHE_SIZE1), |
1468 | AMDGPU_VCN_STACK_SIZE); |
1469 | |
1470 | cache_addr = adev->vcn.inst[i].gpu_addr + offset + |
1471 | AMDGPU_VCN_STACK_SIZE; |
1472 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1473 | mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), |
1474 | lower_32_bits(cache_addr)); |
1475 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1476 | mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), |
1477 | upper_32_bits(cache_addr)); |
1478 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1479 | mmUVD_VCPU_CACHE_OFFSET2), |
1480 | 0); |
1481 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1482 | mmUVD_VCPU_CACHE_SIZE2), |
1483 | AMDGPU_VCN_CONTEXT_SIZE); |
1484 | |
1485 | for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { |
1486 | ring = &adev->vcn.inst[i].ring_enc[j]; |
1487 | ring->wptr = 0; |
1488 | rb_addr = ring->gpu_addr; |
1489 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1490 | mmUVD_RB_BASE_LO), |
1491 | lower_32_bits(rb_addr)); |
1492 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1493 | mmUVD_RB_BASE_HI), |
1494 | upper_32_bits(rb_addr)); |
1495 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1496 | mmUVD_RB_SIZE), |
1497 | ring->ring_size / 4); |
1498 | } |
1499 | |
1500 | ring = &adev->vcn.inst[i].ring_dec; |
1501 | ring->wptr = 0; |
1502 | rb_addr = ring->gpu_addr; |
1503 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1504 | mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), |
1505 | lower_32_bits(rb_addr)); |
1506 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1507 | mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), |
1508 | upper_32_bits(rb_addr)); |
1509 | /* force RBC into idle state */ |
1510 | tmp = order_base_2(ring->ring_size); |
1511 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp); |
1512 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); |
1513 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
1514 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); |
1515 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); |
1516 | MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i, |
1517 | mmUVD_RBC_RB_CNTL), |
1518 | tmp); |
1519 | |
1520 | /* add end packet */ |
1521 | MMSCH_V3_0_INSERT_END(); |
1522 | |
1523 | /* refine header */ |
1524 | header.inst[i].init_status = 0; |
1525 | header.inst[i].table_offset = header.total_size; |
1526 | header.inst[i].table_size = table_size; |
1527 | header.total_size += table_size; |
1528 | } |
1529 | |
1530 | /* Update init table header in memory */ |
1531 | size = sizeof(struct mmsch_v3_0_init_header); |
1532 | table_loc = (uint32_t *)table->cpu_addr; |
1533 | memcpy((void *)table_loc, &header, size); |
1534 | |
1535 | /* message MMSCH (in VCN[0]) to initialize this client |
1536 | * 1, write to mmsch_vf_ctx_addr_lo/hi register with GPU mc addr |
1537 | * of memory descriptor location |
1538 | */ |
1539 | ctx_addr = table->gpu_addr; |
1540 | WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); |
1541 | WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); |
1542 | |
1543 | /* 2, update vmid of descriptor */ |
1544 | tmp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID); |
1545 | tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; |
1546 | /* use domain0 for MM scheduler */ |
1547 | tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); |
1548 | WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, tmp); |
1549 | |
1550 | /* 3, notify mmsch about the size of this descriptor */ |
1551 | size = header.total_size; |
1552 | WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size); |
1553 | |
1554 | /* 4, set resp to zero */ |
1555 | WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0); |
1556 | |
1557 | /* 5, kick off the initialization and wait until |
1558 | * MMSCH_VF_MAILBOX_RESP becomes non-zero |
1559 | */ |
1560 | param = 0x10000001; |
1561 | WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, param); |
1562 | tmp = 0; |
1563 | timeout = 1000; |
1564 | resp = 0; |
1565 | expected = param + 1; |
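/* MMSCH acknowledges by writing param + 1 back into the response mailbox */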
1566 | while (resp != expected) { |
1567 | resp = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP); |
1568 | if (resp == expected) |
1569 | break; |
1570 | |
1571 | udelay(10); |
1572 | tmp = tmp + 10; |
1573 | if (tmp >= timeout) { |
1574 | DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec" \ |
1575 | " waiting for mmMMSCH_VF_MAILBOX_RESP " \ |
1576 | "(expected=0x%08x, readback=0x%08x)\n" , |
1577 | tmp, expected, resp); |
1578 | return -EBUSY; |
1579 | } |
1580 | } |
1581 | |
1582 | return 0; |
1583 | } |
1584 | |
1585 | static int vcn_v3_0_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) |
1586 | { |
1587 | struct amdgpu_device *adev = vinst->adev; |
1588 | int inst_idx = vinst->inst; |
1589 | struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; |
1590 | uint32_t tmp; |
1591 | |
1592 | vcn_v3_0_pause_dpg_mode(vinst, &state); |
1593 | |
1594 | /* Wait for power status to be 1 */ |
1595 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, |
1596 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); |
1597 | |
1598 | /* wait for read ptr to be equal to write ptr */ |
1599 | tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR); |
1600 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF); |
1601 | |
1602 | tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2); |
1603 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF); |
1604 | |
1605 | tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; |
1606 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF); |
1607 | |
1608 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1, |
1609 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); |
1610 | |
1611 | /* disable dynamic power gating mode */ |
1612 | WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0, |
1613 | ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); |
1614 | |
1615 | /* Keeping one read-back to ensure all register writes are done, |
1616 | * otherwise it may introduce race conditions. |
1617 | */ |
1618 | RREG32_SOC15(VCN, inst_idx, mmUVD_STATUS); |
1619 | |
1620 | return 0; |
1621 | } |
1622 | |
1623 | static int vcn_v3_0_stop(struct amdgpu_vcn_inst *vinst) |
1624 | { |
1625 | struct amdgpu_device *adev = vinst->adev; |
1626 | int i = vinst->inst; |
1627 | uint32_t tmp; |
1628 | int r = 0; |
1629 | |
1630 | if (adev->vcn.harvest_config & (1 << i)) |
1631 | return 0; |
1632 | |
1633 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { |
1634 | r = vcn_v3_0_stop_dpg_mode(vinst); |
1635 | goto done; |
1636 | } |
1637 | |
1638 | /* wait for vcn idle */ |
1639 | r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7); |
1640 | if (r) |
1641 | goto done; |
1642 | |
1643 | tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | |
1644 | UVD_LMI_STATUS__READ_CLEAN_MASK | |
1645 | UVD_LMI_STATUS__WRITE_CLEAN_MASK | |
1646 | UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; |
1647 | r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp); |
1648 | if (r) |
1649 | goto done; |
1650 | |
1651 | /* disable LMI UMC channel */ |
1652 | tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2); |
1653 | tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; |
1654 | WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp); |
1655 | tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK| |
1656 | UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; |
1657 | r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp); |
1658 | if (r) |
1659 | goto done; |
1660 | |
1661 | /* block VCPU register access */ |
1662 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), |
1663 | UVD_RB_ARB_CTRL__VCPU_DIS_MASK, |
1664 | ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); |
1665 | |
1666 | /* reset VCPU */ |
1667 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), |
1668 | UVD_VCPU_CNTL__BLK_RST_MASK, |
1669 | ~UVD_VCPU_CNTL__BLK_RST_MASK); |
1670 | |
1671 | /* disable VCPU clock */ |
1672 | WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0, |
1673 | ~(UVD_VCPU_CNTL__CLK_EN_MASK)); |
1674 | |
1675 | /* apply soft reset */ |
1676 | tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET); |
1677 | tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; |
1678 | WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp); |
1679 | tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET); |
1680 | tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; |
1681 | WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp); |
1682 | |
1683 | /* clear status */ |
1684 | WREG32_SOC15(VCN, i, mmUVD_STATUS, 0); |
1685 | |
1686 | /* apply HW clock gating */ |
1687 | vcn_v3_0_enable_clock_gating(vinst); |
1688 | |
1689 | /* enable VCN power gating */ |
1690 | vcn_v3_0_enable_static_power_gating(vinst); |
1691 | |
1692 | /* Keeping one read-back to ensure all register writes are done, |
1693 | * otherwise it may introduce race conditions. |
1694 | */ |
1695 | RREG32_SOC15(VCN, i, mmUVD_STATUS); |
1696 | |
1697 | done: |
1698 | if (adev->pm.dpm_enabled) |
1699 | amdgpu_dpm_enable_vcn(adev, false, i); |
1700 | |
1701 | return r; |
1702 | } |
1703 | |
1704 | static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, |
1705 | struct dpg_pause_state *new_state) |
1706 | { |
1707 | struct amdgpu_device *adev = vinst->adev; |
1708 | int inst_idx = vinst->inst; |
1709 | volatile struct amdgpu_fw_shared *fw_shared; |
1710 | struct amdgpu_ring *ring; |
1711 | uint32_t reg_data = 0; |
1712 | int ret_code; |
1713 | |
1714 | /* pause/unpause if state is changed */ |
1715 | if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) { |
1716 | DRM_DEBUG("dpg pause state changed %d -> %d" , |
1717 | adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based); |
1718 | reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) & |
1719 | (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); |
1720 | |
1721 | if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { |
1722 | ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1, |
1723 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); |
1724 | |
1725 | if (!ret_code) { |
1726 | /* pause DPG */ |
1727 | reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; |
1728 | WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); |
1729 | |
1730 | /* wait for ACK */ |
1731 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE, |
1732 | UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, |
1733 | UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); |
1734 | |
1735 | /* Stall DPG before WPTR/RPTR reset */ |
1736 | WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), |
1737 | UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK, |
1738 | ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); |
1739 | |
1740 | if (amdgpu_ip_version(adev, UVD_HWIP, 0) != |
1741 | IP_VERSION(3, 0, 33)) { |
1742 | /* Restore */ |
1743 | fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; |
1744 | fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); |
1745 | ring = &adev->vcn.inst[inst_idx].ring_enc[0]; |
1746 | ring->wptr = 0; |
1747 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr); |
1748 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); |
1749 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4); |
1750 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); |
1751 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
1752 | fw_shared->multi_queue.encode_generalpurpose_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); |
1753 | |
1754 | fw_shared->multi_queue.encode_lowlatency_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET); |
1755 | ring = &adev->vcn.inst[inst_idx].ring_enc[1]; |
1756 | ring->wptr = 0; |
1757 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr); |
1758 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); |
1759 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4); |
1760 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); |
1761 | WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
1762 | fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET); |
1763 | |
1764 | /* restore wptr/rptr with pointers saved in FW shared memory */ |
1765 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr); |
1766 | WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr); |
1767 | } |
1768 | |
1769 | /* Unstall DPG */ |
1770 | WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), |
1771 | 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK); |
1772 | |
1773 | SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, |
1774 | UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); |
1775 | } |
1776 | } else { |
1777 | /* unpause dpg, no need to wait */ |
1778 | reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; |
1779 | WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data); |
1780 | } |
1781 | adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based; |
1782 | } |
1783 | |
1784 | return 0; |
1785 | } |
1786 | |
1787 | /** |
1788 | * vcn_v3_0_dec_ring_get_rptr - get read pointer |
1789 | * |
1790 | * @ring: amdgpu_ring pointer |
1791 | * |
1792 | * Returns the current hardware read pointer |
1793 | */ |
1794 | static uint64_t vcn_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring) |
1795 | { |
1796 | struct amdgpu_device *adev = ring->adev; |
1797 | |
1798 | return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR); |
1799 | } |
1800 | |
1801 | /** |
1802 | * vcn_v3_0_dec_ring_get_wptr - get write pointer |
1803 | * |
1804 | * @ring: amdgpu_ring pointer |
1805 | * |
1806 | * Returns the current hardware write pointer |
1807 | */ |
1808 | static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring) |
1809 | { |
1810 | struct amdgpu_device *adev = ring->adev; |
1811 | |
1812 | if (ring->use_doorbell) |
1813 | return *ring->wptr_cpu_addr; |
1814 | else |
1815 | return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR); |
1816 | } |
1817 | |
1818 | /** |
1819 | * vcn_v3_0_dec_ring_set_wptr - set write pointer |
1820 | * |
1821 | * @ring: amdgpu_ring pointer |
1822 | * |
1823 | * Commits the write pointer to the hardware |
1824 | */ |
1825 | static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) |
1826 | { |
1827 | struct amdgpu_device *adev = ring->adev; |
1828 | volatile struct amdgpu_fw_shared *fw_shared; |
1829 | |
1830 | if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { |
1831 | /* whenever we update RBC_RB_WPTR, save the wptr in shared rb.wptr and scratch2 */ |
1832 | fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr; |
1833 | fw_shared->rb.wptr = lower_32_bits(ring->wptr); |
1834 | WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2, |
1835 | lower_32_bits(ring->wptr)); |
1836 | } |
1837 | |
1838 | if (ring->use_doorbell) { |
1839 | *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); |
1840 | WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); |
1841 | } else { |
1842 | WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); |
1843 | } |
1844 | } |
1845 | |
1846 | static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { |
1847 | .type = AMDGPU_RING_TYPE_VCN_DEC, |
1848 | .align_mask = 0x3f, |
1849 | .nop = VCN_DEC_SW_CMD_NO_OP, |
1850 | .secure_submission_supported = true, |
1851 | .get_rptr = vcn_v3_0_dec_ring_get_rptr, |
1852 | .get_wptr = vcn_v3_0_dec_ring_get_wptr, |
1853 | .set_wptr = vcn_v3_0_dec_ring_set_wptr, |
1854 | .emit_frame_size = |
1855 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + |
1856 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + |
1857 | VCN_SW_RING_EMIT_FRAME_SIZE, |
1858 | .emit_ib_size = 5, /* vcn_dec_sw_ring_emit_ib */ |
1859 | .emit_ib = vcn_dec_sw_ring_emit_ib, |
1860 | .emit_fence = vcn_dec_sw_ring_emit_fence, |
1861 | .emit_vm_flush = vcn_dec_sw_ring_emit_vm_flush, |
1862 | .test_ring = amdgpu_vcn_dec_sw_ring_test_ring, |
1863 | .test_ib = NULL, /* amdgpu_vcn_dec_sw_ring_test_ib */ |
1864 | .insert_nop = amdgpu_ring_insert_nop, |
1865 | .insert_end = vcn_dec_sw_ring_insert_end, |
1866 | .pad_ib = amdgpu_ring_generic_pad_ib, |
1867 | .begin_use = amdgpu_vcn_ring_begin_use, |
1868 | .end_use = amdgpu_vcn_ring_end_use, |
1869 | .emit_wreg = vcn_dec_sw_ring_emit_wreg, |
1870 | .emit_reg_wait = vcn_dec_sw_ring_emit_reg_wait, |
1871 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, |
1872 | }; |
1873 | |
1874 | static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, |
1875 | struct amdgpu_job *job) |
1876 | { |
1877 | struct drm_gpu_scheduler **scheds; |
1878 | |
1879 | /* The create msg must be in the first IB submitted */ |
1880 | if (atomic_read(&job->base.entity->fence_seq)) |
1881 | return -EINVAL; |
1882 | |
1883 | /* if VCN0 is harvested, we can't support AV1 */ |
1884 | if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) |
1885 | return -EINVAL; |
1886 | |
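/* pin this job's entity to the first VCN decode scheduler (instance 0) */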
1887 | scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] |
1888 | [AMDGPU_RING_PRIO_DEFAULT].sched; |
1889 | drm_sched_entity_modify_sched(job->base.entity, scheds, 1); |
1890 | return 0; |
1891 | } |
1892 | |
1893 | static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, |
1894 | uint64_t addr) |
1895 | { |
1896 | struct ttm_operation_ctx ctx = { false, false }; |
1897 | struct amdgpu_bo_va_mapping *map; |
1898 | uint32_t *msg, num_buffers; |
1899 | struct amdgpu_bo *bo; |
1900 | uint64_t start, end; |
1901 | unsigned int i; |
1902 | void *ptr; |
1903 | int r; |
1904 | |
1905 | addr &= AMDGPU_GMC_HOLE_MASK; |
1906 | r = amdgpu_cs_find_mapping(p, addr, &bo, &map); |
1907 | if (r) { |
1908 | DRM_ERROR("Can't find BO for addr 0x%08Lx\n" , addr); |
1909 | return r; |
1910 | } |
1911 | |
1912 | start = map->start * AMDGPU_GPU_PAGE_SIZE; |
1913 | end = (map->last + 1) * AMDGPU_GPU_PAGE_SIZE; |
1914 | if (addr & 0x7) { |
1915 | DRM_ERROR("VCN messages must be 8 byte aligned!\n" ); |
1916 | return -EINVAL; |
1917 | } |
1918 | |
1919 | bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; |
1920 | amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); |
1921 | r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); |
1922 | if (r) { |
1923 | DRM_ERROR("Failed validating the VCN message BO (%d)!\n" , r); |
1924 | return r; |
1925 | } |
1926 | |
1927 | r = amdgpu_bo_kmap(bo, &ptr); |
1928 | if (r) { |
1929 | DRM_ERROR("Failed mapping the VCN message (%d)!\n" , r); |
1930 | return r; |
1931 | } |
1932 | |
1933 | msg = ptr + addr - start; |
1934 | |
1935 | /* Check length */ |
1936 | if (msg[1] > end - addr) { |
1937 | r = -EINVAL; |
1938 | goto out; |
1939 | } |
1940 | |
1941 | if (msg[3] != RDECODE_MSG_CREATE) |
1942 | goto out; |
1943 | |
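/* walk the create message's buffer descriptors; codecs other than
 * H.264/HEVC/VP9 are limited to instance 0 below
 */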
1944 | num_buffers = msg[2]; |
1945 | for (i = 0, msg = &msg[6]; i < num_buffers; ++i, msg += 4) { |
1946 | uint32_t offset, size, *create; |
1947 | |
1948 | if (msg[0] != RDECODE_MESSAGE_CREATE) |
1949 | continue; |
1950 | |
1951 | offset = msg[1]; |
1952 | size = msg[2]; |
1953 | |
1954 | if (offset + size > end) { |
1955 | r = -EINVAL; |
1956 | goto out; |
1957 | } |
1958 | |
1959 | create = ptr + addr + offset - start; |
1960 | |
1961 | /* H.264, HEVC and VP9 can run on any instance */ |
1962 | if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) |
1963 | continue; |
1964 | |
1965 | r = vcn_v3_0_limit_sched(p, job); |
1966 | if (r) |
1967 | goto out; |
1968 | } |
1969 | |
1970 | out: |
1971 | amdgpu_bo_kunmap(bo); |
1972 | return r; |
1973 | } |
1974 | |
1975 | static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, |
1976 | struct amdgpu_job *job, |
1977 | struct amdgpu_ib *ib) |
1978 | { |
1979 | struct amdgpu_ring *ring = amdgpu_job_ring(job); |
1980 | uint32_t msg_lo = 0, msg_hi = 0; |
1981 | unsigned i; |
1982 | int r; |
1983 | |
1984 | /* The first instance can decode anything */ |
1985 | if (!ring->me) |
1986 | return 0; |
1987 | |
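/* scan the IB for PACKET0 writes to the decode DATA0/DATA1/CMD
 * registers; a CMD value of 0 carries a decode message whose GPU
 * address is DATA1:DATA0
 */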
1988 | for (i = 0; i < ib->length_dw; i += 2) { |
1989 | uint32_t reg = amdgpu_ib_get_value(ib, i); |
1990 | uint32_t val = amdgpu_ib_get_value(ib, i + 1); |
1991 | |
1992 | if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data0, 0)) { |
1993 | msg_lo = val; |
1994 | } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.data1, 0)) { |
1995 | msg_hi = val; |
1996 | } else if (reg == PACKET0(p->adev->vcn.inst[ring->me].internal.cmd, 0) && |
1997 | val == 0) { |
1998 | r = vcn_v3_0_dec_msg(p, job, |
1999 | ((u64)msg_hi) << 32 | msg_lo); |
2000 | if (r) |
2001 | return r; |
2002 | } |
2003 | } |
2004 | return 0; |
2005 | } |
2006 | |
2007 | static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { |
2008 | .type = AMDGPU_RING_TYPE_VCN_DEC, |
2009 | .align_mask = 0xf, |
2010 | .secure_submission_supported = true, |
2011 | .get_rptr = vcn_v3_0_dec_ring_get_rptr, |
2012 | .get_wptr = vcn_v3_0_dec_ring_get_wptr, |
2013 | .set_wptr = vcn_v3_0_dec_ring_set_wptr, |
2014 | .patch_cs_in_place = vcn_v3_0_ring_patch_cs_in_place, |
2015 | .emit_frame_size = |
2016 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + |
2017 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + |
2018 | 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */ |
2019 | 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */ |
2020 | 6, |
2021 | .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */ |
2022 | .emit_ib = vcn_v2_0_dec_ring_emit_ib, |
2023 | .emit_fence = vcn_v2_0_dec_ring_emit_fence, |
2024 | .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, |
2025 | .test_ring = vcn_v2_0_dec_ring_test_ring, |
2026 | .test_ib = amdgpu_vcn_dec_ring_test_ib, |
2027 | .insert_nop = vcn_v2_0_dec_ring_insert_nop, |
2028 | .insert_start = vcn_v2_0_dec_ring_insert_start, |
2029 | .insert_end = vcn_v2_0_dec_ring_insert_end, |
2030 | .pad_ib = amdgpu_ring_generic_pad_ib, |
2031 | .begin_use = amdgpu_vcn_ring_begin_use, |
2032 | .end_use = amdgpu_vcn_ring_end_use, |
2033 | .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, |
2034 | .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, |
2035 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, |
2036 | }; |
2037 | |
2038 | /** |
2039 | * vcn_v3_0_enc_ring_get_rptr - get enc read pointer |
2040 | * |
2041 | * @ring: amdgpu_ring pointer |
2042 | * |
2043 | * Returns the current hardware enc read pointer |
2044 | */ |
2045 | static uint64_t vcn_v3_0_enc_ring_get_rptr(struct amdgpu_ring *ring) |
2046 | { |
2047 | struct amdgpu_device *adev = ring->adev; |
2048 | |
2049 | if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) |
2050 | return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR); |
2051 | else |
2052 | return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2); |
2053 | } |
2054 | |
2055 | /** |
2056 | * vcn_v3_0_enc_ring_get_wptr - get enc write pointer |
2057 | * |
2058 | * @ring: amdgpu_ring pointer |
2059 | * |
2060 | * Returns the current hardware enc write pointer |
2061 | */ |
2062 | static uint64_t vcn_v3_0_enc_ring_get_wptr(struct amdgpu_ring *ring) |
2063 | { |
2064 | struct amdgpu_device *adev = ring->adev; |
2065 | |
2066 | if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { |
2067 | if (ring->use_doorbell) |
2068 | return *ring->wptr_cpu_addr; |
2069 | else |
2070 | return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR); |
2071 | } else { |
2072 | if (ring->use_doorbell) |
2073 | return *ring->wptr_cpu_addr; |
2074 | else |
2075 | return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2); |
2076 | } |
2077 | } |
2078 | |
2079 | /** |
2080 | * vcn_v3_0_enc_ring_set_wptr - set enc write pointer |
2081 | * |
2082 | * @ring: amdgpu_ring pointer |
2083 | * |
2084 | * Commits the enc write pointer to the hardware |
2085 | */ |
2086 | static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring) |
2087 | { |
2088 | struct amdgpu_device *adev = ring->adev; |
2089 | |
2090 | if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) { |
2091 | if (ring->use_doorbell) { |
2092 | *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); |
2093 | WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); |
2094 | } else { |
2095 | WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); |
2096 | } |
2097 | } else { |
2098 | if (ring->use_doorbell) { |
2099 | *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); |
2100 | WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); |
2101 | } else { |
2102 | WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); |
2103 | } |
2104 | } |
2105 | } |
2106 | |
2107 | static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = { |
2108 | .type = AMDGPU_RING_TYPE_VCN_ENC, |
2109 | .align_mask = 0x3f, |
2110 | .nop = VCN_ENC_CMD_NO_OP, |
2111 | .get_rptr = vcn_v3_0_enc_ring_get_rptr, |
2112 | .get_wptr = vcn_v3_0_enc_ring_get_wptr, |
2113 | .set_wptr = vcn_v3_0_enc_ring_set_wptr, |
2114 | .emit_frame_size = |
2115 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + |
2116 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + |
2117 | 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ |
2118 | 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ |
2119 | 1, /* vcn_v2_0_enc_ring_insert_end */ |
2120 | .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ |
2121 | .emit_ib = vcn_v2_0_enc_ring_emit_ib, |
2122 | .emit_fence = vcn_v2_0_enc_ring_emit_fence, |
2123 | .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush, |
2124 | .test_ring = amdgpu_vcn_enc_ring_test_ring, |
2125 | .test_ib = amdgpu_vcn_enc_ring_test_ib, |
2126 | .insert_nop = amdgpu_ring_insert_nop, |
2127 | .insert_end = vcn_v2_0_enc_ring_insert_end, |
2128 | .pad_ib = amdgpu_ring_generic_pad_ib, |
2129 | .begin_use = amdgpu_vcn_ring_begin_use, |
2130 | .end_use = amdgpu_vcn_ring_end_use, |
2131 | .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, |
2132 | .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, |
2133 | .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, |
2134 | }; |
2135 | |
2136 | static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) |
2137 | { |
2138 | int i; |
2139 | |
2140 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
2141 | if (adev->vcn.harvest_config & (1 << i)) |
2142 | continue; |
2143 | |
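/* the software decode ring is compile-time disabled (DEC_SW_RING_ENABLED),
 * so the register-based decode ring funcs are used
 */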
2144 | if (!DEC_SW_RING_ENABLED) |
2145 | adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_ring_vm_funcs; |
2146 | else |
2147 | adev->vcn.inst[i].ring_dec.funcs = &vcn_v3_0_dec_sw_ring_vm_funcs; |
2148 | adev->vcn.inst[i].ring_dec.me = i; |
2149 | } |
2150 | } |
2151 | |
2152 | static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev) |
2153 | { |
2154 | int i, j; |
2155 | |
2156 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
2157 | if (adev->vcn.harvest_config & (1 << i)) |
2158 | continue; |
2159 | |
2160 | for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j) { |
2161 | adev->vcn.inst[i].ring_enc[j].funcs = &vcn_v3_0_enc_ring_vm_funcs; |
2162 | adev->vcn.inst[i].ring_enc[j].me = i; |
2163 | } |
2164 | } |
2165 | } |
2166 | |
2167 | static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block) |
2168 | { |
2169 | struct amdgpu_device *adev = ip_block->adev; |
2170 | int i, ret = 1; |
2171 | |
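/* idle only if every non-harvested instance reports UVD_STATUS__IDLE */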
2172 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
2173 | if (adev->vcn.harvest_config & (1 << i)) |
2174 | continue; |
2175 | |
2176 | ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE); |
2177 | } |
2178 | |
2179 | return ret; |
2180 | } |
2181 | |
2182 | static int vcn_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block) |
2183 | { |
2184 | struct amdgpu_device *adev = ip_block->adev; |
2185 | int i, ret = 0; |
2186 | |
2187 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
2188 | if (adev->vcn.harvest_config & (1 << i)) |
2189 | continue; |
2190 | |
2191 | ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, |
2192 | UVD_STATUS__IDLE); |
2193 | if (ret) |
2194 | return ret; |
2195 | } |
2196 | |
2197 | return ret; |
2198 | } |
2199 | |
2200 | static int vcn_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, |
2201 | enum amd_clockgating_state state) |
2202 | { |
2203 | struct amdgpu_device *adev = ip_block->adev; |
2204 | bool enable = state == AMD_CG_STATE_GATE; |
2205 | int i; |
2206 | |
2207 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
2208 | struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; |
2209 | if (adev->vcn.harvest_config & (1 << i)) |
2210 | continue; |
2211 | |
2212 | if (enable) { |
2213 | if (RREG32_SOC15(VCN, i, mmUVD_STATUS) != UVD_STATUS__IDLE) |
2214 | return -EBUSY; |
2215 | vcn_v3_0_enable_clock_gating(vinst); |
2216 | } else { |
2217 | vcn_v3_0_disable_clock_gating(vinst); |
2218 | } |
2219 | } |
2220 | |
2221 | return 0; |
2222 | } |
2223 | |
2224 | static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst, |
2225 | enum amd_powergating_state state) |
2226 | { |
2227 | struct amdgpu_device *adev = vinst->adev; |
2228 | int ret = 0; |
2229 | |
2230 | /* for SRIOV, guest should not control VCN Power-gating |
2231 | * MMSCH FW should control Power-gating and clock-gating |
2232 | * guest should avoid touching CGC and PG |
2233 | */ |
2234 | if (amdgpu_sriov_vf(adev)) { |
2235 | vinst->cur_state = AMD_PG_STATE_UNGATE; |
2236 | return 0; |
2237 | } |
2238 | |
2239 | if (state == vinst->cur_state) |
2240 | return 0; |
2241 | |
2242 | if (state == AMD_PG_STATE_GATE) |
2243 | ret = vcn_v3_0_stop(vinst); |
2244 | else |
2245 | ret = vcn_v3_0_start(vinst); |
2246 | |
2247 | if (!ret) |
2248 | vinst->cur_state = state; |
2249 | |
2250 | return ret; |
2251 | } |
2252 | |
2253 | static int vcn_v3_0_set_interrupt_state(struct amdgpu_device *adev, |
2254 | struct amdgpu_irq_src *source, |
2255 | unsigned type, |
2256 | enum amdgpu_interrupt_state state) |
2257 | { |
2258 | return 0; |
2259 | } |
2260 | |
2261 | static int vcn_v3_0_process_interrupt(struct amdgpu_device *adev, |
2262 | struct amdgpu_irq_src *source, |
2263 | struct amdgpu_iv_entry *entry) |
2264 | { |
2265 | uint32_t ip_instance; |
2266 | |
2267 | switch (entry->client_id) { |
2268 | case SOC15_IH_CLIENTID_VCN: |
2269 | ip_instance = 0; |
2270 | break; |
2271 | case SOC15_IH_CLIENTID_VCN1: |
2272 | ip_instance = 1; |
2273 | break; |
2274 | default: |
2275 | DRM_ERROR("Unhandled client id: %d\n" , entry->client_id); |
2276 | return 0; |
2277 | } |
2278 | |
2279 | DRM_DEBUG("IH: VCN TRAP\n" ); |
2280 | |
2281 | switch (entry->src_id) { |
2282 | case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT: |
2283 | amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec); |
2284 | break; |
2285 | case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE: |
2286 | amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]); |
2287 | break; |
2288 | case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY: |
2289 | amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]); |
2290 | break; |
2291 | default: |
2292 | DRM_ERROR("Unhandled interrupt: %d %d\n" , |
2293 | entry->src_id, entry->src_data[0]); |
2294 | break; |
2295 | } |
2296 | |
2297 | return 0; |
2298 | } |
2299 | |
2300 | static const struct amdgpu_irq_src_funcs vcn_v3_0_irq_funcs = { |
2301 | .set = vcn_v3_0_set_interrupt_state, |
2302 | .process = vcn_v3_0_process_interrupt, |
2303 | }; |
2304 | |
2305 | static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev) |
2306 | { |
2307 | int i; |
2308 | |
2309 | for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { |
2310 | if (adev->vcn.harvest_config & (1 << i)) |
2311 | continue; |
2312 | |
2313 | adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1; |
2314 | adev->vcn.inst[i].irq.funcs = &vcn_v3_0_irq_funcs; |
2315 | } |
2316 | } |
2317 | |
2318 | static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p) |
2319 | { |
2320 | struct amdgpu_device *adev = ip_block->adev; |
2321 | int i, j; |
2322 | uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); |
2323 | uint32_t inst_off; |
2324 | bool is_powered; |
2325 | |
2326 | if (!adev->vcn.ip_dump) |
2327 | return; |
2328 | |
2329 | drm_printf(p, f: "num_instances:%d\n" , adev->vcn.num_vcn_inst); |
2330 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
2331 | if (adev->vcn.harvest_config & (1 << i)) { |
2332 | drm_printf(p, f: "\nHarvested Instance:VCN%d Skipping dump\n" , i); |
2333 | continue; |
2334 | } |
2335 | |
2336 | inst_off = i * reg_count; |
2337 | is_powered = (adev->vcn.ip_dump[inst_off] & |
2338 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; |
2339 | |
2340 | if (is_powered) { |
2341 | drm_printf(p, f: "\nActive Instance:VCN%d\n" , i); |
2342 | for (j = 0; j < reg_count; j++) |
2343 | drm_printf(p, f: "%-50s \t 0x%08x\n" , vcn_reg_list_3_0[j].reg_name, |
2344 | adev->vcn.ip_dump[inst_off + j]); |
2345 | } else { |
2346 | drm_printf(p, f: "\nInactive Instance:VCN%d\n" , i); |
2347 | } |
2348 | } |
2349 | } |
2350 | |
2351 | static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block) |
2352 | { |
2353 | struct amdgpu_device *adev = ip_block->adev; |
2354 | int i, j; |
2355 | bool is_powered; |
2356 | uint32_t inst_off; |
2357 | uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0); |
2358 | |
2359 | if (!adev->vcn.ip_dump) |
2360 | return; |
2361 | |
2362 | for (i = 0; i < adev->vcn.num_vcn_inst; i++) { |
2363 | if (adev->vcn.harvest_config & (1 << i)) |
2364 | continue; |
2365 | |
2366 | inst_off = i * reg_count; |
2367 | /* mmUVD_POWER_STATUS is always readable and is first element of the array */ |
2368 | adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS); |
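/* a UVD_POWER_STATUS value of 1 means the instance is power gated, so
 * the rest of its registers are not dumped
 */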
2369 | is_powered = (adev->vcn.ip_dump[inst_off] & |
2370 | UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1; |
2371 | |
2372 | if (is_powered) |
2373 | for (j = 1; j < reg_count; j++) |
2374 | adev->vcn.ip_dump[inst_off + j] = |
2375 | RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i)); |
2376 | } |
2377 | } |
2378 | |
2379 | static const struct amd_ip_funcs vcn_v3_0_ip_funcs = { |
2380 | .name = "vcn_v3_0" , |
2381 | .early_init = vcn_v3_0_early_init, |
2382 | .sw_init = vcn_v3_0_sw_init, |
2383 | .sw_fini = vcn_v3_0_sw_fini, |
2384 | .hw_init = vcn_v3_0_hw_init, |
2385 | .hw_fini = vcn_v3_0_hw_fini, |
2386 | .suspend = vcn_v3_0_suspend, |
2387 | .resume = vcn_v3_0_resume, |
2388 | .is_idle = vcn_v3_0_is_idle, |
2389 | .wait_for_idle = vcn_v3_0_wait_for_idle, |
2390 | .set_clockgating_state = vcn_v3_0_set_clockgating_state, |
2391 | .set_powergating_state = vcn_set_powergating_state, |
2392 | .dump_ip_state = vcn_v3_0_dump_ip_state, |
2393 | .print_ip_state = vcn_v3_0_print_ip_state, |
2394 | }; |
2395 | |
2396 | const struct amdgpu_ip_block_version vcn_v3_0_ip_block = { |
2397 | .type = AMD_IP_BLOCK_TYPE_VCN, |
2398 | .major = 3, |
2399 | .minor = 0, |
2400 | .rev = 0, |
2401 | .funcs = &vcn_v3_0_ip_funcs, |
2402 | }; |
2403 | |