/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_vpe.h"
#include "vpe_v6_1.h"
#include "soc15_common.h"
#include "ivsrcid/vpe/irqsrcs_vpe_6_1.h"
#include "vpe/vpe_6_1_0_offset.h"
#include "vpe/vpe_6_1_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vpe_6_1_0.bin");
MODULE_FIRMWARE("amdgpu/vpe_6_1_1.bin");

#define VPE_THREAD1_UCODE_OFFSET	0x8000

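/*
 * The collaborate-mode and 6.1.1-specific registers below are defined
 * locally; they do not appear in the generated vpe_6_1_0 offset/sh_mask
 * headers included above.
 */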
#define regVPEC_COLLABORATE_CNTL			0x0013
#define regVPEC_COLLABORATE_CNTL_BASE_IDX		0
#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN__SHIFT	0x0
#define VPEC_COLLABORATE_CNTL__COLLABORATE_MODE_EN_MASK		0x00000001L

#define regVPEC_COLLABORATE_CFG				0x0014
#define regVPEC_COLLABORATE_CFG_BASE_IDX		0
#define VPEC_COLLABORATE_CFG__MASTER_ID__SHIFT		0x0
#define VPEC_COLLABORATE_CFG__MASTER_EN__SHIFT		0x3
#define VPEC_COLLABORATE_CFG__SLAVE0_ID__SHIFT		0x4
#define VPEC_COLLABORATE_CFG__SLAVE0_EN__SHIFT		0x7
#define VPEC_COLLABORATE_CFG__MASTER_ID_MASK		0x00000007L
#define VPEC_COLLABORATE_CFG__MASTER_EN_MASK		0x00000008L
#define VPEC_COLLABORATE_CFG__SLAVE0_ID_MASK		0x00000070L
#define VPEC_COLLABORATE_CFG__SLAVE0_EN_MASK		0x00000080L

#define regVPEC_CNTL_6_1_1				0x0016
#define regVPEC_CNTL_6_1_1_BASE_IDX			0
#define regVPEC_QUEUE_RESET_REQ_6_1_1			0x002c
#define regVPEC_QUEUE_RESET_REQ_6_1_1_BASE_IDX		0
#define regVPEC_PUB_DUMMY2_6_1_1			0x004c
#define regVPEC_PUB_DUMMY2_6_1_1_BASE_IDX		0

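/*
 * Translate a VPEC register offset into an absolute MMIO offset for the
 * given instance, using the per-IP base address set up at init time.
 */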
static uint32_t vpe_v6_1_get_reg_offset(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset)
{
	uint32_t base;

	base = vpe->ring.adev->reg_offset[VPE_HWIP][inst][0];

	return base + offset;
}

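/*
 * Halt (or resume) the F32 microcontroller on every VPE instance by
 * toggling the HALT and TH1_RESET bits of VPEC_F32_CNTL.
 */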
static void vpe_v6_1_halt(struct amdgpu_vpe *vpe, bool halt)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t i, f32_cntl;

	for (i = 0; i < vpe->num_instances; i++) {
		f32_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL));
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, halt ? 1 : 0);
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, halt ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_F32_CNTL), f32_cntl);
	}
}

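/* Register the VPE trap interrupt source with the IH ring. */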
static int vpe_v6_1_irq_init(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
	int ret;

	ret = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_VPE,
				VPE_6_1_SRCID__VPE_TRAP,
				&adev->vpe.trap_irq);
	if (ret)
		return ret;

	return 0;
}

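/*
 * Enable or disable collaborate mode, in which two VPE instances work on
 * the same job: instance 0 is programmed as the master and instance 1 as
 * SLAVE0 in every instance's COLLABORATE_CFG register.
 */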
static void vpe_v6_1_set_collaborate_mode(struct amdgpu_vpe *vpe, bool enable)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t vpe_colla_cntl, vpe_colla_cfg, i;

	if (!vpe->collaborate_mode)
		return;

	for (i = 0; i < vpe->num_instances; i++) {
		vpe_colla_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL));
		vpe_colla_cntl = REG_SET_FIELD(vpe_colla_cntl, VPEC_COLLABORATE_CNTL,
					       COLLABORATE_MODE_EN, enable ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CNTL), vpe_colla_cntl);

		vpe_colla_cfg = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG));
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_ID, 0);
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, MASTER_EN, enable ? 1 : 0);
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_ID, 1);
		vpe_colla_cfg = REG_SET_FIELD(vpe_colla_cfg, VPEC_COLLABORATE_CFG, SLAVE0_EN, enable ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_COLLABORATE_CFG), vpe_colla_cfg);
	}
}

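/*
 * Load the VPE microcode. With PSP-backed loading the firmware image is
 * handed off to the PSP; otherwise both ucode threads are written to
 * instruction memory directly through VPEC_UCODE_ADDR/DATA.
 */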
static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	const struct vpe_firmware_header_v1_0 *vpe_hdr;
	const __le32 *data;
	uint32_t ucode_offset[2], ucode_size[2];
	uint32_t i, j, size_dw;
	uint32_t ret;

	/* disable UMSCH_INT_ENABLE */
	for (j = 0; j < vpe->num_instances; j++) {
		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
			ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1));
		else
			ret = RREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL));

		ret = REG_SET_FIELD(ret, VPEC_CNTL, UMSCH_INT_ENABLE, 0);

		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
			WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL_6_1_1), ret);
		else
			WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
	}

	/*
	 * For VPE 6.1.1, only the master's offset still needs to be
	 * programmed; the PSP applies it to the slave as well. Instance 0
	 * is used as the master here.
	 */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		uint32_t f32_offset, f32_cntl;

		f32_offset = vpe_get_reg_offset(vpe, 0, regVPEC_F32_CNTL);
		f32_cntl = RREG32(f32_offset);
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, HALT, 0);
		f32_cntl = REG_SET_FIELD(f32_cntl, VPEC_F32_CNTL, TH1_RESET, 0);

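		/*
		 * Presumably the PSP consumes these two dwords to un-halt
		 * the engine once the firmware is loaded: stash the
		 * F32_CNTL offset and its release value in the shared
		 * command buffer before updating SRAM.
		 */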
		adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
		adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;

		amdgpu_vpe_psp_update_sram(adev);
		vpe_v6_1_set_collaborate_mode(vpe, true);
		amdgpu_vpe_configure_dpm(vpe);

		return 0;
	}

	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;

	/* Thread 0 (command thread) ucode offset/size */
	ucode_offset[0] = le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes);
	ucode_size[0] = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes);
	/* Thread 1 (control thread) ucode offset/size */
	ucode_offset[1] = le32_to_cpu(vpe_hdr->ctl_ucode_offset);
	ucode_size[1] = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes);

	vpe_v6_1_halt(vpe, true);

	for (j = 0; j < vpe->num_instances; j++) {
		for (i = 0; i < 2; i++) {
			if (i > 0)
				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), VPE_THREAD1_UCODE_OFFSET);
			else
				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_ADDR), 0);

			data = (const __le32 *)(adev->vpe.fw->data + ucode_offset[i]);
			size_dw = ucode_size[i] / sizeof(__le32);

			while (size_dw--) {
				if (amdgpu_emu_mode && size_dw % 500 == 0)
					msleep(1);
				WREG32(vpe_get_reg_offset(vpe, j, regVPEC_UCODE_DATA), le32_to_cpup(data++));
			}
		}
	}

	vpe_v6_1_halt(vpe, false);
	vpe_v6_1_set_collaborate_mode(vpe, true);
	amdgpu_vpe_configure_dpm(vpe);

	return 0;
}

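/*
 * Program the ring buffer, read/write pointers, and doorbell for queue 0
 * of every instance, then enable the RB and IB and run a ring test.
 */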
static int vpe_v6_1_ring_start(struct amdgpu_vpe *vpe)
{
	struct amdgpu_ring *ring = &vpe->ring;
	struct amdgpu_device *adev = ring->adev;
	uint32_t doorbell, doorbell_offset;
	uint32_t rb_bufsz, rb_cntl;
	uint32_t ib_cntl, i;
	int ret;

	for (i = 0; i < vpe->num_instances; i++) {
		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL));
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_PRIV, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_VMID, 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR), 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_HI), 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), 0);

		/* set the wb address whether it's enabled or not */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_LO),
		       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_RPTR_ADDR_HI),
		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);

		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);

		ring->wptr = 0;

		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 1);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
		/* set minor_ptr_update back to 0 once wptr is programmed */
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_MINOR_PTR_UPDATE), 0);

		/* each instance gets its own block of 4 doorbells after ring->doorbell_index */
		doorbell_offset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET));
		doorbell_offset = REG_SET_FIELD(doorbell_offset, VPEC_QUEUE0_DOORBELL_OFFSET, OFFSET, ring->doorbell_index + i * 4);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL_OFFSET), doorbell_offset);

		doorbell = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL));
		doorbell = REG_SET_FIELD(doorbell, VPEC_QUEUE0_DOORBELL, ENABLE, ring->use_doorbell ? 1 : 0);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_DOORBELL), doorbell);

		adev->nbio.funcs->vpe_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index + i * 4, 4);

		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, VPEC_QUEUE0_RB_CNTL, RB_ENABLE, 1);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_RB_CNTL), rb_cntl);

		ib_cntl = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL));
		ib_cntl = REG_SET_FIELD(ib_cntl, VPEC_QUEUE0_IB_CNTL, IB_ENABLE, 1);
		WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE0_IB_CNTL), ib_cntl);
	}

	ret = amdgpu_ring_test_helper(ring);
	if (ret)
		return ret;

	return 0;
}

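/*
 * Request a reset of queue 0 on every instance and wait for the request
 * bit to clear, then mark the ring's scheduler as not ready.
 */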
static int vpe_v6_1_ring_stop(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = vpe->ring.adev;
	uint32_t queue_reset, i;
	int ret = 0;

	for (i = 0; i < vpe->num_instances; i++) {
		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
			queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1));
		else
			queue_reset = RREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ));

		queue_reset = REG_SET_FIELD(queue_reset, VPEC_QUEUE_RESET_REQ, QUEUE0_RESET, 1);

		if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1)) {
			WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ_6_1_1), queue_reset);
			ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ_6_1_1, 0,
						 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
		} else {
			WREG32(vpe_get_reg_offset(vpe, i, regVPEC_QUEUE_RESET_REQ), queue_reset);
			ret = SOC15_WAIT_ON_RREG(VPE, i, regVPEC_QUEUE_RESET_REQ, 0,
						 VPEC_QUEUE_RESET_REQ__QUEUE0_RESET_MASK);
		}

		if (ret)
			dev_err(adev->dev, "VPE queue reset failed\n");
	}

	vpe->ring.sched.ready = false;

	return ret;
}

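/* Enable or disable the VPE trap interrupt via VPEC_CNTL. */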
static int vpe_v6_1_set_trap_irq_state(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned int type,
				       enum amdgpu_interrupt_state state)
{
	struct amdgpu_vpe *vpe = &adev->vpe;
	uint32_t vpe_cntl;

	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
		vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1));
	else
		vpe_cntl = RREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL));

	vpe_cntl = REG_SET_FIELD(vpe_cntl, VPEC_CNTL, TRAP_ENABLE,
				 state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);

	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
		WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL_6_1_1), vpe_cntl);
	else
		WREG32(vpe_get_reg_offset(vpe, 0, regVPEC_CNTL), vpe_cntl);

	return 0;
}

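/* Handle a VPE trap interrupt by kicking fence processing on the ring. */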
static int vpe_v6_1_process_trap_irq(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *source,
				     struct amdgpu_iv_entry *entry)
{
	dev_dbg(adev->dev, "IH: VPE trap\n");

	switch (entry->client_id) {
	case SOC21_IH_CLIENTID_VPE:
		amdgpu_fence_process(&adev->vpe.ring);
		break;
	default:
		break;
	}

	return 0;
}

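/*
 * Fill in the register map used by the shared amdgpu_vpe code. The DPM
 * tuning entries point at QUEUEn dummy registers, which appear to be
 * repurposed as scratch space for the DPM parameters.
 */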
static int vpe_v6_1_set_regs(struct amdgpu_vpe *vpe)
{
	struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);

	vpe->regs.queue0_rb_rptr_lo = regVPEC_QUEUE0_RB_RPTR;
	vpe->regs.queue0_rb_rptr_hi = regVPEC_QUEUE0_RB_RPTR_HI;
	vpe->regs.queue0_rb_wptr_lo = regVPEC_QUEUE0_RB_WPTR;
	vpe->regs.queue0_rb_wptr_hi = regVPEC_QUEUE0_RB_WPTR_HI;
	vpe->regs.queue0_preempt = regVPEC_QUEUE0_PREEMPT;

	if (amdgpu_ip_version(adev, VPE_HWIP, 0) == IP_VERSION(6, 1, 1))
		vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2_6_1_1;
	else
		vpe->regs.dpm_enable = regVPEC_PUB_DUMMY2;

	vpe->regs.dpm_pratio = regVPEC_QUEUE6_DUMMY4;
	vpe->regs.dpm_request_interval = regVPEC_QUEUE5_DUMMY3;
	vpe->regs.dpm_decision_threshold = regVPEC_QUEUE5_DUMMY4;
	vpe->regs.dpm_busy_clamp_threshold = regVPEC_QUEUE7_DUMMY2;
	vpe->regs.dpm_idle_clamp_threshold = regVPEC_QUEUE7_DUMMY3;
	vpe->regs.dpm_request_lv = regVPEC_QUEUE7_DUMMY1;
	vpe->regs.context_indicator = regVPEC_QUEUE6_DUMMY3;

	return 0;
}

375 | |
376 | static const struct vpe_funcs vpe_v6_1_funcs = { |
377 | .get_reg_offset = vpe_v6_1_get_reg_offset, |
378 | .set_regs = vpe_v6_1_set_regs, |
379 | .irq_init = vpe_v6_1_irq_init, |
380 | .init_microcode = amdgpu_vpe_init_microcode, |
381 | .load_microcode = vpe_v6_1_load_microcode, |
382 | .ring_init = amdgpu_vpe_ring_init, |
383 | .ring_start = vpe_v6_1_ring_start, |
384 | .ring_stop = vpe_v_6_1_ring_stop, |
385 | .ring_fini = amdgpu_vpe_ring_fini, |
386 | }; |
387 | |
388 | static const struct amdgpu_irq_src_funcs vpe_v6_1_trap_irq_funcs = { |
389 | .set = vpe_v6_1_set_trap_irq_state, |
390 | .process = vpe_v6_1_process_trap_irq, |
391 | }; |
392 | |
393 | void vpe_v6_1_set_funcs(struct amdgpu_vpe *vpe) |
394 | { |
395 | vpe->funcs = &vpe_v6_1_funcs; |
396 | vpe->trap_irq.funcs = &vpe_v6_1_trap_irq_funcs; |
397 | } |
398 | |