/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "soc15.h"
#include "soc15d.h"
#include "soc15_common.h"
#include "vega10_enum.h"

#include "v9_structs.h"

#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"

#include "gfx_v9_4_3.h"
#include "amdgpu_xcp.h"
#include "amdgpu_aca.h"

MODULE_FIRMWARE("amdgpu/gc_9_4_3_mec.bin");
MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");

#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L

#define GOLDEN_GB_ADDR_CONFIG 0x2a114042
#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301

#define mmSMNAID_XCD0_MCA_SMU 0x36430400	/* SMN AID XCD0 */
#define mmSMNAID_XCD1_MCA_SMU 0x38430400	/* SMN AID XCD1 */
#define mmSMNXCD_XCD0_MCA_SMU 0x40430400	/* SMN XCD XCD0 */

struct amdgpu_gfx_ras gfx_v9_4_3_ras;

static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev);
static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev);
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
				  struct amdgpu_cu_info *cu_info);

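/*
 * Build a KIQ SET_RESOURCES packet (8 DWs, matching .set_resources_size
 * below): it hands the KIQ the mask of compute queues it is allowed to
 * schedule. The GWS, OAC and GDS resource masks are unused and left zero.
 */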
static void gfx_v9_4_3_kiq_set_resources(struct amdgpu_ring *kiq_ring,
					 uint64_t queue_mask)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_SET_RESOURCES_VMID_MASK(0) |
			  /* vmid_mask:0, queue_type:0 (KIQ) */
			  PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
	amdgpu_ring_write(kiq_ring,
			  lower_32_bits(queue_mask));	/* queue mask lo */
	amdgpu_ring_write(kiq_ring,
			  upper_32_bits(queue_mask));	/* queue mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
}

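/*
 * Emit a MAP_QUEUES packet telling the KIQ to activate @ring's hardware
 * queue: MQD address, wptr polling address, doorbell offset, plus pipe and
 * queue selection. ME is encoded relative to the compute engines, since
 * mec0 is me1 from the CP's point of view.
 */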
static void gfx_v9_4_3_kiq_map_queues(struct amdgpu_ring *kiq_ring,
				      struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = kiq_ring->adev;
	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
			  PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
			  PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
			  /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
			  /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
			  PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
			  /* num_queues: must be 1 */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}

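/*
 * Emit an UNMAP_QUEUES packet for @ring. For PREEMPT_QUEUES_NO_UNMAP the
 * trailing DWs carry a fence address and sequence number the CP signals on
 * completion; otherwise they are don't-cares and are written as zero.
 */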
static void gfx_v9_4_3_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					enum amdgpu_unmap_queues_action action,
					u64 gpu_addr, u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
			  PACKET3_UNMAP_QUEUES_ACTION(action) |
			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));

	if (action == PREEMPT_QUEUES_NO_UNMAP) {
		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
		amdgpu_ring_write(kiq_ring, seq);
	} else {
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
		amdgpu_ring_write(kiq_ring, 0);
	}
}

static void gfx_v9_4_3_kiq_query_status(struct amdgpu_ring *kiq_ring,
					struct amdgpu_ring *ring,
					u64 addr,
					u64 seq)
{
	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
			  PACKET3_QUERY_STATUS_COMMAND(2));
	/* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
			  PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
}

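/*
 * Emit a one-payload-DW INVALIDATE_TLBS packet asking the CP to flush the
 * TLB entries belonging to @pasid, optionally across all VM hubs.
 */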
static void gfx_v9_4_3_kiq_invalidate_tlbs(struct amdgpu_ring *kiq_ring,
					   uint16_t pasid, uint32_t flush_type,
					   bool all_hub)
{
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			  PACKET3_INVALIDATE_TLBS_ALL_HUB(all_hub) |
			  PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			  PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
}

static const struct kiq_pm4_funcs gfx_v9_4_3_kiq_pm4_funcs = {
	.kiq_set_resources = gfx_v9_4_3_kiq_set_resources,
	.kiq_map_queues = gfx_v9_4_3_kiq_map_queues,
	.kiq_unmap_queues = gfx_v9_4_3_kiq_unmap_queues,
	.kiq_query_status = gfx_v9_4_3_kiq_query_status,
	.kiq_invalidate_tlbs = gfx_v9_4_3_kiq_invalidate_tlbs,
	.set_resources_size = 8,
	.map_queues_size = 7,
	.unmap_queues_size = 6,
	.query_status_size = 7,
	.invalidate_tlbs_size = 2,
};

static void gfx_v9_4_3_set_kiq_pm4_funcs(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		adev->gfx.kiq[i].pmf = &gfx_v9_4_3_kiq_pm4_funcs;
}

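/*
 * Apply per-XCC golden register settings: GB_ADDR_CONFIG is forced to the
 * known-good value, and the TCP_UTCL1 tweak chosen depends on the silicon
 * revision.
 */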
static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
{
	int i, num_xcc, dev_inst;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		dev_inst = GET_INST(GC, i);

		WREG32_SOC15(GC, dev_inst, regGB_ADDR_CONFIG,
			     GOLDEN_GB_ADDR_CONFIG);
		/* Golden settings applied by driver for ASIC with rev_id 0 */
		if (adev->rev_id == 0) {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
					      REDUCE_FIFO_DEPTH_BY_2, 2);
		} else {
			WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
					      SPARE, 0x1);
		}
	}
}

static void gfx_v9_4_3_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
					 bool wc, uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(eng_sel) |
			  WRITE_DATA_DST_SEL(0) |
			  (wc ? WR_CONFIRM : 0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}

static void gfx_v9_4_3_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel,
				    int mem_space, int opt, uint32_t addr0,
				    uint32_t addr1, uint32_t ref, uint32_t mask,
				    uint32_t inv)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring,
			  /* memory (1) or register (0) */
			  (WAIT_REG_MEM_MEM_SPACE(mem_space) |
			   WAIT_REG_MEM_OPERATION(opt) | /* wait */
			   WAIT_REG_MEM_FUNCTION(3) | /* equal */
			   WAIT_REG_MEM_ENGINE(eng_sel)));

	if (mem_space)
		BUG_ON(addr0 & 0x3); /* Dword align */
	amdgpu_ring_write(ring, addr0);
	amdgpu_ring_write(ring, addr1);
	amdgpu_ring_write(ring, ref);
	amdgpu_ring_write(ring, mask);
	amdgpu_ring_write(ring, inv); /* poll interval */
}

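/*
 * Basic ring aliveness test: write 0xCAFEDEAD to SCRATCH_REG0 over MMIO,
 * then ask the ring to overwrite it with 0xDEADBEEF via a SET_UCONFIG_REG
 * packet and poll until the new value lands. Note that the packet takes the
 * XCC-local register offset while the MMIO access needs the per-instance one.
 */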
static int gfx_v9_4_3_ring_test_ring(struct amdgpu_ring *ring)
{
	uint32_t scratch_reg0_offset, xcc_offset;
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	/* Use register offset which is local to XCC in the packet */
	xcc_offset = SOC15_REG_OFFSET(GC, 0, regSCRATCH_REG0);
	scratch_reg0_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, ring->xcc_id), regSCRATCH_REG0);
	WREG32(scratch_reg0_offset, 0xCAFEDEAD);
	tmp = RREG32(scratch_reg0_offset);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, xcc_offset - PACKET3_SET_UCONFIG_REG_START);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch_reg0_offset);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;
	return r;
}

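/*
 * IB aliveness test: seed a writeback slot with 0xCAFEDEAD, submit an
 * indirect buffer whose WRITE_DATA packet stores 0xDEADBEEF to that slot,
 * then wait on the fence and verify the memory write went through.
 */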
static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;

	unsigned index;
	uint64_t gpu_addr;
	uint32_t tmp;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));

	r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
	ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
	ib.ptr[2] = lower_32_bits(gpu_addr);
	ib.ptr[3] = upper_32_bits(gpu_addr);
	ib.ptr[4] = 0xDEADBEEF;
	ib.length_dw = 5;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}

	tmp = adev->wb.wb[index];
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/* This value might differ per partition */
static uint64_t gfx_v9_4_3_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);

	return clock;
}

static void gfx_v9_4_3_free_microcode(struct amdgpu_device *adev)
{
	amdgpu_ucode_release(&adev->gfx.pfp_fw);
	amdgpu_ucode_release(&adev->gfx.me_fw);
	amdgpu_ucode_release(&adev->gfx.ce_fw);
	amdgpu_ucode_release(&adev->gfx.rlc_fw);
	amdgpu_ucode_release(&adev->gfx.mec_fw);
	amdgpu_ucode_release(&adev->gfx.mec2_fw);

	kfree(adev->gfx.rlc.register_list_format);
}

static int gfx_v9_4_3_init_rlc_microcode(struct amdgpu_device *adev,
					 const char *chip_name)
{
	char fw_name[30];
	int err;
	const struct rlc_firmware_header_v2_0 *rlc_hdr;
	uint16_t version_major;
	uint16_t version_minor;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
	if (err)
		goto out;
	rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;

	version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
	version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
	err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.rlc_fw);

	return err;
}

static bool gfx_v9_4_3_should_disable_gfxoff(struct pci_dev *pdev)
{
	return true;
}

static void gfx_v9_4_3_check_if_need_gfxoff(struct amdgpu_device *adev)
{
	if (gfx_v9_4_3_should_disable_gfxoff(adev->pdev))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
}

static int gfx_v9_4_3_init_cp_compute_microcode(struct amdgpu_device *adev,
						const char *chip_name)
{
	char fw_name[30];
	int err;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
	if (err)
		goto out;
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);

	adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
	adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;

	gfx_v9_4_3_check_if_need_gfxoff(adev);

out:
	if (err)
		amdgpu_ucode_release(&adev->gfx.mec_fw);
	return err;
}

static int gfx_v9_4_3_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int r;

	chip_name = "gc_9_4_3";

	r = gfx_v9_4_3_init_rlc_microcode(adev, chip_name);
	if (r)
		return r;

	r = gfx_v9_4_3_init_cp_compute_microcode(adev, chip_name);
	if (r)
		return r;

	return r;
}

static void gfx_v9_4_3_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}

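/*
 * Allocate and initialize the MEC backing objects: one HPD EOP buffer sized
 * for all compute rings across all XCCs, and a GTT copy of the MEC firmware
 * image used when loading the microcode through the CP registers.
 */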
static int gfx_v9_4_3_mec_init(struct amdgpu_device *adev)
{
	int r, i, num_xcc;
	u32 *hpd;
	const __le32 *fw_data;
	unsigned fw_size;
	u32 *fw;
	size_t mec_hpd_size;

	const struct gfx_firmware_header_v1_0 *mec_hdr;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		bitmap_zero(adev->gfx.mec_bitmap[i].queue_bitmap,
			    AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);
	mec_hpd_size =
		adev->gfx.num_compute_rings * num_xcc * GFX9_MEC_HPD_SIZE;
	if (mec_hpd_size) {
		r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM |
					      AMDGPU_GEM_DOMAIN_GTT,
					      &adev->gfx.mec.hpd_eop_obj,
					      &adev->gfx.mec.hpd_eop_gpu_addr,
					      (void **)&hpd);
		if (r) {
			dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r);
			gfx_v9_4_3_mec_fini(adev);
			return r;
		}

		if (amdgpu_emu_mode == 1) {
			for (i = 0; i < mec_hpd_size / 4; i++) {
				memset((void *)(hpd + i), 0, 4);
				if (i % 50 == 0)
					msleep(1);
			}
		} else {
			memset(hpd, 0, mec_hpd_size);
		}

		amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
		amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
	}

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.mec_fw_obj,
				      &adev->gfx.mec.mec_fw_gpu_addr,
				      (void **)&fw);
	if (r) {
		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
		gfx_v9_4_3_mec_fini(adev);
		return r;
	}

	memcpy(fw, fw_data, fw_size);

	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);

	return 0;
}

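/*
 * Steer subsequent indexed register accesses on @xcc_id to a specific
 * SE/SH/instance via GRBM_GFX_INDEX; 0xffffffff for a field selects
 * broadcast writes for that dimension.
 */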
static void gfx_v9_4_3_xcc_select_se_sh(struct amdgpu_device *adev, u32 se_num,
					u32 sh_num, u32 instance, int xcc_id)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
				     INSTANCE_INDEX, instance);

	if (se_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);

	if (sh_num == 0xffffffff)
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
				     SH_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);

	WREG32_SOC15_RLC_SHADOW_EX(reg, GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX, data);
}

static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(address << SQ_IND_INDEX__INDEX__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSQ_IND_INDEX,
		(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
		(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
		(regno << SQ_IND_INDEX__INDEX__SHIFT) |
		(thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
		(SQ_IND_INDEX__FORCE_READ_MASK) |
		(SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_IND_DATA);
}

static void gfx_v9_4_3_read_wave_data(struct amdgpu_device *adev,
				      uint32_t xcc_id, uint32_t simd, uint32_t wave,
				      uint32_t *dst, int *no_fields)
{
	/* type 1 wave data */
	dst[(*no_fields)++] = 1;
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_M0);
	dst[(*no_fields)++] = wave_read_ind(adev, xcc_id, simd, wave, ixSQ_WAVE_MODE);
}

static void gfx_v9_4_3_read_wave_sgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t start,
				       uint32_t size, uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, 0,
		       start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_read_wave_vgprs(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
				       uint32_t wave, uint32_t thread,
				       uint32_t start, uint32_t size,
				       uint32_t *dst)
{
	wave_read_regs(adev, xcc_id, simd, wave, thread,
		       start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
}

static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
					u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
{
	soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
}

static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
					       int num_xccs_per_xcp)
{
	int ret, i, num_xcc;
	u32 tmp = 0;

	if (adev->psp.funcs) {
		ret = psp_spatial_partition(&adev->psp,
					    NUM_XCC(adev->gfx.xcc_mask) /
					    num_xccs_per_xcp);
		if (ret)
			return ret;
	} else {
		num_xcc = NUM_XCC(adev->gfx.xcc_mask);

		for (i = 0; i < num_xcc; i++) {
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP,
					    num_xccs_per_xcp);
			tmp = REG_SET_FIELD(tmp, CP_HYP_XCP_CTL, VIRTUAL_XCC_ID,
					    i % num_xccs_per_xcp);
			WREG32_SOC15(GC, GET_INST(GC, i), regCP_HYP_XCP_CTL,
				     tmp);
		}
		ret = 0;
	}

	adev->gfx.num_xcc_per_xcp = num_xccs_per_xcp;

	return ret;
}

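/*
 * Map an IH node id to a logical XCC index by counting the XCC mask bits at
 * or below the node's position (IH node ids appear to come in pairs per
 * XCC, hence the ih_node / 2 in the mask).
 */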
static int gfx_v9_4_3_ih_to_xcc_inst(struct amdgpu_device *adev, int ih_node)
{
	int xcc;

	xcc = hweight8(adev->gfx.xcc_mask & GENMASK(ih_node / 2, 0));
	if (!xcc) {
		dev_err(adev->dev, "Couldn't find xcc mapping from IH node");
		return -EINVAL;
	}

	return xcc - 1;
}

static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v9_4_3_get_gpu_clock_counter,
	.select_se_sh = &gfx_v9_4_3_xcc_select_se_sh,
	.read_wave_data = &gfx_v9_4_3_read_wave_data,
	.read_wave_sgprs = &gfx_v9_4_3_read_wave_sgprs,
	.read_wave_vgprs = &gfx_v9_4_3_read_wave_vgprs,
	.select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
	.switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
	.ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
};

static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle,
					       struct aca_bank *bank, enum aca_error_type type,
					       struct aca_bank_report *report, void *data)
{
	u64 status, misc0;
	u32 instlo;
	int ret;

	status = bank->regs[ACA_REG_IDX_STATUS];
	if ((type == ACA_ERROR_TYPE_UE &&
	     ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) ||
	    (type == ACA_ERROR_TYPE_CE &&
	     ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) {

		ret = aca_bank_info_decode(bank, &report->info);
		if (ret)
			return ret;

		/* NOTE: overwrite info.die_id with xcd id for gfx */
		instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
		instlo &= GENMASK(31, 1);
		report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1;

		misc0 = bank->regs[ACA_REG_IDX_MISC0];
		report->count[type] = ACA_REG__MISC0__ERRCNT(misc0);
	}

	return 0;
}

static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
					 enum aca_error_type type, void *data)
{
	u32 instlo;

	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
	instlo &= GENMASK(31, 1);
	switch (instlo) {
	case mmSMNAID_XCD0_MCA_SMU:
	case mmSMNAID_XCD1_MCA_SMU:
	case mmSMNXCD_XCD0_MCA_SMU:
		return true;
	default:
		break;
	}

	return false;
}

static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = {
	.aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report,
	.aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid,
};

static const struct aca_info gfx_v9_4_3_aca_info = {
	.hwip = ACA_HWIP_TYPE_SMU,
	.mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
	.bank_ops = &gfx_v9_4_3_aca_bank_ops,
};

static int gfx_v9_4_3_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;

	adev->gfx.funcs = &gfx_v9_4_3_gfx_funcs;
	adev->gfx.ras = &gfx_v9_4_3_ras;

	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		adev->gfx.config.max_hw_contexts = 8;
		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
		gb_addr_config = RREG32_SOC15(GC, GET_INST(GC, 0), regGB_ADDR_CONFIG);
		break;
	default:
		BUG();
		break;
	}

	adev->gfx.config.gb_addr_config = gb_addr_config;

	adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_PIPES);

	adev->gfx.config.max_tile_pipes =
		adev->gfx.config.gb_addr_config_fields.num_pipes;

	adev->gfx.config.gb_addr_config_fields.num_banks = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_BANKS);
	adev->gfx.config.gb_addr_config_fields.max_compress_frags = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					MAX_COMPRESSED_FRAGS);
	adev->gfx.config.gb_addr_config_fields.num_rb_per_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_RB_PER_SE);
	adev->gfx.config.gb_addr_config_fields.num_se = 1 <<
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					NUM_SHADER_ENGINES);
	adev->gfx.config.gb_addr_config_fields.pipe_interleave_size = 1 << (8 +
			REG_GET_FIELD(
					adev->gfx.config.gb_addr_config,
					GB_ADDR_CONFIG,
					PIPE_INTERLEAVE_SIZE));

	return 0;
}

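/*
 * Initialize one compute ring: position it in the per-XCC ring array,
 * derive its doorbell from the XCC doorbell range, carve its EOP slot out
 * of the shared HPD buffer, and register it with the EOP IRQ source for
 * its MEC/pipe.
 */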
static int gfx_v9_4_3_compute_ring_init(struct amdgpu_device *adev, int ring_id,
					int xcc_id, int mec, int pipe, int queue)
{
	unsigned irq_type;
	struct amdgpu_ring *ring;
	unsigned int hw_prio;
	uint32_t xcc_doorbell_start;

	ring = &adev->gfx.compute_ring[xcc_id * adev->gfx.num_compute_rings +
				       ring_id];

	/* mec0 is me1 */
	ring->xcc_id = xcc_id;
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	xcc_doorbell_start = adev->doorbell_index.mec_ring0 +
			     xcc_id * adev->doorbell_index.xcc_doorbell_range;
	ring->doorbell_index = (xcc_doorbell_start + ring_id) << 1;
	ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr +
			     (ring_id + xcc_id * adev->gfx.num_compute_rings) *
			     GFX9_MEC_HPD_SIZE;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	sprintf(ring->name, "comp_%d.%d.%d.%d",
		ring->xcc_id, ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;
	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
	/* type-2 packets are deprecated on MEC, use type-3 instead */
	return amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
				hw_prio, NULL);
}

static int gfx_v9_4_3_sw_init(void *handle)
{
	int i, j, k, r, ring_id, xcc_id, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.mec.num_mec = 2;
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_REG_FAULT,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_PRIV_INSTR_FAULT,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	r = gfx_v9_4_3_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		ring_id = 0;
		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec;
				     k++) {
					if (!amdgpu_gfx_is_mec_queue_enabled(
							adev, xcc_id, i, k, j))
						continue;

					r = gfx_v9_4_3_compute_ring_init(adev,
								       ring_id,
								       xcc_id,
								       i, k, j);
					if (r)
						return r;

					ring_id++;
				}
			}
		}

		r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, xcc_id);
		if (r) {
			DRM_ERROR("Failed to init KIQ BOs!\n");
			return r;
		}

		r = amdgpu_gfx_kiq_init_ring(adev, xcc_id);
		if (r)
			return r;

		/* create MQD for all compute queues as well as KIQ for SRIOV case */
		r = amdgpu_gfx_mqd_sw_init(adev,
					   sizeof(struct v9_mqd_allocation), xcc_id);
		if (r)
			return r;
	}

	r = gfx_v9_4_3_gpu_early_init(adev);
	if (r)
		return r;

	r = amdgpu_gfx_ras_sw_init(adev);
	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev))
		r = amdgpu_gfx_sysfs_init(adev);

	return r;
}

static int gfx_v9_4_3_sw_fini(void *handle)
{
	int i, num_xcc;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < adev->gfx.num_compute_rings * num_xcc; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	for (i = 0; i < num_xcc; i++) {
		amdgpu_gfx_mqd_sw_fini(adev, i);
		amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[i].ring);
		amdgpu_gfx_kiq_fini(adev, i);
	}

	gfx_v9_4_3_mec_fini(adev);
	amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
	gfx_v9_4_3_free_microcode(adev);
	if (!amdgpu_sriov_vf(adev))
		amdgpu_gfx_sysfs_fini(adev);

	return 0;
}

#define DEFAULT_SH_MEM_BASES	(0x6000)
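/*
 * Point every KFD-owned VMID on this XCC at the static SH_MEM apertures
 * described below, enable the per-VMID trap, and clear any GDS, GWS and OA
 * allocations (firmware grants those to target VMIDs as needed).
 */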
static void gfx_v9_4_3_xcc_init_compute_vmid(struct amdgpu_device *adev,
					     int xcc_id)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t data;

	/*
	 * Configure apertures:
	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);

	sh_mem_config = SH_MEM_ADDRESS_MODE_64 |
			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;

	mutex_lock(&adev->srbm_mutex);
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_CONFIG, sh_mem_config);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSH_MEM_BASES, sh_mem_bases);

		/* Enable trap for each kfd vmid. */
		data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL);
		data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regSPI_GDBG_PER_VMID_CNTL, data);
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
	mutex_unlock(&adev->srbm_mutex);

	/* Initialize all compute VMIDs to have no GDS, GWS, or OA
	 * access. These should be enabled by FW for target VMIDs.
	 */
	for (i = adev->vm_manager.first_kfd_vmid; i < AMDGPU_NUM_VMID; i++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, i, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, i, 0);
	}
}

static void gfx_v9_4_3_xcc_init_gds_vmid(struct amdgpu_device *adev, int xcc_id)
{
	int vmid;

	/*
	 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
	 * access. Compute VMIDs should be enabled by FW for target VMIDs,
	 * the driver can enable them for graphics. VMID0 should maintain
	 * access so that HWS firmware can save/restore entries.
	 */
	for (vmid = 1; vmid < AMDGPU_NUM_VMID; vmid++) {
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_BASE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_VMID0_SIZE, 2 * vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_GWS_VMID0, vmid, 0);
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, xcc_id), regGDS_OA_VMID0, vmid, 0);
	}
}

static void gfx_v9_4_3_xcc_constants_init(struct amdgpu_device *adev,
					  int xcc_id)
{
	u32 tmp;
	int i;

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[AMDGPU_GFXHUB(0)].num_ids; i++) {
		soc15_grbm_select(adev, 0, 0, 0, i, GET_INST(GC, xcc_id));
		/* CP and shaders */
		if (i == 0) {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, 0);
		} else {
			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
					    !!adev->gmc.noretry);
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_CONFIG, tmp);
			tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
					    (adev->gmc.private_aperture_start >>
					     48));
			tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE,
					    (adev->gmc.shared_aperture_start >>
					     48));
			WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id),
					 regSH_MEM_BASES, tmp);
		}
	}
	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, 0));

	mutex_unlock(&adev->srbm_mutex);

	gfx_v9_4_3_xcc_init_compute_vmid(adev, xcc_id);
	gfx_v9_4_3_xcc_init_gds_vmid(adev, xcc_id);
}

static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);

	gfx_v9_4_3_get_cu_info(adev, &adev->gfx.cu_info);
	adev->gfx.config.db_debug2 =
		RREG32_SOC15(GC, GET_INST(GC, 0), regDB_DEBUG2);

	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_constants_init(adev, i);
}

static void
gfx_v9_4_3_xcc_enable_save_restore_machine(struct amdgpu_device *adev,
					   int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_SRM_CNTL, SRM_ENABLE, 1);
}

static void gfx_v9_4_3_xcc_init_pg(struct amdgpu_device *adev, int xcc_id)
{
	/*
	 * Rlc save restore list is workable since v2_1.
	 * And it's needed by gfxoff feature.
	 */
	if (adev->gfx.rlc.is_rlc_v2_1)
		gfx_v9_4_3_xcc_enable_save_restore_machine(adev, xcc_id);
}

static void gfx_v9_4_3_xcc_disable_gpa_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;

	data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG);
	data |= CPC_PSP_DEBUG__UTCL2IUGPAOVERRIDE_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCPC_PSP_DEBUG, data);
}

static bool gfx_v9_4_3_is_rlc_enabled(struct amdgpu_device *adev)
{
	uint32_t rlc_setting;

	/* if RLC is not enabled, do nothing */
	rlc_setting = RREG32_SOC15(GC, GET_INST(GC, 0), regRLC_CNTL);
	if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
		return false;

	return true;
}

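/*
 * Request RLC safe mode on @xcc_id and spin until the RLC acknowledges by
 * clearing the CMD field; the unset variant below sends the exit message
 * without waiting for an acknowledgement.
 */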
static void gfx_v9_4_3_xcc_set_safe_mode(struct amdgpu_device *adev, int xcc_id)
{
	uint32_t data;
	unsigned i;

	data = RLC_SAFE_MODE__CMD_MASK;
	data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);

	/* wait for RLC_SAFE_MODE */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (!REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_unset_safe_mode(struct amdgpu_device *adev,
					   int xcc_id)
{
	uint32_t data;

	data = RLC_SAFE_MODE__CMD_MASK;
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SAFE_MODE, data);
}

static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
{
	int xcc_id, num_xcc;
	struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
		reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[GET_INST(GC, xcc_id)];
		reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG0);
		reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG1);
		reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG2);
		reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regSCRATCH_REG3);
		reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_CNTL);
		reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
		reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
	}
	adev->gfx.rlc.rlcg_reg_access_supported = true;
}

static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
{
	/* init spm vmid with 0xf */
	if (adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, NULL, 0xf);

	return 0;
}

static void gfx_v9_4_3_xcc_wait_for_rlc_serdes(struct amdgpu_device *adev,
					       int xcc_id)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff,
						    xcc_id);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
			if (k == adev->usec_timeout) {
				gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff,
							    0xffffffff,
							    0xffffffff, xcc_id);
				mutex_unlock(&adev->grbm_idx_mutex);
				DRM_INFO("Timeout wait for RLC serdes %u,%u\n",
					 i, j);
				return;
			}
		}
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}

static void gfx_v9_4_3_xcc_enable_gui_idle_interrupt(struct amdgpu_device *adev,
						     bool enable, int xcc_id)
{
	u32 tmp;

	/* These interrupts should be enabled to drive DS clock */

	tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0);

	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
	tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_INT_CNTL_RING0, tmp);
}

static void gfx_v9_4_3_xcc_rlc_stop(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 0);
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
	gfx_v9_4_3_xcc_wait_for_rlc_serdes(adev, xcc_id);
}

static void gfx_v9_4_3_rlc_stop(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_stop(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_reset(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), GRBM_SOFT_RESET,
			      SOFT_RESET_RLC, 0);
	udelay(50);
}

static void gfx_v9_4_3_rlc_reset(struct amdgpu_device *adev)
{
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_rlc_reset(adev, i);
}

static void gfx_v9_4_3_xcc_rlc_start(struct amdgpu_device *adev, int xcc_id)
{
	WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), RLC_CNTL,
			      RLC_ENABLE_F32, 1);
	udelay(50);

	/* APUs (e.g. carrizo) enable the cp interrupt only after cp is initialized */
	if (!(adev->flags & AMD_IS_APU)) {
		gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
		udelay(50);
	}
}

static void gfx_v9_4_3_rlc_start(struct amdgpu_device *adev)
{
#ifdef AMDGPU_RLC_DEBUG_RETRY
	u32 rlc_ucode_ver;
#endif
	int i, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		gfx_v9_4_3_xcc_rlc_start(adev, i);
#ifdef AMDGPU_RLC_DEBUG_RETRY
		/* RLC_GPM_GENERAL_6 : RLC Ucode version */
		rlc_ucode_ver = RREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_6);
		if (rlc_ucode_ver == 0x108) {
			dev_info(adev->dev,
				 "Using rlc debug ucode. regRLC_GPM_GENERAL_6 == 0x%08x / fw_ver == %i\n",
				 rlc_ucode_ver, adev->gfx.rlc_fw_version);
			/* RLC_GPM_TIMER_INT_3 : Timer interval in RefCLK cycles,
			 * default is 0x9C4 to create a 100us interval */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_TIMER_INT_3, 0x9C4);
			/* RLC_GPM_GENERAL_12 : Minimum gap between wptr and rptr
			 * to disable the page fault retry interrupts, default is
			 * 0x100 (256) */
			WREG32_SOC15(GC, GET_INST(GC, i), regRLC_GPM_GENERAL_12, 0x100);
		}
#endif
	}
}

static int gfx_v9_4_3_xcc_rlc_load_microcode(struct amdgpu_device *adev,
					     int xcc_id)
{
	const struct rlc_firmware_header_v2_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
				   le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR,
		     RLCG_UCODE_LOADING_START_ADDRESS);
	for (i = 0; i < fw_size; i++) {
		if (amdgpu_emu_mode == 1 && i % 100 == 0) {
			dev_info(adev->dev, "Write RLC ucode data %u DWs\n", i);
			msleep(1);
		}
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	return 0;
}

static int gfx_v9_4_3_xcc_rlc_resume(struct amdgpu_device *adev, int xcc_id)
{
	int r;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		gfx_v9_4_3_xcc_rlc_stop(adev, xcc_id);
		/* legacy rlc firmware loading */
		r = gfx_v9_4_3_xcc_rlc_load_microcode(adev, xcc_id);
		if (r)
			return r;
		gfx_v9_4_3_xcc_rlc_start(adev, xcc_id);
	}

	amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id);
	/* disable CG */
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, 0);
	gfx_v9_4_3_xcc_init_pg(adev, xcc_id);
	amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id);

	return 0;
}

static int gfx_v9_4_3_rlc_resume(struct amdgpu_device *adev)
{
	int r, i, num_xcc;

	if (amdgpu_sriov_vf(adev))
		return 0;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	for (i = 0; i < num_xcc; i++) {
		r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
		if (r)
			return r;
	}

	return 0;
}

static void gfx_v9_4_3_update_spm_vmid(struct amdgpu_device *adev, struct amdgpu_ring *ring,
				       unsigned vmid)
{
	u32 reg, data;

	reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL);
	if (amdgpu_sriov_is_pp_one_vf(adev))
		data = RREG32_NO_KIQ(reg);
	else
		data = RREG32(reg);

	data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
	data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;

	if (amdgpu_sriov_is_pp_one_vf(adev))
		WREG32_SOC15_NO_KIQ(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
	else
		WREG32_SOC15(GC, GET_INST(GC, 0), regRLC_SPM_MC_CNTL, data);
}

static const struct soc15_reg_rlcg rlcg_access_gc_9_4_3[] = {
	{SOC15_REG_ENTRY(GC, 0, regGRBM_GFX_INDEX)},
	{SOC15_REG_ENTRY(GC, 0, regSQ_IND_INDEX)},
};

static bool gfx_v9_4_3_check_rlcg_range(struct amdgpu_device *adev,
					uint32_t offset,
					struct soc15_reg_rlcg *entries, int arr_size)
{
	int i, inst;
	uint32_t reg;

	if (!entries)
		return false;

	for (i = 0; i < arr_size; i++) {
		const struct soc15_reg_rlcg *entry;

		entry = &entries[i];
		inst = adev->ip_map.logical_to_dev_inst ?
			adev->ip_map.logical_to_dev_inst(
				adev, entry->hwip, entry->instance) :
			entry->instance;
		reg = adev->reg_offset[entry->hwip][inst][entry->segment] +
			entry->reg;
		if (offset == reg)
			return true;
	}

	return false;
}

static bool gfx_v9_4_3_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
{
	return gfx_v9_4_3_check_rlcg_range(adev, offset,
					   (void *)rlcg_access_gc_9_4_3,
					   ARRAY_SIZE(rlcg_access_gc_9_4_3));
}

static void gfx_v9_4_3_xcc_cp_compute_enable(struct amdgpu_device *adev,
					     bool enable, int xcc_id)
{
	if (enable) {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL, 0);
	} else {
		WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MEC_CNTL,
			(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		adev->gfx.kiq[xcc_id].ring.sched.ready = false;
	}
	udelay(50);
}

static int gfx_v9_4_3_xcc_cp_compute_load_microcode(struct amdgpu_device *adev,
						    int xcc_id)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i;
	u32 tmp;
	u32 mec_ucode_addr_offset;
	u32 mec_ucode_data_offset;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	gfx_v9_4_3_xcc_cp_compute_enable(adev, false, xcc_id);

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);

	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
	tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_CNTL, tmp);

	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_LO,
		     adev->gfx.mec.mec_fw_gpu_addr & 0xFFFFF000);
	WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_CPC_IC_BASE_HI,
		     upper_32_bits(adev->gfx.mec.mec_fw_gpu_addr));

	mec_ucode_addr_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_ADDR);
	mec_ucode_data_offset =
		SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_MEC_ME1_UCODE_DATA);

	/* MEC1 */
	WREG32(mec_ucode_addr_offset, mec_hdr->jt_offset);
	for (i = 0; i < mec_hdr->jt_size; i++)
		WREG32(mec_ucode_data_offset,
		       le32_to_cpup(fw_data + mec_hdr->jt_offset + i));

	WREG32(mec_ucode_addr_offset, adev->gfx.mec_fw_version);
	/* TODO: loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. */

	return 0;
}

/* KIQ functions */
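/*
 * Tell the RLC scheduler which me/pipe/queue hosts the KIQ by packing them
 * into the low byte of RLC_CP_SCHEDULERS, then flip the enable bit with a
 * second write.
 */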
1507 | static void gfx_v9_4_3_xcc_kiq_setting(struct amdgpu_ring *ring, int xcc_id) |
1508 | { |
1509 | uint32_t tmp; |
1510 | struct amdgpu_device *adev = ring->adev; |
1511 | |
	/* tell RLC which queue is the KIQ */
1513 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS); |
1514 | tmp &= 0xffffff00; |
1515 | tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); |
1516 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); |
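	/* setting bit 7 after programming the selection presumably latches
	 * the new KIQ entry in the scheduler
	 */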
1517 | tmp |= 0x80; |
1518 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regRLC_CP_SCHEDULERS, tmp); |
1519 | } |
1520 | |
1521 | static void gfx_v9_4_3_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd) |
1522 | { |
1523 | struct amdgpu_device *adev = ring->adev; |
1524 | |
1525 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { |
1526 | if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { |
1527 | mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; |
1528 | mqd->cp_hqd_queue_priority = |
1529 | AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; |
1530 | } |
1531 | } |
1532 | } |
1533 | |
1534 | static int gfx_v9_4_3_xcc_mqd_init(struct amdgpu_ring *ring, int xcc_id) |
1535 | { |
1536 | struct amdgpu_device *adev = ring->adev; |
1537 | struct v9_mqd *mqd = ring->mqd_ptr; |
1538 | uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; |
1539 | uint32_t tmp; |
1540 | |
1541 | mqd->header = 0xC0310800; |
1542 | mqd->compute_pipelinestat_enable = 0x00000001; |
1543 | mqd->compute_static_thread_mgmt_se0 = 0xffffffff; |
1544 | mqd->compute_static_thread_mgmt_se1 = 0xffffffff; |
1545 | mqd->compute_static_thread_mgmt_se2 = 0xffffffff; |
1546 | mqd->compute_static_thread_mgmt_se3 = 0xffffffff; |
1547 | mqd->compute_misc_reserved = 0x00000003; |
1548 | |
1549 | mqd->dynamic_cu_mask_addr_lo = |
1550 | lower_32_bits(ring->mqd_gpu_addr |
1551 | + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); |
1552 | mqd->dynamic_cu_mask_addr_hi = |
1553 | upper_32_bits(ring->mqd_gpu_addr |
1554 | + offsetof(struct v9_mqd_allocation, dynamic_cu_mask)); |
1555 | |
1556 | eop_base_addr = ring->eop_gpu_addr >> 8; |
1557 | mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; |
1558 | mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); |
1559 | |
1560 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ |
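	/* e.g. GFX9_MEC_HPD_SIZE = 4096 bytes = 1024 dwords, so EOP_SIZE =
	 * order_base_2(1024) - 1 = 9 and the HW decodes 2^(9+1) = 1024 dwords
	 */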
1561 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL); |
1562 | tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, |
1563 | (order_base_2(GFX9_MEC_HPD_SIZE / 4) - 1)); |
1564 | |
1565 | mqd->cp_hqd_eop_control = tmp; |
1566 | |
1567 | /* enable doorbell? */ |
1568 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL); |
1569 | |
1570 | if (ring->use_doorbell) { |
1571 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
1572 | DOORBELL_OFFSET, ring->doorbell_index); |
1573 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
1574 | DOORBELL_EN, 1); |
1575 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
1576 | DOORBELL_SOURCE, 0); |
1577 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
1578 | DOORBELL_HIT, 0); |
1579 | } else { |
1580 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, |
1581 | DOORBELL_EN, 0); |
1582 | } |
1583 | |
1584 | mqd->cp_hqd_pq_doorbell_control = tmp; |
1585 | |
	/* keep the queue inactive: zero the dequeue request and ring pointers */
1587 | ring->wptr = 0; |
1588 | mqd->cp_hqd_dequeue_request = 0; |
1589 | mqd->cp_hqd_pq_rptr = 0; |
1590 | mqd->cp_hqd_pq_wptr_lo = 0; |
1591 | mqd->cp_hqd_pq_wptr_hi = 0; |
1592 | |
1593 | /* set the pointer to the MQD */ |
1594 | mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc; |
1595 | mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr); |
1596 | |
1597 | /* set MQD vmid to 0 */ |
1598 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL); |
1599 | tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); |
1600 | mqd->cp_mqd_control = tmp; |
1601 | |
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1603 | hqd_gpu_addr = ring->gpu_addr >> 8; |
1604 | mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; |
1605 | mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); |
1606 | |
1607 | /* set up the HQD, this is similar to CP_RB0_CNTL */ |
1608 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL); |
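	/* QUEUE_SIZE is log2(ring size in dwords) - 1, e.g. a 256 KiB ring is
	 * 65536 dwords and encodes as 15
	 */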
1609 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, |
1610 | (order_base_2(ring->ring_size / 4) - 1)); |
1611 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, |
1612 | ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); |
1613 | #ifdef __BIG_ENDIAN |
1614 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); |
1615 | #endif |
1616 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); |
1617 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); |
1618 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); |
1619 | tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); |
1620 | mqd->cp_hqd_pq_control = tmp; |
1621 | |
1622 | /* set the wb address whether it's enabled or not */ |
1623 | wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); |
1624 | mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; |
1625 | mqd->cp_hqd_pq_rptr_report_addr_hi = |
1626 | upper_32_bits(wb_gpu_addr) & 0xffff; |
1627 | |
1628 | /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ |
1629 | wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); |
1630 | mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; |
1631 | mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; |
1632 | |
1633 | /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ |
1634 | ring->wptr = 0; |
1635 | mqd->cp_hqd_pq_rptr = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR); |
1636 | |
1637 | /* set the vmid for the queue */ |
1638 | mqd->cp_hqd_vmid = 0; |
1639 | |
1640 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE); |
1641 | tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); |
1642 | mqd->cp_hqd_persistent_state = tmp; |
1643 | |
1644 | /* set MIN_IB_AVAIL_SIZE */ |
1645 | tmp = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL); |
1646 | tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3); |
1647 | mqd->cp_hqd_ib_control = tmp; |
1648 | |
1649 | /* set static priority for a queue/ring */ |
1650 | gfx_v9_4_3_mqd_set_priority(ring, mqd); |
1651 | mqd->cp_hqd_quantum = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_QUANTUM); |
1652 | |
	/* The map_queues packet doesn't need to activate the queue,
	 * so only the KIQ needs this field set.
	 */
1656 | if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) |
1657 | mqd->cp_hqd_active = 1; |
1658 | |
1659 | return 0; |
1660 | } |
1661 | |
1662 | static int gfx_v9_4_3_xcc_kiq_init_register(struct amdgpu_ring *ring, |
1663 | int xcc_id) |
1664 | { |
1665 | struct amdgpu_device *adev = ring->adev; |
1666 | struct v9_mqd *mqd = ring->mqd_ptr; |
1667 | int j; |
1668 | |
1669 | /* disable wptr polling */ |
1670 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); |
1671 | |
1672 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR, |
1673 | mqd->cp_hqd_eop_base_addr_lo); |
1674 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_BASE_ADDR_HI, |
1675 | mqd->cp_hqd_eop_base_addr_hi); |
1676 | |
1677 | /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ |
1678 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_EOP_CONTROL, |
1679 | mqd->cp_hqd_eop_control); |
1680 | |
1681 | /* enable doorbell? */ |
1682 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, |
1683 | mqd->cp_hqd_pq_doorbell_control); |
1684 | |
1685 | /* disable the queue if it's active */ |
1686 | if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { |
1687 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); |
1688 | for (j = 0; j < adev->usec_timeout; j++) { |
1689 | if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) |
1690 | break; |
1691 | udelay(1); |
1692 | } |
1693 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, |
1694 | mqd->cp_hqd_dequeue_request); |
1695 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, |
1696 | mqd->cp_hqd_pq_rptr); |
1697 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, |
1698 | mqd->cp_hqd_pq_wptr_lo); |
1699 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, |
1700 | mqd->cp_hqd_pq_wptr_hi); |
1701 | } |
1702 | |
1703 | /* set the pointer to the MQD */ |
1704 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR, |
1705 | mqd->cp_mqd_base_addr_lo); |
1706 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_BASE_ADDR_HI, |
1707 | mqd->cp_mqd_base_addr_hi); |
1708 | |
1709 | /* set MQD vmid to 0 */ |
1710 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_MQD_CONTROL, |
1711 | mqd->cp_mqd_control); |
1712 | |
	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
1714 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE, |
1715 | mqd->cp_hqd_pq_base_lo); |
1716 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_BASE_HI, |
1717 | mqd->cp_hqd_pq_base_hi); |
1718 | |
1719 | /* set up the HQD, this is similar to CP_RB0_CNTL */ |
1720 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_CONTROL, |
1721 | mqd->cp_hqd_pq_control); |
1722 | |
1723 | /* set the wb address whether it's enabled or not */ |
1724 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR, |
1725 | mqd->cp_hqd_pq_rptr_report_addr_lo); |
1726 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR_REPORT_ADDR_HI, |
1727 | mqd->cp_hqd_pq_rptr_report_addr_hi); |
1728 | |
1729 | /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ |
1730 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR, |
1731 | mqd->cp_hqd_pq_wptr_poll_addr_lo); |
1732 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_POLL_ADDR_HI, |
1733 | mqd->cp_hqd_pq_wptr_poll_addr_hi); |
1734 | |
1735 | /* enable the doorbell if requested */ |
1736 | if (ring->use_doorbell) { |
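		/* doorbell_index values are in 64-bit doorbell units, so
		 * "* 2" converts to 32-bit units and "<< 2" to the byte
		 * offset the range registers expect (assuming the usual
		 * gfx9 doorbell layout)
		 */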
1737 | WREG32_SOC15( |
1738 | GC, GET_INST(GC, xcc_id), |
1739 | regCP_MEC_DOORBELL_RANGE_LOWER, |
1740 | ((adev->doorbell_index.kiq + |
1741 | xcc_id * adev->doorbell_index.xcc_doorbell_range) * |
1742 | 2) << 2); |
1743 | WREG32_SOC15( |
1744 | GC, GET_INST(GC, xcc_id), |
1745 | regCP_MEC_DOORBELL_RANGE_UPPER, |
1746 | ((adev->doorbell_index.userqueue_end + |
1747 | xcc_id * adev->doorbell_index.xcc_doorbell_range) * |
1748 | 2) << 2); |
1749 | } |
1750 | |
1751 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, |
1752 | mqd->cp_hqd_pq_doorbell_control); |
1753 | |
1754 | /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ |
1755 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, |
1756 | mqd->cp_hqd_pq_wptr_lo); |
1757 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, |
1758 | mqd->cp_hqd_pq_wptr_hi); |
1759 | |
1760 | /* set the vmid for the queue */ |
1761 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_VMID, mqd->cp_hqd_vmid); |
1762 | |
1763 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, |
1764 | mqd->cp_hqd_persistent_state); |
1765 | |
1766 | /* activate the queue */ |
1767 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, |
1768 | mqd->cp_hqd_active); |
1769 | |
1770 | if (ring->use_doorbell) |
1771 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_STATUS, DOORBELL_ENABLE, 1); |
1772 | |
1773 | return 0; |
1774 | } |
1775 | |
1776 | static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring, |
1777 | int xcc_id) |
1778 | { |
1779 | struct amdgpu_device *adev = ring->adev; |
1780 | int j; |
1781 | |
1782 | /* disable the queue if it's active */ |
1783 | if (RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1) { |
1784 | |
1785 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, 1); |
1786 | |
1787 | for (j = 0; j < adev->usec_timeout; j++) { |
1788 | if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1)) |
1789 | break; |
1790 | udelay(1); |
1791 | } |
1792 | |
		if (j == adev->usec_timeout) {
			DRM_DEBUG("%s dequeue request failed.\n", ring->name);
1795 | |
1796 | /* Manual disable if dequeue request times out */ |
1797 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE, 0); |
1798 | } |
1799 | |
1800 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_DEQUEUE_REQUEST, |
1801 | 0); |
1802 | } |
1803 | |
1804 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0); |
1805 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0); |
1806 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT); |
1807 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000); |
1808 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0); |
1809 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0); |
1810 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_HI, 0); |
1811 | WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_WPTR_LO, 0); |
1812 | |
1813 | return 0; |
1814 | } |
1815 | |
1816 | static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id) |
1817 | { |
1818 | struct amdgpu_device *adev = ring->adev; |
1819 | struct v9_mqd *mqd = ring->mqd_ptr; |
1820 | struct v9_mqd *tmp_mqd; |
1821 | |
1822 | gfx_v9_4_3_xcc_kiq_setting(ring, xcc_id); |
1823 | |
	/* The GPU could be in a bad state during probe: the driver triggers
	 * the reset after loading the SMU, in which case the MQD was never
	 * initialized and needs to be re-initialized here. Check
	 * mqd->cp_hqd_pq_control, since that value should not be 0.
	 */
1829 | tmp_mqd = (struct v9_mqd *)adev->gfx.kiq[xcc_id].mqd_backup; |
1830 | if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) { |
		/* for the GPU_RESET case, reset MQD to a clean status */
1832 | if (adev->gfx.kiq[xcc_id].mqd_backup) |
1833 | memcpy(mqd, adev->gfx.kiq[xcc_id].mqd_backup, sizeof(struct v9_mqd_allocation)); |
1834 | |
1835 | /* reset ring buffer */ |
1836 | ring->wptr = 0; |
1837 | amdgpu_ring_clear_ring(ring); |
1838 | mutex_lock(&adev->srbm_mutex); |
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
1843 | } else { |
1844 | memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); |
1845 | ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; |
1846 | ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; |
1847 | mutex_lock(&adev->srbm_mutex); |
1848 | if (amdgpu_sriov_vf(adev) && adev->in_suspend) |
1849 | amdgpu_ring_clear_ring(ring); |
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		gfx_v9_4_3_xcc_kiq_init_register(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
1855 | |
1856 | if (adev->gfx.kiq[xcc_id].mqd_backup) |
1857 | memcpy(adev->gfx.kiq[xcc_id].mqd_backup, mqd, sizeof(struct v9_mqd_allocation)); |
1858 | } |
1859 | |
1860 | return 0; |
1861 | } |
1862 | |
1863 | static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id) |
1864 | { |
1865 | struct amdgpu_device *adev = ring->adev; |
1866 | struct v9_mqd *mqd = ring->mqd_ptr; |
1867 | int mqd_idx = ring - &adev->gfx.compute_ring[0]; |
1868 | struct v9_mqd *tmp_mqd; |
1869 | |
	/* As with the KIQ init above, the driver needs to re-init the MQD if
	 * mqd->cp_hqd_pq_control indicates it was never initialized.
	 */
1873 | tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx]; |
1874 | |
1875 | if (!tmp_mqd->cp_hqd_pq_control || |
1876 | (!amdgpu_in_reset(adev) && !adev->in_suspend)) { |
1877 | memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); |
1878 | ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; |
1879 | ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; |
1880 | mutex_lock(&adev->srbm_mutex); |
		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_mqd_init(ring, xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
1885 | |
1886 | if (adev->gfx.mec.mqd_backup[mqd_idx]) |
1887 | memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation)); |
1888 | } else { |
1889 | /* restore MQD to a clean status */ |
1890 | if (adev->gfx.mec.mqd_backup[mqd_idx]) |
1891 | memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation)); |
1892 | /* reset ring buffer */ |
1893 | ring->wptr = 0; |
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
1895 | amdgpu_ring_clear_ring(ring); |
1896 | } |
1897 | |
1898 | return 0; |
1899 | } |
1900 | |
1901 | static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id) |
1902 | { |
1903 | struct amdgpu_ring *ring; |
1904 | int j; |
1905 | |
1906 | for (j = 0; j < adev->gfx.num_compute_rings; j++) { |
1907 | ring = &adev->gfx.compute_ring[j + xcc_id * adev->gfx.num_compute_rings]; |
1908 | if (!amdgpu_in_reset(adev) && !adev->in_suspend) { |
1909 | mutex_lock(&adev->srbm_mutex); |
			soc15_grbm_select(adev, ring->me,
					ring->pipe,
					ring->queue, 0, GET_INST(GC, xcc_id));
			gfx_v9_4_3_xcc_q_fini_register(ring, xcc_id);
			soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
			mutex_unlock(&adev->srbm_mutex);
1916 | } |
1917 | } |
1918 | |
1919 | return 0; |
1920 | } |
1921 | |
1922 | static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id) |
1923 | { |
1924 | struct amdgpu_ring *ring; |
1925 | int r; |
1926 | |
1927 | ring = &adev->gfx.kiq[xcc_id].ring; |
1928 | |
	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	if (unlikely(r != 0))
		return r;

	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
	if (unlikely(r != 0)) {
		amdgpu_bo_unreserve(ring->mqd_obj);
		return r;
	}

	gfx_v9_4_3_xcc_kiq_init_queue(ring, xcc_id);
	amdgpu_bo_kunmap(ring->mqd_obj);
	ring->mqd_ptr = NULL;
	amdgpu_bo_unreserve(ring->mqd_obj);
1943 | return 0; |
1944 | } |
1945 | |
1946 | static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id) |
1947 | { |
1948 | struct amdgpu_ring *ring = NULL; |
1949 | int r = 0, i; |
1950 | |
	gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
1952 | |
1953 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
1954 | ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings]; |
1955 | |
		r = amdgpu_bo_reserve(ring->mqd_obj, false);
		if (unlikely(r != 0))
			goto done;
		r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
		if (!r) {
			r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id);
			amdgpu_bo_kunmap(ring->mqd_obj);
			ring->mqd_ptr = NULL;
		}
		amdgpu_bo_unreserve(ring->mqd_obj);
1966 | if (r) |
1967 | goto done; |
1968 | } |
1969 | |
1970 | r = amdgpu_gfx_enable_kcq(adev, xcc_id); |
1971 | done: |
1972 | return r; |
1973 | } |
1974 | |
1975 | static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id) |
1976 | { |
1977 | struct amdgpu_ring *ring; |
1978 | int r, j; |
1979 | |
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
1981 | |
1982 | if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
1983 | gfx_v9_4_3_xcc_disable_gpa_mode(adev, xcc_id); |
1984 | |
1985 | r = gfx_v9_4_3_xcc_cp_compute_load_microcode(adev, xcc_id); |
1986 | if (r) |
1987 | return r; |
1988 | } |
1989 | |
1990 | r = gfx_v9_4_3_xcc_kiq_resume(adev, xcc_id); |
1991 | if (r) |
1992 | return r; |
1993 | |
1994 | r = gfx_v9_4_3_xcc_kcq_resume(adev, xcc_id); |
1995 | if (r) |
1996 | return r; |
1997 | |
1998 | for (j = 0; j < adev->gfx.num_compute_rings; j++) { |
1999 | ring = &adev->gfx.compute_ring |
2000 | [j + xcc_id * adev->gfx.num_compute_rings]; |
2001 | r = amdgpu_ring_test_helper(ring); |
2002 | if (r) |
2003 | return r; |
2004 | } |
2005 | |
	gfx_v9_4_3_xcc_enable_gui_idle_interrupt(adev, true, xcc_id);
2007 | |
2008 | return 0; |
2009 | } |
2010 | |
2011 | static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev) |
2012 | { |
2013 | int r = 0, i, num_xcc; |
2014 | |
	if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
					AMDGPU_XCP_FL_NONE) ==
	    AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
		r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
					amdgpu_user_partt_mode);
2020 | |
2021 | if (r) |
2022 | return r; |
2023 | |
2024 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2025 | for (i = 0; i < num_xcc; i++) { |
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
2027 | if (r) |
2028 | return r; |
2029 | } |
2030 | |
2031 | return 0; |
2032 | } |
2033 | |
2034 | static void gfx_v9_4_3_xcc_cp_enable(struct amdgpu_device *adev, bool enable, |
2035 | int xcc_id) |
2036 | { |
2037 | gfx_v9_4_3_xcc_cp_compute_enable(adev, enable, xcc_id); |
2038 | } |
2039 | |
2040 | static void gfx_v9_4_3_xcc_fini(struct amdgpu_device *adev, int xcc_id) |
2041 | { |
	if (amdgpu_gfx_disable_kcq(adev, xcc_id))
		DRM_ERROR("XCD %d KCQ disable failed\n", xcc_id);
2044 | |
2045 | if (amdgpu_sriov_vf(adev)) { |
		/* Must disable wptr polling for SRIOV when the HW is
		 * finished; otherwise the CPC engine may keep fetching a WB
		 * address that is already invalid once the SW side has
		 * finished, triggering a DMAR read error on the hypervisor
		 * side.
		 */
2051 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, xcc_id), CP_PQ_WPTR_POLL_CNTL, EN, 0); |
2052 | return; |
2053 | } |
2054 | |
	/* Use the deinitialize sequence from CAIL when unbinding the device
	 * from the driver, otherwise the KIQ hangs when binding back.
	 */
2058 | if (!amdgpu_in_reset(adev) && !adev->in_suspend) { |
2059 | mutex_lock(&adev->srbm_mutex); |
		soc15_grbm_select(adev, adev->gfx.kiq[xcc_id].ring.me,
				adev->gfx.kiq[xcc_id].ring.pipe,
				adev->gfx.kiq[xcc_id].ring.queue, 0,
				GET_INST(GC, xcc_id));
		gfx_v9_4_3_xcc_q_fini_register(&adev->gfx.kiq[xcc_id].ring,
						xcc_id);
		soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
		mutex_unlock(&adev->srbm_mutex);
2068 | } |
2069 | |
2070 | gfx_v9_4_3_xcc_kcq_fini_register(adev, xcc_id); |
	gfx_v9_4_3_xcc_cp_enable(adev, false, xcc_id);
2072 | } |
2073 | |
2074 | static int gfx_v9_4_3_hw_init(void *handle) |
2075 | { |
2076 | int r; |
2077 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2078 | |
2079 | if (!amdgpu_sriov_vf(adev)) |
2080 | gfx_v9_4_3_init_golden_registers(adev); |
2081 | |
2082 | gfx_v9_4_3_constants_init(adev); |
2083 | |
2084 | r = adev->gfx.rlc.funcs->resume(adev); |
2085 | if (r) |
2086 | return r; |
2087 | |
	return gfx_v9_4_3_cp_resume(adev);
2093 | } |
2094 | |
2095 | static int gfx_v9_4_3_hw_fini(void *handle) |
2096 | { |
2097 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2098 | int i, num_xcc; |
2099 | |
	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
2102 | |
2103 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
	for (i = 0; i < num_xcc; i++)
		gfx_v9_4_3_xcc_fini(adev, i);
2107 | |
2108 | return 0; |
2109 | } |
2110 | |
2111 | static int gfx_v9_4_3_suspend(void *handle) |
2112 | { |
2113 | return gfx_v9_4_3_hw_fini(handle); |
2114 | } |
2115 | |
2116 | static int gfx_v9_4_3_resume(void *handle) |
2117 | { |
2118 | return gfx_v9_4_3_hw_init(handle); |
2119 | } |
2120 | |
2121 | static bool gfx_v9_4_3_is_idle(void *handle) |
2122 | { |
2123 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2124 | int i, num_xcc; |
2125 | |
2126 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2127 | for (i = 0; i < num_xcc; i++) { |
2128 | if (REG_GET_FIELD(RREG32_SOC15(GC, GET_INST(GC, i), regGRBM_STATUS), |
2129 | GRBM_STATUS, GUI_ACTIVE)) |
2130 | return false; |
2131 | } |
2132 | return true; |
2133 | } |
2134 | |
2135 | static int gfx_v9_4_3_wait_for_idle(void *handle) |
2136 | { |
2137 | unsigned i; |
2138 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2139 | |
2140 | for (i = 0; i < adev->usec_timeout; i++) { |
2141 | if (gfx_v9_4_3_is_idle(handle)) |
2142 | return 0; |
2143 | udelay(1); |
2144 | } |
2145 | return -ETIMEDOUT; |
2146 | } |
2147 | |
2148 | static int gfx_v9_4_3_soft_reset(void *handle) |
2149 | { |
2150 | u32 grbm_soft_reset = 0; |
2151 | u32 tmp; |
2152 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2153 | |
2154 | /* GRBM_STATUS */ |
2155 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS); |
2156 | if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK | |
2157 | GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK | |
2158 | GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK | |
2159 | GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK | |
2160 | GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK | |
2161 | GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK)) { |
2162 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, |
2163 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); |
2164 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, |
2165 | GRBM_SOFT_RESET, SOFT_RESET_GFX, 1); |
2166 | } |
2167 | |
2168 | if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) { |
2169 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, |
2170 | GRBM_SOFT_RESET, SOFT_RESET_CP, 1); |
2171 | } |
2172 | |
2173 | /* GRBM_STATUS2 */ |
2174 | tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_STATUS2); |
2175 | if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY)) |
2176 | grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset, |
				GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);

2180 | if (grbm_soft_reset) { |
2181 | /* stop the rlc */ |
2182 | adev->gfx.rlc.funcs->stop(adev); |
2183 | |
2184 | /* Disable MEC parsing/prefetching */ |
		gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
2186 | |
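		/* pulse the soft-reset bits: set them, read back to post the
		 * write, hold for ~50 us, then clear and read back again
		 */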
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
		tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
2200 | |
2201 | /* Wait a little for things to settle down */ |
2202 | udelay(50); |
2203 | } |
2204 | return 0; |
2205 | } |
2206 | |
2207 | static void gfx_v9_4_3_ring_emit_gds_switch(struct amdgpu_ring *ring, |
2208 | uint32_t vmid, |
2209 | uint32_t gds_base, uint32_t gds_size, |
2210 | uint32_t gws_base, uint32_t gws_size, |
2211 | uint32_t oa_base, uint32_t oa_size) |
2212 | { |
2213 | struct amdgpu_device *adev = ring->adev; |
2214 | |
2215 | /* GDS Base */ |
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_BASE) + 2 * vmid,
				gds_base);
2219 | |
2220 | /* GDS Size */ |
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_VMID0_SIZE) + 2 * vmid,
				gds_size);
2224 | |
2225 | /* GWS */ |
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_GWS_VMID0) + vmid,
				gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);
2229 | |
2230 | /* OA */ |
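	/* (1 << (oa_size + oa_base)) - (1 << oa_base) builds a contiguous
	 * mask of oa_size bits starting at oa_base, e.g. base 4, size 2 ->
	 * 0x30
	 */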
	gfx_v9_4_3_write_data_to_reg(ring, 0, false,
				SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regGDS_OA_VMID0) + vmid,
				(1 << (oa_size + oa_base)) - (1 << oa_base));
2234 | } |
2235 | |
2236 | static int gfx_v9_4_3_early_init(void *handle) |
2237 | { |
2238 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2239 | |
2240 | adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev), |
2241 | AMDGPU_MAX_COMPUTE_RINGS); |
2242 | gfx_v9_4_3_set_kiq_pm4_funcs(adev); |
2243 | gfx_v9_4_3_set_ring_funcs(adev); |
2244 | gfx_v9_4_3_set_irq_funcs(adev); |
2245 | gfx_v9_4_3_set_gds_init(adev); |
2246 | gfx_v9_4_3_set_rlc_funcs(adev); |
2247 | |
2248 | /* init rlcg reg access ctrl */ |
2249 | gfx_v9_4_3_init_rlcg_reg_access_ctrl(adev); |
2250 | |
2251 | return gfx_v9_4_3_init_microcode(adev); |
2252 | } |
2253 | |
2254 | static int gfx_v9_4_3_late_init(void *handle) |
2255 | { |
2256 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2257 | int r; |
2258 | |
	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
2260 | if (r) |
2261 | return r; |
2262 | |
	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
2264 | if (r) |
2265 | return r; |
2266 | |
2267 | if (adev->gfx.ras && |
2268 | adev->gfx.ras->enable_watchdog_timer) |
2269 | adev->gfx.ras->enable_watchdog_timer(adev); |
2270 | |
2271 | return 0; |
2272 | } |
2273 | |
2274 | static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev, |
2275 | bool enable, int xcc_id) |
2276 | { |
2277 | uint32_t def, data; |
2278 | |
2279 | if (!(adev->cg_flags & AMD_CG_SUPPORT_GFX_FGCG)) |
2280 | return; |
2281 | |
2282 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), |
2283 | regRLC_CGTT_MGCG_OVERRIDE); |
2284 | |
2285 | if (enable) |
2286 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; |
2287 | else |
2288 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_FGCG_OVERRIDE_MASK; |
2289 | |
2290 | if (def != data) |
2291 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), |
2292 | regRLC_CGTT_MGCG_OVERRIDE, data); |
2293 | |
2294 | } |
2295 | |
2296 | static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev, |
2297 | bool enable, int xcc_id) |
2298 | { |
2299 | uint32_t def, data; |
2300 | |
2301 | if (!(adev->cg_flags & AMD_CG_SUPPORT_REPEATER_FGCG)) |
2302 | return; |
2303 | |
2304 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), |
2305 | regRLC_CGTT_MGCG_OVERRIDE); |
2306 | |
2307 | if (enable) |
2308 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; |
2309 | else |
2310 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_REP_FGCG_OVERRIDE_MASK; |
2311 | |
2312 | if (def != data) |
2313 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), |
2314 | regRLC_CGTT_MGCG_OVERRIDE, data); |
2315 | } |
2316 | |
2317 | static void |
2318 | gfx_v9_4_3_xcc_update_medium_grain_clock_gating(struct amdgpu_device *adev, |
2319 | bool enable, int xcc_id) |
2320 | { |
2321 | uint32_t data, def; |
2322 | |
2323 | /* It is disabled by HW by default */ |
2324 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { |
2325 | /* 1 - RLC_CGTT_MGCG_OVERRIDE */ |
2326 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); |
2327 | |
2328 | data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | |
2329 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | |
2330 | RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | |
2331 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); |
2332 | |
2333 | if (def != data) |
2334 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); |
2335 | |
2336 | /* MGLS is a global flag to control all MGLS in GFX */ |
2337 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { |
2338 | /* 2 - RLC memory Light sleep */ |
2339 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) { |
2340 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); |
2341 | data |= RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; |
2342 | if (def != data) |
2343 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); |
2344 | } |
2345 | /* 3 - CP memory Light sleep */ |
2346 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { |
2347 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); |
2348 | data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; |
2349 | if (def != data) |
2350 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); |
2351 | } |
2352 | } |
2353 | } else { |
2354 | /* 1 - MGCG_OVERRIDE */ |
2355 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); |
2356 | |
2357 | data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK | |
2358 | RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK | |
2359 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK | |
2360 | RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK); |
2361 | |
2362 | if (def != data) |
2363 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); |
2364 | |
2365 | /* 2 - disable MGLS in RLC */ |
2366 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL); |
2367 | if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) { |
2368 | data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK; |
2369 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_MEM_SLP_CNTL, data); |
2370 | } |
2371 | |
2372 | /* 3 - disable MGLS in CP */ |
2373 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL); |
2374 | if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) { |
2375 | data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; |
2376 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_MEM_SLP_CNTL, data); |
2377 | } |
2378 | } |
2379 | |
2380 | } |
2381 | |
2382 | static void |
2383 | gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(struct amdgpu_device *adev, |
2384 | bool enable, int xcc_id) |
2385 | { |
2386 | uint32_t def, data; |
2387 | |
2388 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { |
2389 | |
2390 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE); |
2391 | /* unset CGCG override */ |
2392 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGCG_OVERRIDE_MASK; |
2393 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) |
2394 | data &= ~RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; |
2395 | else |
2396 | data |= RLC_CGTT_MGCG_OVERRIDE__GFXIP_CGLS_OVERRIDE_MASK; |
2397 | /* update CGCG and CGLS override bits */ |
2398 | if (def != data) |
2399 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGTT_MGCG_OVERRIDE, data); |
2400 | |
2401 | /* enable cgcg FSM(0x0000363F) */ |
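		/* 0x363f decodes (given the field shifts used below) as idle
		 * threshold 0x36 in bits [15:8], CGLS repeater compensation
		 * delay 0xf in bits [5:2], plus the CGLS_EN and CGCG_EN bits
		 */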
2402 | def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); |
2403 | |
2404 | data = (0x36 |
2405 | << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) | |
2406 | RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK; |
2407 | if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) |
2408 | data |= (0x000F << RLC_CGCG_CGLS_CTRL__CGLS_REP_COMPANSAT_DELAY__SHIFT) | |
2409 | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK; |
2410 | if (def != data) |
2411 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); |
2412 | |
2413 | /* set IDLE_POLL_COUNT(0x00900100) */ |
2414 | def = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL); |
2415 | data = (0x0100 << CP_RB_WPTR_POLL_CNTL__POLL_FREQUENCY__SHIFT) | |
2416 | (0x0090 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); |
2417 | if (def != data) |
2418 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_RB_WPTR_POLL_CNTL, data); |
2419 | } else { |
2420 | def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL); |
2421 | /* reset CGCG/CGLS bits */ |
2422 | data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); |
2423 | /* disable cgcg and cgls in FSM */ |
2424 | if (def != data) |
2425 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CGCG_CGLS_CTRL, data); |
2426 | } |
2427 | |
2428 | } |
2429 | |
2430 | static int gfx_v9_4_3_xcc_update_gfx_clock_gating(struct amdgpu_device *adev, |
2431 | bool enable, int xcc_id) |
2432 | { |
2433 | amdgpu_gfx_rlc_enter_safe_mode(adev, xcc_id); |
2434 | |
2435 | if (enable) { |
2436 | /* FGCG */ |
2437 | gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); |
2438 | gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); |
2439 | |
2440 | /* CGCG/CGLS should be enabled after MGCG/MGLS |
2441 | * === MGCG + MGLS === |
2442 | */ |
2443 | gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, |
2444 | xcc_id); |
2445 | /* === CGCG + CGLS === */ |
2446 | gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, |
2447 | xcc_id); |
2448 | } else { |
2449 | /* CGCG/CGLS should be disabled before MGCG/MGLS |
2450 | * === CGCG + CGLS === |
2451 | */ |
2452 | gfx_v9_4_3_xcc_update_coarse_grain_clock_gating(adev, enable, |
2453 | xcc_id); |
2454 | /* === MGCG + MGLS === */ |
2455 | gfx_v9_4_3_xcc_update_medium_grain_clock_gating(adev, enable, |
2456 | xcc_id); |
2457 | |
2458 | /* FGCG */ |
2459 | gfx_v9_4_3_xcc_update_sram_fgcg(adev, enable, xcc_id); |
2460 | gfx_v9_4_3_xcc_update_repeater_fgcg(adev, enable, xcc_id); |
2461 | } |
2462 | |
2463 | amdgpu_gfx_rlc_exit_safe_mode(adev, xcc_id); |
2464 | |
2465 | return 0; |
2466 | } |
2467 | |
2468 | static const struct amdgpu_rlc_funcs gfx_v9_4_3_rlc_funcs = { |
2469 | .is_rlc_enabled = gfx_v9_4_3_is_rlc_enabled, |
2470 | .set_safe_mode = gfx_v9_4_3_xcc_set_safe_mode, |
2471 | .unset_safe_mode = gfx_v9_4_3_xcc_unset_safe_mode, |
2472 | .init = gfx_v9_4_3_rlc_init, |
2473 | .resume = gfx_v9_4_3_rlc_resume, |
2474 | .stop = gfx_v9_4_3_rlc_stop, |
2475 | .reset = gfx_v9_4_3_rlc_reset, |
2476 | .start = gfx_v9_4_3_rlc_start, |
2477 | .update_spm_vmid = gfx_v9_4_3_update_spm_vmid, |
2478 | .is_rlcg_access_range = gfx_v9_4_3_is_rlcg_access_range, |
2479 | }; |
2480 | |
2481 | static int gfx_v9_4_3_set_powergating_state(void *handle, |
2482 | enum amd_powergating_state state) |
2483 | { |
2484 | return 0; |
2485 | } |
2486 | |
2487 | static int gfx_v9_4_3_set_clockgating_state(void *handle, |
2488 | enum amd_clockgating_state state) |
2489 | { |
2490 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2491 | int i, num_xcc; |
2492 | |
2493 | if (amdgpu_sriov_vf(adev)) |
2494 | return 0; |
2495 | |
2496 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2498 | case IP_VERSION(9, 4, 3): |
		for (i = 0; i < num_xcc; i++)
			gfx_v9_4_3_xcc_update_gfx_clock_gating(
				adev, state == AMD_CG_STATE_GATE, i);
2502 | break; |
2503 | default: |
2504 | break; |
2505 | } |
2506 | return 0; |
2507 | } |
2508 | |
2509 | static void gfx_v9_4_3_get_clockgating_state(void *handle, u64 *flags) |
2510 | { |
2511 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
2512 | int data; |
2513 | |
2514 | if (amdgpu_sriov_vf(adev)) |
2515 | *flags = 0; |
2516 | |
2517 | /* AMD_CG_SUPPORT_GFX_MGCG */ |
2518 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGTT_MGCG_OVERRIDE)); |
2519 | if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) |
2520 | *flags |= AMD_CG_SUPPORT_GFX_MGCG; |
2521 | |
2522 | /* AMD_CG_SUPPORT_GFX_CGCG */ |
2523 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_CGCG_CGLS_CTRL)); |
2524 | if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) |
2525 | *flags |= AMD_CG_SUPPORT_GFX_CGCG; |
2526 | |
2527 | /* AMD_CG_SUPPORT_GFX_CGLS */ |
2528 | if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) |
2529 | *flags |= AMD_CG_SUPPORT_GFX_CGLS; |
2530 | |
2531 | /* AMD_CG_SUPPORT_GFX_RLC_LS */ |
2532 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regRLC_MEM_SLP_CNTL)); |
2533 | if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) |
2534 | *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; |
2535 | |
2536 | /* AMD_CG_SUPPORT_GFX_CP_LS */ |
2537 | data = RREG32_KIQ(SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCP_MEM_SLP_CNTL)); |
2538 | if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) |
2539 | *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; |
2540 | } |
2541 | |
2542 | static void gfx_v9_4_3_ring_emit_hdp_flush(struct amdgpu_ring *ring) |
2543 | { |
2544 | struct amdgpu_device *adev = ring->adev; |
2545 | u32 ref_and_mask, reg_mem_engine; |
2546 | const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; |
2547 | |
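	/* each CP client owns a bit group in the NBIO HDP flush req/done
	 * registers: gfx uses cp0, while MEC1/MEC2 compute pipes use the
	 * cp2/cp6 groups shifted by pipe index
	 */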
2548 | if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { |
2549 | switch (ring->me) { |
2550 | case 1: |
2551 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp2 << ring->pipe; |
2552 | break; |
2553 | case 2: |
2554 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp6 << ring->pipe; |
2555 | break; |
2556 | default: |
2557 | return; |
2558 | } |
2559 | reg_mem_engine = 0; |
2560 | } else { |
2561 | ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; |
2562 | reg_mem_engine = 1; /* pfp */ |
2563 | } |
2564 | |
	gfx_v9_4_3_wait_reg_mem(ring, reg_mem_engine, 0, 1,
				adev->nbio.funcs->get_hdp_flush_req_offset(adev),
				adev->nbio.funcs->get_hdp_flush_done_offset(adev),
				ref_and_mask, ref_and_mask, 0x20);
2569 | } |
2570 | |
2571 | static void gfx_v9_4_3_ring_emit_ib_compute(struct amdgpu_ring *ring, |
2572 | struct amdgpu_job *job, |
2573 | struct amdgpu_ib *ib, |
2574 | uint32_t flags) |
2575 | { |
2576 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
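	/* control packs the IB length in dwords into the low bits, sets
	 * INDIRECT_BUFFER_VALID, and places the VMID at bits 24+ (vmid << 24)
	 */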
2577 | u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24); |
2578 | |
2579 | /* Currently, there is a high possibility to get wave ID mismatch |
2580 | * between ME and GDS, leading to a hw deadlock, because ME generates |
2581 | * different wave IDs than the GDS expects. This situation happens |
2582 | * randomly when at least 5 compute pipes use GDS ordered append. |
2583 | * The wave IDs generated by ME are also wrong after suspend/resume. |
2584 | * Those are probably bugs somewhere else in the kernel driver. |
2585 | * |
2586 | * Writing GDS_COMPUTE_MAX_WAVE_ID resets wave ID counters in ME and |
2587 | * GDS to 0 for this ring (me/pipe). |
2588 | */ |
2589 | if (ib->flags & AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID) { |
2590 | amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
2591 | amdgpu_ring_write(ring, regGDS_COMPUTE_MAX_WAVE_ID); |
		amdgpu_ring_write(ring, ring->adev->gds.gds_compute_max_wave_id);
2593 | } |
2594 | |
2595 | amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); |
2596 | BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ |
2597 | amdgpu_ring_write(ring, |
2598 | #ifdef __BIG_ENDIAN |
2599 | (2 << 0) | |
2600 | #endif |
2601 | lower_32_bits(ib->gpu_addr)); |
2602 | amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); |
	amdgpu_ring_write(ring, control);
2604 | } |
2605 | |
2606 | static void gfx_v9_4_3_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, |
2607 | u64 seq, unsigned flags) |
2608 | { |
2609 | bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; |
2610 | bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; |
2611 | bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; |
2612 | |
2613 | /* RELEASE_MEM - flush caches, send int */ |
2614 | amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); |
	amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN |
2616 | EOP_TC_NC_ACTION_EN) : |
2617 | (EOP_TCL1_ACTION_EN | |
2618 | EOP_TC_ACTION_EN | |
2619 | EOP_TC_WB_ACTION_EN | |
2620 | EOP_TC_MD_ACTION_EN)) | |
2621 | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | |
2622 | EVENT_INDEX(5))); |
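	/* DATA_SEL: 1 = write only the low 32 bits of seq, 2 = write all 64
	 * bits; INT_SEL: 2 = raise an interrupt once the write is confirmed,
	 * 0 = no interrupt
	 */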
2623 | amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); |
2624 | |
2625 | /* |
2626 | * the address should be Qword aligned if 64bit write, Dword |
2627 | * aligned if only send 32bit data low (discard data high) |
2628 | */ |
2629 | if (write64bit) |
2630 | BUG_ON(addr & 0x7); |
2631 | else |
2632 | BUG_ON(addr & 0x3); |
2633 | amdgpu_ring_write(ring, lower_32_bits(addr)); |
2634 | amdgpu_ring_write(ring, upper_32_bits(addr)); |
2635 | amdgpu_ring_write(ring, lower_32_bits(seq)); |
2636 | amdgpu_ring_write(ring, upper_32_bits(seq)); |
	amdgpu_ring_write(ring, 0);
2638 | } |
2639 | |
2640 | static void gfx_v9_4_3_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
2641 | { |
2642 | int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); |
2643 | uint32_t seq = ring->fence_drv.sync_seq; |
2644 | uint64_t addr = ring->fence_drv.gpu_addr; |
2645 | |
	gfx_v9_4_3_wait_reg_mem(ring, usepfp, 1, 0,
				lower_32_bits(addr), upper_32_bits(addr),
				seq, 0xffffffff, 4);
2649 | } |
2650 | |
2651 | static void gfx_v9_4_3_ring_emit_vm_flush(struct amdgpu_ring *ring, |
2652 | unsigned vmid, uint64_t pd_addr) |
2653 | { |
2654 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
2655 | } |
2656 | |
2657 | static u64 gfx_v9_4_3_ring_get_rptr_compute(struct amdgpu_ring *ring) |
2658 | { |
2659 | return ring->adev->wb.wb[ring->rptr_offs]; /* gfx9 hardware is 32bit rptr */ |
2660 | } |
2661 | |
2662 | static u64 gfx_v9_4_3_ring_get_wptr_compute(struct amdgpu_ring *ring) |
2663 | { |
2664 | u64 wptr; |
2665 | |
2666 | /* XXX check if swapping is necessary on BE */ |
2667 | if (ring->use_doorbell) |
		wptr = atomic64_read((atomic64_t *)&ring->adev->wb.wb[ring->wptr_offs]);
2669 | else |
2670 | BUG(); |
2671 | return wptr; |
2672 | } |
2673 | |
2674 | static void gfx_v9_4_3_ring_set_wptr_compute(struct amdgpu_ring *ring) |
2675 | { |
2676 | struct amdgpu_device *adev = ring->adev; |
2677 | |
2678 | /* XXX check if swapping is necessary on BE */ |
2679 | if (ring->use_doorbell) { |
		atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], ring->wptr);
2681 | WDOORBELL64(ring->doorbell_index, ring->wptr); |
2682 | } else { |
2683 | BUG(); /* only DOORBELL method supported on gfx9 now */ |
2684 | } |
2685 | } |
2686 | |
2687 | static void gfx_v9_4_3_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, |
2688 | u64 seq, unsigned int flags) |
2689 | { |
2690 | struct amdgpu_device *adev = ring->adev; |
2691 | |
	/* we only allocate 32 bits for each seq wb address */
2693 | BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); |
2694 | |
2695 | /* write fence seq to the "addr" */ |
2696 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2698 | WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); |
2699 | amdgpu_ring_write(ring, lower_32_bits(addr)); |
2700 | amdgpu_ring_write(ring, upper_32_bits(addr)); |
2701 | amdgpu_ring_write(ring, lower_32_bits(seq)); |
2702 | |
2703 | if (flags & AMDGPU_FENCE_FLAG_INT) { |
2704 | /* set register to trigger INT */ |
2705 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
		amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2707 | WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); |
2708 | amdgpu_ring_write(ring, SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regCPC_INT_STATUS)); |
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */
2711 | } |
2712 | } |
2713 | |
2714 | static void gfx_v9_4_3_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg, |
2715 | uint32_t reg_val_offs) |
2716 | { |
2717 | struct amdgpu_device *adev = ring->adev; |
2718 | |
2719 | amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); |
	amdgpu_ring_write(ring, 0 |	/* src: register */
2721 | (5 << 8) | /* dst: memory */ |
2722 | (1 << 20)); /* write confirm */ |
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
2725 | amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + |
2726 | reg_val_offs * 4)); |
2727 | amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + |
2728 | reg_val_offs * 4)); |
2729 | } |
2730 | |
2731 | static void gfx_v9_4_3_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, |
2732 | uint32_t val) |
2733 | { |
2734 | uint32_t cmd = 0; |
2735 | |
2736 | switch (ring->funcs->type) { |
2737 | case AMDGPU_RING_TYPE_GFX: |
2738 | cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM; |
2739 | break; |
2740 | case AMDGPU_RING_TYPE_KIQ: |
2741 | cmd = (1 << 16); /* no inc addr */ |
2742 | break; |
2743 | default: |
2744 | cmd = WR_CONFIRM; |
2745 | break; |
2746 | } |
2747 | amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); |
	amdgpu_ring_write(ring, cmd);
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
2752 | } |
2753 | |
2754 | static void gfx_v9_4_3_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, |
2755 | uint32_t val, uint32_t mask) |
2756 | { |
	gfx_v9_4_3_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
2758 | } |
2759 | |
2760 | static void gfx_v9_4_3_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, |
2761 | uint32_t reg0, uint32_t reg1, |
2762 | uint32_t ref, uint32_t mask) |
2763 | { |
	amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1,
						   ref, mask);
2766 | } |
2767 | |
2768 | static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state( |
2769 | struct amdgpu_device *adev, int me, int pipe, |
2770 | enum amdgpu_interrupt_state state, int xcc_id) |
2771 | { |
2772 | u32 mec_int_cntl, mec_int_cntl_reg; |
2773 | |
2774 | /* |
2775 | * amdgpu controls only the first MEC. That's why this function only |
2776 | * handles the setting of interrupts for this specific MEC. All other |
2777 | * pipes' interrupts are set by amdkfd. |
2778 | */ |
2779 | |
2780 | if (me == 1) { |
2781 | switch (pipe) { |
2782 | case 0: |
2783 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE0_INT_CNTL); |
2784 | break; |
2785 | case 1: |
2786 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE1_INT_CNTL); |
2787 | break; |
2788 | case 2: |
2789 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE2_INT_CNTL); |
2790 | break; |
2791 | case 3: |
2792 | mec_int_cntl_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regCP_ME1_PIPE3_INT_CNTL); |
2793 | break; |
2794 | default: |
2795 | DRM_DEBUG("invalid pipe %d\n" , pipe); |
2796 | return; |
2797 | } |
2798 | } else { |
2799 | DRM_DEBUG("invalid me %d\n" , me); |
2800 | return; |
2801 | } |
2802 | |
2803 | switch (state) { |
2804 | case AMDGPU_IRQ_STATE_DISABLE: |
2805 | mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); |
2806 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, |
2807 | TIME_STAMP_INT_ENABLE, 0); |
2808 | WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); |
2809 | break; |
2810 | case AMDGPU_IRQ_STATE_ENABLE: |
2811 | mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id); |
2812 | mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL, |
2813 | TIME_STAMP_INT_ENABLE, 1); |
2814 | WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id); |
2815 | break; |
2816 | default: |
2817 | break; |
2818 | } |
2819 | } |
2820 | |
2821 | static int gfx_v9_4_3_set_priv_reg_fault_state(struct amdgpu_device *adev, |
2822 | struct amdgpu_irq_src *source, |
2823 | unsigned type, |
2824 | enum amdgpu_interrupt_state state) |
2825 | { |
2826 | int i, num_xcc; |
2827 | |
2828 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2829 | switch (state) { |
2830 | case AMDGPU_IRQ_STATE_DISABLE: |
2831 | case AMDGPU_IRQ_STATE_ENABLE: |
2832 | for (i = 0; i < num_xcc; i++) |
2833 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, |
2834 | PRIV_REG_INT_ENABLE, |
2835 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); |
2836 | break; |
2837 | default: |
2838 | break; |
2839 | } |
2840 | |
2841 | return 0; |
2842 | } |
2843 | |
2844 | static int gfx_v9_4_3_set_priv_inst_fault_state(struct amdgpu_device *adev, |
2845 | struct amdgpu_irq_src *source, |
2846 | unsigned type, |
2847 | enum amdgpu_interrupt_state state) |
2848 | { |
2849 | int i, num_xcc; |
2850 | |
2851 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2852 | switch (state) { |
2853 | case AMDGPU_IRQ_STATE_DISABLE: |
2854 | case AMDGPU_IRQ_STATE_ENABLE: |
2855 | for (i = 0; i < num_xcc; i++) |
2856 | WREG32_FIELD15_PREREG(GC, GET_INST(GC, i), CP_INT_CNTL_RING0, |
2857 | PRIV_INSTR_INT_ENABLE, |
2858 | state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); |
2859 | break; |
2860 | default: |
2861 | break; |
2862 | } |
2863 | |
2864 | return 0; |
2865 | } |
2866 | |
2867 | static int gfx_v9_4_3_set_eop_interrupt_state(struct amdgpu_device *adev, |
2868 | struct amdgpu_irq_src *src, |
2869 | unsigned type, |
2870 | enum amdgpu_interrupt_state state) |
2871 | { |
2872 | int i, num_xcc; |
2873 | |
2874 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
2875 | for (i = 0; i < num_xcc; i++) { |
2876 | switch (type) { |
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 0, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 1, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 2, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 1, 3, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 0, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 1, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 2, state, i);
			break;
		case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
			gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
				adev, 2, 3, state, i);
			break;
2909 | default: |
2910 | break; |
2911 | } |
2912 | } |
2913 | |
2914 | return 0; |
2915 | } |
2916 | |
2917 | static int gfx_v9_4_3_eop_irq(struct amdgpu_device *adev, |
2918 | struct amdgpu_irq_src *source, |
2919 | struct amdgpu_iv_entry *entry) |
2920 | { |
2921 | int i, xcc_id; |
2922 | u8 me_id, pipe_id, queue_id; |
2923 | struct amdgpu_ring *ring; |
2924 | |
2925 | DRM_DEBUG("IH: CP EOP\n" ); |
2926 | me_id = (entry->ring_id & 0x0c) >> 2; |
2927 | pipe_id = (entry->ring_id & 0x03) >> 0; |
2928 | queue_id = (entry->ring_id & 0x70) >> 4; |
2929 | |
	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2931 | |
2932 | if (xcc_id == -EINVAL) |
2933 | return -EINVAL; |
2934 | |
2935 | switch (me_id) { |
2936 | case 0: |
2937 | case 1: |
2938 | case 2: |
2939 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
			ring = &adev->gfx.compute_ring
				[i + xcc_id * adev->gfx.num_compute_rings];
2943 | /* Per-queue interrupt is supported for MEC starting from VI. |
2944 | * The interrupt can only be enabled/disabled per pipe instead of per queue. |
2945 | */ |
2946 | |
2947 | if ((ring->me == me_id) && (ring->pipe == pipe_id) && (ring->queue == queue_id)) |
2948 | amdgpu_fence_process(ring); |
2949 | } |
2950 | break; |
2951 | } |
2952 | return 0; |
2953 | } |
2954 | |
2955 | static void gfx_v9_4_3_fault(struct amdgpu_device *adev, |
2956 | struct amdgpu_iv_entry *entry) |
2957 | { |
2958 | u8 me_id, pipe_id, queue_id; |
2959 | struct amdgpu_ring *ring; |
2960 | int i, xcc_id; |
2961 | |
2962 | me_id = (entry->ring_id & 0x0c) >> 2; |
2963 | pipe_id = (entry->ring_id & 0x03) >> 0; |
2964 | queue_id = (entry->ring_id & 0x70) >> 4; |
2965 | |
	xcc_id = gfx_v9_4_3_ih_to_xcc_inst(adev, entry->node_id);
2967 | |
2968 | if (xcc_id == -EINVAL) |
2969 | return; |
2970 | |
2971 | switch (me_id) { |
2972 | case 0: |
2973 | case 1: |
2974 | case 2: |
2975 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
			ring = &adev->gfx.compute_ring
				[i + xcc_id * adev->gfx.num_compute_rings];
2979 | if (ring->me == me_id && ring->pipe == pipe_id && |
2980 | ring->queue == queue_id) |
2981 | drm_sched_fault(sched: &ring->sched); |
2982 | } |
2983 | break; |
2984 | } |
2985 | } |
2986 | |
2987 | static int gfx_v9_4_3_priv_reg_irq(struct amdgpu_device *adev, |
2988 | struct amdgpu_irq_src *source, |
2989 | struct amdgpu_iv_entry *entry) |
2990 | { |
	DRM_ERROR("Illegal register access in command stream\n");
2992 | gfx_v9_4_3_fault(adev, entry); |
2993 | return 0; |
2994 | } |
2995 | |
2996 | static int gfx_v9_4_3_priv_inst_irq(struct amdgpu_device *adev, |
2997 | struct amdgpu_irq_src *source, |
2998 | struct amdgpu_iv_entry *entry) |
2999 | { |
	DRM_ERROR("Illegal instruction in command stream\n");
3001 | gfx_v9_4_3_fault(adev, entry); |
3002 | return 0; |
3003 | } |
3004 | |
3005 | static void gfx_v9_4_3_emit_mem_sync(struct amdgpu_ring *ring) |
3006 | { |
3007 | const unsigned int cp_coher_cntl = |
3008 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) | |
3009 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) | |
3010 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) | |
3011 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) | |
3012 | PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1); |
3013 | |
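	/* A size of 0xffffff_ffffffff with base 0 makes the flush and
	 * invalidate below cover the entire address range.
	 */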
	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
	amdgpu_ring_write(ring, 0xffffffff);  /* CP_COHER_SIZE */
	amdgpu_ring_write(ring, 0xffffff);  /* CP_COHER_SIZE_HI */
	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
	amdgpu_ring_write(ring, 0);  /* CP_COHER_BASE_HI */
	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
3022 | } |
3023 | |
3024 | static void gfx_v9_4_3_emit_wave_limit_cs(struct amdgpu_ring *ring, |
3025 | uint32_t pipe, bool enable) |
3026 | { |
3027 | struct amdgpu_device *adev = ring->adev; |
3028 | uint32_t val; |
3029 | uint32_t wcl_cs_reg; |
3030 | |
3031 | /* regSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are same */ |
3032 | val = enable ? 0x1 : 0x7f; |
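	/* 0x7f (all seven multiplier bits set) leaves the pipe unthrottled,
	 * while 0x1 throttles it to the minimum wave allocation.
	 */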
3033 | |
3034 | switch (pipe) { |
3035 | case 0: |
3036 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS0); |
3037 | break; |
3038 | case 1: |
3039 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS1); |
3040 | break; |
3041 | case 2: |
3042 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS2); |
3043 | break; |
3044 | case 3: |
3045 | wcl_cs_reg = SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_CS3); |
3046 | break; |
3047 | default: |
		DRM_DEBUG("invalid pipe %d\n", pipe);
3049 | return; |
3050 | } |
3051 | |
	amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val);
}

static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
3056 | { |
3057 | struct amdgpu_device *adev = ring->adev; |
3058 | uint32_t val; |
3059 | int i; |
3060 | |
	/* regSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to
	 * limit the number of gfx waves. Setting only the low 5 bits ensures
	 * gfx gets around 25% of GPU resources.
	 */
3065 | val = enable ? 0x1f : 0x07ffffff; |
3066 | amdgpu_ring_emit_wreg(ring, |
3067 | SOC15_REG_OFFSET(GC, GET_INST(GC, 0), regSPI_WCL_PIPE_PERCENT_GFX), |
3068 | val); |
3069 | |
3070 | /* Restrict waves for normal/low priority compute queues as well |
3071 | * to get best QoS for high priority compute jobs. |
3072 | * |
3073 | * amdgpu controls only 1st ME(0-3 CS pipes). |
3074 | */ |
	for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
		if (i != ring->pipe)
			gfx_v9_4_3_emit_wave_limit_cs(ring, i, enable);
	}
3080 | } |
3081 | |
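/* RAS memory-ID enums: the values below mirror the mem_id field reported
 * in the per-block error status registers; the name tables further down
 * map them to printable strings for error decoding.
 */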
3082 | enum amdgpu_gfx_cp_ras_mem_id { |
3083 | AMDGPU_GFX_CP_MEM1 = 1, |
3084 | AMDGPU_GFX_CP_MEM2, |
3085 | AMDGPU_GFX_CP_MEM3, |
3086 | AMDGPU_GFX_CP_MEM4, |
3087 | AMDGPU_GFX_CP_MEM5, |
3088 | }; |
3089 | |
3090 | enum amdgpu_gfx_gcea_ras_mem_id { |
3091 | AMDGPU_GFX_GCEA_IOWR_CMDMEM = 4, |
3092 | AMDGPU_GFX_GCEA_IORD_CMDMEM, |
3093 | AMDGPU_GFX_GCEA_GMIWR_CMDMEM, |
3094 | AMDGPU_GFX_GCEA_GMIRD_CMDMEM, |
3095 | AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, |
3096 | AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, |
3097 | AMDGPU_GFX_GCEA_MAM_DMEM0, |
3098 | AMDGPU_GFX_GCEA_MAM_DMEM1, |
3099 | AMDGPU_GFX_GCEA_MAM_DMEM2, |
3100 | AMDGPU_GFX_GCEA_MAM_DMEM3, |
3101 | AMDGPU_GFX_GCEA_MAM_AMEM0, |
3102 | AMDGPU_GFX_GCEA_MAM_AMEM1, |
3103 | AMDGPU_GFX_GCEA_MAM_AMEM2, |
3104 | AMDGPU_GFX_GCEA_MAM_AMEM3, |
3105 | AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, |
3106 | AMDGPU_GFX_GCEA_WRET_TAGMEM, |
3107 | AMDGPU_GFX_GCEA_RRET_TAGMEM, |
3108 | AMDGPU_GFX_GCEA_IOWR_DATAMEM, |
3109 | AMDGPU_GFX_GCEA_GMIWR_DATAMEM, |
3110 | AMDGPU_GFX_GCEA_DRAM_DATAMEM, |
3111 | }; |
3112 | |
3113 | enum amdgpu_gfx_gc_cane_ras_mem_id { |
3114 | AMDGPU_GFX_GC_CANE_MEM0 = 0, |
3115 | }; |
3116 | |
3117 | enum amdgpu_gfx_gcutcl2_ras_mem_id { |
3118 | AMDGPU_GFX_GCUTCL2_MEM2P512X95 = 160, |
3119 | }; |
3120 | |
3121 | enum amdgpu_gfx_gds_ras_mem_id { |
3122 | AMDGPU_GFX_GDS_MEM0 = 0, |
3123 | }; |
3124 | |
3125 | enum amdgpu_gfx_lds_ras_mem_id { |
3126 | AMDGPU_GFX_LDS_BANK0 = 0, |
3127 | AMDGPU_GFX_LDS_BANK1, |
3128 | AMDGPU_GFX_LDS_BANK2, |
3129 | AMDGPU_GFX_LDS_BANK3, |
3130 | AMDGPU_GFX_LDS_BANK4, |
3131 | AMDGPU_GFX_LDS_BANK5, |
3132 | AMDGPU_GFX_LDS_BANK6, |
3133 | AMDGPU_GFX_LDS_BANK7, |
3134 | AMDGPU_GFX_LDS_BANK8, |
3135 | AMDGPU_GFX_LDS_BANK9, |
3136 | AMDGPU_GFX_LDS_BANK10, |
3137 | AMDGPU_GFX_LDS_BANK11, |
3138 | AMDGPU_GFX_LDS_BANK12, |
3139 | AMDGPU_GFX_LDS_BANK13, |
3140 | AMDGPU_GFX_LDS_BANK14, |
3141 | AMDGPU_GFX_LDS_BANK15, |
3142 | AMDGPU_GFX_LDS_BANK16, |
3143 | AMDGPU_GFX_LDS_BANK17, |
3144 | AMDGPU_GFX_LDS_BANK18, |
3145 | AMDGPU_GFX_LDS_BANK19, |
3146 | AMDGPU_GFX_LDS_BANK20, |
3147 | AMDGPU_GFX_LDS_BANK21, |
3148 | AMDGPU_GFX_LDS_BANK22, |
3149 | AMDGPU_GFX_LDS_BANK23, |
3150 | AMDGPU_GFX_LDS_BANK24, |
3151 | AMDGPU_GFX_LDS_BANK25, |
3152 | AMDGPU_GFX_LDS_BANK26, |
3153 | AMDGPU_GFX_LDS_BANK27, |
3154 | AMDGPU_GFX_LDS_BANK28, |
3155 | AMDGPU_GFX_LDS_BANK29, |
3156 | AMDGPU_GFX_LDS_BANK30, |
3157 | AMDGPU_GFX_LDS_BANK31, |
3158 | AMDGPU_GFX_LDS_SP_BUFFER_A, |
3159 | AMDGPU_GFX_LDS_SP_BUFFER_B, |
3160 | }; |
3161 | |
3162 | enum amdgpu_gfx_rlc_ras_mem_id { |
3163 | AMDGPU_GFX_RLC_GPMF32 = 1, |
3164 | AMDGPU_GFX_RLC_RLCVF32, |
3165 | AMDGPU_GFX_RLC_SCRATCH, |
3166 | AMDGPU_GFX_RLC_SRM_ARAM, |
3167 | AMDGPU_GFX_RLC_SRM_DRAM, |
3168 | AMDGPU_GFX_RLC_TCTAG, |
3169 | AMDGPU_GFX_RLC_SPM_SE, |
3170 | AMDGPU_GFX_RLC_SPM_GRBMT, |
3171 | }; |
3172 | |
3173 | enum amdgpu_gfx_sp_ras_mem_id { |
3174 | AMDGPU_GFX_SP_SIMDID0 = 0, |
3175 | }; |
3176 | |
3177 | enum amdgpu_gfx_spi_ras_mem_id { |
3178 | AMDGPU_GFX_SPI_MEM0 = 0, |
3179 | AMDGPU_GFX_SPI_MEM1, |
3180 | AMDGPU_GFX_SPI_MEM2, |
3181 | AMDGPU_GFX_SPI_MEM3, |
3182 | }; |
3183 | |
3184 | enum amdgpu_gfx_sqc_ras_mem_id { |
3185 | AMDGPU_GFX_SQC_INST_CACHE_A = 100, |
3186 | AMDGPU_GFX_SQC_INST_CACHE_B = 101, |
3187 | AMDGPU_GFX_SQC_INST_CACHE_TAG_A = 102, |
3188 | AMDGPU_GFX_SQC_INST_CACHE_TAG_B = 103, |
3189 | AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A = 104, |
3190 | AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B = 105, |
3191 | AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A = 106, |
3192 | AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B = 107, |
3193 | AMDGPU_GFX_SQC_DATA_CACHE_A = 200, |
3194 | AMDGPU_GFX_SQC_DATA_CACHE_B = 201, |
3195 | AMDGPU_GFX_SQC_DATA_CACHE_TAG_A = 202, |
3196 | AMDGPU_GFX_SQC_DATA_CACHE_TAG_B = 203, |
3197 | AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A = 204, |
3198 | AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B = 205, |
3199 | AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A = 206, |
3200 | AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B = 207, |
3201 | AMDGPU_GFX_SQC_DIRTY_BIT_A = 208, |
3202 | AMDGPU_GFX_SQC_DIRTY_BIT_B = 209, |
3203 | AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0 = 210, |
3204 | AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1 = 211, |
3205 | AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A = 212, |
3206 | AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B = 213, |
3207 | AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE = 108, |
3208 | }; |
3209 | |
3210 | enum amdgpu_gfx_sq_ras_mem_id { |
3211 | AMDGPU_GFX_SQ_SGPR_MEM0 = 0, |
3212 | AMDGPU_GFX_SQ_SGPR_MEM1, |
3213 | AMDGPU_GFX_SQ_SGPR_MEM2, |
3214 | AMDGPU_GFX_SQ_SGPR_MEM3, |
3215 | }; |
3216 | |
3217 | enum amdgpu_gfx_ta_ras_mem_id { |
3218 | AMDGPU_GFX_TA_FS_AFIFO_RAM_LO = 1, |
3219 | AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, |
3220 | AMDGPU_GFX_TA_FS_CFIFO_RAM, |
3221 | AMDGPU_GFX_TA_FSX_LFIFO, |
3222 | AMDGPU_GFX_TA_FS_DFIFO_RAM, |
3223 | }; |
3224 | |
3225 | enum amdgpu_gfx_tcc_ras_mem_id { |
3226 | AMDGPU_GFX_TCC_MEM1 = 1, |
3227 | }; |
3228 | |
3229 | enum amdgpu_gfx_tca_ras_mem_id { |
3230 | AMDGPU_GFX_TCA_MEM1 = 1, |
3231 | }; |
3232 | |
3233 | enum amdgpu_gfx_tci_ras_mem_id { |
3234 | AMDGPU_GFX_TCIW_MEM = 1, |
3235 | }; |
3236 | |
3237 | enum amdgpu_gfx_tcp_ras_mem_id { |
3238 | AMDGPU_GFX_TCP_LFIFO0 = 1, |
3239 | AMDGPU_GFX_TCP_SET0BANK0_RAM, |
3240 | AMDGPU_GFX_TCP_SET0BANK1_RAM, |
3241 | AMDGPU_GFX_TCP_SET0BANK2_RAM, |
3242 | AMDGPU_GFX_TCP_SET0BANK3_RAM, |
3243 | AMDGPU_GFX_TCP_SET1BANK0_RAM, |
3244 | AMDGPU_GFX_TCP_SET1BANK1_RAM, |
3245 | AMDGPU_GFX_TCP_SET1BANK2_RAM, |
3246 | AMDGPU_GFX_TCP_SET1BANK3_RAM, |
3247 | AMDGPU_GFX_TCP_SET2BANK0_RAM, |
3248 | AMDGPU_GFX_TCP_SET2BANK1_RAM, |
3249 | AMDGPU_GFX_TCP_SET2BANK2_RAM, |
3250 | AMDGPU_GFX_TCP_SET2BANK3_RAM, |
3251 | AMDGPU_GFX_TCP_SET3BANK0_RAM, |
3252 | AMDGPU_GFX_TCP_SET3BANK1_RAM, |
3253 | AMDGPU_GFX_TCP_SET3BANK2_RAM, |
3254 | AMDGPU_GFX_TCP_SET3BANK3_RAM, |
3255 | AMDGPU_GFX_TCP_VM_FIFO, |
3256 | AMDGPU_GFX_TCP_DB_TAGRAM0, |
3257 | AMDGPU_GFX_TCP_DB_TAGRAM1, |
3258 | AMDGPU_GFX_TCP_DB_TAGRAM2, |
3259 | AMDGPU_GFX_TCP_DB_TAGRAM3, |
3260 | AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, |
3261 | AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, |
3262 | AMDGPU_GFX_TCP_CMD_FIFO, |
3263 | }; |
3264 | |
3265 | enum amdgpu_gfx_td_ras_mem_id { |
3266 | AMDGPU_GFX_TD_UTD_CS_FIFO_MEM = 1, |
3267 | AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, |
3268 | AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, |
3269 | }; |
3270 | |
3271 | enum amdgpu_gfx_tcx_ras_mem_id { |
3272 | AMDGPU_GFX_TCX_FIFOD0 = 0, |
3273 | AMDGPU_GFX_TCX_FIFOD1, |
3274 | AMDGPU_GFX_TCX_FIFOD2, |
3275 | AMDGPU_GFX_TCX_FIFOD3, |
3276 | AMDGPU_GFX_TCX_FIFOD4, |
3277 | AMDGPU_GFX_TCX_FIFOD5, |
3278 | AMDGPU_GFX_TCX_FIFOD6, |
3279 | AMDGPU_GFX_TCX_FIFOD7, |
3280 | AMDGPU_GFX_TCX_FIFOB0, |
3281 | AMDGPU_GFX_TCX_FIFOB1, |
3282 | AMDGPU_GFX_TCX_FIFOB2, |
3283 | AMDGPU_GFX_TCX_FIFOB3, |
3284 | AMDGPU_GFX_TCX_FIFOB4, |
3285 | AMDGPU_GFX_TCX_FIFOB5, |
3286 | AMDGPU_GFX_TCX_FIFOB6, |
3287 | AMDGPU_GFX_TCX_FIFOB7, |
3288 | AMDGPU_GFX_TCX_FIFOA0, |
3289 | AMDGPU_GFX_TCX_FIFOA1, |
3290 | AMDGPU_GFX_TCX_FIFOA2, |
3291 | AMDGPU_GFX_TCX_FIFOA3, |
3292 | AMDGPU_GFX_TCX_FIFOA4, |
3293 | AMDGPU_GFX_TCX_FIFOA5, |
3294 | AMDGPU_GFX_TCX_FIFOA6, |
3295 | AMDGPU_GFX_TCX_FIFOA7, |
3296 | AMDGPU_GFX_TCX_CFIFO0, |
3297 | AMDGPU_GFX_TCX_CFIFO1, |
3298 | AMDGPU_GFX_TCX_CFIFO2, |
3299 | AMDGPU_GFX_TCX_CFIFO3, |
3300 | AMDGPU_GFX_TCX_CFIFO4, |
3301 | AMDGPU_GFX_TCX_CFIFO5, |
3302 | AMDGPU_GFX_TCX_CFIFO6, |
3303 | AMDGPU_GFX_TCX_CFIFO7, |
3304 | AMDGPU_GFX_TCX_FIFO_ACKB0, |
3305 | AMDGPU_GFX_TCX_FIFO_ACKB1, |
3306 | AMDGPU_GFX_TCX_FIFO_ACKB2, |
3307 | AMDGPU_GFX_TCX_FIFO_ACKB3, |
3308 | AMDGPU_GFX_TCX_FIFO_ACKB4, |
3309 | AMDGPU_GFX_TCX_FIFO_ACKB5, |
3310 | AMDGPU_GFX_TCX_FIFO_ACKB6, |
3311 | AMDGPU_GFX_TCX_FIFO_ACKB7, |
3312 | AMDGPU_GFX_TCX_FIFO_ACKD0, |
3313 | AMDGPU_GFX_TCX_FIFO_ACKD1, |
3314 | AMDGPU_GFX_TCX_FIFO_ACKD2, |
3315 | AMDGPU_GFX_TCX_FIFO_ACKD3, |
3316 | AMDGPU_GFX_TCX_FIFO_ACKD4, |
3317 | AMDGPU_GFX_TCX_FIFO_ACKD5, |
3318 | AMDGPU_GFX_TCX_FIFO_ACKD6, |
3319 | AMDGPU_GFX_TCX_FIFO_ACKD7, |
3320 | AMDGPU_GFX_TCX_DST_FIFOA0, |
3321 | AMDGPU_GFX_TCX_DST_FIFOA1, |
3322 | AMDGPU_GFX_TCX_DST_FIFOA2, |
3323 | AMDGPU_GFX_TCX_DST_FIFOA3, |
3324 | AMDGPU_GFX_TCX_DST_FIFOA4, |
3325 | AMDGPU_GFX_TCX_DST_FIFOA5, |
3326 | AMDGPU_GFX_TCX_DST_FIFOA6, |
3327 | AMDGPU_GFX_TCX_DST_FIFOA7, |
3328 | AMDGPU_GFX_TCX_DST_FIFOB0, |
3329 | AMDGPU_GFX_TCX_DST_FIFOB1, |
3330 | AMDGPU_GFX_TCX_DST_FIFOB2, |
3331 | AMDGPU_GFX_TCX_DST_FIFOB3, |
3332 | AMDGPU_GFX_TCX_DST_FIFOB4, |
3333 | AMDGPU_GFX_TCX_DST_FIFOB5, |
3334 | AMDGPU_GFX_TCX_DST_FIFOB6, |
3335 | AMDGPU_GFX_TCX_DST_FIFOB7, |
3336 | AMDGPU_GFX_TCX_DST_FIFOD0, |
3337 | AMDGPU_GFX_TCX_DST_FIFOD1, |
3338 | AMDGPU_GFX_TCX_DST_FIFOD2, |
3339 | AMDGPU_GFX_TCX_DST_FIFOD3, |
3340 | AMDGPU_GFX_TCX_DST_FIFOD4, |
3341 | AMDGPU_GFX_TCX_DST_FIFOD5, |
3342 | AMDGPU_GFX_TCX_DST_FIFOD6, |
3343 | AMDGPU_GFX_TCX_DST_FIFOD7, |
3344 | AMDGPU_GFX_TCX_DST_FIFO_ACKB0, |
3345 | AMDGPU_GFX_TCX_DST_FIFO_ACKB1, |
3346 | AMDGPU_GFX_TCX_DST_FIFO_ACKB2, |
3347 | AMDGPU_GFX_TCX_DST_FIFO_ACKB3, |
3348 | AMDGPU_GFX_TCX_DST_FIFO_ACKB4, |
3349 | AMDGPU_GFX_TCX_DST_FIFO_ACKB5, |
3350 | AMDGPU_GFX_TCX_DST_FIFO_ACKB6, |
3351 | AMDGPU_GFX_TCX_DST_FIFO_ACKB7, |
3352 | AMDGPU_GFX_TCX_DST_FIFO_ACKD0, |
3353 | AMDGPU_GFX_TCX_DST_FIFO_ACKD1, |
3354 | AMDGPU_GFX_TCX_DST_FIFO_ACKD2, |
3355 | AMDGPU_GFX_TCX_DST_FIFO_ACKD3, |
3356 | AMDGPU_GFX_TCX_DST_FIFO_ACKD4, |
3357 | AMDGPU_GFX_TCX_DST_FIFO_ACKD5, |
3358 | AMDGPU_GFX_TCX_DST_FIFO_ACKD6, |
3359 | AMDGPU_GFX_TCX_DST_FIFO_ACKD7, |
3360 | }; |
3361 | |
3362 | enum amdgpu_gfx_atc_l2_ras_mem_id { |
3363 | AMDGPU_GFX_ATC_L2_MEM0 = 0, |
3364 | }; |
3365 | |
3366 | enum amdgpu_gfx_utcl2_ras_mem_id { |
3367 | AMDGPU_GFX_UTCL2_MEM0 = 0, |
3368 | }; |
3369 | |
3370 | enum amdgpu_gfx_vml2_ras_mem_id { |
3371 | AMDGPU_GFX_VML2_MEM0 = 0, |
3372 | }; |
3373 | |
3374 | enum amdgpu_gfx_vml2_walker_ras_mem_id { |
3375 | AMDGPU_GFX_VML2_WALKER_MEM0 = 0, |
3376 | }; |
3377 | |
3378 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_cp_mem_list[] = { |
3379 | {AMDGPU_GFX_CP_MEM1, "CP_MEM1" }, |
3380 | {AMDGPU_GFX_CP_MEM2, "CP_MEM2" }, |
3381 | {AMDGPU_GFX_CP_MEM3, "CP_MEM3" }, |
3382 | {AMDGPU_GFX_CP_MEM4, "CP_MEM4" }, |
3383 | {AMDGPU_GFX_CP_MEM5, "CP_MEM5" }, |
3384 | }; |
3385 | |
3386 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcea_mem_list[] = { |
3387 | {AMDGPU_GFX_GCEA_IOWR_CMDMEM, "GCEA_IOWR_CMDMEM" }, |
3388 | {AMDGPU_GFX_GCEA_IORD_CMDMEM, "GCEA_IORD_CMDMEM" }, |
3389 | {AMDGPU_GFX_GCEA_GMIWR_CMDMEM, "GCEA_GMIWR_CMDMEM" }, |
3390 | {AMDGPU_GFX_GCEA_GMIRD_CMDMEM, "GCEA_GMIRD_CMDMEM" }, |
3391 | {AMDGPU_GFX_GCEA_DRAMWR_CMDMEM, "GCEA_DRAMWR_CMDMEM" }, |
3392 | {AMDGPU_GFX_GCEA_DRAMRD_CMDMEM, "GCEA_DRAMRD_CMDMEM" }, |
3393 | {AMDGPU_GFX_GCEA_MAM_DMEM0, "GCEA_MAM_DMEM0" }, |
3394 | {AMDGPU_GFX_GCEA_MAM_DMEM1, "GCEA_MAM_DMEM1" }, |
3395 | {AMDGPU_GFX_GCEA_MAM_DMEM2, "GCEA_MAM_DMEM2" }, |
3396 | {AMDGPU_GFX_GCEA_MAM_DMEM3, "GCEA_MAM_DMEM3" }, |
3397 | {AMDGPU_GFX_GCEA_MAM_AMEM0, "GCEA_MAM_AMEM0" }, |
3398 | {AMDGPU_GFX_GCEA_MAM_AMEM1, "GCEA_MAM_AMEM1" }, |
3399 | {AMDGPU_GFX_GCEA_MAM_AMEM2, "GCEA_MAM_AMEM2" }, |
3400 | {AMDGPU_GFX_GCEA_MAM_AMEM3, "GCEA_MAM_AMEM3" }, |
3401 | {AMDGPU_GFX_GCEA_MAM_AFLUSH_BUFFER, "GCEA_MAM_AFLUSH_BUFFER" }, |
3402 | {AMDGPU_GFX_GCEA_WRET_TAGMEM, "GCEA_WRET_TAGMEM" }, |
3403 | {AMDGPU_GFX_GCEA_RRET_TAGMEM, "GCEA_RRET_TAGMEM" }, |
3404 | {AMDGPU_GFX_GCEA_IOWR_DATAMEM, "GCEA_IOWR_DATAMEM" }, |
3405 | {AMDGPU_GFX_GCEA_GMIWR_DATAMEM, "GCEA_GMIWR_DATAMEM" }, |
3406 | {AMDGPU_GFX_GCEA_DRAM_DATAMEM, "GCEA_DRAM_DATAMEM" }, |
3407 | }; |
3408 | |
3409 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gc_cane_mem_list[] = { |
3410 | {AMDGPU_GFX_GC_CANE_MEM0, "GC_CANE_MEM0" }, |
3411 | }; |
3412 | |
3413 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gcutcl2_mem_list[] = { |
3414 | {AMDGPU_GFX_GCUTCL2_MEM2P512X95, "GCUTCL2_MEM2P512X95" }, |
3415 | }; |
3416 | |
3417 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_gds_mem_list[] = { |
3418 | {AMDGPU_GFX_GDS_MEM0, "GDS_MEM" }, |
3419 | }; |
3420 | |
3421 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_lds_mem_list[] = { |
3422 | {AMDGPU_GFX_LDS_BANK0, "LDS_BANK0" }, |
3423 | {AMDGPU_GFX_LDS_BANK1, "LDS_BANK1" }, |
3424 | {AMDGPU_GFX_LDS_BANK2, "LDS_BANK2" }, |
3425 | {AMDGPU_GFX_LDS_BANK3, "LDS_BANK3" }, |
3426 | {AMDGPU_GFX_LDS_BANK4, "LDS_BANK4" }, |
3427 | {AMDGPU_GFX_LDS_BANK5, "LDS_BANK5" }, |
3428 | {AMDGPU_GFX_LDS_BANK6, "LDS_BANK6" }, |
3429 | {AMDGPU_GFX_LDS_BANK7, "LDS_BANK7" }, |
3430 | {AMDGPU_GFX_LDS_BANK8, "LDS_BANK8" }, |
3431 | {AMDGPU_GFX_LDS_BANK9, "LDS_BANK9" }, |
3432 | {AMDGPU_GFX_LDS_BANK10, "LDS_BANK10" }, |
3433 | {AMDGPU_GFX_LDS_BANK11, "LDS_BANK11" }, |
3434 | {AMDGPU_GFX_LDS_BANK12, "LDS_BANK12" }, |
3435 | {AMDGPU_GFX_LDS_BANK13, "LDS_BANK13" }, |
3436 | {AMDGPU_GFX_LDS_BANK14, "LDS_BANK14" }, |
3437 | {AMDGPU_GFX_LDS_BANK15, "LDS_BANK15" }, |
3438 | {AMDGPU_GFX_LDS_BANK16, "LDS_BANK16" }, |
3439 | {AMDGPU_GFX_LDS_BANK17, "LDS_BANK17" }, |
3440 | {AMDGPU_GFX_LDS_BANK18, "LDS_BANK18" }, |
3441 | {AMDGPU_GFX_LDS_BANK19, "LDS_BANK19" }, |
3442 | {AMDGPU_GFX_LDS_BANK20, "LDS_BANK20" }, |
3443 | {AMDGPU_GFX_LDS_BANK21, "LDS_BANK21" }, |
3444 | {AMDGPU_GFX_LDS_BANK22, "LDS_BANK22" }, |
3445 | {AMDGPU_GFX_LDS_BANK23, "LDS_BANK23" }, |
3446 | {AMDGPU_GFX_LDS_BANK24, "LDS_BANK24" }, |
3447 | {AMDGPU_GFX_LDS_BANK25, "LDS_BANK25" }, |
3448 | {AMDGPU_GFX_LDS_BANK26, "LDS_BANK26" }, |
3449 | {AMDGPU_GFX_LDS_BANK27, "LDS_BANK27" }, |
3450 | {AMDGPU_GFX_LDS_BANK28, "LDS_BANK28" }, |
3451 | {AMDGPU_GFX_LDS_BANK29, "LDS_BANK29" }, |
3452 | {AMDGPU_GFX_LDS_BANK30, "LDS_BANK30" }, |
3453 | {AMDGPU_GFX_LDS_BANK31, "LDS_BANK31" }, |
3454 | {AMDGPU_GFX_LDS_SP_BUFFER_A, "LDS_SP_BUFFER_A" }, |
3455 | {AMDGPU_GFX_LDS_SP_BUFFER_B, "LDS_SP_BUFFER_B" }, |
3456 | }; |
3457 | |
3458 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_rlc_mem_list[] = { |
3459 | {AMDGPU_GFX_RLC_GPMF32, "RLC_GPMF32" }, |
3460 | {AMDGPU_GFX_RLC_RLCVF32, "RLC_RLCVF32" }, |
3461 | {AMDGPU_GFX_RLC_SCRATCH, "RLC_SCRATCH" }, |
3462 | {AMDGPU_GFX_RLC_SRM_ARAM, "RLC_SRM_ARAM" }, |
3463 | {AMDGPU_GFX_RLC_SRM_DRAM, "RLC_SRM_DRAM" }, |
3464 | {AMDGPU_GFX_RLC_TCTAG, "RLC_TCTAG" }, |
3465 | {AMDGPU_GFX_RLC_SPM_SE, "RLC_SPM_SE" }, |
3466 | {AMDGPU_GFX_RLC_SPM_GRBMT, "RLC_SPM_GRBMT" }, |
3467 | }; |
3468 | |
3469 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sp_mem_list[] = { |
3470 | {AMDGPU_GFX_SP_SIMDID0, "SP_SIMDID0" }, |
3471 | }; |
3472 | |
3473 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_spi_mem_list[] = { |
3474 | {AMDGPU_GFX_SPI_MEM0, "SPI_MEM0" }, |
3475 | {AMDGPU_GFX_SPI_MEM1, "SPI_MEM1" }, |
3476 | {AMDGPU_GFX_SPI_MEM2, "SPI_MEM2" }, |
3477 | {AMDGPU_GFX_SPI_MEM3, "SPI_MEM3" }, |
3478 | }; |
3479 | |
3480 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sqc_mem_list[] = { |
3481 | {AMDGPU_GFX_SQC_INST_CACHE_A, "SQC_INST_CACHE_A" }, |
3482 | {AMDGPU_GFX_SQC_INST_CACHE_B, "SQC_INST_CACHE_B" }, |
3483 | {AMDGPU_GFX_SQC_INST_CACHE_TAG_A, "SQC_INST_CACHE_TAG_A" }, |
3484 | {AMDGPU_GFX_SQC_INST_CACHE_TAG_B, "SQC_INST_CACHE_TAG_B" }, |
3485 | {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_A, "SQC_INST_CACHE_MISS_FIFO_A" }, |
3486 | {AMDGPU_GFX_SQC_INST_CACHE_MISS_FIFO_B, "SQC_INST_CACHE_MISS_FIFO_B" }, |
3487 | {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_A, "SQC_INST_CACHE_GATCL1_MISS_FIFO_A" }, |
3488 | {AMDGPU_GFX_SQC_INST_CACHE_GATCL1_MISS_FIFO_B, "SQC_INST_CACHE_GATCL1_MISS_FIFO_B" }, |
3489 | {AMDGPU_GFX_SQC_DATA_CACHE_A, "SQC_DATA_CACHE_A" }, |
3490 | {AMDGPU_GFX_SQC_DATA_CACHE_B, "SQC_DATA_CACHE_B" }, |
3491 | {AMDGPU_GFX_SQC_DATA_CACHE_TAG_A, "SQC_DATA_CACHE_TAG_A" }, |
3492 | {AMDGPU_GFX_SQC_DATA_CACHE_TAG_B, "SQC_DATA_CACHE_TAG_B" }, |
3493 | {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_A, "SQC_DATA_CACHE_MISS_FIFO_A" }, |
3494 | {AMDGPU_GFX_SQC_DATA_CACHE_MISS_FIFO_B, "SQC_DATA_CACHE_MISS_FIFO_B" }, |
3495 | {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_A, "SQC_DATA_CACHE_HIT_FIFO_A" }, |
3496 | {AMDGPU_GFX_SQC_DATA_CACHE_HIT_FIFO_B, "SQC_DATA_CACHE_HIT_FIFO_B" }, |
3497 | {AMDGPU_GFX_SQC_DIRTY_BIT_A, "SQC_DIRTY_BIT_A" }, |
3498 | {AMDGPU_GFX_SQC_DIRTY_BIT_B, "SQC_DIRTY_BIT_B" }, |
3499 | {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU0, "SQC_WRITE_DATA_BUFFER_CU0" }, |
3500 | {AMDGPU_GFX_SQC_WRITE_DATA_BUFFER_CU1, "SQC_WRITE_DATA_BUFFER_CU1" }, |
3501 | {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_A" }, |
3502 | {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B, "SQC_UTCL1_MISS_LFIFO_DATA_CACHE_B" }, |
3503 | {AMDGPU_GFX_SQC_UTCL1_MISS_LFIFO_INST_CACHE, "SQC_UTCL1_MISS_LFIFO_INST_CACHE" }, |
3504 | }; |
3505 | |
3506 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_sq_mem_list[] = { |
3507 | {AMDGPU_GFX_SQ_SGPR_MEM0, "SQ_SGPR_MEM0" }, |
3508 | {AMDGPU_GFX_SQ_SGPR_MEM1, "SQ_SGPR_MEM1" }, |
3509 | {AMDGPU_GFX_SQ_SGPR_MEM2, "SQ_SGPR_MEM2" }, |
3510 | {AMDGPU_GFX_SQ_SGPR_MEM3, "SQ_SGPR_MEM3" }, |
3511 | }; |
3512 | |
3513 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_ta_mem_list[] = { |
3514 | {AMDGPU_GFX_TA_FS_AFIFO_RAM_LO, "TA_FS_AFIFO_RAM_LO" }, |
3515 | {AMDGPU_GFX_TA_FS_AFIFO_RAM_HI, "TA_FS_AFIFO_RAM_HI" }, |
3516 | {AMDGPU_GFX_TA_FS_CFIFO_RAM, "TA_FS_CFIFO_RAM" }, |
3517 | {AMDGPU_GFX_TA_FSX_LFIFO, "TA_FSX_LFIFO" }, |
3518 | {AMDGPU_GFX_TA_FS_DFIFO_RAM, "TA_FS_DFIFO_RAM" }, |
3519 | }; |
3520 | |
3521 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcc_mem_list[] = { |
3522 | {AMDGPU_GFX_TCC_MEM1, "TCC_MEM1" }, |
3523 | }; |
3524 | |
3525 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tca_mem_list[] = { |
3526 | {AMDGPU_GFX_TCA_MEM1, "TCA_MEM1" }, |
3527 | }; |
3528 | |
3529 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tci_mem_list[] = { |
3530 | {AMDGPU_GFX_TCIW_MEM, "TCIW_MEM" }, |
3531 | }; |
3532 | |
3533 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcp_mem_list[] = { |
3534 | {AMDGPU_GFX_TCP_LFIFO0, "TCP_LFIFO0" }, |
3535 | {AMDGPU_GFX_TCP_SET0BANK0_RAM, "TCP_SET0BANK0_RAM" }, |
3536 | {AMDGPU_GFX_TCP_SET0BANK1_RAM, "TCP_SET0BANK1_RAM" }, |
3537 | {AMDGPU_GFX_TCP_SET0BANK2_RAM, "TCP_SET0BANK2_RAM" }, |
3538 | {AMDGPU_GFX_TCP_SET0BANK3_RAM, "TCP_SET0BANK3_RAM" }, |
3539 | {AMDGPU_GFX_TCP_SET1BANK0_RAM, "TCP_SET1BANK0_RAM" }, |
3540 | {AMDGPU_GFX_TCP_SET1BANK1_RAM, "TCP_SET1BANK1_RAM" }, |
3541 | {AMDGPU_GFX_TCP_SET1BANK2_RAM, "TCP_SET1BANK2_RAM" }, |
3542 | {AMDGPU_GFX_TCP_SET1BANK3_RAM, "TCP_SET1BANK3_RAM" }, |
3543 | {AMDGPU_GFX_TCP_SET2BANK0_RAM, "TCP_SET2BANK0_RAM" }, |
3544 | {AMDGPU_GFX_TCP_SET2BANK1_RAM, "TCP_SET2BANK1_RAM" }, |
3545 | {AMDGPU_GFX_TCP_SET2BANK2_RAM, "TCP_SET2BANK2_RAM" }, |
3546 | {AMDGPU_GFX_TCP_SET2BANK3_RAM, "TCP_SET2BANK3_RAM" }, |
3547 | {AMDGPU_GFX_TCP_SET3BANK0_RAM, "TCP_SET3BANK0_RAM" }, |
3548 | {AMDGPU_GFX_TCP_SET3BANK1_RAM, "TCP_SET3BANK1_RAM" }, |
3549 | {AMDGPU_GFX_TCP_SET3BANK2_RAM, "TCP_SET3BANK2_RAM" }, |
3550 | {AMDGPU_GFX_TCP_SET3BANK3_RAM, "TCP_SET3BANK3_RAM" }, |
3551 | {AMDGPU_GFX_TCP_VM_FIFO, "TCP_VM_FIFO" }, |
3552 | {AMDGPU_GFX_TCP_DB_TAGRAM0, "TCP_DB_TAGRAM0" }, |
3553 | {AMDGPU_GFX_TCP_DB_TAGRAM1, "TCP_DB_TAGRAM1" }, |
3554 | {AMDGPU_GFX_TCP_DB_TAGRAM2, "TCP_DB_TAGRAM2" }, |
3555 | {AMDGPU_GFX_TCP_DB_TAGRAM3, "TCP_DB_TAGRAM3" }, |
3556 | {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE0, "TCP_UTCL1_LFIFO_PROBE0" }, |
3557 | {AMDGPU_GFX_TCP_UTCL1_LFIFO_PROBE1, "TCP_UTCL1_LFIFO_PROBE1" }, |
3558 | {AMDGPU_GFX_TCP_CMD_FIFO, "TCP_CMD_FIFO" }, |
3559 | }; |
3560 | |
3561 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_td_mem_list[] = { |
3562 | {AMDGPU_GFX_TD_UTD_CS_FIFO_MEM, "TD_UTD_CS_FIFO_MEM" }, |
3563 | {AMDGPU_GFX_TD_UTD_SS_FIFO_LO_MEM, "TD_UTD_SS_FIFO_LO_MEM" }, |
3564 | {AMDGPU_GFX_TD_UTD_SS_FIFO_HI_MEM, "TD_UTD_SS_FIFO_HI_MEM" }, |
3565 | }; |
3566 | |
3567 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_tcx_mem_list[] = { |
3568 | {AMDGPU_GFX_TCX_FIFOD0, "TCX_FIFOD0" }, |
3569 | {AMDGPU_GFX_TCX_FIFOD1, "TCX_FIFOD1" }, |
3570 | {AMDGPU_GFX_TCX_FIFOD2, "TCX_FIFOD2" }, |
3571 | {AMDGPU_GFX_TCX_FIFOD3, "TCX_FIFOD3" }, |
3572 | {AMDGPU_GFX_TCX_FIFOD4, "TCX_FIFOD4" }, |
3573 | {AMDGPU_GFX_TCX_FIFOD5, "TCX_FIFOD5" }, |
3574 | {AMDGPU_GFX_TCX_FIFOD6, "TCX_FIFOD6" }, |
3575 | {AMDGPU_GFX_TCX_FIFOD7, "TCX_FIFOD7" }, |
3576 | {AMDGPU_GFX_TCX_FIFOB0, "TCX_FIFOB0" }, |
3577 | {AMDGPU_GFX_TCX_FIFOB1, "TCX_FIFOB1" }, |
3578 | {AMDGPU_GFX_TCX_FIFOB2, "TCX_FIFOB2" }, |
3579 | {AMDGPU_GFX_TCX_FIFOB3, "TCX_FIFOB3" }, |
3580 | {AMDGPU_GFX_TCX_FIFOB4, "TCX_FIFOB4" }, |
3581 | {AMDGPU_GFX_TCX_FIFOB5, "TCX_FIFOB5" }, |
3582 | {AMDGPU_GFX_TCX_FIFOB6, "TCX_FIFOB6" }, |
3583 | {AMDGPU_GFX_TCX_FIFOB7, "TCX_FIFOB7" }, |
3584 | {AMDGPU_GFX_TCX_FIFOA0, "TCX_FIFOA0" }, |
3585 | {AMDGPU_GFX_TCX_FIFOA1, "TCX_FIFOA1" }, |
3586 | {AMDGPU_GFX_TCX_FIFOA2, "TCX_FIFOA2" }, |
3587 | {AMDGPU_GFX_TCX_FIFOA3, "TCX_FIFOA3" }, |
3588 | {AMDGPU_GFX_TCX_FIFOA4, "TCX_FIFOA4" }, |
3589 | {AMDGPU_GFX_TCX_FIFOA5, "TCX_FIFOA5" }, |
3590 | {AMDGPU_GFX_TCX_FIFOA6, "TCX_FIFOA6" }, |
3591 | {AMDGPU_GFX_TCX_FIFOA7, "TCX_FIFOA7" }, |
3592 | {AMDGPU_GFX_TCX_CFIFO0, "TCX_CFIFO0" }, |
3593 | {AMDGPU_GFX_TCX_CFIFO1, "TCX_CFIFO1" }, |
3594 | {AMDGPU_GFX_TCX_CFIFO2, "TCX_CFIFO2" }, |
3595 | {AMDGPU_GFX_TCX_CFIFO3, "TCX_CFIFO3" }, |
3596 | {AMDGPU_GFX_TCX_CFIFO4, "TCX_CFIFO4" }, |
3597 | {AMDGPU_GFX_TCX_CFIFO5, "TCX_CFIFO5" }, |
3598 | {AMDGPU_GFX_TCX_CFIFO6, "TCX_CFIFO6" }, |
3599 | {AMDGPU_GFX_TCX_CFIFO7, "TCX_CFIFO7" }, |
3600 | {AMDGPU_GFX_TCX_FIFO_ACKB0, "TCX_FIFO_ACKB0" }, |
3601 | {AMDGPU_GFX_TCX_FIFO_ACKB1, "TCX_FIFO_ACKB1" }, |
3602 | {AMDGPU_GFX_TCX_FIFO_ACKB2, "TCX_FIFO_ACKB2" }, |
3603 | {AMDGPU_GFX_TCX_FIFO_ACKB3, "TCX_FIFO_ACKB3" }, |
3604 | {AMDGPU_GFX_TCX_FIFO_ACKB4, "TCX_FIFO_ACKB4" }, |
3605 | {AMDGPU_GFX_TCX_FIFO_ACKB5, "TCX_FIFO_ACKB5" }, |
3606 | {AMDGPU_GFX_TCX_FIFO_ACKB6, "TCX_FIFO_ACKB6" }, |
3607 | {AMDGPU_GFX_TCX_FIFO_ACKB7, "TCX_FIFO_ACKB7" }, |
3608 | {AMDGPU_GFX_TCX_FIFO_ACKD0, "TCX_FIFO_ACKD0" }, |
3609 | {AMDGPU_GFX_TCX_FIFO_ACKD1, "TCX_FIFO_ACKD1" }, |
3610 | {AMDGPU_GFX_TCX_FIFO_ACKD2, "TCX_FIFO_ACKD2" }, |
3611 | {AMDGPU_GFX_TCX_FIFO_ACKD3, "TCX_FIFO_ACKD3" }, |
3612 | {AMDGPU_GFX_TCX_FIFO_ACKD4, "TCX_FIFO_ACKD4" }, |
3613 | {AMDGPU_GFX_TCX_FIFO_ACKD5, "TCX_FIFO_ACKD5" }, |
3614 | {AMDGPU_GFX_TCX_FIFO_ACKD6, "TCX_FIFO_ACKD6" }, |
3615 | {AMDGPU_GFX_TCX_FIFO_ACKD7, "TCX_FIFO_ACKD7" }, |
3616 | {AMDGPU_GFX_TCX_DST_FIFOA0, "TCX_DST_FIFOA0" }, |
3617 | {AMDGPU_GFX_TCX_DST_FIFOA1, "TCX_DST_FIFOA1" }, |
3618 | {AMDGPU_GFX_TCX_DST_FIFOA2, "TCX_DST_FIFOA2" }, |
3619 | {AMDGPU_GFX_TCX_DST_FIFOA3, "TCX_DST_FIFOA3" }, |
3620 | {AMDGPU_GFX_TCX_DST_FIFOA4, "TCX_DST_FIFOA4" }, |
3621 | {AMDGPU_GFX_TCX_DST_FIFOA5, "TCX_DST_FIFOA5" }, |
3622 | {AMDGPU_GFX_TCX_DST_FIFOA6, "TCX_DST_FIFOA6" }, |
3623 | {AMDGPU_GFX_TCX_DST_FIFOA7, "TCX_DST_FIFOA7" }, |
3624 | {AMDGPU_GFX_TCX_DST_FIFOB0, "TCX_DST_FIFOB0" }, |
3625 | {AMDGPU_GFX_TCX_DST_FIFOB1, "TCX_DST_FIFOB1" }, |
3626 | {AMDGPU_GFX_TCX_DST_FIFOB2, "TCX_DST_FIFOB2" }, |
3627 | {AMDGPU_GFX_TCX_DST_FIFOB3, "TCX_DST_FIFOB3" }, |
3628 | {AMDGPU_GFX_TCX_DST_FIFOB4, "TCX_DST_FIFOB4" }, |
3629 | {AMDGPU_GFX_TCX_DST_FIFOB5, "TCX_DST_FIFOB5" }, |
3630 | {AMDGPU_GFX_TCX_DST_FIFOB6, "TCX_DST_FIFOB6" }, |
3631 | {AMDGPU_GFX_TCX_DST_FIFOB7, "TCX_DST_FIFOB7" }, |
3632 | {AMDGPU_GFX_TCX_DST_FIFOD0, "TCX_DST_FIFOD0" }, |
3633 | {AMDGPU_GFX_TCX_DST_FIFOD1, "TCX_DST_FIFOD1" }, |
3634 | {AMDGPU_GFX_TCX_DST_FIFOD2, "TCX_DST_FIFOD2" }, |
3635 | {AMDGPU_GFX_TCX_DST_FIFOD3, "TCX_DST_FIFOD3" }, |
3636 | {AMDGPU_GFX_TCX_DST_FIFOD4, "TCX_DST_FIFOD4" }, |
3637 | {AMDGPU_GFX_TCX_DST_FIFOD5, "TCX_DST_FIFOD5" }, |
3638 | {AMDGPU_GFX_TCX_DST_FIFOD6, "TCX_DST_FIFOD6" }, |
3639 | {AMDGPU_GFX_TCX_DST_FIFOD7, "TCX_DST_FIFOD7" }, |
3640 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB0, "TCX_DST_FIFO_ACKB0" }, |
3641 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB1, "TCX_DST_FIFO_ACKB1" }, |
3642 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB2, "TCX_DST_FIFO_ACKB2" }, |
3643 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB3, "TCX_DST_FIFO_ACKB3" }, |
3644 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB4, "TCX_DST_FIFO_ACKB4" }, |
3645 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB5, "TCX_DST_FIFO_ACKB5" }, |
3646 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB6, "TCX_DST_FIFO_ACKB6" }, |
3647 | {AMDGPU_GFX_TCX_DST_FIFO_ACKB7, "TCX_DST_FIFO_ACKB7" }, |
3648 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD0, "TCX_DST_FIFO_ACKD0" }, |
3649 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD1, "TCX_DST_FIFO_ACKD1" }, |
3650 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD2, "TCX_DST_FIFO_ACKD2" }, |
3651 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD3, "TCX_DST_FIFO_ACKD3" }, |
3652 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD4, "TCX_DST_FIFO_ACKD4" }, |
3653 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD5, "TCX_DST_FIFO_ACKD5" }, |
3654 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD6, "TCX_DST_FIFO_ACKD6" }, |
3655 | {AMDGPU_GFX_TCX_DST_FIFO_ACKD7, "TCX_DST_FIFO_ACKD7" }, |
3656 | }; |
3657 | |
3658 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_atc_l2_mem_list[] = { |
	{AMDGPU_GFX_ATC_L2_MEM0, "ATC_L2_MEM" },
3660 | }; |
3661 | |
3662 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_utcl2_mem_list[] = { |
	{AMDGPU_GFX_UTCL2_MEM0, "UTCL2_MEM" },
3664 | }; |
3665 | |
3666 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_mem_list[] = { |
	{AMDGPU_GFX_VML2_MEM0, "VML2_MEM" },
3668 | }; |
3669 | |
3670 | static const struct amdgpu_ras_memory_id_entry gfx_v9_4_3_ras_vml2_walker_mem_list[] = { |
	{AMDGPU_GFX_VML2_WALKER_MEM0, "VML2_WALKER_MEM" },
3672 | }; |
3673 | |
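/* Master lookup table, indexed by memory type; each AMDGPU_GFX_MEMID_ENT
 * entry pairs a memory-ID list with its size so a raw mem_id can be
 * translated to a name.
 */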
3674 | static const struct amdgpu_gfx_ras_mem_id_entry gfx_v9_4_3_ras_mem_list_array[AMDGPU_GFX_MEM_TYPE_NUM] = { |
3675 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_cp_mem_list) |
3676 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcea_mem_list) |
3677 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gc_cane_mem_list) |
3678 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gcutcl2_mem_list) |
3679 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_gds_mem_list) |
3680 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_lds_mem_list) |
3681 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_rlc_mem_list) |
3682 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sp_mem_list) |
3683 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_spi_mem_list) |
3684 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sqc_mem_list) |
3685 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_sq_mem_list) |
3686 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_ta_mem_list) |
3687 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcc_mem_list) |
3688 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tca_mem_list) |
3689 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tci_mem_list) |
3690 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcp_mem_list) |
3691 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_td_mem_list) |
3692 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_tcx_mem_list) |
3693 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_atc_l2_mem_list) |
3694 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_utcl2_mem_list) |
3695 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_mem_list) |
3696 | AMDGPU_GFX_MEMID_ENT(gfx_v9_4_3_ras_vml2_walker_mem_list) |
3697 | }; |
3698 | |
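/* Each entry: {status register pair, register instance count, valid
 * flags, block name}, followed by the memory-ID type used for decoding
 * and a trailing se_num. se_num and reg_inst drive the GRBM se/instance
 * selection in the query and reset loops below.
 */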
3699 | static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ce_reg_list[] = { |
3700 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_CE_ERR_STATUS_LOW, regRLC_CE_ERR_STATUS_HIGH), |
3701 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC" }, |
3702 | AMDGPU_GFX_RLC_MEM, 1}, |
3703 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_CE_ERR_STATUS_LO, regCPC_CE_ERR_STATUS_HI), |
3704 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC" }, |
3705 | AMDGPU_GFX_CP_MEM, 1}, |
3706 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_CE_ERR_STATUS_LO, regCPF_CE_ERR_STATUS_HI), |
3707 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF" }, |
3708 | AMDGPU_GFX_CP_MEM, 1}, |
3709 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_CE_ERR_STATUS_LO, regCPG_CE_ERR_STATUS_HI), |
3710 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG" }, |
3711 | AMDGPU_GFX_CP_MEM, 1}, |
3712 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_CE_ERR_STATUS_LO, regGDS_CE_ERR_STATUS_HI), |
3713 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS" }, |
3714 | AMDGPU_GFX_GDS_MEM, 1}, |
3715 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_CE_ERR_STATUS_LO, regGC_CANE_CE_ERR_STATUS_HI), |
3716 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE" }, |
3717 | AMDGPU_GFX_GC_CANE_MEM, 1}, |
3718 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_CE_ERR_STATUS_LO, regSPI_CE_ERR_STATUS_HI), |
3719 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI" }, |
3720 | AMDGPU_GFX_SPI_MEM, 1}, |
3721 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_CE_ERR_STATUS_LO, regSP0_CE_ERR_STATUS_HI), |
3722 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0" }, |
3723 | AMDGPU_GFX_SP_MEM, 4}, |
3724 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_CE_ERR_STATUS_LO, regSP1_CE_ERR_STATUS_HI), |
3725 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1" }, |
3726 | AMDGPU_GFX_SP_MEM, 4}, |
3727 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_CE_ERR_STATUS_LO, regSQ_CE_ERR_STATUS_HI), |
3728 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ" }, |
3729 | AMDGPU_GFX_SQ_MEM, 4}, |
3730 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_CE_EDC_LO, regSQC_CE_EDC_HI), |
3731 | 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC" }, |
3732 | AMDGPU_GFX_SQC_MEM, 4}, |
3733 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_CE_ERR_STATUS_LO, regTCX_CE_ERR_STATUS_HI), |
3734 | 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX" }, |
3735 | AMDGPU_GFX_TCX_MEM, 1}, |
3736 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_CE_ERR_STATUS_LO, regTCC_CE_ERR_STATUS_HI), |
3737 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC" }, |
3738 | AMDGPU_GFX_TCC_MEM, 1}, |
3739 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_CE_EDC_LO, regTA_CE_EDC_HI), |
3740 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA" }, |
3741 | AMDGPU_GFX_TA_MEM, 4}, |
3742 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_CE_EDC_LO_REG, regTCI_CE_EDC_HI_REG), |
3743 | 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI" }, |
3744 | AMDGPU_GFX_TCI_MEM, 1}, |
3745 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_CE_EDC_LO_REG, regTCP_CE_EDC_HI_REG), |
3746 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP" }, |
3747 | AMDGPU_GFX_TCP_MEM, 4}, |
3748 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_CE_EDC_LO, regTD_CE_EDC_HI), |
3749 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD" }, |
3750 | AMDGPU_GFX_TD_MEM, 4}, |
3751 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_CE_ERR_STATUS_LO, regGCEA_CE_ERR_STATUS_HI), |
3752 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA" }, |
3753 | AMDGPU_GFX_GCEA_MEM, 1}, |
3754 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_CE_ERR_STATUS_LO, regLDS_CE_ERR_STATUS_HI), |
3755 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS" }, |
3756 | AMDGPU_GFX_LDS_MEM, 4}, |
3757 | }; |
3758 | |
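/* The UE list has more entries than the CE list (TCA reports UE only);
 * the query/reset loops below walk index-matched pairs up to the CE
 * list length and then sweep the remaining UE entries separately.
 */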
3759 | static const struct amdgpu_gfx_ras_reg_entry gfx_v9_4_3_ue_reg_list[] = { |
3760 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regRLC_UE_ERR_STATUS_LOW, regRLC_UE_ERR_STATUS_HIGH), |
3761 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "RLC" }, |
3762 | AMDGPU_GFX_RLC_MEM, 1}, |
3763 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPC_UE_ERR_STATUS_LO, regCPC_UE_ERR_STATUS_HI), |
3764 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPC" }, |
3765 | AMDGPU_GFX_CP_MEM, 1}, |
3766 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPF_UE_ERR_STATUS_LO, regCPF_UE_ERR_STATUS_HI), |
3767 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPF" }, |
3768 | AMDGPU_GFX_CP_MEM, 1}, |
3769 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regCPG_UE_ERR_STATUS_LO, regCPG_UE_ERR_STATUS_HI), |
3770 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CPG" }, |
3771 | AMDGPU_GFX_CP_MEM, 1}, |
3772 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGDS_UE_ERR_STATUS_LO, regGDS_UE_ERR_STATUS_HI), |
3773 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GDS" }, |
3774 | AMDGPU_GFX_GDS_MEM, 1}, |
3775 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGC_CANE_UE_ERR_STATUS_LO, regGC_CANE_UE_ERR_STATUS_HI), |
3776 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "CANE" }, |
3777 | AMDGPU_GFX_GC_CANE_MEM, 1}, |
3778 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSPI_UE_ERR_STATUS_LO, regSPI_UE_ERR_STATUS_HI), |
3779 | 1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SPI" }, |
3780 | AMDGPU_GFX_SPI_MEM, 1}, |
3781 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP0_UE_ERR_STATUS_LO, regSP0_UE_ERR_STATUS_HI), |
3782 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP0" }, |
3783 | AMDGPU_GFX_SP_MEM, 4}, |
3784 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSP1_UE_ERR_STATUS_LO, regSP1_UE_ERR_STATUS_HI), |
3785 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SP1" }, |
3786 | AMDGPU_GFX_SP_MEM, 4}, |
3787 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQ_UE_ERR_STATUS_LO, regSQ_UE_ERR_STATUS_HI), |
3788 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQ" }, |
3789 | AMDGPU_GFX_SQ_MEM, 4}, |
3790 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regSQC_UE_EDC_LO, regSQC_UE_EDC_HI), |
3791 | 5, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SQC" }, |
3792 | AMDGPU_GFX_SQC_MEM, 4}, |
3793 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCX_UE_ERR_STATUS_LO, regTCX_UE_ERR_STATUS_HI), |
3794 | 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCX" }, |
3795 | AMDGPU_GFX_TCX_MEM, 1}, |
3796 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCC_UE_ERR_STATUS_LO, regTCC_UE_ERR_STATUS_HI), |
3797 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCC" }, |
3798 | AMDGPU_GFX_TCC_MEM, 1}, |
3799 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTA_UE_EDC_LO, regTA_UE_EDC_HI), |
3800 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TA" }, |
3801 | AMDGPU_GFX_TA_MEM, 4}, |
3802 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCI_UE_EDC_LO_REG, regTCI_UE_EDC_HI_REG), |
3803 | 27, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCI" }, |
3804 | AMDGPU_GFX_TCI_MEM, 1}, |
3805 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCP_UE_EDC_LO_REG, regTCP_UE_EDC_HI_REG), |
3806 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCP" }, |
3807 | AMDGPU_GFX_TCP_MEM, 4}, |
3808 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTD_UE_EDC_LO, regTD_UE_EDC_HI), |
3809 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TD" }, |
3810 | AMDGPU_GFX_TD_MEM, 4}, |
3811 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regTCA_UE_ERR_STATUS_LO, regTCA_UE_ERR_STATUS_HI), |
3812 | 2, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "TCA" }, |
3813 | AMDGPU_GFX_TCA_MEM, 1}, |
3814 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regGCEA_UE_ERR_STATUS_LO, regGCEA_UE_ERR_STATUS_HI), |
3815 | 16, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "GCEA" }, |
3816 | AMDGPU_GFX_GCEA_MEM, 1}, |
3817 | {{AMDGPU_RAS_REG_ENTRY(GC, 0, regLDS_UE_ERR_STATUS_LO, regLDS_UE_ERR_STATUS_HI), |
3818 | 10, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "LDS" }, |
3819 | AMDGPU_GFX_LDS_MEM, 4}, |
3820 | }; |
3821 | |
3822 | static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev, |
3823 | void *ras_error_status, int xcc_id) |
3824 | { |
3825 | struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; |
3826 | unsigned long ce_count = 0, ue_count = 0; |
3827 | uint32_t i, j, k; |
3828 | |
3829 | /* NOTE: convert xcc_id to physical XCD ID (XCD0 or XCD1) */ |
3830 | struct amdgpu_smuio_mcm_config_info mcm_info = { |
3831 | .socket_id = adev->smuio.funcs->get_socket_id(adev), |
3832 | .die_id = xcc_id & 0x01 ? 1 : 0, |
3833 | }; |
3834 | |
3835 | mutex_lock(&adev->grbm_idx_mutex); |
3836 | |
	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ce_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE,
					&ce_count);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}
3865 | |
3866 | /* handle extra register entries of UE */ |
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_query_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
					gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
					GET_INST(GC, xcc_id),
					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
					&ue_count);
			}
		}
	}
3886 | |
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);

	/* the caller is responsible for initializing err_data->ue_count
	 * and err_data->ce_count
	 */
	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, NULL, ue_count);
	amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, ce_count);
3896 | } |
3897 | |
3898 | static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev, |
3899 | void *ras_error_status, int xcc_id) |
3900 | { |
3901 | uint32_t i, j, k; |
3902 | |
3903 | mutex_lock(&adev->grbm_idx_mutex); |
3904 | |
	for (i = 0; i < ARRAY_SIZE(gfx_v9_4_3_ce_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ce_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ce_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ce_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ce_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}
3925 | |
3926 | /* handle extra register entries of UE */ |
	for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
		for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
			for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
				/* no need to select if instance number is 1 */
				if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
				    gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
					gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);

				amdgpu_ras_inst_reset_ras_error_count(adev,
					&(gfx_v9_4_3_ue_reg_list[i].reg_entry),
					1,
					GET_INST(GC, xcc_id));
			}
		}
	}
3942 | |
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
3946 | } |
3947 | |
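/* Program the per-SE SQ watchdog: the timeout period and whether a
 * timeout is fatal come from the amdgpu_watchdog_timer module
 * parameters.
 */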
3948 | static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev, |
3949 | void *ras_error_status, int xcc_id) |
3950 | { |
3951 | uint32_t i; |
3952 | uint32_t data; |
3953 | |
3954 | if (amdgpu_sriov_vf(adev)) |
3955 | return; |
3956 | |
3957 | data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG); |
3958 | data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE, |
3959 | amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0); |
3960 | |
3961 | if (amdgpu_watchdog_timer.timeout_fatal_disable && |
3962 | (amdgpu_watchdog_timer.period < 1 || |
3963 | amdgpu_watchdog_timer.period > 0x23)) { |
		dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
3965 | amdgpu_watchdog_timer.period = 0x23; |
3966 | } |
3967 | data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL, |
3968 | amdgpu_watchdog_timer.period); |
3969 | |
3970 | mutex_lock(&adev->grbm_idx_mutex); |
3971 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { |
		gfx_v9_4_3_xcc_select_se_sh(adev, i, 0xffffffff, 0xffffffff, xcc_id);
		WREG32_SOC15(GC, GET_INST(GC, xcc_id), regSQ_TIMEOUT_CONFIG, data);
	}
	gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
				    xcc_id);
	mutex_unlock(&adev->grbm_idx_mutex);
3978 | } |
3979 | |
3980 | static void gfx_v9_4_3_query_ras_error_count(struct amdgpu_device *adev, |
3981 | void *ras_error_status) |
3982 | { |
	amdgpu_gfx_ras_error_func(adev, ras_error_status,
				  gfx_v9_4_3_inst_query_ras_err_count);
3985 | } |
3986 | |
3987 | static void gfx_v9_4_3_reset_ras_error_count(struct amdgpu_device *adev) |
3988 | { |
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_reset_ras_err_count);
3990 | } |
3991 | |
3992 | static void gfx_v9_4_3_enable_watchdog_timer(struct amdgpu_device *adev) |
3993 | { |
	amdgpu_gfx_ras_error_func(adev, NULL, gfx_v9_4_3_inst_enable_watchdog_timer);
3995 | } |
3996 | |
3997 | static const struct amd_ip_funcs gfx_v9_4_3_ip_funcs = { |
	.name = "gfx_v9_4_3",
3999 | .early_init = gfx_v9_4_3_early_init, |
4000 | .late_init = gfx_v9_4_3_late_init, |
4001 | .sw_init = gfx_v9_4_3_sw_init, |
4002 | .sw_fini = gfx_v9_4_3_sw_fini, |
4003 | .hw_init = gfx_v9_4_3_hw_init, |
4004 | .hw_fini = gfx_v9_4_3_hw_fini, |
4005 | .suspend = gfx_v9_4_3_suspend, |
4006 | .resume = gfx_v9_4_3_resume, |
4007 | .is_idle = gfx_v9_4_3_is_idle, |
4008 | .wait_for_idle = gfx_v9_4_3_wait_for_idle, |
4009 | .soft_reset = gfx_v9_4_3_soft_reset, |
4010 | .set_clockgating_state = gfx_v9_4_3_set_clockgating_state, |
4011 | .set_powergating_state = gfx_v9_4_3_set_powergating_state, |
4012 | .get_clockgating_state = gfx_v9_4_3_get_clockgating_state, |
4013 | }; |
4014 | |
4015 | static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_compute = { |
4016 | .type = AMDGPU_RING_TYPE_COMPUTE, |
4017 | .align_mask = 0xff, |
4018 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), |
4019 | .support_64bit_ptrs = true, |
4020 | .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, |
4021 | .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, |
4022 | .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, |
4023 | .emit_frame_size = |
4024 | 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ |
4025 | 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ |
4026 | 5 + /* hdp invalidate */ |
4027 | 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ |
4028 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + |
4029 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + |
4030 | 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ |
4031 | 8 + 8 + 8 + /* gfx_v9_4_3_ring_emit_fence x3 for user fence, vm fence */ |
4032 | 7 + /* gfx_v9_4_3_emit_mem_sync */ |
4033 | 5 + /* gfx_v9_4_3_emit_wave_limit for updating regSPI_WCL_PIPE_PERCENT_GFX register */ |
4034 | 15, /* for updating 3 regSPI_WCL_PIPE_PERCENT_CS registers */ |
4035 | .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ |
4036 | .emit_ib = gfx_v9_4_3_ring_emit_ib_compute, |
4037 | .emit_fence = gfx_v9_4_3_ring_emit_fence, |
4038 | .emit_pipeline_sync = gfx_v9_4_3_ring_emit_pipeline_sync, |
4039 | .emit_vm_flush = gfx_v9_4_3_ring_emit_vm_flush, |
4040 | .emit_gds_switch = gfx_v9_4_3_ring_emit_gds_switch, |
4041 | .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush, |
4042 | .test_ring = gfx_v9_4_3_ring_test_ring, |
4043 | .test_ib = gfx_v9_4_3_ring_test_ib, |
4044 | .insert_nop = amdgpu_ring_insert_nop, |
4045 | .pad_ib = amdgpu_ring_generic_pad_ib, |
4046 | .emit_wreg = gfx_v9_4_3_ring_emit_wreg, |
4047 | .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, |
4048 | .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, |
4049 | .emit_mem_sync = gfx_v9_4_3_emit_mem_sync, |
4050 | .emit_wave_limit = gfx_v9_4_3_emit_wave_limit, |
4051 | }; |
4052 | |
4053 | static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = { |
4054 | .type = AMDGPU_RING_TYPE_KIQ, |
4055 | .align_mask = 0xff, |
4056 | .nop = PACKET3(PACKET3_NOP, 0x3FFF), |
4057 | .support_64bit_ptrs = true, |
4058 | .get_rptr = gfx_v9_4_3_ring_get_rptr_compute, |
4059 | .get_wptr = gfx_v9_4_3_ring_get_wptr_compute, |
4060 | .set_wptr = gfx_v9_4_3_ring_set_wptr_compute, |
4061 | .emit_frame_size = |
4062 | 20 + /* gfx_v9_4_3_ring_emit_gds_switch */ |
4063 | 7 + /* gfx_v9_4_3_ring_emit_hdp_flush */ |
4064 | 5 + /* hdp invalidate */ |
4065 | 7 + /* gfx_v9_4_3_ring_emit_pipeline_sync */ |
4066 | SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 + |
4067 | SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + |
4068 | 2 + /* gfx_v9_4_3_ring_emit_vm_flush */ |
4069 | 8 + 8 + 8, /* gfx_v9_4_3_ring_emit_fence_kiq x3 for user fence, vm fence */ |
4070 | .emit_ib_size = 7, /* gfx_v9_4_3_ring_emit_ib_compute */ |
4071 | .emit_fence = gfx_v9_4_3_ring_emit_fence_kiq, |
4072 | .test_ring = gfx_v9_4_3_ring_test_ring, |
4073 | .insert_nop = amdgpu_ring_insert_nop, |
4074 | .pad_ib = amdgpu_ring_generic_pad_ib, |
4075 | .emit_rreg = gfx_v9_4_3_ring_emit_rreg, |
4076 | .emit_wreg = gfx_v9_4_3_ring_emit_wreg, |
4077 | .emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait, |
4078 | .emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait, |
4079 | }; |
4080 | |
4081 | static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev) |
4082 | { |
4083 | int i, j, num_xcc; |
4084 | |
4085 | num_xcc = NUM_XCC(adev->gfx.xcc_mask); |
4086 | for (i = 0; i < num_xcc; i++) { |
4087 | adev->gfx.kiq[i].ring.funcs = &gfx_v9_4_3_ring_funcs_kiq; |
4088 | |
4089 | for (j = 0; j < adev->gfx.num_compute_rings; j++) |
4090 | adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings].funcs |
4091 | = &gfx_v9_4_3_ring_funcs_compute; |
4092 | } |
4093 | } |
4094 | |
4095 | static const struct amdgpu_irq_src_funcs gfx_v9_4_3_eop_irq_funcs = { |
4096 | .set = gfx_v9_4_3_set_eop_interrupt_state, |
4097 | .process = gfx_v9_4_3_eop_irq, |
4098 | }; |
4099 | |
4100 | static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_reg_irq_funcs = { |
4101 | .set = gfx_v9_4_3_set_priv_reg_fault_state, |
4102 | .process = gfx_v9_4_3_priv_reg_irq, |
4103 | }; |
4104 | |
4105 | static const struct amdgpu_irq_src_funcs gfx_v9_4_3_priv_inst_irq_funcs = { |
4106 | .set = gfx_v9_4_3_set_priv_inst_fault_state, |
4107 | .process = gfx_v9_4_3_priv_inst_irq, |
4108 | }; |
4109 | |
4110 | static void gfx_v9_4_3_set_irq_funcs(struct amdgpu_device *adev) |
4111 | { |
4112 | adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; |
4113 | adev->gfx.eop_irq.funcs = &gfx_v9_4_3_eop_irq_funcs; |
4114 | |
4115 | adev->gfx.priv_reg_irq.num_types = 1; |
4116 | adev->gfx.priv_reg_irq.funcs = &gfx_v9_4_3_priv_reg_irq_funcs; |
4117 | |
4118 | adev->gfx.priv_inst_irq.num_types = 1; |
4119 | adev->gfx.priv_inst_irq.funcs = &gfx_v9_4_3_priv_inst_irq_funcs; |
4120 | } |
4121 | |
4122 | static void gfx_v9_4_3_set_rlc_funcs(struct amdgpu_device *adev) |
4123 | { |
4124 | adev->gfx.rlc.funcs = &gfx_v9_4_3_rlc_funcs; |
4125 | } |
4126 | |
4127 | |
4128 | static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev) |
4129 | { |
	/* init asic gds info */
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 3):
		/* 9.4.3 removed all the GDS internal memory,
		 * only support GWS opcode in kernel, like barrier,
		 * semaphore, etc. */
4136 | adev->gds.gds_size = 0; |
4137 | break; |
4138 | default: |
4139 | adev->gds.gds_size = 0x10000; |
4140 | break; |
4141 | } |
4142 | |
	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
4144 | case IP_VERSION(9, 4, 3): |
4145 | /* deprecated for 9.4.3, no usage at all */ |
4146 | adev->gds.gds_compute_max_wave_id = 0; |
4147 | break; |
4148 | default: |
4149 | /* this really depends on the chip */ |
4150 | adev->gds.gds_compute_max_wave_id = 0x7ff; |
4151 | break; |
4152 | } |
4153 | |
4154 | adev->gds.gws_size = 64; |
4155 | adev->gds.oa_size = 16; |
4156 | } |
4157 | |
4158 | static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, |
4159 | u32 bitmap, int xcc_id) |
4160 | { |
4161 | u32 data; |
4162 | |
4163 | if (!bitmap) |
4164 | return; |
4165 | |
4166 | data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; |
4167 | data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; |
4168 | |
4169 | WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data); |
4170 | } |
4171 | |
4172 | static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id) |
4173 | { |
4174 | u32 data, mask; |
4175 | |
4176 | data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG); |
4177 | data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG); |
4178 | |
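	/* CC_GC_* reports the fused-off CUs and GC_USER_* the ones disabled
	 * by the driver; mask the combined inactive bits and invert to get
	 * the active-CU bitmap.
	 */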
4179 | data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; |
4180 | data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; |
4181 | |
	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
4183 | |
4184 | return (~data) & mask; |
4185 | } |
4186 | |
4187 | static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev, |
4188 | struct amdgpu_cu_info *cu_info) |
4189 | { |
4190 | int i, j, k, counter, xcc_id, active_cu_number = 0; |
4191 | u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; |
4192 | unsigned disable_masks[4 * 4]; |
4193 | |
4194 | if (!adev || !cu_info) |
4195 | return -EINVAL; |
4196 | |
4197 | /* |
4198 | * 16 comes from bitmap array size 4*4, and it can cover all gfx9 ASICs |
4199 | */ |
4200 | if (adev->gfx.config.max_shader_engines * |
4201 | adev->gfx.config.max_sh_per_se > 16) |
4202 | return -EINVAL; |
4203 | |
	amdgpu_gfx_parse_disable_cu(disable_masks,
				    adev->gfx.config.max_shader_engines,
				    adev->gfx.config.max_sh_per_se);
4207 | |
4208 | mutex_lock(&adev->grbm_idx_mutex); |
4209 | for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) { |
4210 | for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { |
4211 | for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { |
4212 | mask = 1; |
4213 | ao_bitmap = 0; |
4214 | counter = 0; |
				gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
				gfx_v9_4_3_set_user_cu_inactive_bitmap(
					adev,
					disable_masks[i * adev->gfx.config.max_sh_per_se + j],
					xcc_id);
4220 | bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id); |
4221 | |
4222 | cu_info->bitmap[xcc_id][i][j] = bitmap; |
4223 | |
4224 | for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) { |
4225 | if (bitmap & mask) { |
4226 | if (counter < adev->gfx.config.max_cu_per_sh) |
4227 | ao_bitmap |= mask; |
4228 | counter++; |
4229 | } |
4230 | mask <<= 1; |
4231 | } |
4232 | active_cu_number += counter; |
4233 | if (i < 2 && j < 2) |
4234 | ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); |
4235 | cu_info->ao_cu_bitmap[i][j] = ao_bitmap; |
4236 | } |
4237 | } |
		gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
					    xcc_id);
4240 | } |
	mutex_unlock(&adev->grbm_idx_mutex);
4242 | |
4243 | cu_info->number = active_cu_number; |
4244 | cu_info->ao_cu_mask = ao_cu_mask; |
4245 | cu_info->simd_per_cu = NUM_SIMD_PER_CU; |
4246 | |
4247 | return 0; |
4248 | } |
4249 | |
4250 | const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = { |
4251 | .type = AMD_IP_BLOCK_TYPE_GFX, |
4252 | .major = 9, |
4253 | .minor = 4, |
4254 | .rev = 3, |
4255 | .funcs = &gfx_v9_4_3_ip_funcs, |
4256 | }; |
4257 | |
4258 | static int gfx_v9_4_3_xcp_resume(void *handle, uint32_t inst_mask) |
4259 | { |
4260 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
4261 | uint32_t tmp_mask; |
4262 | int i, r; |
4263 | |
	/* TODO: initialize golden registers */
	/* gfx_v9_4_3_init_golden_registers(adev); */
4266 | |
4267 | tmp_mask = inst_mask; |
4268 | for_each_inst(i, tmp_mask) |
		gfx_v9_4_3_xcc_constants_init(adev, i);
4270 | |
4271 | if (!amdgpu_sriov_vf(adev)) { |
4272 | tmp_mask = inst_mask; |
4273 | for_each_inst(i, tmp_mask) { |
			r = gfx_v9_4_3_xcc_rlc_resume(adev, i);
4275 | if (r) |
4276 | return r; |
4277 | } |
4278 | } |
4279 | |
4280 | tmp_mask = inst_mask; |
4281 | for_each_inst(i, tmp_mask) { |
		r = gfx_v9_4_3_xcc_cp_resume(adev, i);
4283 | if (r) |
4284 | return r; |
4285 | } |
4286 | |
4287 | return 0; |
4288 | } |
4289 | |
4290 | static int gfx_v9_4_3_xcp_suspend(void *handle, uint32_t inst_mask) |
4291 | { |
4292 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
4293 | int i; |
4294 | |
4295 | for_each_inst(i, inst_mask) |
		gfx_v9_4_3_xcc_fini(adev, i);
4297 | |
4298 | return 0; |
4299 | } |
4300 | |
4301 | struct amdgpu_xcp_ip_funcs gfx_v9_4_3_xcp_funcs = { |
4302 | .suspend = &gfx_v9_4_3_xcp_suspend, |
4303 | .resume = &gfx_v9_4_3_xcp_resume |
4304 | }; |
4305 | |
4306 | struct amdgpu_ras_block_hw_ops gfx_v9_4_3_ras_ops = { |
4307 | .query_ras_error_count = &gfx_v9_4_3_query_ras_error_count, |
4308 | .reset_ras_error_count = &gfx_v9_4_3_reset_ras_error_count, |
4309 | }; |
4310 | |
4311 | static int gfx_v9_4_3_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) |
4312 | { |
4313 | int r; |
4314 | |
4315 | r = amdgpu_ras_block_late_init(adev, ras_block); |
4316 | if (r) |
4317 | return r; |
4318 | |
	r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__GFX,
				&gfx_v9_4_3_aca_info,
				NULL);
4322 | if (r) |
4323 | goto late_fini; |
4324 | |
4325 | return 0; |
4326 | |
4327 | late_fini: |
4328 | amdgpu_ras_block_late_fini(adev, ras_block); |
4329 | |
4330 | return r; |
4331 | } |
4332 | |
4333 | struct amdgpu_gfx_ras gfx_v9_4_3_ras = { |
4334 | .ras_block = { |
4335 | .hw_ops = &gfx_v9_4_3_ras_ops, |
4336 | .ras_late_init = &gfx_v9_4_3_ras_late_init, |
4337 | }, |
4338 | .enable_watchdog_timer = &gfx_v9_4_3_enable_watchdog_timer, |
4339 | }; |
4340 | |