/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include "amdgpu.h"
#include "gfx_v12_0.h"
#include "soc15_common.h"
#include "soc21.h"
#include "gc/gc_12_0_0_offset.h"
#include "gc/gc_12_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
#include "v12_structs.h"
#include "mes_v12_api_def.h"

MODULE_FIRMWARE("amdgpu/gc_12_0_0_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_0_uni_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_mes1.bin");
MODULE_FIRMWARE("amdgpu/gc_12_0_1_uni_mes.bin");

static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block);
static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block);
static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev);
static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);

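/* Size in bytes of the per-pipe end-of-pipe (EOP) buffer backing each MES
 * ring; see mes_v12_0_allocate_eop_buf().
 */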
#define MES_EOP_SIZE   2048

static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
			     ring->wptr);
		WDOORBELL64(ring->doorbell_index, ring->wptr);
	} else {
		BUG();
	}
}

static u64 mes_v12_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return *ring->rptr_cpu_addr;
}

static u64 mes_v12_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	u64 wptr;

	if (ring->use_doorbell)
		wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
	else
		BUG();
	return wptr;
}

static const struct amdgpu_ring_funcs mes_v12_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_MES,
	.align_mask = 1,
	.nop = 0,
	.support_64bit_ptrs = true,
	.get_rptr = mes_v12_0_ring_get_rptr,
	.get_wptr = mes_v12_0_ring_get_wptr,
	.set_wptr = mes_v12_0_ring_set_wptr,
	.insert_nop = amdgpu_ring_insert_nop,
};

static const char *mes_v12_0_opcodes[] = {
	"SET_HW_RSRC",
	"SET_SCHEDULING_CONFIG",
	"ADD_QUEUE",
	"REMOVE_QUEUE",
	"PERFORM_YIELD",
	"SET_GANG_PRIORITY_LEVEL",
	"SUSPEND",
	"RESUME",
	"RESET",
	"SET_LOG_BUFFER",
	"CHANGE_GANG_PRIORITY",
	"QUERY_SCHEDULER_STATUS",
	"unused",
	"SET_DEBUG_VMID",
	"MISC",
	"UPDATE_ROOT_PAGE_TABLE",
	"AMD_LOG",
	"SET_SE_MODE",
	"SET_GANG_SUBMIT",
	"SET_HW_RSRC_1",
};

static const char *mes_v12_0_misc_opcodes[] = {
	"WRITE_REG",
	"INV_GART",
	"QUERY_STATUS",
	"READ_REG",
	"WAIT_REG_MEM",
	"SET_SHADER_DEBUGGER",
	"NOTIFY_WORK_ON_UNMAPPED_QUEUE",
	"NOTIFY_TO_UNMAP_PROCESSES",
};

static const char *mes_v12_0_get_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if (x_pkt->header.opcode < ARRAY_SIZE(mes_v12_0_opcodes))
		op_str = mes_v12_0_opcodes[x_pkt->header.opcode];

	return op_str;
}

static const char *mes_v12_0_get_misc_op_string(union MESAPI__MISC *x_pkt)
{
	const char *op_str = NULL;

	if ((x_pkt->header.opcode == MES_SCH_API_MISC) &&
	    (x_pkt->opcode < ARRAY_SIZE(mes_v12_0_misc_opcodes)))
		op_str = mes_v12_0_misc_opcodes[x_pkt->opcode];

	return op_str;
}

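/*
 * Submit a MES API packet and poll for completion.  The frame written to
 * the ring is the caller's packet followed by a QUERY_SCHEDULER_STATUS
 * packet, and two fences are armed: a write-back slot that the firmware
 * fills in for the caller's packet, and the ring fence that the trailing
 * status packet signals once the whole frame has been consumed.  The
 * submission only counts as complete when the ring fence reaches 'seq'
 * and the write-back slot has been set non-zero.
 */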
static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
						    int pipe, void *pkt, int size,
						    int api_status_off)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;
	signed long timeout = 2100000; /* 2100 ms */
	struct amdgpu_device *adev = mes->adev;
	struct amdgpu_ring *ring = &mes->ring[pipe];
	spinlock_t *ring_lock = &mes->ring_lock[pipe];
	struct MES_API_STATUS *api_status;
	union MESAPI__MISC *x_pkt = pkt;
	const char *op_str, *misc_op_str;
	unsigned long flags;
	u64 status_gpu_addr;
	u32 seq, status_offset;
	u64 *status_ptr;
	signed long r;
	int ret;

	if (x_pkt->header.opcode >= MES_SCH_API_MAX)
		return -EINVAL;

	if (amdgpu_emu_mode) {
		timeout *= 100;
	} else if (amdgpu_sriov_vf(adev)) {
		/* Worst case in SR-IOV: all of the other 15 VFs time out, and each VF needs about 600ms */
		timeout = 15 * 600 * 1000;
	}

	ret = amdgpu_device_wb_get(adev, &status_offset);
	if (ret)
		return ret;

	status_gpu_addr = adev->wb.gpu_addr + (status_offset * 4);
	status_ptr = (u64 *)&adev->wb.wb[status_offset];
	*status_ptr = 0;

	spin_lock_irqsave(ring_lock, flags);
	r = amdgpu_ring_alloc(ring, (size + sizeof(mes_status_pkt)) / 4);
	if (r)
		goto error_unlock_free;

	seq = ++ring->fence_drv.sync_seq;
	r = amdgpu_fence_wait_polling(ring,
				      seq - ring->fence_drv.num_fences_mask,
				      timeout);
	if (r < 1)
		goto error_undo;

	api_status = (struct MES_API_STATUS *)((char *)pkt + api_status_off);
	api_status->api_completion_fence_addr = status_gpu_addr;
	api_status->api_completion_fence_value = 1;

	amdgpu_ring_write_multiple(ring, pkt, size / 4);

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));
	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_status_pkt.api_status.api_completion_fence_addr =
		ring->fence_drv.gpu_addr;
	mes_status_pkt.api_status.api_completion_fence_value = seq;

	amdgpu_ring_write_multiple(ring, &mes_status_pkt,
				   sizeof(mes_status_pkt) / 4);

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(ring_lock, flags);

	op_str = mes_v12_0_get_op_string(x_pkt);
	misc_op_str = mes_v12_0_get_misc_op_string(x_pkt);

	if (misc_op_str)
		dev_dbg(adev->dev, "MES(%d) msg=%s (%s) was emitted\n",
			pipe, op_str, misc_op_str);
	else if (op_str)
		dev_dbg(adev->dev, "MES(%d) msg=%s was emitted\n",
			pipe, op_str);
	else
		dev_dbg(adev->dev, "MES(%d) msg=%d was emitted\n",
			pipe, x_pkt->header.opcode);

	r = amdgpu_fence_wait_polling(ring, seq, timeout);
	if (r < 1 || !*status_ptr) {
		if (misc_op_str)
			dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
				pipe, op_str, misc_op_str);
		else if (op_str)
			dev_err(adev->dev, "MES(%d) failed to respond to msg=%s\n",
				pipe, op_str);
		else
			dev_err(adev->dev, "MES(%d) failed to respond to msg=%d\n",
				pipe, x_pkt->header.opcode);

		while (halt_if_hws_hang)
			schedule();

		r = -ETIMEDOUT;
		goto error_wb_free;
	}

	amdgpu_device_wb_free(adev, status_offset);
	return 0;

error_undo:
	dev_err(adev->dev, "MES ring buffer is full.\n");
	amdgpu_ring_undo(ring);

error_unlock_free:
	spin_unlock_irqrestore(ring_lock, flags);

error_wb_free:
	amdgpu_device_wb_free(adev, status_offset);
	return r;
}

static int convert_to_mes_queue_type(int queue_type)
{
	if (queue_type == AMDGPU_RING_TYPE_GFX)
		return MES_QUEUE_TYPE_GFX;
	else if (queue_type == AMDGPU_RING_TYPE_COMPUTE)
		return MES_QUEUE_TYPE_COMPUTE;
	else if (queue_type == AMDGPU_RING_TYPE_SDMA)
		return MES_QUEUE_TYPE_SDMA;
	else if (queue_type == AMDGPU_RING_TYPE_MES)
		return MES_QUEUE_TYPE_SCHQ;
	else
		BUG();
	return -1;
}

static int convert_to_mes_priority_level(int priority_level)
{
	switch (priority_level) {
	case AMDGPU_MES_PRIORITY_LEVEL_LOW:
		return AMD_PRIORITY_LEVEL_LOW;
	case AMDGPU_MES_PRIORITY_LEVEL_NORMAL:
	default:
		return AMD_PRIORITY_LEVEL_NORMAL;
	case AMDGPU_MES_PRIORITY_LEVEL_MEDIUM:
		return AMD_PRIORITY_LEVEL_MEDIUM;
	case AMDGPU_MES_PRIORITY_LEVEL_HIGH:
		return AMD_PRIORITY_LEVEL_HIGH;
	case AMDGPU_MES_PRIORITY_LEVEL_REALTIME:
		return AMD_PRIORITY_LEVEL_REALTIME;
	}
}

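/*
 * Build an ADD_QUEUE packet from the generic amdgpu_mes input and submit
 * it on the scheduler pipe.  Process/gang context addresses, quanta and
 * priorities come straight from the caller; the VM context control value
 * is taken from the GFX hub so MES programs the same translation settings
 * the driver uses for its own mappings.
 */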
static int mes_v12_0_add_hw_queue(struct amdgpu_mes *mes,
				  struct mes_add_queue_input *input)
{
	struct amdgpu_device *adev = mes->adev;
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
	uint32_t vm_cntx_cntl = hub->vm_cntx_cntl;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.process_id = input->process_id;
	mes_add_queue_pkt.page_table_base_addr = input->page_table_base_addr;
	mes_add_queue_pkt.process_va_start = input->process_va_start;
	mes_add_queue_pkt.process_va_end = input->process_va_end;
	mes_add_queue_pkt.process_quantum = input->process_quantum;
	mes_add_queue_pkt.process_context_addr = input->process_context_addr;
	mes_add_queue_pkt.gang_quantum = input->gang_quantum;
	mes_add_queue_pkt.gang_context_addr = input->gang_context_addr;
	mes_add_queue_pkt.inprocess_gang_priority =
		convert_to_mes_priority_level(input->inprocess_gang_priority);
	mes_add_queue_pkt.gang_global_priority_level =
		convert_to_mes_priority_level(input->gang_global_priority_level);
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;

	mes_add_queue_pkt.wptr_addr = input->wptr_mc_addr;

	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.paging = input->paging;
	mes_add_queue_pkt.vm_context_cntl = vm_cntx_cntl;
	mes_add_queue_pkt.gws_base = input->gws_base;
	mes_add_queue_pkt.gws_size = input->gws_size;
	mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
	mes_add_queue_pkt.tma_addr = input->tma_addr;
	mes_add_queue_pkt.trap_en = input->trap_en;
	mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear;
	mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;

	/* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */
	mes_add_queue_pkt.is_aql_queue = input->is_aql_queue;
	mes_add_queue_pkt.gds_size = input->queue_size;

	return mes_v12_0_submit_pkt_and_poll_completion(mes,
			AMDGPU_MES_SCHED_PIPE,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
				     struct mes_remove_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;

	return mes_v12_0_submit_pkt_and_poll_completion(mes,
			AMDGPU_MES_SCHED_PIPE,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

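/*
 * Acquire (req=true) or release (req=false) the CP_GFX_INDEX_MUTEX
 * hardware mutex as client 4 before touching GRBM_GFX_INDEX, so the
 * driver does not race the firmware for the SE index/broadcast window.
 * Polls until the hardware reflects the requested state or
 * adev->usec_timeout iterations elapse.
 */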
int gfx_v12_0_request_gfx_index_mutex(struct amdgpu_device *adev,
				      bool req)
{
	u32 i, tmp, val;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* Request with MeId=2, PipeId=0 */
		tmp = REG_SET_FIELD(0, CP_GFX_INDEX_MUTEX, REQUEST, req);
		tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX, CLIENTID, 4);
		WREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX, tmp);

		val = RREG32_SOC15(GC, 0, regCP_GFX_INDEX_MUTEX);
		if (req) {
			if (val == tmp)
				break;
		} else {
			tmp = REG_SET_FIELD(tmp, CP_GFX_INDEX_MUTEX,
					    REQUEST, 1);

			/* unlocked or locked by firmware */
			if (val != tmp)
				break;
		}
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		return -EINVAL;

	return 0;
}

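/*
 * Reset a hung queue directly through MMIO rather than a MES packet, for
 * cases where the scheduler itself cannot be relied on.  GFX queues are
 * reset per VMID through CP_VMID_RESET, compute queues through a dequeue
 * request plus SPI_COMPUTE_QUEUE_RESET, and SDMA queues through the
 * per-instance SDMAn_QUEUE_RESET_REQ register; each path then polls for
 * the reset to take effect.
 */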
static int mes_v12_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
				      uint32_t me_id, uint32_t pipe_id,
				      uint32_t queue_id, uint32_t vmid)
{
	struct amdgpu_device *adev = mes->adev;
	uint32_t value, reg;
	int i, r = 0;

	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);

	if (queue_type == AMDGPU_RING_TYPE_GFX) {
		dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
			 me_id, pipe_id, queue_id, vmid);

		mutex_lock(&adev->gfx.reset_sem_mutex);
		gfx_v12_0_request_gfx_index_mutex(adev, true);
		/* all SEs allow writes */
		WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
			     (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
		value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
		if (pipe_id == 0)
			value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
		else
			value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
		WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
		gfx_v12_0_request_gfx_index_mutex(adev, false);
		mutex_unlock(&adev->gfx.reset_sem_mutex);

		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
			r = -ETIMEDOUT;
		}

		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
		dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
			 me_id, pipe_id, queue_id);
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
		WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);

		/* wait until the dequeue takes effect */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on hqd deactivate\n");
			r = -ETIMEDOUT;
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	} else if (queue_type == AMDGPU_RING_TYPE_SDMA) {
		dev_info(adev->dev, "reset sdma queue (%d:%d:%d)\n",
			 me_id, pipe_id, queue_id);
		switch (me_id) {
		case 1:
			reg = SOC15_REG_OFFSET(GC, 0, regSDMA1_QUEUE_RESET_REQ);
			break;
		case 0:
		default:
			reg = SOC15_REG_OFFSET(GC, 0, regSDMA0_QUEUE_RESET_REQ);
			break;
		}

		value = 1 << queue_id;
		WREG32(reg, value);
		/* wait for queue reset done */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(reg) & value))
				break;
			udelay(1);
		}
		if (i >= adev->usec_timeout) {
			dev_err(adev->dev, "failed to wait on sdma queue reset done\n");
			r = -ETIMEDOUT;
		}
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
	return r;
}

static int mes_v12_0_map_legacy_queue(struct amdgpu_mes *mes,
				      struct mes_map_legacy_queue_input *input)
{
	union MESAPI__ADD_QUEUE mes_add_queue_pkt;
	int pipe;

	memset(&mes_add_queue_pkt, 0, sizeof(mes_add_queue_pkt));

	mes_add_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_add_queue_pkt.header.opcode = MES_SCH_API_ADD_QUEUE;
	mes_add_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_add_queue_pkt.pipe_id = input->pipe_id;
	mes_add_queue_pkt.queue_id = input->queue_id;
	mes_add_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_add_queue_pkt.mqd_addr = input->mqd_addr;
	mes_add_queue_pkt.wptr_addr = input->wptr_addr;
	mes_add_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);
	mes_add_queue_pkt.map_legacy_kq = 1;

	if (mes->adev->enable_uni_mes)
		pipe = AMDGPU_MES_KIQ_PIPE;
	else
		pipe = AMDGPU_MES_SCHED_PIPE;

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
			offsetof(union MESAPI__ADD_QUEUE, api_status));
}

static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
					struct mes_unmap_legacy_queue_input *input)
{
	union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
	int pipe;

	memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));

	mes_remove_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_remove_queue_pkt.header.opcode = MES_SCH_API_REMOVE_QUEUE;
	mes_remove_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
	mes_remove_queue_pkt.gang_context_addr = 0;

	mes_remove_queue_pkt.pipe_id = input->pipe_id;
	mes_remove_queue_pkt.queue_id = input->queue_id;

	if (input->action == PREEMPT_QUEUES_NO_UNMAP) {
		mes_remove_queue_pkt.preempt_legacy_gfx_queue = 1;
		mes_remove_queue_pkt.tf_addr = input->trail_fence_addr;
		mes_remove_queue_pkt.tf_data =
			lower_32_bits(input->trail_fence_data);
	} else {
		mes_remove_queue_pkt.unmap_legacy_queue = 1;
		mes_remove_queue_pkt.queue_type =
			convert_to_mes_queue_type(input->queue_type);
	}

	if (mes->adev->enable_uni_mes)
		pipe = AMDGPU_MES_KIQ_PIPE;
	else
		pipe = AMDGPU_MES_SCHED_PIPE;

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
			offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}

static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
				  struct mes_suspend_gang_input *input)
{
	return 0;
}

static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
				 struct mes_resume_gang_input *input)
{
	return 0;
}

static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
{
	union MESAPI__QUERY_MES_STATUS mes_status_pkt;

	memset(&mes_status_pkt, 0, sizeof(mes_status_pkt));

	mes_status_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_status_pkt.header.opcode = MES_SCH_API_QUERY_SCHEDULER_STATUS;
	mes_status_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_status_pkt, sizeof(mes_status_pkt),
			offsetof(union MESAPI__QUERY_MES_STATUS, api_status));
}

static int mes_v12_0_misc_op(struct amdgpu_mes *mes,
			     struct mes_misc_op_input *input)
{
	union MESAPI__MISC misc_pkt;
	int pipe;

	if (mes->adev->enable_uni_mes)
		pipe = AMDGPU_MES_KIQ_PIPE;
	else
		pipe = AMDGPU_MES_SCHED_PIPE;

	memset(&misc_pkt, 0, sizeof(misc_pkt));

	misc_pkt.header.type = MES_API_TYPE_SCHEDULER;
	misc_pkt.header.opcode = MES_SCH_API_MISC;
	misc_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	switch (input->op) {
	case MES_MISC_OP_READ_REG:
		misc_pkt.opcode = MESAPI_MISC__READ_REG;
		misc_pkt.read_reg.reg_offset = input->read_reg.reg_offset;
		misc_pkt.read_reg.buffer_addr = input->read_reg.buffer_addr;
		break;
	case MES_MISC_OP_WRITE_REG:
		misc_pkt.opcode = MESAPI_MISC__WRITE_REG;
		misc_pkt.write_reg.reg_offset = input->write_reg.reg_offset;
		misc_pkt.write_reg.reg_value = input->write_reg.reg_value;
		break;
	case MES_MISC_OP_WRM_REG_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = 0;
		break;
	case MES_MISC_OP_WRM_REG_WR_WAIT:
		misc_pkt.opcode = MESAPI_MISC__WAIT_REG_MEM;
		misc_pkt.wait_reg_mem.op = WRM_OPERATION__WR_WAIT_WR_REG;
		misc_pkt.wait_reg_mem.reference = input->wrm_reg.ref;
		misc_pkt.wait_reg_mem.mask = input->wrm_reg.mask;
		misc_pkt.wait_reg_mem.reg_offset1 = input->wrm_reg.reg0;
		misc_pkt.wait_reg_mem.reg_offset2 = input->wrm_reg.reg1;
		break;
	case MES_MISC_OP_SET_SHADER_DEBUGGER:
		pipe = AMDGPU_MES_SCHED_PIPE;
		misc_pkt.opcode = MESAPI_MISC__SET_SHADER_DEBUGGER;
		misc_pkt.set_shader_debugger.process_context_addr =
			input->set_shader_debugger.process_context_addr;
		misc_pkt.set_shader_debugger.flags.u32all =
			input->set_shader_debugger.flags.u32all;
		misc_pkt.set_shader_debugger.spi_gdbg_per_vmid_cntl =
			input->set_shader_debugger.spi_gdbg_per_vmid_cntl;
		memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl,
		       input->set_shader_debugger.tcp_watch_cntl,
		       sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl));
		misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en;
		break;
	case MES_MISC_OP_CHANGE_CONFIG:
		misc_pkt.opcode = MESAPI_MISC__CHANGE_CONFIG;
		misc_pkt.change_config.opcode =
			MESAPI_MISC__CHANGE_CONFIG_OPTION_LIMIT_SINGLE_PROCESS;
		misc_pkt.change_config.option.bits.limit_single_process =
			input->change_config.option.limit_single_process;
		break;
	default:
		DRM_ERROR("unsupported misc op (%d)\n", input->op);
		return -EINVAL;
	}

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&misc_pkt, sizeof(misc_pkt),
			offsetof(union MESAPI__MISC, api_status));
}

static int mes_v12_0_set_hw_resources_1(struct amdgpu_mes *mes, int pipe)
{
	union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_1_pkt;

	memset(&mes_set_hw_res_1_pkt, 0, sizeof(mes_set_hw_res_1_pkt));

	mes_set_hw_res_1_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_1_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1;
	mes_set_hw_res_1_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
	mes_set_hw_res_1_pkt.mes_kiq_unmap_timeout = 0xa;
	mes_set_hw_res_1_pkt.cleaner_shader_fence_mc_addr =
		mes->resource_1_gpu_addr[pipe];

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_set_hw_res_1_pkt, sizeof(mes_set_hw_res_1_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status));
}

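/*
 * Describe the hardware resources MES is allowed to schedule: VMID masks,
 * HQD masks per compute/gfx/SDMA pipe, the aggregated doorbells per
 * priority level and the GC/MMHUB/OSSSYS register apertures.  Only the
 * scheduler pipe receives the queue masks; both pipes get their own
 * context and query-status fence buffers.
 */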
static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
{
	int i;
	struct amdgpu_device *adev = mes->adev;
	union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;

	memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));

	mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC;
	mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	if (pipe == AMDGPU_MES_SCHED_PIPE) {
		mes_set_hw_res_pkt.vmid_mask_mmhub = mes->vmid_mask_mmhub;
		mes_set_hw_res_pkt.vmid_mask_gfxhub = mes->vmid_mask_gfxhub;
		mes_set_hw_res_pkt.gds_size = adev->gds.gds_size;
		mes_set_hw_res_pkt.paging_vmid = 0;

		for (i = 0; i < MAX_COMPUTE_PIPES; i++)
			mes_set_hw_res_pkt.compute_hqd_mask[i] =
				mes->compute_hqd_mask[i];

		for (i = 0; i < MAX_GFX_PIPES; i++)
			mes_set_hw_res_pkt.gfx_hqd_mask[i] =
				mes->gfx_hqd_mask[i];

		for (i = 0; i < MAX_SDMA_PIPES; i++)
			mes_set_hw_res_pkt.sdma_hqd_mask[i] =
				mes->sdma_hqd_mask[i];

		for (i = 0; i < AMD_PRIORITY_NUM_LEVELS; i++)
			mes_set_hw_res_pkt.aggregated_doorbells[i] =
				mes->aggregated_doorbells[i];
	}

	mes_set_hw_res_pkt.g_sch_ctx_gpu_mc_ptr =
		mes->sch_ctx_gpu_addr[pipe];
	mes_set_hw_res_pkt.query_status_fence_gpu_mc_ptr =
		mes->query_status_fence_gpu_addr[pipe];

	for (i = 0; i < 5; i++) {
		mes_set_hw_res_pkt.gc_base[i] = adev->reg_offset[GC_HWIP][0][i];
		mes_set_hw_res_pkt.mmhub_base[i] =
			adev->reg_offset[MMHUB_HWIP][0][i];
		mes_set_hw_res_pkt.osssys_base[i] =
			adev->reg_offset[OSSSYS_HWIP][0][i];
	}

	mes_set_hw_res_pkt.disable_reset = 1;
	mes_set_hw_res_pkt.disable_mes_log = 1;
	mes_set_hw_res_pkt.use_different_vmid_compute = 1;
	mes_set_hw_res_pkt.enable_reg_active_poll = 1;
	mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;

	/*
	 * Keep the oversubscribe timer for SDMA.  Once unmapped-doorbell
	 * handling is supported, other queues will not use the
	 * oversubscribe timer.
	 * handling mode - 0: disabled; 1: basic version; 2: basic+ version
	 */
	mes_set_hw_res_pkt.oversubscription_timer = 50;
	mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;

	if (amdgpu_mes_log_enable) {
		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr +
			pipe * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);
	}

	if (adev->enforce_isolation[0] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
		mes_set_hw_res_pkt.limit_single_process = 1;

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
			offsetof(union MESAPI_SET_HW_RESOURCES, api_status));
}

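/*
 * Program one CP_MES_DOORBELL_CONTROLn register per priority level
 * (low through realtime) with the matching aggregated doorbell offset,
 * and enable doorbell-updated messages via CP_HQD_GFX_CONTROL so MES is
 * notified of new submissions.
 */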
static void mes_v12_0_init_aggregated_doorbell(struct amdgpu_mes *mes)
{
	struct amdgpu_device *adev = mes->adev;
	uint32_t data;

	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1);
	data &= ~(CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET_MASK |
		  CP_MES_DOORBELL_CONTROL1__DOORBELL_EN_MASK |
		  CP_MES_DOORBELL_CONTROL1__DOORBELL_HIT_MASK);
	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_LOW] <<
		CP_MES_DOORBELL_CONTROL1__DOORBELL_OFFSET__SHIFT;
	data |= 1 << CP_MES_DOORBELL_CONTROL1__DOORBELL_EN__SHIFT;
	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL1, data);

	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2);
	data &= ~(CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET_MASK |
		  CP_MES_DOORBELL_CONTROL2__DOORBELL_EN_MASK |
		  CP_MES_DOORBELL_CONTROL2__DOORBELL_HIT_MASK);
	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_NORMAL] <<
		CP_MES_DOORBELL_CONTROL2__DOORBELL_OFFSET__SHIFT;
	data |= 1 << CP_MES_DOORBELL_CONTROL2__DOORBELL_EN__SHIFT;
	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL2, data);

	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3);
	data &= ~(CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET_MASK |
		  CP_MES_DOORBELL_CONTROL3__DOORBELL_EN_MASK |
		  CP_MES_DOORBELL_CONTROL3__DOORBELL_HIT_MASK);
	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_MEDIUM] <<
		CP_MES_DOORBELL_CONTROL3__DOORBELL_OFFSET__SHIFT;
	data |= 1 << CP_MES_DOORBELL_CONTROL3__DOORBELL_EN__SHIFT;
	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL3, data);

	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4);
	data &= ~(CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET_MASK |
		  CP_MES_DOORBELL_CONTROL4__DOORBELL_EN_MASK |
		  CP_MES_DOORBELL_CONTROL4__DOORBELL_HIT_MASK);
	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_HIGH] <<
		CP_MES_DOORBELL_CONTROL4__DOORBELL_OFFSET__SHIFT;
	data |= 1 << CP_MES_DOORBELL_CONTROL4__DOORBELL_EN__SHIFT;
	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL4, data);

	data = RREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5);
	data &= ~(CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET_MASK |
		  CP_MES_DOORBELL_CONTROL5__DOORBELL_EN_MASK |
		  CP_MES_DOORBELL_CONTROL5__DOORBELL_HIT_MASK);
	data |= mes->aggregated_doorbells[AMDGPU_MES_PRIORITY_LEVEL_REALTIME] <<
		CP_MES_DOORBELL_CONTROL5__DOORBELL_OFFSET__SHIFT;
	data |= 1 << CP_MES_DOORBELL_CONTROL5__DOORBELL_EN__SHIFT;
	WREG32_SOC15(GC, 0, regCP_MES_DOORBELL_CONTROL5, data);

	data = 1 << CP_HQD_GFX_CONTROL__DB_UPDATED_MSG_EN__SHIFT;
	WREG32_SOC15(GC, 0, regCP_HQD_GFX_CONTROL, data);
}

static void mes_v12_0_enable_unmapped_doorbell_handling(
		struct amdgpu_mes *mes, bool enable)
{
	struct amdgpu_device *adev = mes->adev;
	uint32_t data = RREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL);

	/*
	 * The default PROC_LSB setting is 0xc, which means doorbell
	 * addr[16:12] gives the doorbell page number.  For KFD, each
	 * process uses two doorbell pages, so we need to change the
	 * setting to 0xd.
	 */
	data &= ~CP_UNMAPPED_DOORBELL__PROC_LSB_MASK;
	data |= 0xd << CP_UNMAPPED_DOORBELL__PROC_LSB__SHIFT;

	data |= (enable ? 1 : 0) << CP_UNMAPPED_DOORBELL__ENABLE__SHIFT;

	WREG32_SOC15(GC, 0, regCP_UNMAPPED_DOORBELL, data);
}

static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
				    struct mes_reset_queue_input *input)
{
	union MESAPI__RESET mes_reset_queue_pkt;
	int pipe;

	if (input->use_mmio)
		return mes_v12_0_reset_queue_mmio(mes, input->queue_type,
						  input->me_id, input->pipe_id,
						  input->queue_id, input->vmid);

	memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));

	mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
	mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
	mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;

	mes_reset_queue_pkt.queue_type =
		convert_to_mes_queue_type(input->queue_type);

	if (input->legacy_gfx) {
		mes_reset_queue_pkt.reset_legacy_gfx = 1;
		mes_reset_queue_pkt.pipe_id_lp = input->pipe_id;
		mes_reset_queue_pkt.queue_id_lp = input->queue_id;
		mes_reset_queue_pkt.mqd_mc_addr_lp = input->mqd_addr;
		mes_reset_queue_pkt.doorbell_offset_lp = input->doorbell_offset;
		mes_reset_queue_pkt.wptr_addr_lp = input->wptr_addr;
		mes_reset_queue_pkt.vmid_id_lp = input->vmid;
	} else {
		mes_reset_queue_pkt.reset_queue_only = 1;
		mes_reset_queue_pkt.doorbell_offset = input->doorbell_offset;
	}

	if (input->is_kq)
		pipe = AMDGPU_MES_KIQ_PIPE;
	else
		pipe = AMDGPU_MES_SCHED_PIPE;

	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
			&mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
			offsetof(union MESAPI__RESET, api_status));
}

static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
	.add_hw_queue = mes_v12_0_add_hw_queue,
	.remove_hw_queue = mes_v12_0_remove_hw_queue,
	.map_legacy_queue = mes_v12_0_map_legacy_queue,
	.unmap_legacy_queue = mes_v12_0_unmap_legacy_queue,
	.suspend_gang = mes_v12_0_suspend_gang,
	.resume_gang = mes_v12_0_resume_gang,
	.misc_op = mes_v12_0_misc_op,
	.reset_hw_queue = mes_v12_0_reset_hw_queue,
};

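/*
 * Copy the MES instruction ucode out of the firmware image into a
 * page-aligned VRAM BO whose GPU address is later programmed into
 * CP_MES_IC_BASE_LO/HI for backdoor loading.
 */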
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
					   enum amdgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->mes.ucode_fw_obj[pipe],
				      &adev->mes.ucode_fw_gpu_addr[pipe],
				      (void **)&adev->mes.ucode_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.ucode_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.ucode_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.ucode_fw_obj[pipe]);

	return 0;
}

static int mes_v12_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
						enum amdgpu_mes_pipe pipe)
{
	int r;
	const struct mes_firmware_header_v1_0 *mes_hdr;
	const __le32 *fw_data;
	unsigned fw_size;

	mes_hdr = (const struct mes_firmware_header_v1_0 *)
		adev->mes.fw[pipe]->data;

	fw_data = (const __le32 *)(adev->mes.fw[pipe]->data +
		   le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes));
	fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);

	r = amdgpu_bo_create_reserved(adev, fw_size,
				      64 * 1024,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &adev->mes.data_fw_obj[pipe],
				      &adev->mes.data_fw_gpu_addr[pipe],
				      (void **)&adev->mes.data_fw_ptr[pipe]);
	if (r) {
		dev_err(adev->dev, "(%d) failed to create mes data fw bo\n", r);
		return r;
	}

	memcpy(adev->mes.data_fw_ptr[pipe], fw_data, fw_size);

	amdgpu_bo_kunmap(adev->mes.data_fw_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.data_fw_obj[pipe]);

	return 0;
}

static void mes_v12_0_free_ucode_buffers(struct amdgpu_device *adev,
					 enum amdgpu_mes_pipe pipe)
{
	amdgpu_bo_free_kernel(&adev->mes.data_fw_obj[pipe],
			      &adev->mes.data_fw_gpu_addr[pipe],
			      (void **)&adev->mes.data_fw_ptr[pipe]);

	amdgpu_bo_free_kernel(&adev->mes.ucode_fw_obj[pipe],
			      &adev->mes.ucode_fw_gpu_addr[pipe],
			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
}

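/*
 * Bring the MES pipes out of, or put them into, reset.  On enable, each
 * pipe is reset, gets its ucode start address programmed and is then
 * activated; on disable, both pipes are halted and the instruction cache
 * is invalidated.
 */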
static void mes_v12_0_enable(struct amdgpu_device *adev, bool enable)
{
	uint64_t ucode_addr;
	uint32_t pipe, data = 0;

	if (enable) {
		mutex_lock(&adev->srbm_mutex);
		for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
			soc21_grbm_select(adev, 3, pipe, 0, 0);
			if (amdgpu_mes_log_enable) {
				u32 log_size = AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE;
				/* In case uni mes is not enabled, only program for pipe 0 */
				if (adev->mes.event_log_size >= (pipe + 1) * log_size) {
					WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO,
						     lower_32_bits(adev->mes.event_log_gpu_addr +
						     pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
					WREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI,
						     upper_32_bits(adev->mes.event_log_gpu_addr +
						     pipe * log_size + AMDGPU_MES_LOG_BUFFER_SIZE));
					dev_info(adev->dev, "Setup CP MES MSCRATCH address : 0x%x. 0x%x\n",
						 RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_HI),
						 RREG32_SOC15(GC, 0, regCP_MES_MSCRATCH_LO));
				}
			}

			data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
			if (pipe == 0)
				data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
			else
				data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
			WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);

			ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
				     lower_32_bits(ucode_addr));
			WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
				     upper_32_bits(ucode_addr));

			/* unhalt MES and activate one pipe each loop */
			data = REG_SET_FIELD(0, CP_MES_CNTL, MES_PIPE0_ACTIVE, 1);
			if (pipe)
				data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 1);
			dev_info(adev->dev, "program CP_MES_CNTL : 0x%x\n", data);

			WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
		}
		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		if (amdgpu_emu_mode)
			msleep(100);
		else if (adev->enable_uni_mes)
			udelay(500);
		else
			udelay(50);
	} else {
		data = RREG32_SOC15(GC, 0, regCP_MES_CNTL);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_ACTIVE, 0);
		data = REG_SET_FIELD(data, CP_MES_CNTL,
				     MES_INVALIDATE_ICACHE, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE0_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_PIPE1_RESET, 1);
		data = REG_SET_FIELD(data, CP_MES_CNTL, MES_HALT, 1);
		WREG32_SOC15(GC, 0, regCP_MES_CNTL, data);
	}
}

static void mes_v12_0_set_ucode_start_addr(struct amdgpu_device *adev)
{
	uint64_t ucode_addr;
	int pipe;

	mes_v12_0_enable(adev, false);

	mutex_lock(&adev->srbm_mutex);
	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		/* me=3, queue=0 */
		soc21_grbm_select(adev, 3, pipe, 0, 0);

		/* set ucode start address */
		ucode_addr = adev->mes.uc_start_addr[pipe] >> 2;
		WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START,
			     lower_32_bits(ucode_addr));
		WREG32_SOC15(GC, 0, regCP_MES_PRGRM_CNTR_START_HI,
			     upper_32_bits(ucode_addr));

		soc21_grbm_select(adev, 0, 0, 0, 0);
	}
	mutex_unlock(&adev->srbm_mutex);
}

/* This function is for backdoor MES firmware loading */
static int mes_v12_0_load_microcode(struct amdgpu_device *adev,
				    enum amdgpu_mes_pipe pipe, bool prime_icache)
{
	int r;
	uint32_t data;

	mes_v12_0_enable(adev, false);

	if (!adev->mes.fw[pipe])
		return -EINVAL;

	r = mes_v12_0_allocate_ucode_buffer(adev, pipe);
	if (r)
		return r;

	r = mes_v12_0_allocate_ucode_data_buffer(adev, pipe);
	if (r) {
		mes_v12_0_free_ucode_buffers(adev, pipe);
		return r;
	}

	mutex_lock(&adev->srbm_mutex);
	/* me=3, pipe=0, queue=0 */
	soc21_grbm_select(adev, 3, pipe, 0, 0);

	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_CNTL, 0);

	/* set ucode firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_LO,
		     lower_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_IC_BASE_HI,
		     upper_32_bits(adev->mes.ucode_fw_gpu_addr[pipe]));

	/* set ucode instruction cache boundary to 2M-1 */
	WREG32_SOC15(GC, 0, regCP_MES_MIBOUND_LO, 0x1FFFFF);

	/* set ucode data firmware address */
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_LO,
		     lower_32_bits(adev->mes.data_fw_gpu_addr[pipe]));
	WREG32_SOC15(GC, 0, regCP_MES_MDBASE_HI,
		     upper_32_bits(adev->mes.data_fw_gpu_addr[pipe]));

	/* Set data cache boundary CP_MES_MDBOUND_LO */
	WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x7FFFF);

	if (prime_icache) {
		/* invalidate ICACHE */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);

		/* prime the ICACHE. */
		data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL);
		data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1);
		WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data);
	}

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	return 0;
}

static int mes_v12_0_allocate_eop_buf(struct amdgpu_device *adev,
				      enum amdgpu_mes_pipe pipe)
{
	int r;
	u32 *eop;

	r = amdgpu_bo_create_reserved(adev, MES_EOP_SIZE, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      (void **)&eop);
	if (r) {
		dev_warn(adev->dev, "(%d) create EOP bo failed\n", r);
		return r;
	}

	memset(eop, 0,
	       adev->mes.eop_gpu_obj[pipe]->tbo.base.size);

	amdgpu_bo_kunmap(adev->mes.eop_gpu_obj[pipe]);
	amdgpu_bo_unreserve(adev->mes.eop_gpu_obj[pipe]);

	return 0;
}

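/*
 * Fill in the v12 compute MQD that backs a MES ring (or the KIQ ring when
 * unified MES is disabled).  The values mirror a regular compute HQD
 * setup; reserved_184 additionally sets
 * CP_HQD_GFX_CONTROL.DB_UPDATED_MSG_EN through the MQD, since that
 * register is CP-internal.
 */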
static int mes_v12_0_mqd_init(struct amdgpu_ring *ring)
{
	struct v12_compute_mqd *mqd = ring->mqd_ptr;
	uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr;
	uint32_t tmp;

	mqd->header = 0xC0310800;
	mqd->compute_pipelinestat_enable = 0x00000001;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
	mqd->compute_misc_reserved = 0x00000007;

	eop_base_addr = ring->eop_gpu_addr >> 8;

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = regCP_HQD_EOP_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE,
			    (order_base_2(MES_EOP_SIZE / 4) - 1));

	mqd->cp_hqd_eop_base_addr_lo = lower_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
	mqd->cp_hqd_eop_control = tmp;

	/* disable the queue if it's active */
	ring->wptr = 0;
	mqd->cp_hqd_pq_rptr = 0;
	mqd->cp_hqd_pq_wptr_lo = 0;
	mqd->cp_hqd_pq_wptr_hi = 0;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = ring->mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(ring->mqd_gpu_addr);

	/* set MQD vmid to 0 */
	tmp = regCP_MQD_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0);
	mqd->cp_mqd_control = tmp;

	/* set the pointer to the HQD, this is similar to CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = lower_32_bits(hqd_gpu_addr);
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = ring->rptr_gpu_addr;
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = ring->wptr_gpu_addr;
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffff8;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	tmp = regCP_HQD_PQ_CONTROL_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE,
			    (order_base_2(ring->ring_size / 4) - 1));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
			    ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8));
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1);
	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, NO_UPDATE_RPTR, 1);
	mqd->cp_hqd_pq_control = tmp;

	/* enable doorbell */
	tmp = 0;
	if (ring->use_doorbell) {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_OFFSET, ring->doorbell_index);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 1);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_SOURCE, 0);
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_HIT, 0);
	} else {
		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
				    DOORBELL_EN, 0);
	}
	mqd->cp_hqd_pq_doorbell_control = tmp;

	mqd->cp_hqd_vmid = 0;
	/* activate the queue */
	mqd->cp_hqd_active = 1;

	tmp = regCP_HQD_PERSISTENT_STATE_DEFAULT;
	tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE,
			    PRELOAD_SIZE, 0x55);
	mqd->cp_hqd_persistent_state = tmp;

	mqd->cp_hqd_ib_control = regCP_HQD_IB_CONTROL_DEFAULT;
	mqd->cp_hqd_iq_timer = regCP_HQD_IQ_TIMER_DEFAULT;
	mqd->cp_hqd_quantum = regCP_HQD_QUANTUM_DEFAULT;

	/*
	 * Set CP_HQD_GFX_CONTROL.DB_UPDATED_MSG_EN[15] to enable unmapped
	 * doorbell handling.  This is a reserved CP internal register and
	 * cannot be accessed by others.
	 */
	mqd->reserved_184 = BIT(15);

	return 0;
}

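/*
 * Mirror the prepared MQD into the HQD registers by hand under GRBM
 * select (me 3 targets the MES engine).  Used for pipes that are not
 * mapped through KIQ/MES packets; see mes_v12_0_queue_init().
 */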
static void mes_v12_0_queue_init_register(struct amdgpu_ring *ring)
{
	struct v12_compute_mqd *mqd = ring->mqd_ptr;
	struct amdgpu_device *adev = ring->adev;
	uint32_t data = 0;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, ring->pipe, 0, 0);

	/* set CP_HQD_VMID.VMID = 0. */
	data = RREG32_SOC15(GC, 0, regCP_HQD_VMID);
	data = REG_SET_FIELD(data, CP_HQD_VMID, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_VMID, data);

	/* set CP_HQD_PQ_DOORBELL_CONTROL.DOORBELL_EN=0 */
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* set CP_MQD_BASE_ADDR/HI with the MQD base address */
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo);
	WREG32_SOC15(GC, 0, regCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi);

	/* set CP_MQD_CONTROL.VMID=0 */
	data = RREG32_SOC15(GC, 0, regCP_MQD_CONTROL);
	data = REG_SET_FIELD(data, CP_MQD_CONTROL, VMID, 0);
	WREG32_SOC15(GC, 0, regCP_MQD_CONTROL, 0);

	/* set CP_HQD_PQ_BASE/HI with the ring buffer base address */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi);

	/* set CP_HQD_PQ_RPTR_REPORT_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR,
		     mqd->cp_hqd_pq_rptr_report_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
		     mqd->cp_hqd_pq_rptr_report_addr_hi);

	/* set CP_HQD_PQ_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control);

	/* set CP_HQD_PQ_WPTR_POLL_ADDR/HI */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR,
		     mqd->cp_hqd_pq_wptr_poll_addr_lo);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
		     mqd->cp_hqd_pq_wptr_poll_addr_hi);

	/* set CP_HQD_PQ_DOORBELL_CONTROL */
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL,
		     mqd->cp_hqd_pq_doorbell_control);

	/* set CP_HQD_PERSISTENT_STATE.PRELOAD_SIZE=0x53 */
	WREG32_SOC15(GC, 0, regCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);

	/* set CP_HQD_ACTIVE.ACTIVE=1 */
	WREG32_SOC15(GC, 0, regCP_HQD_ACTIVE, mqd->cp_hqd_active);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static int mes_v12_0_kiq_enable_queue(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
	int r;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_map_queues(kiq_ring, &adev->mes.ring[0]);

	r = amdgpu_ring_test_ring(kiq_ring);
	if (r) {
		DRM_ERROR("kiq enable failed\n");
		kiq_ring->sched.ready = false;
	}
	return r;
}

static int mes_v12_0_queue_init(struct amdgpu_device *adev,
				enum amdgpu_mes_pipe pipe)
{
	struct amdgpu_ring *ring;
	int r;

	if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else
		ring = &adev->mes.ring[pipe];

	if ((adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) &&
	    (amdgpu_in_reset(adev) || adev->in_suspend)) {
		*(ring->wptr_cpu_addr) = 0;
		*(ring->rptr_cpu_addr) = 0;
		amdgpu_ring_clear_ring(ring);
	}

	r = mes_v12_0_mqd_init(ring);
	if (r)
		return r;

	if (pipe == AMDGPU_MES_SCHED_PIPE) {
		if (adev->enable_uni_mes)
			r = amdgpu_mes_map_legacy_queue(adev, ring);
		else
			r = mes_v12_0_kiq_enable_queue(adev);
		if (r)
			return r;
	} else {
		mes_v12_0_queue_init_register(ring);
	}

	if (((pipe == AMDGPU_MES_SCHED_PIPE) && !adev->mes.sched_version) ||
	    ((pipe == AMDGPU_MES_KIQ_PIPE) && !adev->mes.kiq_version)) {
		/* get MES scheduler/KIQ versions */
		mutex_lock(&adev->srbm_mutex);
		soc21_grbm_select(adev, 3, pipe, 0, 0);

		if (pipe == AMDGPU_MES_SCHED_PIPE)
			adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
			adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);

		soc21_grbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}

	return 0;
}

static int mes_v12_0_ring_init(struct amdgpu_device *adev, int pipe)
{
	struct amdgpu_ring *ring;

	ring = &adev->mes.ring[pipe];

	ring->funcs = &mes_v12_0_ring_funcs;

	ring->me = 3;
	ring->pipe = pipe;
	ring->queue = 0;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->eop_gpu_addr = adev->mes.eop_gpu_addr[pipe];
	ring->no_scheduler = true;
	sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	if (pipe == AMDGPU_MES_SCHED_PIPE)
		ring->doorbell_index = adev->doorbell_index.mes_ring0 << 1;
	else
		ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1;

	return amdgpu_ring_init(adev, ring, 1024, NULL, 0,
				AMDGPU_RING_PRIO_DEFAULT, NULL);
}

1430 | static int mes_v12_0_kiq_ring_init(struct amdgpu_device *adev) |
1431 | { |
1432 | struct amdgpu_ring *ring; |
1433 | |
1434 | spin_lock_init(&adev->gfx.kiq[0].ring_lock); |
1435 | |
1436 | ring = &adev->gfx.kiq[0].ring; |
1437 | |
1438 | ring->me = 3; |
1439 | ring->pipe = 1; |
1440 | ring->queue = 0; |
1441 | |
1442 | ring->adev = NULL; |
1443 | ring->ring_obj = NULL; |
1444 | ring->use_doorbell = true; |
1445 | ring->doorbell_index = adev->doorbell_index.mes_ring1 << 1; |
1446 | ring->eop_gpu_addr = adev->mes.eop_gpu_addr[AMDGPU_MES_KIQ_PIPE]; |
1447 | ring->no_scheduler = true; |
1448 | sprintf(buf: ring->name, fmt: "mes_kiq_%d.%d.%d" , |
1449 | ring->me, ring->pipe, ring->queue); |
1450 | |
1451 | return amdgpu_ring_init(adev, ring, max_dw: 1024, NULL, irq_type: 0, |
1452 | hw_prio: AMDGPU_RING_PRIO_DEFAULT, NULL); |
1453 | } |
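
/*
 * Allocate the MQD buffer object for a pipe's ring in GTT, plus a
 * CPU-side backup copy of the MQD.  Failing to allocate the backup is
 * only a warning, not fatal.
 */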

static int mes_v12_0_mqd_sw_init(struct amdgpu_device *adev,
				 enum amdgpu_mes_pipe pipe)
{
	int r, mqd_size = sizeof(struct v12_compute_mqd);
	struct amdgpu_ring *ring;

	if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE)
		ring = &adev->gfx.kiq[0].ring;
	else
		ring = &adev->mes.ring[pipe];

	if (ring->mqd_obj)
		return 0;

	r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
				    &ring->mqd_gpu_addr, &ring->mqd_ptr);
	if (r) {
		dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
		return r;
	}

	memset(ring->mqd_ptr, 0, mqd_size);

	/* prepare MQD backup */
	adev->mes.mqd_backup[pipe] = kmalloc(mqd_size, GFP_KERNEL);
	if (!adev->mes.mqd_backup[pipe])
		dev_warn(adev->dev,
			 "no memory to create MQD backup for ring %s\n",
			 ring->name);

	return 0;
}
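
/*
 * IP-block sw_init: register the MES callbacks, size the event log
 * (one log + scratch region per pipe with the unified MES, a single
 * shared region otherwise), then per pipe allocate the EOP buffer and
 * MQD and initialize the ring.  Non-KIQ pipes additionally get a
 * resource_1 buffer in VRAM.
 */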

static int mes_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe, r;

	adev->mes.funcs = &mes_v12_0_funcs;
	adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
	adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
	adev->mes.enable_legacy_queue_map = true;

	adev->mes.event_log_size = adev->enable_uni_mes ?
		(AMDGPU_MAX_MES_PIPES * (AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE)) :
		(AMDGPU_MES_LOG_BUFFER_SIZE + AMDGPU_MES_MSCRATCH_SIZE);

	r = amdgpu_mes_init(adev);
	if (r)
		return r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		r = mes_v12_0_allocate_eop_buf(adev, pipe);
		if (r)
			return r;

		r = mes_v12_0_mqd_sw_init(adev, pipe);
		if (r)
			return r;

		if (!adev->enable_uni_mes && pipe == AMDGPU_MES_KIQ_PIPE) {
			r = mes_v12_0_kiq_ring_init(adev);
			if (r)
				return r;
		} else {
			r = mes_v12_0_ring_init(adev, pipe);
			if (r)
				return r;

			r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_VRAM,
						    &adev->mes.resource_1[pipe],
						    &adev->mes.resource_1_gpu_addr[pipe],
						    &adev->mes.resource_1_addr[pipe]);
			if (r) {
				dev_err(adev->dev, "(%d) failed to create mes resource_1 bo pipe[%d]\n", r, pipe);
				return r;
			}
		}
	}

	return 0;
}
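
/*
 * Tear down in roughly the reverse order of sw_init: per-pipe buffer
 * objects, MQD backups, EOP buffers and firmware handles first, then
 * the standalone KIQ ring (non-unified mode only), then any directly
 * loaded ucode buffers, and finally the common MES state.
 */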

static int mes_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		amdgpu_bo_free_kernel(&adev->mes.resource_1[pipe],
				      &adev->mes.resource_1_gpu_addr[pipe],
				      &adev->mes.resource_1_addr[pipe]);

		kfree(adev->mes.mqd_backup[pipe]);

		amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
				      &adev->mes.eop_gpu_addr[pipe],
				      NULL);
		amdgpu_ucode_release(&adev->mes.fw[pipe]);

		if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) {
			amdgpu_bo_free_kernel(&adev->mes.ring[pipe].mqd_obj,
					      &adev->mes.ring[pipe].mqd_gpu_addr,
					      &adev->mes.ring[pipe].mqd_ptr);
			amdgpu_ring_fini(&adev->mes.ring[pipe]);
		}
	}

	if (!adev->enable_uni_mes) {
		amdgpu_bo_free_kernel(&adev->gfx.kiq[0].ring.mqd_obj,
				      &adev->gfx.kiq[0].ring.mqd_gpu_addr,
				      &adev->gfx.kiq[0].ring.mqd_ptr);
		amdgpu_ring_fini(&adev->gfx.kiq[0].ring);
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_KIQ_PIPE);
		mes_v12_0_free_ucode_buffers(adev, AMDGPU_MES_SCHED_PIPE);
	}

	amdgpu_mes_fini(adev);
	return 0;
}
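
/*
 * Manually dequeue the scheduler pipe's hardware queue: request a
 * dequeue, poll CP_HQD_ACTIVE for up to adev->usec_timeout
 * microseconds, then disarm the doorbell and zero the queue pointers.
 * Note that a poll timeout is silent here; teardown proceeds
 * regardless.
 */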

static void mes_v12_0_kiq_dequeue_sched(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	mutex_lock(&adev->srbm_mutex);
	soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0);

	/* disable the queue if it's active */
	if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) {
		WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}
	}
	data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_EN, 0);
	data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL,
			     DOORBELL_HIT, 1);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0);

	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0);
	WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0);

	soc21_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	adev->mes.ring[0].sched.ready = false;
}
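
/*
 * Point the RLC at the queue acting as KIQ.  The low byte of
 * RLC_CP_SCHEDULERS encodes (me << 5) | (pipe << 3) | queue, with
 * bit 7 (0x80) apparently arming the entry.  For the MES KIQ on
 * me 3 / pipe 1 / queue 0 that works out to:
 *
 *   (3 << 5) | (1 << 3) | 0 | 0x80 = 0xe8
 */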

static void mes_v12_0_kiq_setting(struct amdgpu_ring *ring)
{
	uint32_t tmp;
	struct amdgpu_device *adev = ring->adev;

	/* tell RLC which is KIQ queue */
	tmp = RREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS);
	tmp &= 0xffffff00;
	tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
	WREG32_SOC15(GC, 0, regRLC_CP_SCHEDULERS, tmp | 0x80);
}
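
/*
 * KIQ bring-up path: select the KIQ queue, load the MES firmware if
 * the driver loads it directly, enable the engine and initialize the
 * KIQ pipe's queue.  With the unified MES the KIQ pipe also receives
 * its HW resource description; if legacy queue mapping is enabled this
 * chains into the full hw_init below.
 */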

static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
{
	int r = 0;
	struct amdgpu_ip_block *ip_block;

	if (adev->enable_uni_mes)
		mes_v12_0_kiq_setting(&adev->mes.ring[AMDGPU_MES_KIQ_PIPE]);
	else
		mes_v12_0_kiq_setting(&adev->gfx.kiq[0].ring);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
		r = mes_v12_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false);
		if (r) {
			DRM_ERROR("failed to load MES fw, r=%d\n", r);
			return r;
		}

		r = mes_v12_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true);
		if (r) {
			DRM_ERROR("failed to load MES kiq fw, r=%d\n", r);
			return r;
		}

		mes_v12_0_set_ucode_start_addr(adev);
	} else if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
		mes_v12_0_set_ucode_start_addr(adev);
	}

	mes_v12_0_enable(adev, true);

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_MES);
	if (unlikely(!ip_block)) {
		dev_err(adev->dev, "Failed to get MES handle\n");
		return -EINVAL;
	}

	r = mes_v12_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
	if (r)
		goto failure;

	if (adev->enable_uni_mes) {
		r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_KIQ_PIPE);
		if (r)
			goto failure;

		mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
	}

	if (adev->mes.enable_legacy_queue_map) {
		r = mes_v12_0_hw_init(ip_block);
		if (r)
			goto failure;
	}

	return r;

failure:
	mes_v12_0_hw_fini(ip_block);
	return r;
}
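
/*
 * KIQ teardown: unmap the scheduler queue through the MES itself in
 * unified mode, or dequeue it by hand otherwise, then halt the MES
 * engine.
 */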

static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev)
{
	if (adev->mes.ring[0].sched.ready) {
		if (adev->enable_uni_mes)
			amdgpu_mes_unmap_legacy_queue(adev,
						      &adev->mes.ring[AMDGPU_MES_SCHED_PIPE],
						      RESET_QUEUES, 0, 0);
		else
			mes_v12_0_kiq_dequeue_sched(adev);

		adev->mes.ring[0].sched.ready = false;
	}

	mes_v12_0_enable(adev, false);

	return 0;
}
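
/*
 * Scheduler pipe bring-up.  Without a MES KIQ the firmware is loaded
 * and the engine enabled here; either way the scheduler queue is
 * initialized, the HW resource descriptions are submitted, and the
 * aggregated doorbell is set up before the scheduler status is
 * queried.
 */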

static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int r;
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->mes.ring[0].sched.ready)
		goto out;

	if (!adev->enable_mes_kiq) {
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
			r = mes_v12_0_load_microcode(adev,
						     AMDGPU_MES_SCHED_PIPE, true);
			if (r) {
				DRM_ERROR("failed to load MES fw, r=%d\n", r);
				return r;
			}

			mes_v12_0_set_ucode_start_addr(adev);
		} else if (adev->firmware.load_type ==
			   AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
			mes_v12_0_set_ucode_start_addr(adev);
		}

		mes_v12_0_enable(adev, true);
	}

	/* Enable the MES to handle doorbell ring on unmapped queue */
	mes_v12_0_enable_unmapped_doorbell_handling(&adev->mes, true);

	r = mes_v12_0_queue_init(adev, AMDGPU_MES_SCHED_PIPE);
	if (r)
		goto failure;

	r = mes_v12_0_set_hw_resources(&adev->mes, AMDGPU_MES_SCHED_PIPE);
	if (r)
		goto failure;

	mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE);

	mes_v12_0_init_aggregated_doorbell(&adev->mes);

	r = mes_v12_0_query_sched_status(&adev->mes, AMDGPU_MES_SCHED_PIPE);
	if (r) {
		DRM_ERROR("MES is busy\n");
		goto failure;
	}

	r = amdgpu_mes_update_enforce_isolation(adev);
	if (r)
		goto failure;

out:
	/*
	 * Disable KIQ ring usage from the driver once MES is enabled.
	 * MES uses KIQ ring exclusively so driver cannot access KIQ ring
	 * with MES enabled.
	 */
	adev->gfx.kiq[0].ring.sched.ready = false;
	adev->mes.ring[0].sched.ready = true;

	return 0;

failure:
	mes_v12_0_hw_fini(ip_block);
	return r;
}
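
/*
 * Intentionally empty: MES teardown is expected to run through the
 * kiq_hw_fini callback installed in sw_init rather than through the
 * IP block's hw_fini.
 */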

static int mes_v12_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int mes_v12_0_suspend(struct amdgpu_ip_block *ip_block)
{
	return mes_v12_0_hw_fini(ip_block);
}

static int mes_v12_0_resume(struct amdgpu_ip_block *ip_block)
{
	return mes_v12_0_hw_init(ip_block);
}
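
/*
 * Suspend/resume simply map onto hw_fini/hw_init above; early_init
 * fetches the MES microcode for every pipe up front so that later
 * init stages can rely on the parsed firmware headers.
 */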

static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int pipe, r;

	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
		r = amdgpu_mes_init_microcode(adev, pipe);
		if (r)
			return r;
	}

	return 0;
}

static const struct amd_ip_funcs mes_v12_0_ip_funcs = {
	.name = "mes_v12_0",
	.early_init = mes_v12_0_early_init,
	.late_init = NULL,
	.sw_init = mes_v12_0_sw_init,
	.sw_fini = mes_v12_0_sw_fini,
	.hw_init = mes_v12_0_hw_init,
	.hw_fini = mes_v12_0_hw_fini,
	.suspend = mes_v12_0_suspend,
	.resume = mes_v12_0_resume,
};

const struct amdgpu_ip_block_version mes_v12_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_MES,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &mes_v12_0_ip_funcs,
};