1 | /* |
2 | * Copyright 2019 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | */ |
22 | #include <linux/mmu_context.h> |
23 | #include "amdgpu.h" |
24 | #include "amdgpu_amdkfd.h" |
25 | #include "amdgpu_amdkfd_gfx_v10.h" |
26 | #include "gc/gc_10_3_0_offset.h" |
27 | #include "gc/gc_10_3_0_sh_mask.h" |
28 | #include "oss/osssys_5_0_0_offset.h" |
29 | #include "oss/osssys_5_0_0_sh_mask.h" |
30 | #include "athub/athub_2_1_0_offset.h" |
31 | #include "athub/athub_2_1_0_sh_mask.h" |
32 | #include "soc15_common.h" |
33 | #include "v10_structs.h" |
34 | #include "nv.h" |
35 | #include "nvd.h" |
36 | |
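/*
 * Dequeue request types, written verbatim to mmCP_HQD_DEQUEUE_REQUEST
 * by hqd_destroy_v10_3().
 */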
37 | enum hqd_dequeue_request_type { |
38 | NO_ACTION = 0, |
39 | DRAIN_PIPE, |
40 | RESET_WAVES, |
41 | SAVE_WAVES |
42 | }; |
43 | |
44 | static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, |
45 | uint32_t queue, uint32_t vmid) |
46 | { |
47 | mutex_lock(&adev->srbm_mutex); |
	nv_grbm_select(adev, mec, pipe, queue, vmid);
49 | } |
50 | |
51 | static void unlock_srbm(struct amdgpu_device *adev) |
52 | { |
	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
55 | } |
56 | |
57 | static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, |
58 | uint32_t queue_id) |
59 | { |
60 | uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; |
61 | uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); |
62 | |
	lock_srbm(adev, mec, pipe, queue_id, 0);
64 | } |
65 | |
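/*
 * Each queue of each compute pipe owns one bit in the per-ME queue mask:
 * bit = pipe_id * num_queue_per_pipe + queue_id. hqd_load_v10_3() writes
 * this mask to mmCP_PQ_WPTR_POLL_CNTL1 to enable one-shot WPTR polling
 * for a single queue.
 */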
66 | static uint64_t get_queue_mask(struct amdgpu_device *adev, |
67 | uint32_t pipe_id, uint32_t queue_id) |
68 | { |
69 | unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe + |
70 | queue_id; |
71 | |
72 | return 1ull << bit; |
73 | } |
74 | |
75 | static void release_queue(struct amdgpu_device *adev) |
76 | { |
77 | unlock_srbm(adev); |
78 | } |
79 | |
80 | static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid, |
81 | uint32_t sh_mem_config, |
82 | uint32_t sh_mem_ape1_base, |
83 | uint32_t sh_mem_ape1_limit, |
84 | uint32_t sh_mem_bases, uint32_t inst) |
85 | { |
	lock_srbm(adev, 0, 0, 0, vmid);
87 | |
88 | WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); |
89 | WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); |
	/* APE1 no longer exists on GFX10, so the APE1 arguments are ignored */
91 | |
92 | unlock_srbm(adev); |
93 | } |
94 | |
95 | /* ATC is defeatured on Sienna_Cichlid */ |
96 | static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid, |
97 | unsigned int vmid, uint32_t inst) |
98 | { |
99 | uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; |
100 | |
101 | /* Mapping vmid to pasid also for IH block */ |
	pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
			vmid, pasid);
104 | WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value); |
105 | |
106 | return 0; |
107 | } |
108 | |
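/*
 * Enable time-stamp and opcode-error interrupts for one compute pipe.
 * The pipe is selected through the SRBM before writing mmCPC_INT_CNTL.
 */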
109 | static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, |
110 | uint32_t inst) |
111 | { |
112 | uint32_t mec; |
113 | uint32_t pipe; |
114 | |
115 | mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; |
116 | pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); |
117 | |
	lock_srbm(adev, mec, pipe, 0, 0);
119 | |
120 | WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, |
121 | CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | |
122 | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); |
123 | |
124 | unlock_srbm(adev); |
125 | |
126 | return 0; |
127 | } |
128 | |
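/*
 * Every SDMA engine exposes an identical block of per-queue (RLC)
 * registers. Return the offset of queue_id's block on engine_id,
 * relative to mmSDMA0_RLC0_RB_CNTL: the engine's register base plus
 * queue_id times the per-queue stride.
 */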
129 | static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, |
130 | unsigned int engine_id, |
131 | unsigned int queue_id) |
132 | { |
133 | uint32_t sdma_engine_reg_base = 0; |
134 | uint32_t sdma_rlc_reg_offset; |
135 | |
136 | switch (engine_id) { |
137 | default: |
138 | dev_warn(adev->dev, |
			 "Invalid sdma engine id (%d), using engine id 0\n",
140 | engine_id); |
141 | fallthrough; |
142 | case 0: |
143 | sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, |
144 | mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; |
145 | break; |
146 | case 1: |
147 | sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, |
148 | mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; |
149 | break; |
150 | case 2: |
151 | sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, |
152 | mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; |
153 | break; |
154 | case 3: |
155 | sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, |
156 | mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; |
157 | break; |
158 | } |
159 | |
160 | sdma_rlc_reg_offset = sdma_engine_reg_base |
161 | + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); |
162 | |
	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
164 | queue_id, sdma_rlc_reg_offset); |
165 | |
166 | return sdma_rlc_reg_offset; |
167 | } |
168 | |
169 | static inline struct v10_compute_mqd *get_mqd(void *mqd) |
170 | { |
171 | return (struct v10_compute_mqd *)mqd; |
172 | } |
173 | |
174 | static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) |
175 | { |
176 | return (struct v10_sdma_mqd *)mqd; |
177 | } |
178 | |
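/*
 * Load a compute queue: copy the HQD register file from the MQD
 * snapshot, enable the doorbell and (for user queues) one-shot WPTR
 * polling, then activate the queue through mmCP_HQD_ACTIVE.
 */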
179 | static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, |
180 | uint32_t pipe_id, uint32_t queue_id, |
181 | uint32_t __user *wptr, uint32_t wptr_shift, |
182 | uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) |
183 | { |
184 | struct v10_compute_mqd *m; |
185 | uint32_t *mqd_hqd; |
186 | uint32_t reg, hqd_base, data; |
187 | |
188 | m = get_mqd(mqd); |
189 | |
	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
191 | acquire_queue(adev, pipe_id, queue_id); |
192 | |
	/* HIQ is set during driver init period with vmid set to 0 */
194 | if (m->cp_hqd_vmid == 0) { |
195 | uint32_t value, mec, pipe; |
196 | |
197 | mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; |
198 | pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); |
199 | |
		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
201 | mec, pipe, queue_id); |
202 | value = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS); |
203 | value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, |
204 | ((mec << 5) | (pipe << 3) | queue_id | 0x80)); |
205 | WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, value); |
206 | } |
207 | |
208 | /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ |
209 | mqd_hqd = &m->cp_mqd_base_addr_lo; |
210 | hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); |
211 | |
212 | for (reg = hqd_base; |
213 | reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) |
214 | WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]); |
215 | |
217 | /* Activate doorbell logic before triggering WPTR poll. */ |
218 | data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, |
219 | CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); |
220 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data); |
221 | |
222 | if (wptr) { |
223 | /* Don't read wptr with get_user because the user |
224 | * context may not be accessible (if this function |
225 | * runs in a work queue). Instead trigger a one-shot |
226 | * polling read from memory in the CP. This assumes |
227 | * that wptr is GPU-accessible in the queue's VMID via |
228 | * ATC or SVM. WPTR==RPTR before starting the poll so |
229 | * the CP starts fetching new commands from the right |
230 | * place. |
231 | * |
232 | * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit |
233 | * tricky. Assume that the queue didn't overflow. The |
234 | * number of valid bits in the 32-bit RPTR depends on |
235 | * the queue size. The remaining bits are taken from |
236 | * the saved 64-bit WPTR. If the WPTR wrapped, add the |
237 | * queue size. |
238 | */ |
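		/*
		 * Worked example with illustrative numbers: a QUEUE_SIZE
		 * field of 9 gives queue_size = 2 << 9 = 0x400, so the low
		 * 10 bits come from the RPTR. With cp_hqd_pq_rptr = 0x123
		 * and a saved WPTR of 0x810, guessed_wptr starts at 0x123;
		 * 0x810 & 0x3ff = 0x010 is below 0x123, so the WPTR wrapped
		 * and 0x400 is added (0x523); adding the remaining WPTR bits
		 * (0x800) yields a guessed WPTR of 0xd23.
		 */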
239 | uint32_t queue_size = |
240 | 2 << REG_GET_FIELD(m->cp_hqd_pq_control, |
241 | CP_HQD_PQ_CONTROL, QUEUE_SIZE); |
242 | uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1); |
243 | |
244 | if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr) |
245 | guessed_wptr += queue_size; |
246 | guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); |
247 | guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; |
248 | |
249 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO, |
250 | lower_32_bits(guessed_wptr)); |
251 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI, |
252 | upper_32_bits(guessed_wptr)); |
253 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR, |
254 | lower_32_bits((uint64_t)wptr)); |
255 | WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, |
256 | upper_32_bits((uint64_t)wptr)); |
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
258 | (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); |
259 | WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, |
260 | (uint32_t)get_queue_mask(adev, pipe_id, queue_id)); |
261 | } |
262 | |
263 | /* Start the EOP fetcher */ |
264 | WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), |
265 | REG_SET_FIELD(m->cp_hqd_eop_rptr, |
266 | CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); |
267 | |
268 | data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); |
269 | WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); |
270 | |
271 | release_queue(adev); |
272 | |
273 | return 0; |
274 | } |
275 | |
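/*
 * Load the HIQ by submitting a MAP_QUEUES packet on the KIQ ring
 * (engine_sel 1 selects the HIQ) instead of programming the HQD
 * registers directly.
 */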
276 | static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, |
277 | uint32_t pipe_id, uint32_t queue_id, |
278 | uint32_t doorbell_off, uint32_t inst) |
279 | { |
280 | struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; |
281 | struct v10_compute_mqd *m; |
282 | uint32_t mec, pipe; |
283 | int r; |
284 | |
285 | m = get_mqd(mqd); |
286 | |
287 | acquire_queue(adev, pipe_id, queue_id); |
288 | |
289 | mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; |
290 | pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); |
291 | |
	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
293 | mec, pipe, queue_id); |
294 | |
	spin_lock(&adev->gfx.kiq[0].ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
297 | if (r) { |
		pr_err("Failed to alloc KIQ (%d).\n", r);
299 | goto out_unlock; |
300 | } |
301 | |
	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
304 | PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */ |
305 | PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */ |
306 | PACKET3_MAP_QUEUES_QUEUE(queue_id) | |
307 | PACKET3_MAP_QUEUES_PIPE(pipe) | |
308 | PACKET3_MAP_QUEUES_ME((mec - 1)) | |
309 | PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */ |
310 | PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */ |
311 | PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */ |
312 | PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */ |
	amdgpu_ring_write(kiq_ring,
			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);
320 | |
321 | out_unlock: |
	spin_unlock(&adev->gfx.kiq[0].ring_lock);
323 | release_queue(adev); |
324 | |
325 | return r; |
326 | } |
327 | |
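/*
 * Dump the HQD register range (mmCP_MQD_BASE_ADDR..mmCP_HQD_PQ_WPTR_HI)
 * as (byte offset, value) pairs; the dword offset is shifted left by 2
 * to form the byte offset. The array is kmalloc'ed here and owned by
 * the caller. A usage sketch, assuming the usual contract:
 *
 *	uint32_t (*dump)[2];
 *	uint32_t n_regs;
 *
 *	if (!hqd_dump_v10_3(adev, pipe_id, queue_id, &dump, &n_regs, 0)) {
 *		... consume n_regs (offset, value) pairs ...
 *		kfree(dump);
 *	}
 */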
328 | static int hqd_dump_v10_3(struct amdgpu_device *adev, |
329 | uint32_t pipe_id, uint32_t queue_id, |
330 | uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) |
331 | { |
332 | uint32_t i = 0, reg; |
333 | #define HQD_N_REGS 56 |
334 | #define DUMP_REG(addr) do { \ |
335 | if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ |
336 | break; \ |
337 | (*dump)[i][0] = (addr) << 2; \ |
338 | (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \ |
339 | } while (0) |
340 | |
341 | *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); |
342 | if (*dump == NULL) |
343 | return -ENOMEM; |
344 | |
345 | acquire_queue(adev, pipe_id, queue_id); |
346 | |
347 | for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); |
348 | reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) |
349 | DUMP_REG(reg); |
350 | |
351 | release_queue(adev); |
352 | |
353 | WARN_ON_ONCE(i != HQD_N_REGS); |
354 | *n_regs = i; |
355 | |
356 | return 0; |
357 | } |
358 | |
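/*
 * Load an SDMA queue: disable the ring buffer, wait for the context to
 * go idle, restore the doorbell and ring pointers (preferring the
 * user-mode WPTR when it is still readable), then re-enable the ring
 * buffer.
 */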
359 | static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd, |
360 | uint32_t __user *wptr, struct mm_struct *mm) |
361 | { |
362 | struct v10_sdma_mqd *m; |
363 | uint32_t sdma_rlc_reg_offset; |
364 | unsigned long end_jiffies; |
365 | uint32_t data; |
366 | uint64_t data64; |
367 | uint64_t __user *wptr64 = (uint64_t __user *)wptr; |
368 | |
369 | m = get_sdma_mqd(mqd); |
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
372 | |
373 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, |
374 | m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); |
375 | |
	end_jiffies = msecs_to_jiffies(2000) + jiffies;
377 | while (true) { |
378 | data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); |
379 | if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) |
380 | break; |
381 | if (time_after(jiffies, end_jiffies)) { |
			pr_err("SDMA RLC not idle in %s\n", __func__);
383 | return -ETIME; |
384 | } |
		usleep_range(500, 1000);
386 | } |
387 | |
388 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, |
389 | m->sdmax_rlcx_doorbell_offset); |
390 | |
391 | data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, |
392 | ENABLE, 1); |
393 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); |
394 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, |
395 | m->sdmax_rlcx_rb_rptr); |
396 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, |
397 | m->sdmax_rlcx_rb_rptr_hi); |
398 | |
399 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); |
400 | if (read_user_wptr(mm, wptr64, data64)) { |
401 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, |
402 | lower_32_bits(data64)); |
403 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, |
404 | upper_32_bits(data64)); |
405 | } else { |
406 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, |
407 | m->sdmax_rlcx_rb_rptr); |
408 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, |
409 | m->sdmax_rlcx_rb_rptr_hi); |
410 | } |
411 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); |
412 | |
413 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); |
414 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, |
415 | m->sdmax_rlcx_rb_base_hi); |
416 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, |
417 | m->sdmax_rlcx_rb_rptr_addr_lo); |
418 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, |
419 | m->sdmax_rlcx_rb_rptr_addr_hi); |
420 | |
421 | data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, |
422 | RB_ENABLE, 1); |
423 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); |
424 | |
425 | return 0; |
426 | } |
427 | |
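/*
 * Dump the per-queue SDMA register ranges; reuses the DUMP_REG() macro
 * from hqd_dump_v10_3() with HQD_N_REGS redefined for SDMA.
 */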
428 | static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, |
429 | uint32_t engine_id, uint32_t queue_id, |
430 | uint32_t (**dump)[2], uint32_t *n_regs) |
431 | { |
432 | uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, |
433 | engine_id, queue_id); |
434 | uint32_t i = 0, reg; |
435 | #undef HQD_N_REGS |
436 | #define HQD_N_REGS (19+6+7+12) |
437 | |
438 | *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); |
439 | if (*dump == NULL) |
440 | return -ENOMEM; |
441 | |
442 | for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) |
443 | DUMP_REG(sdma_rlc_reg_offset + reg); |
444 | for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) |
445 | DUMP_REG(sdma_rlc_reg_offset + reg); |
446 | for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; |
447 | reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) |
448 | DUMP_REG(sdma_rlc_reg_offset + reg); |
449 | for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; |
450 | reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) |
451 | DUMP_REG(sdma_rlc_reg_offset + reg); |
452 | |
453 | WARN_ON_ONCE(i != HQD_N_REGS); |
454 | *n_regs = i; |
455 | |
456 | return 0; |
457 | } |
458 | |
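/*
 * A queue slot is occupied if the HQD is active and its PQ base matches
 * queue_address. Both sides are compared shifted right by 8 because the
 * PQ base registers hold a 256-byte-aligned address.
 */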
459 | static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev, |
460 | uint64_t queue_address, uint32_t pipe_id, |
461 | uint32_t queue_id, uint32_t inst) |
462 | { |
463 | uint32_t act; |
464 | bool retval = false; |
465 | uint32_t low, high; |
466 | |
467 | acquire_queue(adev, pipe_id, queue_id); |
468 | act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); |
469 | if (act) { |
470 | low = lower_32_bits(queue_address >> 8); |
471 | high = upper_32_bits(queue_address >> 8); |
472 | |
473 | if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && |
474 | high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) |
475 | retval = true; |
476 | } |
477 | release_queue(adev); |
478 | return retval; |
479 | } |
480 | |
481 | static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev, |
482 | void *mqd) |
483 | { |
484 | struct v10_sdma_mqd *m; |
485 | uint32_t sdma_rlc_reg_offset; |
486 | uint32_t sdma_rlc_rb_cntl; |
487 | |
488 | m = get_sdma_mqd(mqd); |
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
491 | |
492 | sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); |
493 | |
494 | if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) |
495 | return true; |
496 | |
497 | return false; |
498 | } |
499 | |
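/*
 * Preempt or reset a compute queue: translate the KFD preempt type into
 * a dequeue request, write it to mmCP_HQD_DEQUEUE_REQUEST and poll
 * mmCP_HQD_ACTIVE until the HQD drains or utimeout (in ms) expires.
 */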
500 | static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd, |
501 | enum kfd_preempt_type reset_type, |
502 | unsigned int utimeout, uint32_t pipe_id, |
503 | uint32_t queue_id, uint32_t inst) |
504 | { |
505 | enum hqd_dequeue_request_type type; |
506 | unsigned long end_jiffies; |
507 | uint32_t temp; |
508 | struct v10_compute_mqd *m = get_mqd(mqd); |
509 | |
510 | acquire_queue(adev, pipe_id, queue_id); |
511 | |
512 | if (m->cp_hqd_vmid == 0) |
513 | WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); |
514 | |
515 | switch (reset_type) { |
516 | case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: |
517 | type = DRAIN_PIPE; |
518 | break; |
519 | case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: |
520 | type = RESET_WAVES; |
521 | break; |
522 | case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE: |
523 | type = SAVE_WAVES; |
524 | break; |
525 | default: |
526 | type = DRAIN_PIPE; |
527 | break; |
528 | } |
529 | |
530 | WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type); |
531 | |
532 | end_jiffies = (utimeout * HZ / 1000) + jiffies; |
533 | while (true) { |
534 | temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); |
535 | if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) |
536 | break; |
537 | if (time_after(jiffies, end_jiffies)) { |
			pr_err("cp queue pipe %d queue %d preemption failed\n",
539 | pipe_id, queue_id); |
540 | release_queue(adev); |
541 | return -ETIME; |
542 | } |
		usleep_range(500, 1000);
544 | } |
545 | |
546 | release_queue(adev); |
547 | return 0; |
548 | } |
549 | |
550 | static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, |
551 | unsigned int utimeout) |
552 | { |
553 | struct v10_sdma_mqd *m; |
554 | uint32_t sdma_rlc_reg_offset; |
555 | uint32_t temp; |
556 | unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; |
557 | |
558 | m = get_sdma_mqd(mqd); |
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);
561 | |
562 | temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); |
563 | temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; |
564 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); |
565 | |
566 | while (true) { |
567 | temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); |
568 | if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) |
569 | break; |
570 | if (time_after(jiffies, end_jiffies)) { |
			pr_err("SDMA RLC not idle in %s\n", __func__);
572 | return -ETIME; |
573 | } |
		usleep_range(500, 1000);
575 | } |
576 | |
577 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); |
578 | WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, |
579 | RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | |
580 | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); |
581 | |
582 | m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); |
583 | m->sdmax_rlcx_rb_rptr_hi = |
584 | RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); |
585 | |
586 | return 0; |
587 | } |
588 | |
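/*
 * Broadcast an SQ_CMD under a caller-supplied GRBM_GFX_INDEX, then
 * restore SE/SA/instance broadcast mode so that later register writes
 * reach all shader engines again.
 */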
589 | static int wave_control_execute_v10_3(struct amdgpu_device *adev, |
590 | uint32_t gfx_index_val, |
591 | uint32_t sq_cmd, uint32_t inst) |
592 | { |
593 | uint32_t data = 0; |
594 | |
595 | mutex_lock(&adev->grbm_idx_mutex); |
596 | |
597 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); |
598 | WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); |
599 | |
600 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, |
601 | INSTANCE_BROADCAST_WRITES, 1); |
602 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, |
603 | SA_BROADCAST_WRITES, 1); |
604 | data = REG_SET_FIELD(data, GRBM_GFX_INDEX, |
605 | SE_BROADCAST_WRITES, 1); |
606 | |
607 | WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); |
	mutex_unlock(&adev->grbm_idx_mutex);
609 | |
610 | return 0; |
611 | } |
612 | |
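/*
 * Look up the PASID mapped to a VMID in the ATHUB LUT. The VALID bit
 * reports whether the mapping is currently in use.
 */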
613 | static bool get_atc_vmid_pasid_mapping_info_v10_3(struct amdgpu_device *adev, |
614 | uint8_t vmid, uint16_t *p_pasid) |
615 | { |
616 | uint32_t value; |
617 | |
618 | value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) |
619 | + vmid); |
620 | *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; |
621 | |
622 | return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); |
623 | } |
624 | |
625 | static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev, |
626 | uint32_t vmid, uint64_t page_table_base) |
627 | { |
628 | /* SDMA is on gfxhub as well for Navi1* series */ |
629 | adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); |
630 | } |
631 | |
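/*
 * Program the per-VMID trap handler base (TBA) and trap memory (TMA)
 * addresses. Both are written in units of 256 bytes (addr >> 8), and
 * TRAP_EN in SQ_SHADER_TBA_HI arms the trap handler.
 */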
632 | static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev, |
633 | uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, |
634 | uint32_t inst) |
635 | { |
	lock_srbm(adev, 0, 0, 0, vmid);
637 | |
638 | /* |
639 | * Program TBA registers |
640 | */ |
641 | WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO), |
642 | lower_32_bits(tba_addr >> 8)); |
643 | WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI), |
644 | upper_32_bits(tba_addr >> 8) | |
645 | (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT)); |
646 | |
647 | /* |
648 | * Program TMA registers |
649 | */ |
650 | WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO), |
651 | lower_32_bits(tma_addr >> 8)); |
652 | WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), |
653 | upper_32_bits(tma_addr >> 8)); |
654 | |
655 | unlock_srbm(adev); |
656 | } |
657 | |
658 | const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { |
659 | .program_sh_mem_settings = program_sh_mem_settings_v10_3, |
660 | .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3, |
661 | .init_interrupts = init_interrupts_v10_3, |
662 | .hqd_load = hqd_load_v10_3, |
663 | .hiq_mqd_load = hiq_mqd_load_v10_3, |
664 | .hqd_sdma_load = hqd_sdma_load_v10_3, |
665 | .hqd_dump = hqd_dump_v10_3, |
666 | .hqd_sdma_dump = hqd_sdma_dump_v10_3, |
667 | .hqd_is_occupied = hqd_is_occupied_v10_3, |
668 | .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3, |
669 | .hqd_destroy = hqd_destroy_v10_3, |
670 | .hqd_sdma_destroy = hqd_sdma_destroy_v10_3, |
671 | .wave_control_execute = wave_control_execute_v10_3, |
672 | .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3, |
673 | .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, |
674 | .program_trap_handler_settings = program_trap_handler_settings_v10_3, |
675 | .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, |
676 | .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, |
677 | .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, |
678 | .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, |
679 | .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request, |
680 | .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override, |
681 | .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode, |
682 | .set_address_watch = kgd_gfx_v10_set_address_watch, |
683 | .clear_address_watch = kgd_gfx_v10_clear_address_watch |
684 | }; |
685 | |