1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Alex Deucher |
23 | */ |
24 | |
25 | #include "amdgpu.h" |
26 | #include "amdgpu_trace.h" |
27 | #include "si.h" |
28 | #include "sid.h" |
29 | |
30 | const u32 sdma_offsets[SDMA_MAX_INSTANCE] = |
31 | { |
32 | DMA0_REGISTER_OFFSET, |
33 | DMA1_REGISTER_OFFSET |
34 | }; |
35 | |
36 | static void si_dma_set_ring_funcs(struct amdgpu_device *adev); |
37 | static void si_dma_set_buffer_funcs(struct amdgpu_device *adev); |
38 | static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev); |
39 | static void si_dma_set_irq_funcs(struct amdgpu_device *adev); |
40 | |
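/**
 * si_dma_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Return the current rptr from the writeback slot (SI).
 */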
41 | static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring) |
42 | { |
43 | return *ring->rptr_cpu_addr; |
44 | } |
45 | |
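/**
 * si_dma_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Read the current wptr from the DMA_RB_WPTR register (SI).
 */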
46 | static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring) |
47 | { |
48 | struct amdgpu_device *adev = ring->adev; |
49 | u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; |
50 | |
51 | return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2; |
52 | } |
53 | |
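/**
 * si_dma_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the DMA_RB_WPTR register (SI).
 */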
54 | static void si_dma_ring_set_wptr(struct amdgpu_ring *ring) |
55 | { |
56 | struct amdgpu_device *adev = ring->adev; |
57 | u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1; |
58 | |
59 | WREG32(DMA_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc); |
60 | } |
61 | |
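/**
 * si_dma_ring_emit_ib - schedule an IB on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve the vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB on the DMA ring (SI).
 */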
62 | static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, |
63 | struct amdgpu_job *job, |
64 | struct amdgpu_ib *ib, |
65 | uint32_t flags) |
66 | { |
67 | unsigned vmid = AMDGPU_JOB_GET_VMID(job); |
68 | /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring. |
69 | * Pad as necessary with NOPs. |
70 | */ |
71 | while ((lower_32_bits(ring->wptr) & 7) != 5) |
72 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0)); |
73 | amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0)); |
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
78 | |
79 | /** |
80 | * si_dma_ring_emit_fence - emit a fence on the DMA ring |
81 | * |
82 | * @ring: amdgpu ring pointer |
83 | * @addr: address |
84 | * @seq: sequence number |
85 | * @flags: fence related flags |
86 | * |
 * Add a DMA fence packet to the ring to write
 * the fence seq number and a DMA trap packet to generate
 * an interrupt if needed (SI).
90 | */ |
91 | static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, |
92 | unsigned flags) |
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	/* write the fence */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
101 | /* optionally write high bits as well */ |
102 | if (write64bit) { |
103 | addr += 4; |
104 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0)); |
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
107 | amdgpu_ring_write(ring, upper_32_bits(seq)); |
108 | } |
109 | /* generate an interrupt */ |
110 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0)); |
111 | } |
112 | |
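/**
 * si_dma_stop - stop the async dma engines
 *
 * @adev: amdgpu device pointer
 *
 * Stop the async dma engines by clearing the ring buffer enable bit (SI).
 */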
113 | static void si_dma_stop(struct amdgpu_device *adev) |
114 | { |
115 | u32 rb_cntl; |
116 | unsigned i; |
117 | |
118 | for (i = 0; i < adev->sdma.num_instances; i++) { |
		/* stop the ring buffer */
120 | rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]); |
121 | rb_cntl &= ~DMA_RB_ENABLE; |
122 | WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); |
123 | } |
124 | } |
125 | |
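/**
 * si_dma_start - set up and start the async dma engines
 *
 * @adev: amdgpu device pointer
 *
 * Program the ring buffers, enable IBs and the rings, and run a
 * ring test on each instance (SI).
 * Returns 0 for success, error for failure.
 */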
126 | static int si_dma_start(struct amdgpu_device *adev) |
127 | { |
128 | struct amdgpu_ring *ring; |
129 | u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz; |
130 | int i, r; |
131 | uint64_t rptr_addr; |
132 | |
133 | for (i = 0; i < adev->sdma.num_instances; i++) { |
134 | ring = &adev->sdma.instance[i].ring; |
135 | |
136 | WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0); |
137 | WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0); |
138 | |
139 | /* Set ring buffer size in dwords */ |
140 | rb_bufsz = order_base_2(ring->ring_size / 4); |
141 | rb_cntl = rb_bufsz << 1; |
142 | #ifdef __BIG_ENDIAN |
143 | rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE; |
144 | #endif |
145 | WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl); |
146 | |
147 | /* Initialize the ring buffer's read and write pointers */ |
148 | WREG32(DMA_RB_RPTR + sdma_offsets[i], 0); |
149 | WREG32(DMA_RB_WPTR + sdma_offsets[i], 0); |
150 | |
151 | rptr_addr = ring->rptr_gpu_addr; |
152 | |
153 | WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr)); |
154 | WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF); |
155 | |
156 | rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE; |
157 | |
158 | WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8); |
159 | |
160 | /* enable DMA IBs */ |
161 | ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE; |
162 | #ifdef __BIG_ENDIAN |
163 | ib_cntl |= DMA_IB_SWAP_ENABLE; |
164 | #endif |
165 | WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl); |
166 | |
167 | dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]); |
168 | dma_cntl &= ~CTXEMPTY_INT_ENABLE; |
169 | WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl); |
170 | |
171 | ring->wptr = 0; |
172 | WREG32(DMA_RB_WPTR + sdma_offsets[i], ring->wptr << 2); |
173 | WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); |
174 | |
175 | r = amdgpu_ring_test_helper(ring); |
176 | if (r) |
177 | return r; |
178 | } |
179 | |
180 | return 0; |
181 | } |
182 | |
183 | /** |
184 | * si_dma_ring_test_ring - simple async dma engine test |
185 | * |
186 | * @ring: amdgpu_ring structure holding ring information |
187 | * |
 * Test the DMA engine by using it to write
 * a value to memory (SI).
190 | * Returns 0 for success, error for failure. |
191 | */ |
192 | static int si_dma_ring_test_ring(struct amdgpu_ring *ring) |
193 | { |
194 | struct amdgpu_device *adev = ring->adev; |
195 | unsigned i; |
196 | unsigned index; |
197 | int r; |
198 | u32 tmp; |
199 | u64 gpu_addr; |
200 | |
	r = amdgpu_device_wb_get(adev, &index);
202 | if (r) |
203 | return r; |
204 | |
205 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
206 | tmp = 0xCAFEDEAD; |
207 | adev->wb.wb[index] = cpu_to_le32(tmp); |
208 | |
	r = amdgpu_ring_alloc(ring, 4);
210 | if (r) |
211 | goto error_free_wb; |
212 | |
213 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1)); |
214 | amdgpu_ring_write(ring, lower_32_bits(gpu_addr)); |
215 | amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff); |
	amdgpu_ring_write(ring, 0xDEADBEEF);
217 | amdgpu_ring_commit(ring); |
218 | |
219 | for (i = 0; i < adev->usec_timeout; i++) { |
220 | tmp = le32_to_cpu(adev->wb.wb[index]); |
221 | if (tmp == 0xDEADBEEF) |
222 | break; |
223 | udelay(1); |
224 | } |
225 | |
226 | if (i >= adev->usec_timeout) |
227 | r = -ETIMEDOUT; |
228 | |
229 | error_free_wb: |
	amdgpu_device_wb_free(adev, index);
231 | return r; |
232 | } |
233 | |
234 | /** |
235 | * si_dma_ring_test_ib - test an IB on the DMA engine |
236 | * |
237 | * @ring: amdgpu_ring structure holding ring information |
238 | * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT |
239 | * |
 * Test a simple IB in the DMA ring (SI).
241 | * Returns 0 on success, error on failure. |
242 | */ |
243 | static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout) |
244 | { |
245 | struct amdgpu_device *adev = ring->adev; |
246 | struct amdgpu_ib ib; |
247 | struct dma_fence *f = NULL; |
248 | unsigned index; |
249 | u32 tmp = 0; |
250 | u64 gpu_addr; |
251 | long r; |
252 | |
	r = amdgpu_device_wb_get(adev, &index);
254 | if (r) |
255 | return r; |
256 | |
257 | gpu_addr = adev->wb.gpu_addr + (index * 4); |
258 | tmp = 0xCAFEDEAD; |
259 | adev->wb.wb[index] = cpu_to_le32(tmp); |
260 | memset(&ib, 0, sizeof(ib)); |
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);
263 | if (r) |
264 | goto err0; |
265 | |
266 | ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1); |
267 | ib.ptr[1] = lower_32_bits(gpu_addr); |
268 | ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff; |
269 | ib.ptr[3] = 0xDEADBEEF; |
270 | ib.length_dw = 4; |
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
272 | if (r) |
273 | goto err1; |
274 | |
	r = dma_fence_wait_timeout(f, false, timeout);
276 | if (r == 0) { |
277 | r = -ETIMEDOUT; |
278 | goto err1; |
279 | } else if (r < 0) { |
280 | goto err1; |
281 | } |
282 | tmp = le32_to_cpu(adev->wb.wb[index]); |
283 | if (tmp == 0xDEADBEEF) |
284 | r = 0; |
285 | else |
286 | r = -EINVAL; |
287 | |
288 | err1: |
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
291 | err0: |
	amdgpu_device_wb_free(adev, index);
293 | return r; |
294 | } |
295 | |
296 | /** |
297 | * si_dma_vm_copy_pte - update PTEs by copying them from the GART |
298 | * |
299 | * @ib: indirect buffer to fill with commands |
300 | * @pe: addr of the page entry |
301 | * @src: src addr to copy from |
302 | * @count: number of page entries to update |
303 | * |
304 | * Update PTEs by copying them from the GART using DMA (SI). |
305 | */ |
306 | static void si_dma_vm_copy_pte(struct amdgpu_ib *ib, |
307 | uint64_t pe, uint64_t src, |
308 | unsigned count) |
309 | { |
310 | unsigned bytes = count * 8; |
311 | |
312 | ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, |
313 | 1, 0, 0, bytes); |
314 | ib->ptr[ib->length_dw++] = lower_32_bits(pe); |
315 | ib->ptr[ib->length_dw++] = lower_32_bits(src); |
316 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; |
317 | ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff; |
318 | } |
319 | |
320 | /** |
321 | * si_dma_vm_write_pte - update PTEs by writing them manually |
322 | * |
323 | * @ib: indirect buffer to fill with commands |
324 | * @pe: addr of the page entry |
325 | * @value: dst addr to write into pe |
326 | * @count: number of page entries to update |
327 | * @incr: increase next addr by incr bytes |
328 | * |
329 | * Update PTEs by writing them manually using DMA (SI). |
330 | */ |
331 | static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe, |
332 | uint64_t value, unsigned count, |
333 | uint32_t incr) |
334 | { |
335 | unsigned ndw = count * 2; |
336 | |
337 | ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw); |
338 | ib->ptr[ib->length_dw++] = lower_32_bits(pe); |
339 | ib->ptr[ib->length_dw++] = upper_32_bits(pe); |
340 | for (; ndw > 0; ndw -= 2) { |
341 | ib->ptr[ib->length_dw++] = lower_32_bits(value); |
342 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
343 | value += incr; |
344 | } |
345 | } |
346 | |
347 | /** |
348 | * si_dma_vm_set_pte_pde - update the page tables using sDMA |
349 | * |
350 | * @ib: indirect buffer to fill with commands |
351 | * @pe: addr of the page entry |
352 | * @addr: dst addr to write into pe |
353 | * @count: number of page entries to update |
354 | * @incr: increase next addr by incr bytes |
355 | * @flags: access flags |
356 | * |
 * Update the page tables using sDMA (SI).
358 | */ |
359 | static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, |
360 | uint64_t pe, |
361 | uint64_t addr, unsigned count, |
362 | uint32_t incr, uint64_t flags) |
363 | { |
364 | uint64_t value; |
365 | unsigned ndw; |
366 | |
367 | while (count) { |
368 | ndw = count * 2; |
369 | if (ndw > 0xFFFFE) |
370 | ndw = 0xFFFFE; |
371 | |
372 | if (flags & AMDGPU_PTE_VALID) |
373 | value = addr; |
374 | else |
375 | value = 0; |
376 | |
377 | /* for physically contiguous pages (vram) */ |
378 | ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw); |
379 | ib->ptr[ib->length_dw++] = pe; /* dst addr */ |
380 | ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff; |
381 | ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */ |
382 | ib->ptr[ib->length_dw++] = upper_32_bits(flags); |
383 | ib->ptr[ib->length_dw++] = value; /* value */ |
384 | ib->ptr[ib->length_dw++] = upper_32_bits(value); |
385 | ib->ptr[ib->length_dw++] = incr; /* increment size */ |
386 | ib->ptr[ib->length_dw++] = 0; |
387 | pe += ndw * 4; |
388 | addr += (ndw / 2) * incr; |
389 | count -= ndw / 2; |
390 | } |
391 | } |
392 | |
393 | /** |
394 | * si_dma_ring_pad_ib - pad the IB to the required number of dw |
395 | * |
396 | * @ring: amdgpu_ring pointer |
397 | * @ib: indirect buffer to fill with padding |
 *
 * Pad the IB with NOPs so that its size is a multiple of 8 DWs (SI).
399 | */ |
400 | static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) |
401 | { |
402 | while (ib->length_dw & 0x7) |
403 | ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0); |
404 | } |
405 | |
406 | /** |
407 | * si_dma_ring_emit_pipeline_sync - sync the pipeline |
408 | * |
409 | * @ring: amdgpu_ring pointer |
410 | * |
 * Make sure all previous operations are completed (SI).
412 | */ |
413 | static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring) |
414 | { |
415 | uint32_t seq = ring->fence_drv.sync_seq; |
416 | uint64_t addr = ring->fence_drv.gpu_addr; |
417 | |
418 | /* wait for idle */ |
419 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) | |
420 | (1 << 27)); /* Poll memory */ |
421 | amdgpu_ring_write(ring, lower_32_bits(addr)); |
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
426 | } |
427 | |
428 | /** |
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
430 | * |
431 | * @ring: amdgpu_ring pointer |
432 | * @vmid: vmid number to use |
433 | * @pd_addr: address |
434 | * |
435 | * Update the page table base and flush the VM TLB |
 * using sDMA (SI).
437 | */ |
438 | static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring, |
439 | unsigned vmid, uint64_t pd_addr) |
440 | { |
441 | amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr); |
442 | |
443 | /* wait for invalidate to complete */ |
444 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); |
445 | amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST); |
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
450 | } |
451 | |
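/**
 * si_dma_ring_emit_wreg - write a register using the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to program
 * @val: value to write
 *
 * Emit an SRBM write packet to program the register (SI).
 */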
452 | static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring, |
453 | uint32_t reg, uint32_t val) |
454 | { |
455 | amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); |
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
458 | } |
459 | |
460 | static int si_dma_early_init(void *handle) |
461 | { |
462 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
463 | |
464 | adev->sdma.num_instances = 2; |
465 | |
466 | si_dma_set_ring_funcs(adev); |
467 | si_dma_set_buffer_funcs(adev); |
468 | si_dma_set_vm_pte_funcs(adev); |
469 | si_dma_set_irq_funcs(adev); |
470 | |
471 | return 0; |
472 | } |
473 | |
474 | static int si_dma_sw_init(void *handle) |
475 | { |
476 | struct amdgpu_ring *ring; |
477 | int r, i; |
478 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
479 | |
480 | /* DMA0 trap event */ |
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);
483 | if (r) |
484 | return r; |
485 | |
486 | /* DMA1 trap event */ |
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);
489 | if (r) |
490 | return r; |
491 | |
492 | for (i = 0; i < adev->sdma.num_instances; i++) { |
493 | ring = &adev->sdma.instance[i].ring; |
494 | ring->ring_obj = NULL; |
495 | ring->use_doorbell = false; |
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT, NULL);
502 | if (r) |
503 | return r; |
504 | } |
505 | |
506 | return r; |
507 | } |
508 | |
509 | static int si_dma_sw_fini(void *handle) |
510 | { |
511 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
512 | int i; |
513 | |
514 | for (i = 0; i < adev->sdma.num_instances; i++) |
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
516 | |
517 | return 0; |
518 | } |
519 | |
520 | static int si_dma_hw_init(void *handle) |
521 | { |
522 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
523 | |
524 | return si_dma_start(adev); |
525 | } |
526 | |
527 | static int si_dma_hw_fini(void *handle) |
528 | { |
529 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
530 | |
531 | si_dma_stop(adev); |
532 | |
533 | return 0; |
534 | } |
535 | |
536 | static int si_dma_suspend(void *handle) |
537 | { |
538 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
539 | |
	return si_dma_hw_fini(adev);
541 | } |
542 | |
543 | static int si_dma_resume(void *handle) |
544 | { |
545 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
546 | |
	return si_dma_hw_init(adev);
548 | } |
549 | |
550 | static bool si_dma_is_idle(void *handle) |
551 | { |
552 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
553 | u32 tmp = RREG32(SRBM_STATUS2); |
554 | |
555 | if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK)) |
556 | return false; |
557 | |
558 | return true; |
559 | } |
560 | |
561 | static int si_dma_wait_for_idle(void *handle) |
562 | { |
563 | unsigned i; |
564 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
565 | |
566 | for (i = 0; i < adev->usec_timeout; i++) { |
567 | if (si_dma_is_idle(handle)) |
568 | return 0; |
569 | udelay(1); |
570 | } |
571 | return -ETIMEDOUT; |
572 | } |
573 | |
574 | static int si_dma_soft_reset(void *handle) |
575 | { |
	DRM_INFO("si_dma_soft_reset --- not implemented !!!!!!!\n");
577 | return 0; |
578 | } |
579 | |
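/**
 * si_dma_set_trap_irq_state - enable/disable the trap interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source
 * @type: SDMA instance (AMDGPU_SDMA_IRQ_INSTANCE0/1)
 * @state: interrupt state to set
 *
 * Toggle the TRAP_ENABLE bit in DMA_CNTL for the requested instance (SI).
 */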
580 | static int si_dma_set_trap_irq_state(struct amdgpu_device *adev, |
581 | struct amdgpu_irq_src *src, |
582 | unsigned type, |
583 | enum amdgpu_interrupt_state state) |
584 | { |
585 | u32 sdma_cntl; |
586 | |
587 | switch (type) { |
588 | case AMDGPU_SDMA_IRQ_INSTANCE0: |
589 | switch (state) { |
590 | case AMDGPU_IRQ_STATE_DISABLE: |
591 | sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); |
592 | sdma_cntl &= ~TRAP_ENABLE; |
593 | WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl); |
594 | break; |
595 | case AMDGPU_IRQ_STATE_ENABLE: |
596 | sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET); |
597 | sdma_cntl |= TRAP_ENABLE; |
598 | WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl); |
599 | break; |
600 | default: |
601 | break; |
602 | } |
603 | break; |
604 | case AMDGPU_SDMA_IRQ_INSTANCE1: |
605 | switch (state) { |
606 | case AMDGPU_IRQ_STATE_DISABLE: |
607 | sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); |
608 | sdma_cntl &= ~TRAP_ENABLE; |
609 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl); |
610 | break; |
611 | case AMDGPU_IRQ_STATE_ENABLE: |
612 | sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET); |
613 | sdma_cntl |= TRAP_ENABLE; |
614 | WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl); |
615 | break; |
616 | default: |
617 | break; |
618 | } |
619 | break; |
620 | default: |
621 | break; |
622 | } |
623 | return 0; |
624 | } |
625 | |
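/**
 * si_dma_process_trap_irq - process a trap interrupt
 *
 * @adev: amdgpu device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Run fence processing on the DMA instance that raised the trap (SI).
 */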
626 | static int si_dma_process_trap_irq(struct amdgpu_device *adev, |
627 | struct amdgpu_irq_src *source, |
628 | struct amdgpu_iv_entry *entry) |
629 | { |
630 | if (entry->src_id == 224) |
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
634 | return 0; |
635 | } |
636 | |
637 | static int si_dma_set_clockgating_state(void *handle, |
638 | enum amd_clockgating_state state) |
639 | { |
640 | u32 orig, data, offset; |
641 | int i; |
642 | bool enable; |
643 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
644 | |
645 | enable = (state == AMD_CG_STATE_GATE); |
646 | |
647 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { |
648 | for (i = 0; i < adev->sdma.num_instances; i++) { |
649 | if (i == 0) |
650 | offset = DMA0_REGISTER_OFFSET; |
651 | else |
652 | offset = DMA1_REGISTER_OFFSET; |
653 | orig = data = RREG32(DMA_POWER_CNTL + offset); |
654 | data &= ~MEM_POWER_OVERRIDE; |
655 | if (data != orig) |
656 | WREG32(DMA_POWER_CNTL + offset, data); |
657 | WREG32(DMA_CLK_CTRL + offset, 0x00000100); |
658 | } |
659 | } else { |
660 | for (i = 0; i < adev->sdma.num_instances; i++) { |
661 | if (i == 0) |
662 | offset = DMA0_REGISTER_OFFSET; |
663 | else |
664 | offset = DMA1_REGISTER_OFFSET; |
665 | orig = data = RREG32(DMA_POWER_CNTL + offset); |
666 | data |= MEM_POWER_OVERRIDE; |
667 | if (data != orig) |
668 | WREG32(DMA_POWER_CNTL + offset, data); |
669 | |
670 | orig = data = RREG32(DMA_CLK_CTRL + offset); |
671 | data = 0xff000000; |
672 | if (data != orig) |
673 | WREG32(DMA_CLK_CTRL + offset, data); |
674 | } |
675 | } |
676 | |
677 | return 0; |
678 | } |
679 | |
680 | static int si_dma_set_powergating_state(void *handle, |
681 | enum amd_powergating_state state) |
682 | { |
683 | u32 tmp; |
684 | |
685 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
686 | |
687 | WREG32(DMA_PGFSM_WRITE, 0x00002000); |
688 | WREG32(DMA_PGFSM_CONFIG, 0x100010ff); |
689 | |
690 | for (tmp = 0; tmp < 5; tmp++) |
691 | WREG32(DMA_PGFSM_WRITE, 0); |
692 | |
693 | return 0; |
694 | } |
695 | |
696 | static const struct amd_ip_funcs si_dma_ip_funcs = { |
	.name = "si_dma",
698 | .early_init = si_dma_early_init, |
699 | .late_init = NULL, |
700 | .sw_init = si_dma_sw_init, |
701 | .sw_fini = si_dma_sw_fini, |
702 | .hw_init = si_dma_hw_init, |
703 | .hw_fini = si_dma_hw_fini, |
704 | .suspend = si_dma_suspend, |
705 | .resume = si_dma_resume, |
706 | .is_idle = si_dma_is_idle, |
707 | .wait_for_idle = si_dma_wait_for_idle, |
708 | .soft_reset = si_dma_soft_reset, |
709 | .set_clockgating_state = si_dma_set_clockgating_state, |
710 | .set_powergating_state = si_dma_set_powergating_state, |
711 | }; |
712 | |
713 | static const struct amdgpu_ring_funcs si_dma_ring_funcs = { |
714 | .type = AMDGPU_RING_TYPE_SDMA, |
715 | .align_mask = 0xf, |
716 | .nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0), |
717 | .support_64bit_ptrs = false, |
718 | .get_rptr = si_dma_ring_get_rptr, |
719 | .get_wptr = si_dma_ring_get_wptr, |
720 | .set_wptr = si_dma_ring_set_wptr, |
721 | .emit_frame_size = |
722 | 3 + 3 + /* hdp flush / invalidate */ |
723 | 6 + /* si_dma_ring_emit_pipeline_sync */ |
724 | SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */ |
725 | 9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */ |
726 | .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */ |
727 | .emit_ib = si_dma_ring_emit_ib, |
728 | .emit_fence = si_dma_ring_emit_fence, |
729 | .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync, |
730 | .emit_vm_flush = si_dma_ring_emit_vm_flush, |
731 | .test_ring = si_dma_ring_test_ring, |
732 | .test_ib = si_dma_ring_test_ib, |
733 | .insert_nop = amdgpu_ring_insert_nop, |
734 | .pad_ib = si_dma_ring_pad_ib, |
735 | .emit_wreg = si_dma_ring_emit_wreg, |
736 | }; |
737 | |
738 | static void si_dma_set_ring_funcs(struct amdgpu_device *adev) |
739 | { |
740 | int i; |
741 | |
742 | for (i = 0; i < adev->sdma.num_instances; i++) |
743 | adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs; |
744 | } |
745 | |
746 | static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = { |
747 | .set = si_dma_set_trap_irq_state, |
748 | .process = si_dma_process_trap_irq, |
749 | }; |
750 | |
751 | static void si_dma_set_irq_funcs(struct amdgpu_device *adev) |
752 | { |
753 | adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; |
754 | adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; |
755 | } |
756 | |
757 | /** |
758 | * si_dma_emit_copy_buffer - copy buffer using the sDMA engine |
759 | * |
760 | * @ib: indirect buffer to copy to |
761 | * @src_offset: src GPU address |
762 | * @dst_offset: dst GPU address |
763 | * @byte_count: number of bytes to xfer |
764 | * @tmz: is this a secure operation |
765 | * |
 * Copy GPU buffers using the DMA engine (SI).
767 | * Used by the amdgpu ttm implementation to move pages if |
768 | * registered as the asic copy callback. |
769 | */ |
770 | static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib, |
771 | uint64_t src_offset, |
772 | uint64_t dst_offset, |
773 | uint32_t byte_count, |
774 | bool tmz) |
775 | { |
776 | ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY, |
777 | 1, 0, 0, byte_count); |
778 | ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); |
779 | ib->ptr[ib->length_dw++] = lower_32_bits(src_offset); |
780 | ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff; |
781 | ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff; |
782 | } |
783 | |
784 | /** |
785 | * si_dma_emit_fill_buffer - fill buffer using the sDMA engine |
786 | * |
787 | * @ib: indirect buffer to copy to |
788 | * @src_data: value to write to buffer |
789 | * @dst_offset: dst GPU address |
790 | * @byte_count: number of bytes to xfer |
791 | * |
 * Fill GPU buffers using the DMA engine (SI).
793 | */ |
794 | static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib, |
795 | uint32_t src_data, |
796 | uint64_t dst_offset, |
797 | uint32_t byte_count) |
798 | { |
799 | ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL, |
800 | 0, 0, 0, byte_count / 4); |
801 | ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset); |
802 | ib->ptr[ib->length_dw++] = src_data; |
803 | ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16; |
804 | } |
805 | |
806 | |
807 | static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = { |
808 | .copy_max_bytes = 0xffff8, |
809 | .copy_num_dw = 5, |
810 | .emit_copy_buffer = si_dma_emit_copy_buffer, |
811 | |
812 | .fill_max_bytes = 0xffff8, |
813 | .fill_num_dw = 4, |
814 | .emit_fill_buffer = si_dma_emit_fill_buffer, |
815 | }; |
816 | |
817 | static void si_dma_set_buffer_funcs(struct amdgpu_device *adev) |
818 | { |
819 | adev->mman.buffer_funcs = &si_dma_buffer_funcs; |
820 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; |
821 | } |
822 | |
823 | static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { |
824 | .copy_pte_num_dw = 5, |
825 | .copy_pte = si_dma_vm_copy_pte, |
826 | |
827 | .write_pte = si_dma_vm_write_pte, |
828 | .set_pte_pde = si_dma_vm_set_pte_pde, |
829 | }; |
830 | |
831 | static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) |
832 | { |
833 | unsigned i; |
834 | |
835 | adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; |
836 | for (i = 0; i < adev->sdma.num_instances; i++) { |
837 | adev->vm_manager.vm_pte_scheds[i] = |
838 | &adev->sdma.instance[i].ring.sched; |
839 | } |
840 | adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; |
841 | } |
842 | |
843 | const struct amdgpu_ip_block_version si_dma_ip_block = |
844 | { |
845 | .type = AMD_IP_BLOCK_TYPE_SDMA, |
846 | .major = 1, |
847 | .minor = 0, |
848 | .rev = 0, |
849 | .funcs = &si_dma_ip_funcs, |
850 | }; |
851 | |