1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Christian König |
23 | */ |
24 | #ifndef __AMDGPU_RING_H__ |
25 | #define __AMDGPU_RING_H__ |
26 | |
27 | #include <drm/amdgpu_drm.h> |
28 | #include <drm/gpu_scheduler.h> |
29 | #include <drm/drm_print.h> |
30 | #include <drm/drm_suballoc.h> |
31 | |
32 | struct amdgpu_device; |
33 | struct amdgpu_ring; |
34 | struct amdgpu_ib; |
35 | struct amdgpu_cs_parser; |
36 | struct amdgpu_job; |
37 | struct amdgpu_vm; |
38 | |
39 | /* max number of rings */ |
40 | #define AMDGPU_MAX_RINGS 124 |
41 | #define AMDGPU_MAX_HWIP_RINGS 64 |
42 | #define AMDGPU_MAX_GFX_RINGS 2 |
43 | #define AMDGPU_MAX_SW_GFX_RINGS 2 |
44 | #define AMDGPU_MAX_COMPUTE_RINGS 8 |
45 | #define AMDGPU_MAX_VCE_RINGS 3 |
46 | #define AMDGPU_MAX_UVD_ENC_RINGS 2 |
47 | #define AMDGPU_MAX_VPE_RINGS 2 |
48 | |
49 | enum amdgpu_ring_priority_level { |
50 | AMDGPU_RING_PRIO_0, |
51 | AMDGPU_RING_PRIO_1, |
	AMDGPU_RING_PRIO_DEFAULT = 1,	/* alias of AMDGPU_RING_PRIO_1 */
53 | AMDGPU_RING_PRIO_2, |
54 | AMDGPU_RING_PRIO_MAX |
55 | }; |
56 | |
57 | /* some special values for the owner field */ |
58 | #define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul) |
59 | #define AMDGPU_FENCE_OWNER_VM ((void *)1ul) |
60 | #define AMDGPU_FENCE_OWNER_KFD ((void *)2ul) |
61 | |
62 | #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) |
63 | #define AMDGPU_FENCE_FLAG_INT (1 << 1) |
64 | #define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) |
65 | #define AMDGPU_FENCE_FLAG_EXEC (1 << 3) |
66 | |
67 | #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched) |
68 | |
69 | #define AMDGPU_IB_POOL_SIZE (1024 * 1024) |
70 | |
71 | enum amdgpu_ring_type { |
72 | AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX, |
73 | AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE, |
74 | AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA, |
75 | AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD, |
76 | AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE, |
77 | AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC, |
78 | AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC, |
79 | AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC, |
80 | AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG, |
81 | AMDGPU_RING_TYPE_VPE = AMDGPU_HW_IP_VPE, |
82 | AMDGPU_RING_TYPE_KIQ, |
83 | AMDGPU_RING_TYPE_MES, |
84 | AMDGPU_RING_TYPE_UMSCH_MM, |
85 | }; |
86 | |
87 | enum amdgpu_ib_pool_type { |
88 | /* Normal submissions to the top of the pipeline. */ |
89 | AMDGPU_IB_POOL_DELAYED, |
90 | /* Immediate submissions to the bottom of the pipeline. */ |
91 | AMDGPU_IB_POOL_IMMEDIATE, |
92 | /* Direct submission to the ring buffer during init and reset. */ |
93 | AMDGPU_IB_POOL_DIRECT, |
94 | |
95 | AMDGPU_IB_POOL_MAX |
96 | }; |
97 | |
struct amdgpu_ib {
	struct drm_suballoc	*sa_bo;		/* suballocation backing this IB */
	uint32_t		length_dw;	/* command length in dwords */
	uint64_t		gpu_addr;	/* GPU VA of the command buffer */
	uint32_t		*ptr;		/* CPU mapping for writing commands */
	uint32_t		flags;		/* AMDGPU_IB_FLAG_* */
};
105 | |
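/*
 * Set of schedulers for one HW IP/priority combination. Context entities
 * are initialized against such a list, roughly (a sketch of the lookup
 * done in amdgpu_ctx.c):
 *
 *	sched = adev->gpu_sched[hw_ip][hw_prio].sched;
 *	num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
 */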
106 | struct amdgpu_sched { |
107 | u32 num_scheds; |
108 | struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS]; |
109 | }; |
110 | |
111 | /* |
112 | * Fences. |
113 | */ |
114 | struct amdgpu_fence_driver { |
115 | uint64_t gpu_addr; |
116 | volatile uint32_t *cpu_addr; |
117 | /* sync_seq is protected by ring emission lock */ |
118 | uint32_t sync_seq; |
119 | atomic_t last_seq; |
120 | bool initialized; |
121 | struct amdgpu_irq_src *irq_src; |
122 | unsigned irq_type; |
123 | struct timer_list fallback_timer; |
124 | unsigned num_fences_mask; |
125 | spinlock_t lock; |
126 | struct dma_fence **fences; |
127 | }; |
128 | |
129 | extern const struct drm_sched_backend_ops amdgpu_sched_ops; |
130 | |
131 | void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); |
132 | void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); |
133 | void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); |
134 | |
135 | int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); |
136 | int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, |
137 | struct amdgpu_irq_src *irq_src, |
138 | unsigned irq_type); |
139 | void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); |
140 | void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); |
141 | int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); |
142 | void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); |
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
		      struct amdgpu_job *job, unsigned flags);
145 | int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, |
146 | uint32_t timeout); |
147 | bool amdgpu_fence_process(struct amdgpu_ring *ring); |
148 | int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); |
149 | signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, |
150 | uint32_t wait_seq, |
151 | signed long timeout); |
152 | unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring); |
153 | |
154 | void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop); |
155 | |
156 | u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring); |
157 | void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, |
158 | ktime_t timestamp); |
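
/*
 * Typical fence round trip (a sketch, error handling elided): emit a fence
 * behind commands already written to the ring, then wait for it from
 * process context; a NULL job is allowed and allocates a bare fence.
 *
 *	r = amdgpu_fence_emit(ring, &fence, NULL, AMDGPU_FENCE_FLAG_INT);
 *	...
 *	dma_fence_wait(fence, false);
 *	dma_fence_put(fence);
 */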
159 | |
160 | /* |
161 | * Rings. |
162 | */ |
163 | |
164 | /* provided by hw blocks that expose a ring buffer for commands */ |
165 | struct amdgpu_ring_funcs { |
166 | enum amdgpu_ring_type type; |
167 | uint32_t align_mask; |
168 | u32 nop; |
169 | bool support_64bit_ptrs; |
170 | bool no_user_fence; |
171 | bool secure_submission_supported; |
	unsigned		extra_dw;
173 | |
174 | /* ring read/write ptr handling */ |
175 | u64 (*get_rptr)(struct amdgpu_ring *ring); |
176 | u64 (*get_wptr)(struct amdgpu_ring *ring); |
177 | void (*set_wptr)(struct amdgpu_ring *ring); |
178 | /* validating and patching of IBs */ |
179 | int (*parse_cs)(struct amdgpu_cs_parser *p, |
180 | struct amdgpu_job *job, |
181 | struct amdgpu_ib *ib); |
182 | int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, |
183 | struct amdgpu_job *job, |
184 | struct amdgpu_ib *ib); |
185 | /* constants to calculate how many DW are needed for an emit */ |
186 | unsigned emit_frame_size; |
187 | unsigned emit_ib_size; |
188 | /* command emit functions */ |
189 | void (*emit_ib)(struct amdgpu_ring *ring, |
190 | struct amdgpu_job *job, |
191 | struct amdgpu_ib *ib, |
192 | uint32_t flags); |
193 | void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, |
194 | uint64_t seq, unsigned flags); |
195 | void (*emit_pipeline_sync)(struct amdgpu_ring *ring); |
196 | void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid, |
197 | uint64_t pd_addr); |
198 | void (*emit_hdp_flush)(struct amdgpu_ring *ring); |
199 | void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, |
200 | uint32_t gds_base, uint32_t gds_size, |
201 | uint32_t gws_base, uint32_t gws_size, |
202 | uint32_t oa_base, uint32_t oa_size); |
203 | /* testing functions */ |
204 | int (*test_ring)(struct amdgpu_ring *ring); |
205 | int (*test_ib)(struct amdgpu_ring *ring, long timeout); |
206 | /* insert NOP packets */ |
207 | void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); |
208 | void (*insert_start)(struct amdgpu_ring *ring); |
209 | void (*insert_end)(struct amdgpu_ring *ring); |
210 | /* pad the indirect buffer to the necessary number of dw */ |
211 | void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); |
212 | unsigned (*init_cond_exec)(struct amdgpu_ring *ring); |
213 | void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); |
214 | /* note usage for clock and power gating */ |
215 | void (*begin_use)(struct amdgpu_ring *ring); |
216 | void (*end_use)(struct amdgpu_ring *ring); |
	void (*emit_switch_buffer)(struct amdgpu_ring *ring);
	void (*emit_cntxcntl)(struct amdgpu_ring *ring, uint32_t flags);
219 | void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va, |
220 | u64 gds_va, bool init_shadow, int vmid); |
221 | void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg, |
222 | uint32_t reg_val_offs); |
223 | void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); |
224 | void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg, |
225 | uint32_t val, uint32_t mask); |
226 | void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring, |
227 | uint32_t reg0, uint32_t reg1, |
228 | uint32_t ref, uint32_t mask); |
229 | void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start, |
230 | bool secure); |
231 | /* Try to soft recover the ring to make the fence signal */ |
232 | void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid); |
233 | int (*preempt_ib)(struct amdgpu_ring *ring); |
234 | void (*emit_mem_sync)(struct amdgpu_ring *ring); |
235 | void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); |
236 | void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); |
237 | void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); |
238 | void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); |
239 | }; |
240 | |
241 | struct amdgpu_ring { |
242 | struct amdgpu_device *adev; |
243 | const struct amdgpu_ring_funcs *funcs; |
244 | struct amdgpu_fence_driver fence_drv; |
245 | struct drm_gpu_scheduler sched; |
246 | |
247 | struct amdgpu_bo *ring_obj; |
248 | volatile uint32_t *ring; |
249 | unsigned rptr_offs; |
250 | u64 rptr_gpu_addr; |
251 | volatile u32 *rptr_cpu_addr; |
252 | u64 wptr; |
253 | u64 wptr_old; |
254 | unsigned ring_size; |
255 | unsigned max_dw; |
256 | int count_dw; |
257 | uint64_t gpu_addr; |
258 | uint64_t ptr_mask; |
259 | uint32_t buf_mask; |
260 | u32 idx; |
261 | u32 xcc_id; |
262 | u32 xcp_id; |
263 | u32 me; |
264 | u32 pipe; |
265 | u32 queue; |
266 | struct amdgpu_bo *mqd_obj; |
267 | uint64_t mqd_gpu_addr; |
268 | void *mqd_ptr; |
269 | unsigned mqd_size; |
270 | uint64_t eop_gpu_addr; |
271 | u32 doorbell_index; |
272 | bool use_doorbell; |
273 | bool use_pollmem; |
274 | unsigned wptr_offs; |
275 | u64 wptr_gpu_addr; |
276 | volatile u32 *wptr_cpu_addr; |
277 | unsigned fence_offs; |
278 | u64 fence_gpu_addr; |
279 | volatile u32 *fence_cpu_addr; |
280 | uint64_t current_ctx; |
281 | char name[16]; |
282 | u32 trail_seq; |
283 | unsigned trail_fence_offs; |
284 | u64 trail_fence_gpu_addr; |
285 | volatile u32 *trail_fence_cpu_addr; |
286 | unsigned cond_exe_offs; |
287 | u64 cond_exe_gpu_addr; |
288 | volatile u32 *cond_exe_cpu_addr; |
289 | unsigned vm_hub; |
290 | unsigned vm_inv_eng; |
291 | struct dma_fence *vmid_wait; |
292 | bool has_compute_vm_bug; |
293 | bool no_scheduler; |
294 | int hw_prio; |
295 | unsigned num_hw_submission; |
296 | atomic_t *sched_score; |
297 | |
298 | /* used for mes */ |
299 | bool is_mes_queue; |
300 | uint32_t hw_queue_id; |
301 | struct amdgpu_mes_ctx_data *mes_ctx; |
302 | |
303 | bool is_sw_ring; |
	unsigned int entry_index;
};
307 | |
308 | #define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib))) |
309 | #define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib))) |
310 | #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) |
311 | #define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0) |
312 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) |
313 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) |
314 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) |
315 | #define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags))) |
316 | #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) |
317 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) |
318 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) |
319 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) |
320 | #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) |
321 | #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) |
322 | #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) |
323 | #define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v))) |
324 | #define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o)) |
325 | #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) |
326 | #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) |
327 | #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) |
328 | #define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s)) |
329 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) |
330 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) |
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
332 | #define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r) |
333 | #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) |
334 | #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) |
335 | #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) |
336 | |
337 | unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); |
338 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); |
339 | void amdgpu_ring_ib_begin(struct amdgpu_ring *ring); |
340 | void amdgpu_ring_ib_end(struct amdgpu_ring *ring); |
341 | void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring); |
342 | void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring); |
343 | void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring); |
344 | |
345 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); |
346 | void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); |
347 | void amdgpu_ring_commit(struct amdgpu_ring *ring); |
348 | void amdgpu_ring_undo(struct amdgpu_ring *ring); |
349 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, |
350 | unsigned int max_dw, struct amdgpu_irq_src *irq_src, |
351 | unsigned int irq_type, unsigned int hw_prio, |
352 | atomic_t *sched_score); |
353 | void amdgpu_ring_fini(struct amdgpu_ring *ring); |
354 | void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, |
355 | uint32_t reg0, uint32_t val0, |
356 | uint32_t reg1, uint32_t val1); |
357 | bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, |
358 | struct dma_fence *fence); |
359 | |
360 | static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring, |
361 | bool cond_exec) |
362 | { |
363 | *ring->cond_exe_cpu_addr = cond_exec; |
364 | } |
365 | |
/* Fill the whole ring buffer with the IP-specific NOP packet */
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
	int i = 0;

	while (i <= ring->buf_mask)
		ring->ring[i++] = ring->funcs->nop;
}
373 | |
374 | static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) |
375 | { |
376 | if (ring->count_dw <= 0) |
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
378 | ring->ring[ring->wptr++ & ring->buf_mask] = v; |
379 | ring->wptr &= ring->ptr_mask; |
380 | ring->count_dw--; |
381 | } |
382 | |
383 | static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, |
384 | void *src, int count_dw) |
385 | { |
386 | unsigned occupied, chunk1, chunk2; |
387 | void *dst; |
388 | |
389 | if (unlikely(ring->count_dw < count_dw)) |
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
391 | |
	occupied = ring->wptr & ring->buf_mask;
	dst = (void *)&ring->ring[occupied];
	/* chunk1 is the number of dwords that fit before the end of the
	 * buffer, chunk2 the remainder that wraps to the start; both are
	 * then converted from dwords to bytes for memcpy().
	 */
	chunk1 = ring->buf_mask + 1 - occupied;
	chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
	chunk2 = count_dw - chunk1;
	chunk1 <<= 2;
	chunk2 <<= 2;
399 | |
400 | if (chunk1) |
401 | memcpy(dst, src, chunk1); |
402 | |
403 | if (chunk2) { |
404 | src += chunk1; |
405 | dst = (void *)ring->ring; |
406 | memcpy(dst, src, chunk2); |
407 | } |
408 | |
409 | ring->wptr += count_dw; |
410 | ring->wptr &= ring->ptr_mask; |
411 | ring->count_dw -= count_dw; |
412 | } |
413 | |
414 | #define amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset) \ |
415 | (ring->is_mes_queue && ring->mes_ctx ? \ |
416 | (ring->mes_ctx->meta_data_gpu_addr + offset) : 0) |
417 | |
418 | #define amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset) \ |
419 | (ring->is_mes_queue && ring->mes_ctx ? \ |
420 | (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) : \ |
421 | NULL) |
422 | |
423 | int amdgpu_ring_test_helper(struct amdgpu_ring *ring); |
424 | |
425 | void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, |
426 | struct amdgpu_ring *ring); |
427 | |
428 | int amdgpu_ring_init_mqd(struct amdgpu_ring *ring); |
429 | |
430 | static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx) |
431 | { |
432 | return ib->ptr[idx]; |
433 | } |
434 | |
435 | static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx, |
436 | uint32_t value) |
437 | { |
438 | ib->ptr[idx] = value; |
439 | } |
440 | |
441 | int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
442 | unsigned size, |
443 | enum amdgpu_ib_pool_type pool, |
444 | struct amdgpu_ib *ib); |
445 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, |
446 | struct dma_fence *f); |
447 | int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, |
448 | struct amdgpu_ib *ibs, struct amdgpu_job *job, |
449 | struct dma_fence **f); |
450 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); |
451 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); |
452 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); |
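
/*
 * IB lifecycle sketch (direct submission, as the IB ring tests do it;
 * error handling elided): allocate from a pool, fill the mapped buffer,
 * record the length, schedule with a NULL job, and free the IB once the
 * returned fence has signaled.
 *
 *	r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
 *	ib.ptr[0] = ...;
 *	ib.length_dw = ...;
 *	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
 *	r = dma_fence_wait_timeout(f, false, timeout);
 *	amdgpu_ib_free(adev, &ib, f);
 *	dma_fence_put(f);
 */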
453 | |
454 | #endif |
455 | |