/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 *          Christian König
 */

#include <linux/debugfs.h>

#include <drm/drm_file.h>

#include "radeon.h"
/*
 * IB
 * IBs (Indirect Buffers) are areas of GPU accessible memory where
 * commands are stored. You can put a pointer to the IB in the
 * command ring and the hw will fetch the commands from the IB
 * and execute them. Generally userspace acceleration drivers
 * produce command buffers which are sent to the kernel and
 * put in IBs for execution by the requested ring.
 */
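/*
 * Typical lifecycle of an IB, as driven by the helpers below (a sketch
 * only; the ring index, size and packet-filling step are illustrative):
 *
 *	struct radeon_ib ib;
 *	int r;
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
 *	if (r)
 *		return r;
 *	... write packets to ib.ptr[] and set ib.length_dw ...
 *	r = radeon_ib_schedule(rdev, &ib, NULL, true);
 *	...
 *	radeon_ib_free(rdev, &ib);
 */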
static void radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
 * radeon_ib_get - request an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the IB is associated with
 * @ib: IB object returned
 * @vm: requested vm
 * @size: requested IB size
 *
 * Request an IB (all asics). IBs are allocated using the
 * suballocator.
 * Returns 0 on success, error on failure.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size)
{
	int r;

	r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
	if (r) {
		dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
		return r;
	}

	radeon_sync_create(&ib->sync);

	ib->ring = ring;
	ib->fence = NULL;
	ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
	ib->vm = vm;
	if (vm) {
		/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
		 * space and soffset is the offset inside the pool bo
		 */
		ib->gpu_addr = drm_suballoc_soffset(ib->sa_bo) + RADEON_VA_IB_OFFSET;
	} else {
		ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
	}
	ib->is_const_ib = false;

	return 0;
}

/**
 * radeon_ib_free - free an IB (Indirect Buffer)
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to free
 *
 * Free an IB (all asics).
 */
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	radeon_sync_free(rdev, &ib->sync, ib->fence);
	radeon_sa_bo_free(&ib->sa_bo, ib->fence);
	radeon_fence_unref(&ib->fence);
}

/**
 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 * @const_ib: Const IB to schedule (SI only)
 * @hdp_flush: Whether or not to perform an HDP cache flush
 *
 * Schedule an IB on the associated ring (all asics).
 * Returns 0 on success, error on failure.
 *
 * On SI, there are two parallel engines fed from the primary ring,
 * the CE (Constant Engine) and the DE (Drawing Engine). Since
 * resource descriptors have moved to memory, the CE allows you to
 * prime the caches while the DE is updating register state so that
 * the resource descriptors will be already in cache when the draw is
 * processed. To accomplish this, the userspace driver submits two
 * IBs, one for the CE and one for the DE. If there is a CE IB (called
 * a CONST_IB), it will be put on the ring prior to the DE IB. Prior
 * to SI there was just a DE IB.
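 *
 * The ring contents emitted for one submission are therefore roughly
 * (a sketch; the exact packets are asic specific):
 *
 *	[semaphore syncs] [vm flush] [CONST_IB packet] [IB packet] [fence]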
 */
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib, bool hdp_flush)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing is in the IB; this should be reported properly. */
		dev_err(rdev->dev, "couldn't schedule ib\n");
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
	if (r) {
		dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
		return r;
	}

	/* grab a vm id if necessary */
	if (ib->vm) {
		struct radeon_fence *vm_id_fence;

		vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
		radeon_sync_fence(&ib->sync, vm_id_fence);
	}

	/* sync with other rings */
	r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}

	if (ib->vm)
		radeon_vm_flush(rdev, ib->vm, ib->ring,
				ib->sync.last_vm_update);

	if (const_ib) {
		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
		radeon_sync_free(rdev, &const_ib->sync, NULL);
	}
	radeon_ring_ib_execute(rdev, ib->ring, ib);
	r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
	if (r) {
		dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
		radeon_ring_unlock_undo(rdev, ring);
		return r;
	}
	if (const_ib) {
		const_ib->fence = radeon_fence_ref(ib->fence);
	}

	if (ib->vm)
		radeon_vm_fence(rdev, ib->vm, ib->fence);

	radeon_ring_unlock_commit(rdev, ring, hdp_flush);
	return 0;
}

/**
 * radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Initialize the suballocator to manage a pool of memory
 * for use as IBs (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->ib_pool_ready) {
		return 0;
	}

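	/* The pool below is RADEON_IB_POOL_SIZE * 64K bytes of GTT,
	 * suballocated in 256-byte aligned chunks (1 MiB total, assuming
	 * the default RADEON_IB_POOL_SIZE of 16).
	 */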
	if (rdev->family >= CHIP_BONAIRE) {
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024, 256,
					      RADEON_GEM_DOMAIN_GTT,
					      RADEON_GEM_GTT_WC);
	} else {
		/* Before CIK, it's better to stick to cacheable GTT due
		 * to the command stream checking
		 */
		r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
					      RADEON_IB_POOL_SIZE*64*1024, 256,
					      RADEON_GEM_DOMAIN_GTT, 0);
	}
	if (r) {
		return r;
	}

	r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
	if (r) {
		return r;
	}

	rdev->ib_pool_ready = true;
	radeon_debugfs_sa_init(rdev);
	return 0;
}

/**
 * radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the suballocator managing the pool of memory
 * for use as IBs (all asics).
 */
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	if (rdev->ib_pool_ready) {
		radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
		radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
		rdev->ib_pool_ready = false;
	}
}

/**
 * radeon_ib_ring_tests - test IBs on the rings
 *
 * @rdev: radeon_device pointer
 *
 * Test an IB (Indirect Buffer) on each ring.
 * If the test fails, disable the ring.
 * Returns 0 on success, error if the primary GFX ring
 * IB test fails.
 */
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_ring *ring = &rdev->ring[i];

		if (!ring->ready)
			continue;

		r = radeon_ib_test(rdev, i, ring);
		if (r) {
			radeon_fence_driver_force_completion(rdev, i);
			ring->ready = false;
			rdev->needs_reset = false;

			if (i == RADEON_RING_TYPE_GFX_INDEX) {
				/* oh, oh, that's really bad */
				DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
				rdev->accel_working = false;
				return r;

			} else {
				/* still not good, but we can live with it */
				DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
			}
		}
	}
	return 0;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = m->private;

	radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

	return 0;
}

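/* DEFINE_SHOW_ATTRIBUTE() generates radeon_debugfs_sa_info_fops from
 * radeon_debugfs_sa_info_show() above; radeon_debugfs_sa_init() registers
 * that fops with debugfs below.
 */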
DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_sa_info);

#endif

static void radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root;

	debugfs_create_file("radeon_sa_info", 0444, root, rdev,
			    &radeon_debugfs_sa_info_fops);
#endif
}