/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)

#define GFX_OFF_NO_DELAY 0

/*
 * GPU GFX IP block helper functions.
 */

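/* Convert an (mec, pipe, queue) tuple into the flat bit index used by the
 * per-XCC MEC queue bitmaps: queues are laid out per pipe, and pipes per MEC.
 */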
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

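/* Inverse of amdgpu_gfx_mec_queue_to_bit(): decompose a flat queue-bitmap bit
 * index back into its (mec, pipe, queue) tuple.
 */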
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
					int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
		/ adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int xcc_id, int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
	unsigned int se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
{
	return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
}

static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
		return true;

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
						struct amdgpu_ring *ring)
{
	int queue = ring->queue;
	int pipe = ring->pipe;

	/* Policy: use pipe1 queue0 as high priority graphics queue if we
	 * have more than one gfx pipe.
	 */
	if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
	    adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
		int me = ring->me;
		int bit;

		bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
		if (ring == &adev->gfx.gfx_ring[bit])
			return true;
	}

	return false;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, j, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;

	if (multipipe_policy) {
		/* policy: make queues evenly cross all pipes on MEC1 only;
		 * for multiple XCCs, just use the original policy for simplicity */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; i++) {
				pipe = i % adev->gfx.mec.num_pipe_per_mec;
				queue = (i / adev->gfx.mec.num_pipe_per_mec) %
					adev->gfx.mec.num_queue_per_pipe;

				set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec_bitmap[j].queue_bitmap);
			}
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (j = 0; j < num_xcc; j++) {
			for (i = 0; i < max_queues_per_mec; ++i)
				set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
		}
	}

	for (j = 0; j < num_xcc; j++) {
		dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
			bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
	}
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
	int max_queues_per_me = adev->gfx.me.num_pipe_per_me *
				adev->gfx.me.num_queue_per_pipe;

	if (multipipe_policy) {
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * will extend to multiple queues per pipe later */
		for (i = 0; i < max_queues_per_me; i++) {
			pipe = i % adev->gfx.me.num_pipe_per_me;
			queue = (i / adev->gfx.me.num_pipe_per_me) %
				adev->gfx.me.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.me.num_queue_per_pipe + queue,
				adev->gfx.me.queue_bitmap);
		}
	} else {
		for (i = 0; i < max_queues_per_me; ++i)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

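/* Pick a compute queue for the KIQ: scan the MEC queue bitmap from the highest
 * bit down and take the first queue the driver does not already own, subject
 * to the restrictions noted in the loop below.
 */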
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring, int xcc_id)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (--queue_bit >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_irq_src *irq = &kiq->irq;
	struct amdgpu_ring *ring = &kiq->ring;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->xcc_id = xcc_id;
	ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
	ring->doorbell_index =
		(adev->doorbell_index.kiq +
		 xcc_id * adev->doorbell_index.xcc_doorbell_range)
		<< 1;

	r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	snprintf(ring->name, sizeof(ring->name), "kiq_%d.%d.%d.%d",
		 xcc_id, ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}


void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned int hpd_size, int xcc_id)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned int mqd_size, int xcc_id)
{
	int r, i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;
	u32 domain = AMDGPU_GEM_DOMAIN_GTT;

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
	/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
		domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif

	/* create MQD for KIQ */
	if (!adev->enable_mes_kiq && !ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV
		 * the VRAM domain is a must; otherwise the hypervisor triggers a
		 * SAVE_VF failure after the driver is unloaded, which means the MQD
		 * has been deallocated and GART-unbound. To limit that divergence we
		 * use the VRAM domain for the KIQ MQD on both SRIOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM |
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->mqd_obj,
					    &ring->mqd_gpu_addr,
					    &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		kiq->mqd_backup = kmalloc(mqd_size, GFP_KERNEL);
		if (!kiq->mqd_backup) {
			dev_warn(adev->dev,
				 "no memory to create MQD backup for ring %s\n", ring->name);
			return -ENOMEM;
		}
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    domain, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				ring->mqd_size = mqd_size;
				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i]) {
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
					return -ENOMEM;
				}
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    domain, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			ring->mqd_size = mqd_size;
			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[j] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[j]) {
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
				return -ENOMEM;
			}
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_ring *ring = NULL;
	int i, j;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		ring = &adev->gfx.compute_ring[j];
		kfree(adev->gfx.mec.mqd_backup[j]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &kiq->ring;
	kfree(kiq->mqd_backup);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

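/* Unmap all kernel compute queues (KCQs) of the given XCC through the KIQ and
 * test the KIQ ring afterwards; the test is skipped during RAS recovery on
 * gfx9.4.3 and while a job hang is being handled.
 */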
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	struct amdgpu_hive_info *hive;
	struct amdgpu_ras *ras;
	int hive_ras_recovery = 0;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings)) {
		spin_unlock(&kiq->ring_lock);
		return -ENOMEM;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_unmap_queues(kiq_ring,
					   &adev->gfx.compute_ring[j],
					   RESET_QUEUES, 0, 0);
	}

	/*
	 * This is a workaround: only skip the kiq_ring test
	 * during RAS recovery in the suspend stage for gfx9.4.3.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
		amdgpu_put_xgmi_hive(hive);
	}

	ras = amdgpu_ras_get_context(adev);
	if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) &&
	    ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery)) {
		spin_unlock(&kiq->ring_lock);
		return 0;
	}

	if (kiq_ring->sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i, r = 0;
	int j;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	spin_lock(&kiq->ring_lock);
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
						adev->gfx.num_gfx_rings)) {
			spin_unlock(&kiq->ring_lock);
			return -ENOMEM;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_unmap_queues(kiq_ring,
						   &adev->gfx.gfx_ring[j],
						   PREEMPT_QUEUES, 0, 0);
		}
	}

	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
		r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);

	return r;
}

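/* Map a driver queue-bitmap bit to the bit position expected by the KIQ
 * SET_RESOURCES packet, which assumes a fixed layout of 4 pipes per MEC and
 * 8 queues per pipe.
 */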
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

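/* Map all kernel compute queues (KCQs) of the given XCC: build the queue mask
 * owned by the driver, program it via the KIQ SET_RESOURCES packet, then map
 * each compute ring through the KIQ.
 */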
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	uint64_t queue_mask = 0;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i > (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
		 kiq_ring->queue);
	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		spin_unlock(&kiq->ring_lock);
		return r;
	}

	if (adev->enable_mes)
		queue_mask = ~0ULL;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		j = i + xcc_id * adev->gfx.num_compute_rings;
		kiq->pmf->kiq_map_queues(kiq_ring,
					 &adev->gfx.compute_ring[j]);
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int r, i, j;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
		return -EINVAL;

	amdgpu_device_flush_hdp(adev, NULL);

	spin_lock(&kiq->ring_lock);
	/* No need to map kcq on the slave */
	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
						adev->gfx.num_gfx_rings);
		if (r) {
			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
			spin_unlock(&kiq->ring_lock);
			return r;
		}

		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			j = i + xcc_id * adev->gfx.num_gfx_rings;
			kiq->pmf->kiq_map_queues(kiq_ring,
						 &adev->gfx.gfx_ring[j]);
		}
	}

	r = amdgpu_ring_test_helper(kiq_ring);
	spin_unlock(&kiq->ring_lock);
	if (r)
		DRM_ERROR("KGQ enable failed\n");

	return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. gfx off feature will be enabled by gfx ip after gfx cg pg enabled.
 * 2. other clients can send requests to disable the gfx off feature; such
 *    requests should be honored.
 * 3. other clients can cancel their request to disable the gfx off feature.
 * 4. other clients should not send a request to enable the gfx off feature
 *    before disabling it.
 */

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	unsigned long delay = GFX_OFF_DELAY_ENABLE;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (enable) {
		/* If the count is already 0, it means there's an imbalance bug somewhere.
		 * Note that the bug may be in a different caller than the one which triggers the
		 * WARN_ON_ONCE.
		 */
		if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
			goto unlock;

		adev->gfx.gfx_off_req_count--;

		if (adev->gfx.gfx_off_req_count == 0 &&
		    !adev->gfx.gfx_off_state) {
			/* If going to s2idle, no need to wait */
			if (adev->in_s0ix) {
				if (!amdgpu_dpm_set_powergating_by_smu(adev,
						AMD_IP_BLOCK_TYPE_GFX, true))
					adev->gfx.gfx_off_state = true;
			} else {
				schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
						      delay);
			}
		}
	} else {
		if (adev->gfx.gfx_off_req_count == 0) {
			cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

			if (adev->gfx.gfx_off_state &&
			    !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
				adev->gfx.gfx_off_state = false;

				if (adev->gfx.funcs->init_spm_golden) {
					dev_dbg(adev->dev,
						"GFXOFF is disabled, re-init SPM golden settings\n");
					amdgpu_gfx_init_spm_golden(adev);
				}
			}
		}

		adev->gfx.gfx_off_req_count++;
	}

unlock:
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_set_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_residency_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = amdgpu_dpm_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
	int r;

	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
		if (!amdgpu_persistent_edc_harvesting_supported(adev))
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);

		r = amdgpu_ras_block_late_init(adev, ras_block);
		if (r)
			return r;

		if (adev->gfx.cp_ecc_error_irq.funcs) {
			r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
			if (r)
				goto late_fini;
		}
	} else {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
	}

	return 0;
late_fini:
	amdgpu_ras_block_late_fini(adev, ras_block);
	return r;
}

int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
{
	int err = 0;
	struct amdgpu_gfx_ras *ras = NULL;

	/* If adev->gfx.ras is NULL, gfx does not support the RAS function,
	 * so do nothing here.
	 */
	if (!adev->gfx.ras)
		return 0;

	ras = adev->gfx.ras;

	err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
	if (err) {
		dev_err(adev->dev, "Failed to register gfx ras block!\n");
		return err;
	}

	strcpy(ras->ras_block.ras_comm.name, "gfx");
	ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
	ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
	adev->gfx.ras_if = &ras->ras_block.ras_comm;

	/* If no special ras_late_init function is defined, use the gfx default ras_late_init */
	if (!ras->ras_block.ras_late_init)
		ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;

	/* If no special ras_cb function is defined, use the default ras_cb */
	if (!ras->ras_block.ras_cb)
		ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;

	return 0;
}

int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
					  struct amdgpu_iv_entry *entry)
{
	if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
		return adev->gfx.ras->poison_consumption_handler(adev, entry);

	return 0;
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
				   void *err_data,
				   struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
		    adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
			adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
			       void *ras_error_status,
			       void (*func)(struct amdgpu_device *adev, void *ras_error_status,
					    int xcc_id))
{
	int i;
	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
	uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	if (err_data) {
		err_data->ue_count = 0;
		err_data->ce_count = 0;
	}

	for_each_inst(i, xcc_mask)
		func(adev, ras_error_status, i);
}

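/* Read a register through the KIQ: emit a read packet on the KIQ ring and poll
 * a writeback slot for the value. When the MES ring is ready, the read is
 * routed through MES instead.
 */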
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (adev->mes.ring.sched.ready)
		return amdgpu_mes_rreg(adev, reg);

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for gpu reset case because this way may
	 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

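/* Write a register through the KIQ: emit a write packet on the KIQ ring and
 * poll its fence for completion. When the MES ring is ready, the write is
 * routed through MES instead.
 */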
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (adev->mes.ring.sched.ready) {
		amdgpu_mes_wreg(adev, reg, v);
		return;
	}

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for gpu reset case because this way may
	 * block gpu_recover() routine forever, e.g. this virt_kiq_wreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_wreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * also don't wait anymore for IRQ context
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
				  uint32_t ucode_id)
{
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
	struct amdgpu_firmware_info *info = NULL;
	const struct firmware *ucode_fw;
	unsigned int fw_size;

	switch (ucode_id) {
	case AMDGPU_UCODE_ID_CP_PFP:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		adev->gfx.pfp_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.pfp_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.pfp_fw->data;
		ucode_fw = adev->gfx.pfp_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_ME:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		adev->gfx.me_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.me_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.me_fw->data;
		ucode_fw = adev->gfx.me_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_CE:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.ce_fw->data;
		adev->gfx.ce_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.ce_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.ce_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_MEC1:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC1_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		adev->gfx.mec2_fw_version =
			le32_to_cpu(cp_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version =
			le32_to_cpu(cp_hdr->ucode_feature_version);
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
			  le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_MEC2_JT:
		cp_hdr = (const struct gfx_firmware_header_v1_0 *)
			adev->gfx.mec2_fw->data;
		ucode_fw = adev->gfx.mec2_fw;
		fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		adev->gfx.mec_fw_version =
			le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
		adev->gfx.mec_feature_version =
			le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
		break;
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
		cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
			adev->gfx.mec_fw->data;
		ucode_fw = adev->gfx.mec_fw;
		fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
		break;
	default:
		break;
	}

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		info = &adev->firmware.ucode[ucode_id];
		info->ucode_id = ucode_id;
		info->fw = ucode_fw;
		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
	}
}

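/* The first XCC of each partition (i.e. every num_xcc_per_xcp XCCs) is
 * treated as the master XCC of that partition.
 */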
bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
{
	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
			adev->gfx.num_xcc_per_xcp : 1));
}

static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int mode;

	mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
					       AMDGPU_XCP_FL_NONE);

	return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
}

static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
						struct device_attribute *addr,
						const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amdgpu_gfx_partition mode;
	int ret = 0, num_xcc;

	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	if (num_xcc % 2 != 0)
		return -EINVAL;

	if (!strncasecmp("SPX", buf, strlen("SPX"))) {
		mode = AMDGPU_SPX_PARTITION_MODE;
	} else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
		/*
		 * DPX mode needs the number of AIDs to be a multiple of 2;
		 * each AID connects 2 XCCs, so num_xcc must be a multiple of 4.
		 */
		if (num_xcc % 4)
			return -EINVAL;
		mode = AMDGPU_DPX_PARTITION_MODE;
	} else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
		if (num_xcc != 6)
			return -EINVAL;
		mode = AMDGPU_TPX_PARTITION_MODE;
	} else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
		if (num_xcc != 8)
			return -EINVAL;
		mode = AMDGPU_QPX_PARTITION_MODE;
	} else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
		mode = AMDGPU_CPX_PARTITION_MODE;
	} else {
		return -EINVAL;
	}

	ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);

	if (ret)
		return ret;

	return count;
}

static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
						struct device_attribute *addr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	char *supported_partition;

	/* TBD */
	switch (NUM_XCC(adev->gfx.xcc_mask)) {
	case 8:
		supported_partition = "SPX, DPX, QPX, CPX";
		break;
	case 6:
		supported_partition = "SPX, TPX, CPX";
		break;
	case 4:
		supported_partition = "SPX, DPX, CPX";
		break;
	/* this seems to exist only in the emulation phase */
	case 2:
		supported_partition = "SPX, CPX";
		break;
	default:
		supported_partition = "Not supported";
		break;
	}

	return sysfs_emit(buf, "%s\n", supported_partition);
}

static DEVICE_ATTR(current_compute_partition, 0644,
		   amdgpu_gfx_get_current_compute_partition,
		   amdgpu_gfx_set_compute_partition);

static DEVICE_ATTR(available_compute_partition, 0444,
		   amdgpu_gfx_get_available_compute_partition, NULL);

int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
{
	int r;

	r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
	if (r)
		return r;

	r = device_create_file(adev->dev, &dev_attr_available_compute_partition);

	return r;
}

void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
{
	device_remove_file(adev->dev, &dev_attr_current_compute_partition);
	device_remove_file(adev->dev, &dev_attr_available_compute_partition);
}