// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <drm/drm_gem.h>

#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/* FIXME: add a banner here */
	gmu->hung = true;

	/* Turn off the hangcheck timer while we are resetting */
	del_timer(&gpu->hangcheck_timer);

	/* Queue the GPU handler because we need to treat this as a recovery */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}

static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
		dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");

		a6xx_gmu_fault(gmu);
	}

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
		dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");

	if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
		dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));

	return IRQ_HANDLED;
}

static irqreturn_t a6xx_hfi_irq(int irq, void *data)
{
	struct a6xx_gmu *gmu = data;
	u32 status;

	status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);

	if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
		dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");

		a6xx_gmu_fault(gmu);
	}

	return IRQ_HANDLED;
}

bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
}

/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
	u32 val;

	/* This can be called from gpu state code so make sure GMU is valid */
	if (!gmu->initialized)
		return false;

	val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);

	return !(val &
		(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
		A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
}

void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
		       bool suspended)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	u32 perf_index;
	unsigned long gpu_freq;
	int ret = 0;

	gpu_freq = dev_pm_opp_get_freq(opp);

	if (gpu_freq == gmu->freq)
		return;

	for (perf_index = 0; perf_index < gmu->nr_gpu_freqs - 1; perf_index++)
		if (gpu_freq == gmu->gpu_freqs[perf_index])
			break;

	gmu->current_perf_index = perf_index;
	gmu->freq = gmu->gpu_freqs[perf_index];

	trace_msm_gmu_freq_change(gmu->freq, perf_index);

	/*
	 * This can get called from devfreq while the hardware is idle. Don't
	 * bring up the power if it isn't already active. All we're doing here
	 * is updating the frequency so that when we come back online we're at
	 * the right rate.
	 */
	if (suspended)
		return;

	if (!gmu->legacy) {
		a6xx_hfi_set_freq(gmu, perf_index);
		dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
		return;
	}

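	/*
	 * Legacy path: program the DCVS registers directly and then use an
	 * OOB request to make the GMU apply the new performance index.
	 */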
	gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);

	gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
			((3 & 0xf) << 28) | perf_index);

	/*
	 * Send an invalid index as a vote for the bus bandwidth and let the
	 * firmware decide on the right vote
	 */
	gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);

	/* Set and clear the OOB for DCVS to trigger the GMU */
	a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);

	ret = gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
	if (ret)
		dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);

	dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
}

unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	return gmu->freq;
}

static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
	u32 val;
	int local = gmu->idle_level;

	/* SPTP and IFPC both report as IFPC */
	if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
		local = GMU_IDLE_STATE_IFPC;

	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

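	/*
	 * For IFPC the reported power state alone is not enough: only treat
	 * the GMU as idle once the GX rail has actually collapsed.
	 */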
	if (val == local) {
		if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
			!a6xx_gmu_gx_is_on(gmu))
			return true;
	}

	return false;
}

/* Wait for the GMU to get to its most idle state */
int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
	return spin_until(a6xx_gmu_check_idle_level(gmu));
}

static int a6xx_gmu_start(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 mask, reset_val, val;
	int ret;

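	/*
	 * The last word of DTCM is assumed to hold the GMU firmware version;
	 * newer firmware reports a different FW_INIT_RESULT pattern, so pick
	 * the expected value and mask accordingly.
	 */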
	val = gmu_read(gmu, REG_A6XX_GMU_CM3_DTCM_START + 0xff8);
	if (val <= 0x20010004) {
		mask = 0xffffffff;
		reset_val = 0xbabeface;
	} else {
		mask = 0x1ff;
		reset_val = 0x100;
	}

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	/* Set the log wptr index
	 * note: downstream saves the value in poweroff and restores it here
	 */
	if (adreno_is_a7xx(adreno_gpu))
		gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
	else
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);

	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
		(val & mask) == reset_val, 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");

	return ret;
}

static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
		val & 1, 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");

	return ret;
}

struct a6xx_gmu_oob_bits {
	int set, ack, set_new, ack_new, clear, clear_new;
	const char *name;
};

/* These are the interrupt / ack bits for each OOB request that are set
 * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
 */
static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
	[GMU_OOB_GPU_SET] = {
		.name = "GPU_SET",
		.set = 16,
		.ack = 24,
		.set_new = 30,
		.ack_new = 31,
		.clear = 24,
		.clear_new = 31,
	},

	[GMU_OOB_PERFCOUNTER_SET] = {
		.name = "PERFCOUNTER",
		.set = 17,
		.ack = 25,
		.set_new = 28,
		.ack_new = 30,
		.clear = 25,
		.clear_new = 29,
	},

	[GMU_OOB_BOOT_SLUMBER] = {
		.name = "BOOT_SLUMBER",
		.set = 22,
		.ack = 30,
		.clear = 30,
	},

	[GMU_OOB_DCVS_SET] = {
		.name = "GPU_DCVS",
		.set = 23,
		.ack = 31,
		.clear = 31,
	},
};

/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int ret;
	u32 val;
	int request, ack;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return -EINVAL;

	if (gmu->legacy) {
		request = a6xx_gmu_oob_bits[state].set;
		ack = a6xx_gmu_oob_bits[state].ack;
	} else {
		request = a6xx_gmu_oob_bits[state].set_new;
		ack = a6xx_gmu_oob_bits[state].ack_new;
		if (!request || !ack) {
			DRM_DEV_ERROR(gmu->dev,
				      "Invalid non-legacy GMU request %s\n",
				      a6xx_gmu_oob_bits[state].name);
			return -EINVAL;
		}
	}

	/* Trigger the requested OOB operation */
	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);

	/* Wait for the acknowledge interrupt */
	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
		val & (1 << ack), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev,
			"Timeout waiting for GMU OOB set %s: 0x%x\n",
			a6xx_gmu_oob_bits[state].name,
			gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));

	/* Clear the acknowledge interrupt */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);

	return ret;
}

/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
	int bit;

	WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));

	if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
		return;

	if (gmu->legacy)
		bit = a6xx_gmu_oob_bits[state].clear;
	else
		bit = a6xx_gmu_oob_bits[state].clear_new;

	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}

/* Enable CPU control of SPTP power collapse */
int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	if (!gmu->legacy)
		return 0;

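	/*
	 * 0x778000 (power up) and 0x778001 (power down, used in
	 * a6xx_sptprac_disable() below) are opaque SPTPRAC control words,
	 * presumably inherited from the downstream driver.
	 */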
	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x38) == 0x28, 1, 100);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
	}

	return ret;
}

/* Disable CPU control of SPTP power collapse */
void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
{
	u32 val;
	int ret;

	if (!gmu->legacy)
		return;

	/* Make sure retention is on */
	gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));

	gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
		(val & 0x04), 100, 10000);

	if (ret)
		DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
			gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}

/* Let the GMU know we are starting a boot sequence */
static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
{
	u32 vote;

	/* Let the GMU know we are getting ready for boot */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);

	/* Choose the "default" power level as the highest available */
	vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];

	gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
	gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);

	/* Let the GMU know the boot sequence has started */
	return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}

/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
	int ret;

	/* Disable the power counter so the GMU isn't busy */
	gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);

	/* Disable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
		a6xx_sptprac_disable(gmu);

	if (!gmu->legacy) {
		ret = a6xx_hfi_send_prep_slumber(gmu);
		goto out;
	}

	/* Tell the GMU to get ready to slumber */
	gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);

	ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
	a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);

	if (!ret) {
		/* Check to see if the GMU really did slumber */
		if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
			!= 0x0f) {
			DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
			ret = -ETIMEDOUT;
		}
	}

out:
	/* Put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
	return ret;
}

static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
	/* Wait for the register to finish posting */
	wmb();

	ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
		val & (1 << 1), 100, 10000);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
		return ret;
	}

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
		!val, 100, 10000);

	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
		return ret;
	}

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);

	return 0;
}

static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
	int ret;
	u32 val;

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);

	ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
		val, val & (1 << 16), 100, 10000);
	if (ret)
		DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");

	gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}

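/* The PDC registers are indexed by 32-bit word offset, hence the << 2 below */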
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
{
	msm_writel(value, ptr + (offset << 2));
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name);

static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct platform_device *pdev = to_platform_device(gmu->dev);
	void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
	u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
	void __iomem *seqptr = NULL;
	uint32_t pdc_address_offset;
	bool pdc_in_aop = false;

	if (IS_ERR(pdcptr))
		goto err;

	if (adreno_is_a650(adreno_gpu) ||
	    adreno_is_a660_family(adreno_gpu) ||
	    adreno_is_a7xx(adreno_gpu))
		pdc_in_aop = true;
	else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
		pdc_address_offset = 0x30090;
	else if (adreno_is_a619(adreno_gpu))
		pdc_address_offset = 0x300a0;
	else
		pdc_address_offset = 0x30080;

	if (!pdc_in_aop) {
		seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
		if (IS_ERR(seqptr))
			goto err;
	}

	/* Disable SDE clock gating */
	gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));

	/* Setup RSC PDC handshake for sleep and wakeup */
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4,
		       adreno_is_a740_family(adreno_gpu) ? 0x80000021 : 0x80000000);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
	gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);

	/* The second spin of A7xx GPUs messed with some register offsets.. */
	if (adreno_is_a740_family(adreno_gpu))
		seqmem0_drv0_reg = REG_A7XX_RSCC_SEQ_MEM_0_DRV0_A740;

	/* Load RSC sequencer uCode for sleep and wakeup */
	if (adreno_is_a650_family(adreno_gpu) ||
	    adreno_is_a7xx(adreno_gpu)) {
		gmu_write_rscc(gmu, seqmem0_drv0_reg, 0xeaaae5a0);
		gmu_write_rscc(gmu, seqmem0_drv0_reg + 1, 0xe1a1ebab);
		gmu_write_rscc(gmu, seqmem0_drv0_reg + 2, 0xa2e0a581);
		gmu_write_rscc(gmu, seqmem0_drv0_reg + 3, 0xecac82e2);
		gmu_write_rscc(gmu, seqmem0_drv0_reg + 4, 0x0020edad);
	} else {
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
		gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
	}

	if (pdc_in_aop)
		goto setup_pdc;

	/* Load PDC sequencer uCode for power up and power down sequence */
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
	pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);

	/* Set TCS commands used by PDC sequence for low power modes */
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);

	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
	if (adreno_is_a618(adreno_gpu) || adreno_is_a619(adreno_gpu) ||
			adreno_is_a650_family(adreno_gpu))
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
	else
		pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);

	/* Setup GPU PDC */
setup_pdc:
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
	pdc_write(pdcptr, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);

	/* ensure no writes happen before the uCode is fully written */
	wmb();

	a6xx_rpmh_stop(gmu);

err:
	if (!IS_ERR_OR_NULL(pdcptr))
		iounmap(pdcptr);
	if (!IS_ERR_OR_NULL(seqptr))
		iounmap(seqptr);
}

/*
 * The lowest 16 bits of this value are the number of XO clock cycles for main
 * hysteresis which is set at 0x1680 cycles (300 us). The higher 16 bits are
 * for the shorter hysteresis that happens after main - this is 0xa (.5 us)
 */
#define GMU_PWR_COL_HYST 0x000a1680

/* Set up the idle state for the GMU */
static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;

	/* Disable GMU WB/RB buffer */
	gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
	gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);

	/* A7xx knows better by default! */
	if (adreno_is_a7xx(adreno_gpu))
		return;

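	/*
	 * Opaque inter-frame power collapse defaults; the individual fields
	 * are not documented here and the value is presumably carried over
	 * from the downstream driver.
	 */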
	gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);

	switch (gmu->idle_level) {
	case GMU_IDLE_STATE_IFPC:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
		fallthrough;
	case GMU_IDLE_STATE_SPTP:
		gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
			GMU_PWR_COL_HYST);
		gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
			A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
	}

	/* Enable RPMh GPU client */
	gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
		A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
		A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
		A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}

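/*
 * Layout of one block in the non-legacy GMU firmware image: a small header
 * followed by the payload. Field names are reconstructed from their use in
 * fw_block_mem() and a6xx_gmu_fw_load() below; type and value are not
 * referenced here and are assumed from the known GMU firmware block format.
 * The header size matters, since it determines how the block list is walked.
 */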
struct block_header {
	u32 addr;
	u32 size;
	u32 type;
	u32 value;
	u32 data[];
};

static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
{
	if (!in_range(blk->addr, bo->iova, bo->size))
		return false;

	memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
	return true;
}

static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
	const struct block_header *blk;
	u32 reg_offset;

	u32 itcm_base = 0x00000000;
	u32 dtcm_base = 0x00040000;

	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
		dtcm_base = 0x10004000;

	if (gmu->legacy) {
		/* Sanity check the size of the firmware that was loaded */
		if (fw_image->size > 0x8000) {
			DRM_DEV_ERROR(gmu->dev,
				"GMU firmware is bigger than the available region\n");
			return -EINVAL;
		}

		gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
			       (u32*) fw_image->data, fw_image->size);
		return 0;
	}

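	/*
	 * Walk the list of firmware blocks: each block is a header followed
	 * by 'size' bytes of payload, so the next header starts right after
	 * data[size >> 2].
	 */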
	for (blk = (const struct block_header *) fw_image->data;
	     (const u8*) blk < fw_image->data + fw_image->size;
	     blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
		if (blk->size == 0)
			continue;

		if (in_range(blk->addr, itcm_base, SZ_16K)) {
			reg_offset = (blk->addr - itcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
				blk->data, blk->size);
		} else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
			reg_offset = (blk->addr - dtcm_base) >> 2;
			gmu_write_bulk(gmu,
				REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
				blk->data, blk->size);
		} else if (!fw_block_mem(&gmu->icache, blk) &&
			   !fw_block_mem(&gmu->dcache, blk) &&
			   !fw_block_mem(&gmu->dummy, blk)) {
			DRM_DEV_ERROR(gmu->dev,
				"failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
				blk->addr, blk->size, blk->data[0]);
		}
	}

	return 0;
}

static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 fence_range_lower, fence_range_upper;
	u32 chipid, chipid_min = 0;
	int ret;

	/* Vote veto for FAL10 */
	if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
	}

	/* Turn on TCM (Tightly Coupled Memory) retention */
	if (adreno_is_a7xx(adreno_gpu))
		a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
	else
		gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);

	if (state == GMU_WARM_BOOT) {
		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;
	} else {
		if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
			"GMU firmware is not loaded\n"))
			return -ENOENT;

		ret = a6xx_rpmh_start(gmu);
		if (ret)
			return ret;

		ret = a6xx_gmu_fw_load(gmu);
		if (ret)
			return ret;
	}

	/* Clear init result to make sure we are getting a fresh value */
	gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);

	/* Write the iova of the HFI table */
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);

	if (adreno_is_a7xx(adreno_gpu)) {
		fence_range_upper = 0x32;
		fence_range_lower = 0x8a0;
	} else {
		fence_range_upper = 0xa;
		fence_range_lower = 0xa0;
	}

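	/*
	 * BIT(31) enables the AHB fence; the two packed fields presumably
	 * bound the address range that the fence guards.
	 */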
	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
		  BIT(31) |
		  FIELD_PREP(GENMASK(30, 18), fence_range_upper) |
		  FIELD_PREP(GENMASK(17, 0), fence_range_lower));

	/*
	 * Snapshots toggle the NMI bit which will result in a jump to the NMI
	 * handler instead of __main. Set the M3 config value to avoid that.
	 */
	gmu_write(gmu, REG_A6XX_GMU_CM3_CFG, 0x4052);

	/* NOTE: A730 may also fall in this if-condition with a future GMU fw update. */
	if (adreno_is_a7xx(adreno_gpu) && !adreno_is_a730(adreno_gpu)) {
		/* A7xx GPUs have obfuscated chip IDs. Use constant maj = 7 */
		chipid = FIELD_PREP(GENMASK(31, 24), 0x7);

		/*
		 * The min part has a 1-1 mapping for each GPU SKU.
		 * This chipid that the GMU expects corresponds to the "GENX_Y_Z" naming,
		 * where X = major, Y = minor, Z = patchlevel, e.g. GEN7_2_1 for prod A740.
		 */
		if (adreno_is_a740(adreno_gpu))
			chipid_min = 2;
		else if (adreno_is_a750(adreno_gpu))
			chipid_min = 9;
		else
			return -EINVAL;

		chipid |= FIELD_PREP(GENMASK(23, 16), chipid_min);

		/* Get the patchid (which may vary) from the device tree */
		chipid |= FIELD_PREP(GENMASK(15, 8), adreno_patchid(adreno_gpu));
	} else {
		/*
		 * Note that the GMU has a slightly different layout for
		 * chip_id, for whatever reason, so a bit of massaging
		 * is needed. The upper 16b are the same, but minor and
		 * patchid are packed in four bits each with the lower
		 * 8b unused:
		 */
		chipid = adreno_gpu->chip_id & 0xffff0000;
		chipid |= (adreno_gpu->chip_id << 4) & 0xf000; /* minor */
		chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
	}

	if (adreno_is_a7xx(adreno_gpu)) {
		gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
		gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
			  (gmu->log.iova & GENMASK(31, 12)) |
			  ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
	} else {
		gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);

		gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
			  gmu->log.iova | (gmu->log.size / SZ_4K - 1));
	}

	/* Set up the lowest idle level on the GMU */
	a6xx_gmu_power_config(gmu);

	ret = a6xx_gmu_start(gmu);
	if (ret)
		return ret;

	if (gmu->legacy) {
		ret = a6xx_gmu_gfx_rail_on(gmu);
		if (ret)
			return ret;
	}

	/* Enable SPTP_PC if the CPU is responsible for it */
	if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
		ret = a6xx_sptprac_enable(gmu);
		if (ret)
			return ret;
	}

	ret = a6xx_gmu_hfi_start(gmu);
	if (ret)
		return ret;

	/* FIXME: Do we need this wmb() here? */
	wmb();

	return 0;
}

#define A6XX_HFI_IRQ_MASK \
	(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)

#define A6XX_GMU_IRQ_MASK \
	(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
	 A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)

static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
	disable_irq(gmu->gmu_irq);
	disable_irq(gmu->hfi_irq);

	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}

static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val, seqmem_off = 0;

	/* The second spin of A7xx GPUs messed with some register offsets.. */
	if (adreno_is_a740_family(adreno_gpu))
		seqmem_off = 4;

	/* Make sure there are no outstanding RPMh votes */
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS + seqmem_off,
		val, (val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS + seqmem_off,
		val, (val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS + seqmem_off,
		val, (val & 1), 100, 10000);
	gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
		val, (val & 1), 100, 10000);
}

/* Force the GMU off in case it isn't responsive */
static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;

	/*
	 * Turn off keep alive that might have been enabled by the hang
	 * interrupt
	 */
	gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);

	/* Flush all the queues */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts */
	a6xx_gmu_irq_disable(gmu);

	/* Force off SPTP in case the GMU is managing it */
	a6xx_sptprac_disable(gmu);

	/* Make sure there are no outstanding RPMh votes */
	a6xx_gmu_rpmh_off(gmu);

	/* Clear the WRITEDROPPED fields and put fence into allow mode */
	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS_CLR, 0x7);
	gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);

	/* Make sure the above writes go through */
	wmb();

	/* Halt the gmu cm3 core */
	gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);

	a6xx_bus_clear_pending_transactions(adreno_gpu, true);

	/* Reset GPU core blocks */
	a6xx_gpu_sw_reset(gpu, true);
}

static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
	a6xx_gmu_set_freq(gpu, gpu_opp, false);
	dev_pm_opp_put(gpu_opp);
}

static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
{
	struct dev_pm_opp *gpu_opp;
	unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];

	gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
	if (IS_ERR(gpu_opp))
		return;

	dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
	dev_pm_opp_put(gpu_opp);
}

int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int status, ret;

	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
		return -EINVAL;

	gmu->hung = false;

	/* Notify AOSS about the ACD state (unimplemented for now => disable it) */
	if (!IS_ERR(gmu->qmp)) {
		ret = qmp_send(gmu->qmp, "{class: gpu, res: acd, val: %d}",
			       0 /* Hardcode ACD to be disabled for now */);
		if (ret)
			dev_err(gmu->dev, "failed to send GPU ACD state\n");
	}

	/* Turn on the resources */
	pm_runtime_get_sync(gmu->dev);

	/*
	 * "enable" the GX power domain which won't actually do anything but it
	 * will make sure that the refcounting is correct in case we need to
	 * bring down the GX after a GMU failure
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_get_sync(gmu->gxpd);

	/* Use a known rate to bring up the GMU */
	clk_set_rate(gmu->core_clk, 200000000);
	clk_set_rate(gmu->hub_clk, adreno_is_a740_family(adreno_gpu) ?
		     200000000 : 150000000);
	ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
	if (ret) {
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
		return ret;
	}

	/* Set the bus quota to a reasonable value for boot */
	a6xx_gmu_set_initial_bw(gpu, gmu);

	/* Enable the GMU interrupt */
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
	enable_irq(gmu->gmu_irq);

	/* Check to see if we are doing a cold or warm boot */
	if (adreno_is_a7xx(adreno_gpu)) {
		status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
			GMU_WARM_BOOT : GMU_COLD_BOOT;
	} else if (gmu->legacy) {
		status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
			GMU_WARM_BOOT : GMU_COLD_BOOT;
	} else {
		/*
		 * Warm boot path does not work on newer A6xx GPUs
		 * Presumably this is because icache/dcache regions must be restored
		 */
		status = GMU_COLD_BOOT;
	}

	ret = a6xx_gmu_fw_start(gmu, status);
	if (ret)
		goto out;

	ret = a6xx_hfi_start(gmu, status);
	if (ret)
		goto out;

	/*
	 * Turn on the GMU firmware fault interrupt after we know the boot
	 * sequence is successful
	 */
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
	enable_irq(gmu->hfi_irq);

	/* Set the GPU to the current freq */
	a6xx_gmu_set_initial_freq(gpu, gmu);

out:
	/* On failure, shut down the GMU to leave it in a good state */
	if (ret) {
		disable_irq(gmu->gmu_irq);
		a6xx_rpmh_stop(gmu);
		pm_runtime_put(gmu->gxpd);
		pm_runtime_put(gmu->dev);
	}

	return ret;
}

bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
{
	u32 reg;

	if (!gmu->initialized)
		return true;

	reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);

	if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
		return false;

	return true;
}

/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	u32 val;

	/*
	 * The GMU may still be in slumber unless the GPU started so check and
	 * skip putting it back into slumber if so
	 */
	val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);

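	/* 0xf is the slumber state (see a6xx_gmu_notify_slumber()) */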
	if (val != 0xf) {
		int ret = a6xx_gmu_wait_for_idle(gmu);

		/* If the GMU isn't responding assume it is hung */
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);

		/* tell the GMU we want to slumber */
		ret = a6xx_gmu_notify_slumber(gmu);
		if (ret) {
			a6xx_gmu_force_off(gmu);
			return;
		}

		ret = gmu_poll_timeout(gmu,
			REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
			!(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
			100, 10000);

		/*
		 * Let the user know we failed to slumber but don't worry too
		 * much because we are powering down anyway
		 */
		if (ret)
			DRM_DEV_ERROR(gmu->dev,
				"Unable to slumber GMU: status = 0x%x/0x%x\n",
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
				gmu_read(gmu,
					REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
	}

	/* Turn off HFI */
	a6xx_hfi_stop(gmu);

	/* Stop the interrupts and mask the hardware */
	a6xx_gmu_irq_disable(gmu);

	/* Tell RPMh to power off the GPU */
	a6xx_rpmh_stop(gmu);
}

int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct msm_gpu *gpu = &a6xx_gpu->base.base;

	if (!pm_runtime_active(gmu->dev))
		return 0;

	/*
	 * Force the GMU off if we detected a hang, otherwise try to shut it
	 * down gracefully
	 */
	if (gmu->hung)
		a6xx_gmu_force_off(gmu);
	else
		a6xx_gmu_shutdown(gmu);

	/* Remove the bus vote */
	dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);

	/*
	 * Make sure the GX domain is off before turning off the GMU (CX)
	 * domain. Usually the GMU does this but only if the shutdown sequence
	 * was successful
	 */
	if (!IS_ERR_OR_NULL(gmu->gxpd))
		pm_runtime_put_sync(gmu->gxpd);

	clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);

	pm_runtime_put_sync(gmu->dev);

	return 0;
}

static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->icache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace);
	msm_gem_kernel_put(gmu->log.obj, gmu->aspace);

	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
	msm_gem_address_space_put(gmu->aspace);
}

static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
		size_t size, u64 iova, const char *name)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct drm_device *dev = a6xx_gpu->base.base.dev;
	uint32_t flags = MSM_BO_WC;
	u64 range_start, range_end;
	int ret;

	size = PAGE_ALIGN(size);
	if (!iova) {
		/* no fixed address - use GMU's uncached range */
		range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
		range_end = 0x80000000;
	} else {
		/* range for fixed address */
		range_start = iova;
		range_end = iova + size;
		/* use IOMMU_PRIV for icache/dcache */
		flags |= MSM_BO_MAP_PRIV;
	}

	bo->obj = msm_gem_new(dev, size, flags);
	if (IS_ERR(bo->obj))
		return PTR_ERR(bo->obj);

	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
					     range_start, range_end);
	if (ret) {
		drm_gem_object_put(bo->obj);
		return ret;
	}

	bo->virt = msm_gem_get_vaddr(bo->obj);
	bo->size = size;

	msm_gem_object_set_name(bo->obj, name);

	return 0;
}

static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(gmu->dev, 0);
	if (!mmu)
		return -ENODEV;
	if (IS_ERR(mmu))
		return PTR_ERR(mmu);

	gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
	if (IS_ERR(gmu->aspace))
		return PTR_ERR(gmu->aspace);

	return 0;
}

/* Return the 'arc-level' for the given frequency */
static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
					   unsigned long freq)
{
	struct dev_pm_opp *opp;
	unsigned int val;

	if (!freq)
		return 0;

	opp = dev_pm_opp_find_freq_exact(dev, freq, true);
	if (IS_ERR(opp))
		return 0;

	val = dev_pm_opp_get_level(opp);

	dev_pm_opp_put(opp);

	return val;
}

static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
		unsigned long *freqs, int freqs_count, const char *id)
{
	int i, j;
	const u16 *pri, *sec;
	size_t pri_count, sec_count;

	pri = cmd_db_read_aux_data(id, &pri_count);
	if (IS_ERR(pri))
		return PTR_ERR(pri);
	/*
	 * The data comes back as an array of unsigned shorts so adjust the
	 * count accordingly
	 */
	pri_count >>= 1;
	if (!pri_count)
		return -EINVAL;

	sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
	if (IS_ERR(sec))
		return PTR_ERR(sec);

	sec_count >>= 1;
	if (!sec_count)
		return -EINVAL;

	/* Construct a vote for each frequency */
	for (i = 0; i < freqs_count; i++) {
		u8 pindex = 0, sindex = 0;
		unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);

		/* Get the primary index that matches the arc level */
		for (j = 0; j < pri_count; j++) {
			if (pri[j] >= level) {
				pindex = j;
				break;
			}
		}

		if (j == pri_count) {
			DRM_DEV_ERROR(dev,
				"Level %u not found in the RPMh list\n",
				level);
			DRM_DEV_ERROR(dev, "Available levels:\n");
			for (j = 0; j < pri_count; j++)
				DRM_DEV_ERROR(dev, "  %u\n", pri[j]);

			return -EINVAL;
		}

		/*
		 * Look for a level in the secondary list that matches. If
		 * nothing fits, use the maximum non zero vote
		 */
		for (j = 0; j < sec_count; j++) {
			if (sec[j] >= level) {
				sindex = j;
				break;
			} else if (sec[j]) {
				sindex = j;
			}
		}

		/*
		 * Construct the vote: primary level value in [31:16],
		 * secondary index in [15:8], primary index in [7:0]
		 */
		votes[i] = ((pri[pindex] & 0xffff) << 16) |
			(sindex << 8) | pindex;
	}

	return 0;
}

/*
 * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
 * to construct the list of votes on the CPU and send it over. Query the RPMh
 * voltage levels and build the votes
 */
static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	/* Build the GX votes */
	ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
		gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");

	/* Build the CX votes */
	ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
		gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");

	return ret;
}

static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
		u32 size)
{
	int count = dev_pm_opp_get_opp_count(dev);
	struct dev_pm_opp *opp;
	int i, index = 0;
	unsigned long freq = 1;

	/*
	 * The OPP table doesn't contain the "off" frequency level so we need to
	 * add 1 to the table size to account for it
	 */
	if (WARN(count + 1 > size,
		"The GMU frequency table is being truncated\n"))
		count = size - 1;

	/* Set the "off" frequency */
	freqs[index++] = 0;

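	/*
	 * dev_pm_opp_find_freq_ceil() returns the lowest OPP >= freq and
	 * writes the matched rate back into freq, so bumping freq by one
	 * each iteration steps through the table in ascending order.
	 */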
	for (i = 0; i < count; i++) {
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp))
			break;

		dev_pm_opp_put(opp);
		freqs[index++] = freq++;
	}

	return index;
}

static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
{
	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret = 0;

	/*
	 * The GMU handles its own frequency switching so build a list of
	 * available frequencies to send during initialization
	 */
	ret = devm_pm_opp_of_add_table(gmu->dev);
	if (ret) {
		DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
		return ret;
	}

	gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
		gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));

	/*
	 * The GMU also handles GPU frequency switching so build a list
	 * from the GPU OPP table
	 */
	gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
		gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));

	gmu->current_perf_index = gmu->nr_gpu_freqs - 1;

	/* Build the list of RPMh votes that we'll send to the GMU */
	return a6xx_gmu_rpmh_votes_init(gmu);
}

static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
{
	int ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);

	if (ret < 1)
		return ret;

	gmu->nr_clocks = ret;

	gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "gmu");

	gmu->hub_clk = msm_clk_bulk_get_clock(gmu->clocks,
		gmu->nr_clocks, "hub");

	return 0;
}

static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
		const char *name)
{
	void __iomem *ret;
	struct resource *res = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, name);

	if (!res) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	ret = ioremap(res->start, resource_size(res));
	if (!ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
		return ERR_PTR(-EINVAL);
	}

	return ret;
}

static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
		const char *name, irq_handler_t handler)
{
	int irq, ret;

	irq = platform_get_irq_byname(pdev, name);

	ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
	if (ret) {
		DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
			      name, ret);
		return ret;
	}

	disable_irq(irq);

	return irq;
}

void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = to_platform_device(gmu->dev);

	mutex_lock(&gmu->lock);
	if (!gmu->initialized) {
		mutex_unlock(&gmu->lock);
		return;
	}

	gmu->initialized = false;

	mutex_unlock(&gmu->lock);

	pm_runtime_force_suspend(gmu->dev);

	/*
	 * Since cxpd is a virt device, the devlink with gmu-dev will be removed
	 * automatically when we do detach
	 */
	dev_pm_domain_detach(gmu->cxpd, false);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	if (!IS_ERR_OR_NULL(gmu->qmp))
		qmp_put(gmu->qmp);

	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	gmu->mmio = NULL;
	gmu->rscc = NULL;

	if (!adreno_has_gmu_wrapper(adreno_gpu)) {
		a6xx_gmu_memory_free(gmu);

		free_irq(gmu->gmu_irq, gmu);
		free_irq(gmu->hfi_irq, gmu);
	}

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);
}

static int cxpd_notifier_cb(struct notifier_block *nb,
			    unsigned long action, void *data)
{
	struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);

	if (action == GENPD_NOTIFY_OFF)
		complete_all(&gmu->pd_gate);

	return 0;
}

int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	pm_runtime_enable(gmu->dev);

	/* Mark legacy for manual SPTPRAC control */
	gmu->legacy = true;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_mmio;
	}

	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
	if (IS_ERR(gmu->cxpd)) {
		ret = PTR_ERR(gmu->cxpd);
		goto err_mmio;
	}

	if (!device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME)) {
		ret = -ENODEV;
		goto detach_cxpd;
	}

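	/* Start out "completed" so the first wait on pd_gate does not block */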
	init_completion(&gmu->pd_gate);
	complete_all(&gmu->pd_gate);
	gmu->pd_nb.notifier_call = cxpd_notifier_cb;

	/* Get a link to the GX power domain to reset the GPU */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
	if (IS_ERR(gmu->gxpd)) {
		ret = PTR_ERR(gmu->gxpd);
		goto err_mmio;
	}

	gmu->initialized = true;

	return 0;

detach_cxpd:
	dev_pm_domain_detach(gmu->cxpd, false);

err_mmio:
	iounmap(gmu->mmio);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}

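/*
 * Full GMU bring-up: probe clocks and GMU-private memory, allocate the
 * firmware BOs, map the MMIO/RSCC regions, wire up interrupts and power
 * domains, and initialize the HFI queues and RPMh votes.
 */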
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device_link *link;
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/*
	 * A660 requires handling "prealloc requests" in the GMU firmware. For
	 * now just hardcode the allocations based on the known firmware.
	 * Note: there is no indication that these correspond to "dummy" or
	 * "debug" regions, but this guess allows reusing BOs which are
	 * otherwise unused by a660.
	 */
	gmu->dummy.size = SZ_4K;
	if (adreno_is_a660_family(adreno_gpu) ||
	    adreno_is_a7xx(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
					    0x60400000, "debug");
		if (ret)
			goto err_memory;

		gmu->dummy.size = SZ_8K;
	}

	/* Allocate memory for the GMU dummy page */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
				    0x60000000, "dummy");
	if (ret)
		goto err_memory;

	/* Note that a650 family also includes a660 family: */
	if (adreno_is_a650_family(adreno_gpu) ||
	    adreno_is_a7xx(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
					    SZ_16M - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;
		/*
		 * NOTE: when porting legacy ("pre-650-family") GPUs you may be
		 * tempted to add a condition to allocate icache/dcache here,
		 * as per the downstream code flow, but it may not actually be
		 * necessary. If you omit this step and don't get random
		 * pagefaults, you are likely good to go without it!
		 */
	} else if (adreno_is_a640_family(adreno_gpu)) {
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
					    SZ_256K - SZ_16K, 0x04000, "icache");
		if (ret)
			goto err_memory;

		ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
					    SZ_256K - SZ_16K, 0x44000, "dcache");
		if (ret)
			goto err_memory;
	} else if (adreno_is_a630_family(adreno_gpu)) {
		/* HFI v1, has sptprac */
		gmu->legacy = true;

		/* Allocate memory for the GMU debug region */
		ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
		if (ret)
			goto err_memory;
	}

	/* Allocate memory for the GMU log region */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_16K, 0, "log");
	if (ret)
		goto err_memory;

	/* Allocate memory for the HFI queues */
	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
	if (ret)
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio)) {
		ret = PTR_ERR(gmu->mmio);
		goto err_memory;
	}

	if (adreno_is_a650_family(adreno_gpu) ||
	    adreno_is_a7xx(adreno_gpu)) {
		gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
		if (IS_ERR(gmu->rscc)) {
			ret = -ENODEV;
			goto err_mmio;
		}
	} else {
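		/* On older targets the RSCC block sits at a fixed offset inside the GMU region */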
		gmu->rscc = gmu->mmio + 0x23000;
	}

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) {
		ret = -ENODEV;
		goto err_mmio;
	}

	gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
	if (IS_ERR(gmu->cxpd)) {
		ret = PTR_ERR(gmu->cxpd);
		goto err_mmio;
	}

	link = device_link_add(gmu->dev, gmu->cxpd, DL_FLAG_PM_RUNTIME);
	if (!link) {
		ret = -ENODEV;
		goto detach_cxpd;
	}

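	/* The AOSS QMP mailbox is only required on a7xx; older GPUs may lack it */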
	gmu->qmp = qmp_get(gmu->dev);
	if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
		ret = PTR_ERR(gmu->qmp);
		goto remove_device_link;
	}

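	/* Start out "completed" so the first wait on pd_gate does not block */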
	init_completion(&gmu->pd_gate);
	complete_all(&gmu->pd_gate);
	gmu->pd_nb.notifier_call = cxpd_notifier_cb;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
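	/* gxpd is optional here; users check IS_ERR_OR_NULL() before use */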

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	/* Initialize RPMh */
	a6xx_gmu_rpmh_init(gmu);

	gmu->initialized = true;

	return 0;

remove_device_link:
	device_link_del(link);

detach_cxpd:
	dev_pm_domain_detach(gmu->cxpd, false);

err_mmio:
	iounmap(gmu->mmio);
	if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
		iounmap(gmu->rscc);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}