// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
#include "panfrost_regs.h"

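/*
 * Top-half GPU interrupt handler: log GPU faults, hand performance
 * counter events off to panfrost_perfcnt, and acknowledge the handled
 * bits in GPU_INT_CLEAR.
 */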
static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 state = gpu_read(pfdev, GPU_INT_STAT);
	u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);

	if (!state)
		return IRQ_NONE;

	if (state & GPU_IRQ_MASK_ERROR) {
		u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
		address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);

		dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
			 fault_status, panfrost_exception_name(fault_status & 0xFF),
			 address);

		if (state & GPU_IRQ_MULTIPLE_FAULT)
			dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");

		gpu_write(pfdev, GPU_INT_MASK, 0);
	}

	if (state & GPU_IRQ_PERFCNT_SAMPLE_COMPLETED)
		panfrost_perfcnt_sample_done(pfdev);

	if (state & GPU_IRQ_CLEAN_CACHES_COMPLETED)
		panfrost_perfcnt_clean_cache_done(pfdev);

	gpu_write(pfdev, GPU_INT_CLEAR, state);

	return IRQ_HANDLED;
}

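/*
 * Soft-reset the GPU and poll GPU_INT_RAWSTAT for the RESET_COMPLETED
 * bit before re-enabling the full interrupt mask.
 */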
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;

	gpu_write(pfdev, GPU_INT_MASK, 0);
	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
	gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);

	ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
		val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);

	if (ret) {
		dev_err(pfdev->dev, "gpu soft reset timed out\n");
		return ret;
	}

	gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
	gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);

	/*
	 * All in-flight jobs should have released their cycle
	 * counter references upon reset, but let us make sure
	 */
	if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0))
		atomic_set(&pfdev->cycle_counter.use_count, 0);

	return 0;
}

void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev)
{
	/*
	 * The Amlogic integrated Mali-T820, Mali-G31 & Mali-G52 need
	 * these undocumented bits in GPU_PWR_OVERRIDE1 to be set in order
	 * to operate correctly.
	 */
	gpu_write(pfdev, GPU_PWR_KEY, GPU_PWR_KEY_UNLOCK);
	gpu_write(pfdev, GPU_PWR_OVERRIDE1, 0xfff | (0x20 << 16));
}

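/*
 * Program per-model/per-revision hardware workarounds into the SHADER,
 * TILER and JM config registers, then let the platform integration
 * apply any vendor-specific quirks.
 */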
static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
{
	u32 quirks = 0;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
	    panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
		quirks |= SC_LS_PAUSEBUFFER_DISABLE;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
		quirks |= SC_SDC_DISABLE_OQ_DISCARD;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
		quirks |= SC_ENABLE_TEXGRD_FLAGS;

	if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
		if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
			quirks |= SC_LS_ATTR_CHECK_DISABLE;
		else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
			quirks |= SC_LS_ALLOW_ATTR_TYPES;
	}

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TTRX_2968_TTRX_3162))
		quirks |= SC_VAR_ALGORITHM;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
		quirks |= SC_TLS_HASH_ENABLE;

	if (quirks)
		gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);

	quirks = gpu_read(pfdev, GPU_TILER_CONFIG);

	/* Set tiler clock gate override if required */
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
		quirks |= TC_CLOCK_GATE_OVERRIDE;

	gpu_write(pfdev, GPU_TILER_CONFIG, quirks);

	quirks = 0;
	if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
	    pfdev->features.revision >= 0x2000)
		quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
	else if (panfrost_model_eq(pfdev, 0x6000) &&
		 pfdev->features.coherency_features == COHERENCY_ACE)
		quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
			  JM_FORCE_COHERENCY_FEATURES_SHIFT;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_IDVS_GROUP_SIZE))
		quirks |= JM_DEFAULT_IDVS_GROUP_SIZE << JM_IDVS_GROUP_SIZE_SHIFT;

	if (quirks)
		gpu_write(pfdev, GPU_JM_CONFIG, quirks);

	/* Platform-specific (vendor) quirks go last */
	if (pfdev->comp->vendor_quirk)
		pfdev->comp->vendor_quirk(pfdev);
}

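/*
 * Table of known GPU models. Each entry carries the baseline feature and
 * issue bitmaps for that model, plus up to MAX_HW_REVS per-revision issue
 * sets keyed on the packed major/minor/status revision value.
 */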
#define MAX_HW_REVS 6

struct panfrost_model {
	const char *name;
	u32 id;
	u32 id_mask;
	u64 features;
	u64 issues;
	struct {
		u32 revision;
		u64 issues;
	} revs[MAX_HW_REVS];
};

#define GPU_MODEL(_name, _id, ...) \
{\
	.name = __stringify(_name),				\
	.id = _id,						\
	.features = hw_features_##_name,			\
	.issues = hw_issues_##_name,				\
	.revs = { __VA_ARGS__ },				\
}

#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
{\
	.revision = (_rev) << 12 | (_p) << 4 | (_s),		\
	.issues = hw_issues_##name##_r##_rev##p##_p##stat,	\
}
#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )

static const struct panfrost_model gpu_models[] = {
	/* T60x has an oddball version */
	GPU_MODEL(t600, 0x600,
		GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
	GPU_MODEL(t620, 0x620,
		GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
	GPU_MODEL(t720, 0x720),
	GPU_MODEL(t760, 0x750,
		GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
		GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
		GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
	GPU_MODEL(t820, 0x820),
	GPU_MODEL(t830, 0x830),
	GPU_MODEL(t860, 0x860),
	GPU_MODEL(t880, 0x880),

	GPU_MODEL(g71, 0x6000,
		GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
	GPU_MODEL(g72, 0x6001),
	GPU_MODEL(g51, 0x7000),
	GPU_MODEL(g76, 0x7001),
	GPU_MODEL(g52, 0x7002),
	GPU_MODEL(g31, 0x7003,
		GPU_REV(g31, 1, 0)),

	GPU_MODEL(g57, 0x9001,
		GPU_REV(g57, 0, 0)),

	/* MediaTek MT8192 has a Mali-G57 with a different GPU ID from the
	 * standard. Arm's driver does not appear to handle this model.
	 * ChromeOS has a hack downstream for it. Treat it as equivalent to
	 * standard Mali-G57 for now.
	 */
	GPU_MODEL(g57, 0x9003,
		GPU_REV(g57, 0, 0)),
};

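/*
 * Read out the raw feature/present registers, decode GPU_ID into a model
 * and revision, and match them against gpu_models to build the
 * hw_features and hw_issues bitmaps used by the rest of the driver.
 */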
static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
	u32 gpu_id, num_js, major, minor, status, rev;
	const char *name = "unknown";
	u64 hw_feat = 0;
	u64 hw_issues = hw_issues_all;
	const struct panfrost_model *model;
	int i;

	pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
	pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
	pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
	pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
	pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
	pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
	pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
	pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
	pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
	pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
	pfdev->features.afbc_features = gpu_read(pfdev, GPU_AFBC_FEATURES);
	for (i = 0; i < 4; i++)
		pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));

	pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);

	pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
	num_js = hweight32(pfdev->features.js_present);
	for (i = 0; i < num_js; i++)
		pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));

	pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
	pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;

	pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
	pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;

	pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
	pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
	pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);

	pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
	pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;

	pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);

	gpu_id = gpu_read(pfdev, GPU_ID);
	pfdev->features.revision = gpu_id & 0xffff;
	pfdev->features.id = gpu_id >> 16;

	/* The T60x has an oddball ID value. Fix it up to the standard Midgard
	 * format so we (and userspace) don't have to special case it.
	 */
	if (pfdev->features.id == 0x6956)
		pfdev->features.id = 0x0600;

	major = (pfdev->features.revision >> 12) & 0xf;
	minor = (pfdev->features.revision >> 4) & 0xff;
	status = pfdev->features.revision & 0xf;
	rev = pfdev->features.revision;

	gpu_id = pfdev->features.id;

	for (model = gpu_models; model->name; model++) {
		int best = -1;

		if (!panfrost_model_eq(pfdev, model->id))
			continue;

		name = model->name;
		hw_feat = model->features;
		hw_issues |= model->issues;
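		/*
		 * Prefer an exact revision match; otherwise fall back to a
		 * match that ignores the status nibble in the low 4 bits.
		 */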
		for (i = 0; i < MAX_HW_REVS; i++) {
			if (model->revs[i].revision == rev) {
				best = i;
				break;
			} else if (model->revs[i].revision == (rev & ~0xf))
				best = i;
		}

		if (best >= 0)
			hw_issues |= model->revs[best].issues;

		break;
	}

	bitmap_from_u64(pfdev->features.hw_features, hw_feat);
	bitmap_from_u64(pfdev->features.hw_issues, hw_issues);

	dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
		 name, gpu_id, major, minor, status);
	dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
		 pfdev->features.hw_features,
		 pfdev->features.hw_issues);

	dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
		 pfdev->features.l2_features,
		 pfdev->features.core_features,
		 pfdev->features.tiler_features,
		 pfdev->features.mem_features,
		 pfdev->features.mmu_features,
		 pfdev->features.as_present,
		 pfdev->features.js_present);

	dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
		 pfdev->features.shader_present, pfdev->features.l2_present);
}

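/*
 * Take a reference on the GPU cycle counter. The atomic_inc_not_zero()
 * fast path avoids the lock while the counter is already running; the
 * first user starts it under cycle_counter.lock.
 */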
void panfrost_cycle_counter_get(struct panfrost_device *pfdev)
{
	if (atomic_inc_not_zero(&pfdev->cycle_counter.use_count))
		return;

	spin_lock(&pfdev->cycle_counter.lock);
	if (atomic_inc_return(&pfdev->cycle_counter.use_count) == 1)
		gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_START);
	spin_unlock(&pfdev->cycle_counter.lock);
}

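/*
 * Drop a cycle counter reference; the last user stops the counter.
 * atomic_add_unless() keeps the fast path lockless while more than one
 * reference remains.
 */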
void panfrost_cycle_counter_put(struct panfrost_device *pfdev)
{
	if (atomic_add_unless(&pfdev->cycle_counter.use_count, -1, 1))
		return;

	spin_lock(&pfdev->cycle_counter.lock);
	if (atomic_dec_return(&pfdev->cycle_counter.use_count) == 0)
		gpu_write(pfdev, GPU_CMD, GPU_CMD_CYCLE_COUNT_STOP);
	spin_unlock(&pfdev->cycle_counter.lock);
}

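/*
 * Read the 64-bit cycle counter from its two 32-bit halves, re-reading
 * the high word until it is stable so a carry between the two reads
 * cannot produce a torn value.
 */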
unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev)
{
	u32 hi, lo;

	do {
		hi = gpu_read(pfdev, GPU_CYCLE_COUNT_HI);
		lo = gpu_read(pfdev, GPU_CYCLE_COUNT_LO);
	} while (hi != gpu_read(pfdev, GPU_CYCLE_COUNT_HI));

	return ((u64)hi << 32) | lo;
}

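/*
 * Apply the init-time quirks and power up the L2, shader and tiler
 * blocks, polling the *_READY registers until each transition completes.
 * On GPUs with more than one core group, only the first group is used.
 */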
void panfrost_gpu_power_on(struct panfrost_device *pfdev)
{
	int ret;
	u32 val;
	u64 core_mask = U64_MAX;

	panfrost_gpu_init_quirks(pfdev);

	if (pfdev->features.l2_present != 1) {
		/*
		 * Only support one core group now.
		 * ~(l2_present - 1) unsets all bits in l2_present except
		 * the bottom bit. (l2_present - 2) has all the bits in
		 * the first core group set. AND them together to generate
		 * a mask of cores in the first core group.
		 */
		core_mask = ~(pfdev->features.l2_present - 1) &
			     (pfdev->features.l2_present - 2);
		dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
			      hweight64(core_mask),
			      hweight64(pfdev->features.shader_present));
	}
	gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
		val, val == (pfdev->features.l2_present & core_mask),
		100, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu L2");

	gpu_write(pfdev, SHADER_PWRON_LO,
		  pfdev->features.shader_present & core_mask);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
		val, val == (pfdev->features.shader_present & core_mask),
		100, 20000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu shader");

	gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
	ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
		val, val == pfdev->features.tiler_present, 100, 1000);
	if (ret)
		dev_err(pfdev->dev, "error powering up gpu tiler");
}

void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
	gpu_write(pfdev, TILER_PWROFF_LO, 0);
	gpu_write(pfdev, SHADER_PWROFF_LO, 0);
	gpu_write(pfdev, L2_PWROFF_LO, 0);
}

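/*
 * One-time GPU bring-up: soft-reset, read the feature registers, set the
 * DMA mask from the address width reported in GPU_MMU_FEATURES, hook up
 * the "gpu" interrupt and power the cores on.
 */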
int panfrost_gpu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	err = panfrost_gpu_soft_reset(pfdev);
	if (err)
		return err;

	panfrost_gpu_init_features(pfdev);

	err = dma_set_mask_and_coherent(pfdev->dev,
		DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
	if (err)
		return err;

	dma_set_max_seg_size(pfdev->dev, UINT_MAX);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
	if (irq < 0)
		return irq;

	err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
	if (err) {
		dev_err(pfdev->dev, "failed to request gpu irq");
		return err;
	}

	panfrost_gpu_power_on(pfdev);

	return 0;
}

void panfrost_gpu_fini(struct panfrost_device *pfdev)
{
	panfrost_gpu_power_off(pfdev);
}

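/*
 * Return the current flush ID for flush reduction, or 0 when the feature
 * is absent or the GPU is not currently powered.
 */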
u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
{
	u32 flush_id;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
		/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
		if (pm_runtime_get_if_in_use(pfdev->dev)) {
			flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
			pm_runtime_put(pfdev->dev);
			return flush_id;
		}
	}

	return 0;
}
469 | |