1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */ |
3 | /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ |
4 | /* Copyright 2019 Collabora ltd. */ |
5 | |
6 | #include <linux/module.h> |
7 | #include <linux/of.h> |
8 | #include <linux/pagemap.h> |
9 | #include <linux/platform_device.h> |
10 | #include <linux/pm_runtime.h> |
11 | #include <drm/panfrost_drm.h> |
12 | #include <drm/drm_drv.h> |
13 | #include <drm/drm_ioctl.h> |
14 | #include <drm/drm_syncobj.h> |
15 | #include <drm/drm_utils.h> |
16 | |
17 | #include "panfrost_device.h" |
18 | #include "panfrost_gem.h" |
19 | #include "panfrost_mmu.h" |
20 | #include "panfrost_job.h" |
21 | #include "panfrost_gpu.h" |
22 | #include "panfrost_perfcnt.h" |
23 | #include "panfrost_debugfs.h" |
24 | |
/*
 * Gate for ioctls whose UAPI is not yet considered stable (used via
 * panfrost_unstable_ioctl_check()). Writable at runtime through
 * /sys/module/panfrost/parameters; declared with module_param_unsafe()
 * because enabling it taints the kernel.
 */
static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
27 | |
/*
 * panfrost_ioctl_get_param() - DRM_IOCTL_PANFROST_GET_PARAM handler.
 *
 * Copies a single GPU feature value (cached in pfdev->features) back to
 * user space. Returns 0 on success, -EINVAL for an unknown parameter id
 * or a non-zero pad field.
 */
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;

	/* pad is reserved and must be zeroed by user space */
	if (param->pad != 0)
		return -EINVAL;

/* Expand to a case mapping one DRM_PANFROST_PARAM_* id onto a feature member. */
#define PANFROST_FEATURE(name, member) \
	case DRM_PANFROST_PARAM_ ## name: \
		param->value = pfdev->features.member; \
		break
/* Same, but covering the indexed range name##0 .. name##max of an array member. */
#define PANFROST_FEATURE_ARRAY(name, member, max) \
	case DRM_PANFROST_PARAM_ ## name ## 0 ... \
		DRM_PANFROST_PARAM_ ## name ## max: \
		param->value = pfdev->features.member[param->param - \
			DRM_PANFROST_PARAM_ ## name ## 0]; \
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE(AFBC_FEATURES, afbc_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
	default:
		return -EINVAL;
	}

	return 0;
}
79 | |
80 | static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, |
81 | struct drm_file *file) |
82 | { |
83 | struct panfrost_file_priv *priv = file->driver_priv; |
84 | struct panfrost_gem_object *bo; |
85 | struct drm_panfrost_create_bo *args = data; |
86 | struct panfrost_gem_mapping *mapping; |
87 | int ret; |
88 | |
89 | if (!args->size || args->pad || |
90 | (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) |
91 | return -EINVAL; |
92 | |
93 | /* Heaps should never be executable */ |
94 | if ((args->flags & PANFROST_BO_HEAP) && |
95 | !(args->flags & PANFROST_BO_NOEXEC)) |
96 | return -EINVAL; |
97 | |
98 | bo = panfrost_gem_create(dev, size: args->size, flags: args->flags); |
99 | if (IS_ERR(ptr: bo)) |
100 | return PTR_ERR(ptr: bo); |
101 | |
102 | ret = drm_gem_handle_create(file_priv: file, obj: &bo->base.base, handlep: &args->handle); |
103 | if (ret) |
104 | goto out; |
105 | |
106 | mapping = panfrost_gem_mapping_get(bo, priv); |
107 | if (mapping) { |
108 | args->offset = mapping->mmnode.start << PAGE_SHIFT; |
109 | panfrost_gem_mapping_put(mapping); |
110 | } else { |
111 | /* This can only happen if the handle from |
112 | * drm_gem_handle_create() has already been guessed and freed |
113 | * by user space |
114 | */ |
115 | ret = -EINVAL; |
116 | } |
117 | |
118 | out: |
119 | drm_gem_object_put(obj: &bo->base.base); |
120 | return ret; |
121 | } |
122 | |
123 | /** |
124 | * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects |
125 | * referenced by the job. |
126 | * @dev: DRM device |
127 | * @file_priv: DRM file for this fd |
128 | * @args: IOCTL args |
129 | * @job: job being set up |
130 | * |
131 | * Resolve handles from userspace to BOs and attach them to job. |
132 | * |
133 | * Note that this function doesn't need to unreference the BOs on |
134 | * failure, because that will happen at panfrost_job_cleanup() time. |
135 | */ |
136 | static int |
137 | panfrost_lookup_bos(struct drm_device *dev, |
138 | struct drm_file *file_priv, |
139 | struct drm_panfrost_submit *args, |
140 | struct panfrost_job *job) |
141 | { |
142 | struct panfrost_file_priv *priv = file_priv->driver_priv; |
143 | struct panfrost_gem_object *bo; |
144 | unsigned int i; |
145 | int ret; |
146 | |
147 | job->bo_count = args->bo_handle_count; |
148 | |
149 | if (!job->bo_count) |
150 | return 0; |
151 | |
152 | ret = drm_gem_objects_lookup(filp: file_priv, |
153 | bo_handles: (void __user *)(uintptr_t)args->bo_handles, |
154 | count: job->bo_count, objs_out: &job->bos); |
155 | if (ret) |
156 | return ret; |
157 | |
158 | job->mappings = kvmalloc_array(n: job->bo_count, |
159 | size: sizeof(struct panfrost_gem_mapping *), |
160 | GFP_KERNEL | __GFP_ZERO); |
161 | if (!job->mappings) |
162 | return -ENOMEM; |
163 | |
164 | for (i = 0; i < job->bo_count; i++) { |
165 | struct panfrost_gem_mapping *mapping; |
166 | |
167 | bo = to_panfrost_bo(obj: job->bos[i]); |
168 | mapping = panfrost_gem_mapping_get(bo, priv); |
169 | if (!mapping) { |
170 | ret = -EINVAL; |
171 | break; |
172 | } |
173 | |
174 | atomic_inc(v: &bo->gpu_usecount); |
175 | job->mappings[i] = mapping; |
176 | } |
177 | |
178 | return ret; |
179 | } |
180 | |
181 | /** |
182 | * panfrost_copy_in_sync() - Sets up job->deps with the sync objects |
183 | * referenced by the job. |
184 | * @dev: DRM device |
185 | * @file_priv: DRM file for this fd |
186 | * @args: IOCTL args |
187 | * @job: job being set up |
188 | * |
189 | * Resolve syncobjs from userspace to fences and attach them to job. |
190 | * |
191 | * Note that this function doesn't need to unreference the fences on |
192 | * failure, because that will happen at panfrost_job_cleanup() time. |
193 | */ |
194 | static int |
195 | panfrost_copy_in_sync(struct drm_device *dev, |
196 | struct drm_file *file_priv, |
197 | struct drm_panfrost_submit *args, |
198 | struct panfrost_job *job) |
199 | { |
200 | u32 *handles; |
201 | int ret = 0; |
202 | int i, in_fence_count; |
203 | |
204 | in_fence_count = args->in_sync_count; |
205 | |
206 | if (!in_fence_count) |
207 | return 0; |
208 | |
209 | handles = kvmalloc_array(n: in_fence_count, size: sizeof(u32), GFP_KERNEL); |
210 | if (!handles) { |
211 | ret = -ENOMEM; |
212 | DRM_DEBUG("Failed to allocate incoming syncobj handles\n" ); |
213 | goto fail; |
214 | } |
215 | |
216 | if (copy_from_user(to: handles, |
217 | from: (void __user *)(uintptr_t)args->in_syncs, |
218 | n: in_fence_count * sizeof(u32))) { |
219 | ret = -EFAULT; |
220 | DRM_DEBUG("Failed to copy in syncobj handles\n" ); |
221 | goto fail; |
222 | } |
223 | |
224 | for (i = 0; i < in_fence_count; i++) { |
225 | ret = drm_sched_job_add_syncobj_dependency(job: &job->base, file: file_priv, |
226 | handle: handles[i], point: 0); |
227 | if (ret) |
228 | goto fail; |
229 | } |
230 | |
231 | fail: |
232 | kvfree(addr: handles); |
233 | return ret; |
234 | } |
235 | |
/*
 * panfrost_ioctl_submit() - DRM_IOCTL_PANFROST_SUBMIT handler.
 *
 * Builds a panfrost_job from the ioctl arguments, wires up its syncobj
 * dependencies and BO list, and pushes it to the DRM scheduler. If
 * args->out_sync names a sync object, its fence is replaced with the
 * job's render-done fence so user space can wait for completion.
 */
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *file_priv = file->driver_priv;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_job *job;
	int ret = 0, slot;

	/* A job chain start address is mandatory */
	if (!args->jc)
		return -EINVAL;

	/* PANFROST_JD_REQ_FS is the only requirement bit understood */
	if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file_private: file, handle: args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	job = kzalloc(size: sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto out_put_syncout;
	}

	kref_init(kref: &job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->mmu = file_priv->mmu;
	job->engine_usage = &file_priv->engine_usage;

	slot = panfrost_job_get_slot(job);

	ret = drm_sched_job_init(job: &job->base,
				 entity: &file_priv->sched_entity[slot],
				 NULL);
	if (ret)
		goto out_put_job;

	ret = panfrost_copy_in_sync(dev, file_priv: file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_lookup_bos(dev, file_priv: file, args, job);
	if (ret)
		goto out_cleanup_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto out_cleanup_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(syncobj: sync_out, fence: job->render_done_fence);

out_cleanup_job:
	/* Only tear down the scheduler job if it was never pushed. */
	if (ret)
		drm_sched_job_cleanup(job: &job->base);
out_put_job:
	/* Drop the submission reference; the scheduler holds its own. */
	panfrost_job_put(job);
out_put_syncout:
	if (sync_out)
		drm_syncobj_put(obj: sync_out);

	return ret;
}
308 | |
309 | static int |
310 | panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, |
311 | struct drm_file *file_priv) |
312 | { |
313 | long ret; |
314 | struct drm_panfrost_wait_bo *args = data; |
315 | struct drm_gem_object *gem_obj; |
316 | unsigned long timeout = drm_timeout_abs_to_jiffies(timeout_nsec: args->timeout_ns); |
317 | |
318 | if (args->pad) |
319 | return -EINVAL; |
320 | |
321 | gem_obj = drm_gem_object_lookup(filp: file_priv, handle: args->handle); |
322 | if (!gem_obj) |
323 | return -ENOENT; |
324 | |
325 | ret = dma_resv_wait_timeout(obj: gem_obj->resv, usage: DMA_RESV_USAGE_READ, |
326 | intr: true, timeout); |
327 | if (!ret) |
328 | ret = timeout ? -ETIMEDOUT : -EBUSY; |
329 | |
330 | drm_gem_object_put(obj: gem_obj); |
331 | |
332 | return ret; |
333 | } |
334 | |
335 | static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data, |
336 | struct drm_file *file_priv) |
337 | { |
338 | struct drm_panfrost_mmap_bo *args = data; |
339 | struct drm_gem_object *gem_obj; |
340 | int ret; |
341 | |
342 | if (args->flags != 0) { |
343 | DRM_INFO("unknown mmap_bo flags: %d\n" , args->flags); |
344 | return -EINVAL; |
345 | } |
346 | |
347 | gem_obj = drm_gem_object_lookup(filp: file_priv, handle: args->handle); |
348 | if (!gem_obj) { |
349 | DRM_DEBUG("Failed to look up GEM BO %d\n" , args->handle); |
350 | return -ENOENT; |
351 | } |
352 | |
353 | /* Don't allow mmapping of heap objects as pages are not pinned. */ |
354 | if (to_panfrost_bo(obj: gem_obj)->is_heap) { |
355 | ret = -EINVAL; |
356 | goto out; |
357 | } |
358 | |
359 | ret = drm_gem_create_mmap_offset(obj: gem_obj); |
360 | if (ret == 0) |
361 | args->offset = drm_vma_node_offset_addr(node: &gem_obj->vma_node); |
362 | |
363 | out: |
364 | drm_gem_object_put(obj: gem_obj); |
365 | return ret; |
366 | } |
367 | |
368 | static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, |
369 | struct drm_file *file_priv) |
370 | { |
371 | struct panfrost_file_priv *priv = file_priv->driver_priv; |
372 | struct drm_panfrost_get_bo_offset *args = data; |
373 | struct panfrost_gem_mapping *mapping; |
374 | struct drm_gem_object *gem_obj; |
375 | struct panfrost_gem_object *bo; |
376 | |
377 | gem_obj = drm_gem_object_lookup(filp: file_priv, handle: args->handle); |
378 | if (!gem_obj) { |
379 | DRM_DEBUG("Failed to look up GEM BO %d\n" , args->handle); |
380 | return -ENOENT; |
381 | } |
382 | bo = to_panfrost_bo(obj: gem_obj); |
383 | |
384 | mapping = panfrost_gem_mapping_get(bo, priv); |
385 | drm_gem_object_put(obj: gem_obj); |
386 | |
387 | if (!mapping) |
388 | return -EINVAL; |
389 | |
390 | args->offset = mapping->mmnode.start << PAGE_SHIFT; |
391 | panfrost_gem_mapping_put(mapping); |
392 | return 0; |
393 | } |
394 | |
/*
 * panfrost_ioctl_madvise() - DRM_IOCTL_PANFROST_MADVISE handler.
 *
 * Marks a BO as needed (WILLNEED) or purgeable (DONTNEED) and moves it
 * onto/off the device shrinker list accordingly. args->retained reports
 * whether the backing pages are still present.
 *
 * Lock order: BO resv, then pfdev->shrinker_lock, then bo->mappings.lock.
 */
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(filp: file_priv, handle: args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n" , args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(obj: gem_obj);

	ret = dma_resv_lock_interruptible(obj: bo->base.base.resv, NULL);
	if (ret)
		goto out_put_object;

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(head: &bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	/* retained == 0 means the pages were already purged by the shrinker. */
	args->retained = drm_gem_shmem_madvise(shmem: &bo->base, madv: args->madv);

	if (args->retained) {
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_move_tail(list: &bo->base.madv_list,
				       head: &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(entry: &bo->base.madv_list);
	}

out_unlock_mappings:
	/* Unlock in reverse acquisition order. */
	mutex_unlock(lock: &bo->mappings.lock);
	mutex_unlock(lock: &pfdev->shrinker_lock);
	dma_resv_unlock(obj: bo->base.base.resv);
out_put_object:
	drm_gem_object_put(obj: gem_obj);
	return ret;
}
459 | |
460 | int panfrost_unstable_ioctl_check(void) |
461 | { |
462 | if (!unstable_ioctls) |
463 | return -ENOSYS; |
464 | |
465 | return 0; |
466 | } |
467 | |
468 | static int |
469 | panfrost_open(struct drm_device *dev, struct drm_file *file) |
470 | { |
471 | int ret; |
472 | struct panfrost_device *pfdev = dev->dev_private; |
473 | struct panfrost_file_priv *panfrost_priv; |
474 | |
475 | panfrost_priv = kzalloc(size: sizeof(*panfrost_priv), GFP_KERNEL); |
476 | if (!panfrost_priv) |
477 | return -ENOMEM; |
478 | |
479 | panfrost_priv->pfdev = pfdev; |
480 | file->driver_priv = panfrost_priv; |
481 | |
482 | panfrost_priv->mmu = panfrost_mmu_ctx_create(pfdev); |
483 | if (IS_ERR(ptr: panfrost_priv->mmu)) { |
484 | ret = PTR_ERR(ptr: panfrost_priv->mmu); |
485 | goto err_free; |
486 | } |
487 | |
488 | ret = panfrost_job_open(panfrost_priv); |
489 | if (ret) |
490 | goto err_job; |
491 | |
492 | return 0; |
493 | |
494 | err_job: |
495 | panfrost_mmu_ctx_put(mmu: panfrost_priv->mmu); |
496 | err_free: |
497 | kfree(objp: panfrost_priv); |
498 | return ret; |
499 | } |
500 | |
501 | static void |
502 | panfrost_postclose(struct drm_device *dev, struct drm_file *file) |
503 | { |
504 | struct panfrost_file_priv *panfrost_priv = file->driver_priv; |
505 | |
506 | panfrost_perfcnt_close(file_priv: file); |
507 | panfrost_job_close(panfrost_priv); |
508 | |
509 | panfrost_mmu_ctx_put(mmu: panfrost_priv->mmu); |
510 | kfree(objp: panfrost_priv); |
511 | } |
512 | |
/*
 * Ioctl dispatch table. Every entry is flagged DRM_RENDER_ALLOW so all
 * ioctls are usable on render nodes as well as the primary node.
 */
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
};
527 | |
528 | static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev, |
529 | struct panfrost_file_priv *panfrost_priv, |
530 | struct drm_printer *p) |
531 | { |
532 | int i; |
533 | |
534 | /* |
535 | * IMPORTANT NOTE: drm-cycles and drm-engine measurements are not |
536 | * accurate, as they only provide a rough estimation of the number of |
537 | * GPU cycles and CPU time spent in a given context. This is due to two |
538 | * different factors: |
539 | * - Firstly, we must consider the time the CPU and then the kernel |
540 | * takes to process the GPU interrupt, which means additional time and |
541 | * GPU cycles will be added in excess to the real figure. |
542 | * - Secondly, the pipelining done by the Job Manager (2 job slots per |
543 | * engine) implies there is no way to know exactly how much time each |
544 | * job spent on the GPU. |
545 | */ |
546 | |
547 | static const char * const engine_names[] = { |
548 | "fragment" , "vertex-tiler" , "compute-only" |
549 | }; |
550 | |
551 | BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS); |
552 | |
553 | for (i = 0; i < NUM_JOB_SLOTS - 1; i++) { |
554 | drm_printf(p, f: "drm-engine-%s:\t%llu ns\n" , |
555 | engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]); |
556 | drm_printf(p, f: "drm-cycles-%s:\t%llu\n" , |
557 | engine_names[i], panfrost_priv->engine_usage.cycles[i]); |
558 | drm_printf(p, f: "drm-maxfreq-%s:\t%lu Hz\n" , |
559 | engine_names[i], pfdev->pfdevfreq.fast_rate); |
560 | drm_printf(p, f: "drm-curfreq-%s:\t%lu Hz\n" , |
561 | engine_names[i], pfdev->pfdevfreq.current_frequency); |
562 | } |
563 | } |
564 | |
565 | static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file) |
566 | { |
567 | struct drm_device *dev = file->minor->dev; |
568 | struct panfrost_device *pfdev = dev->dev_private; |
569 | |
570 | panfrost_gpu_show_fdinfo(pfdev, panfrost_priv: file->driver_priv, p); |
571 | |
572 | drm_show_memory_stats(p, file); |
573 | } |
574 | |
/* Standard DRM GEM file operations, plus fdinfo for usage reporting. */
static const struct file_operations panfrost_drm_driver_fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};
580 | |
581 | /* |
582 | * Panfrost driver version: |
583 | * - 1.0 - initial interface |
584 | * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO |
585 | * - 1.2 - adds AFBC_FEATURES query |
586 | */ |
587 | static const struct drm_driver panfrost_drm_driver = { |
588 | .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, |
589 | .open = panfrost_open, |
590 | .postclose = panfrost_postclose, |
591 | .show_fdinfo = panfrost_show_fdinfo, |
592 | .ioctls = panfrost_drm_driver_ioctls, |
593 | .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls), |
594 | .fops = &panfrost_drm_driver_fops, |
595 | .name = "panfrost" , |
596 | .desc = "panfrost DRM" , |
597 | .date = "20180908" , |
598 | .major = 1, |
599 | .minor = 2, |
600 | |
601 | .gem_create_object = panfrost_gem_create_object, |
602 | .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, |
603 | |
604 | #ifdef CONFIG_DEBUG_FS |
605 | .debugfs_init = panfrost_debugfs_init, |
606 | #endif |
607 | }; |
608 | |
/*
 * panfrost_probe() - Platform driver probe.
 *
 * Allocates the panfrost device, brings up the GPU, enables runtime PM
 * with a 50 ms autosuspend delay, registers the DRM device and the GEM
 * shrinker. Failures unwind in reverse order through the err_out labels.
 */
static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(dev: &pdev->dev, size: sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->pdev = pdev;
	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, data: pfdev);

	/* Per-SoC data (supplies, PM domains, quirks) from the OF match. */
	pfdev->comp = of_device_get_match_data(dev: &pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(dev: &pdev->dev) == DEV_DMA_COHERENT;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(driver: &panfrost_drm_driver, parent: &pdev->dev);
	if (IS_ERR(ptr: ddev))
		return PTR_ERR(ptr: ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(list: &pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		/* -EPROBE_DEFER is expected and retried; don't log it. */
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n" );
		goto err_out0;
	}

	pm_runtime_set_active(dev: pfdev->dev);
	pm_runtime_mark_last_busy(dev: pfdev->dev);
	pm_runtime_enable(dev: pfdev->dev);
	pm_runtime_set_autosuspend_delay(dev: pfdev->dev, delay: 50); /* ~3 frames */
	pm_runtime_use_autosuspend(dev: pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs
	 */
	err = drm_dev_register(dev: ddev, flags: 0);
	if (err < 0)
		goto err_out1;

	err = panfrost_gem_shrinker_init(dev: ddev);
	if (err)
		goto err_out2;

	return 0;

err_out2:
	drm_dev_unregister(dev: ddev);
err_out1:
	pm_runtime_disable(dev: pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(dev: pfdev->dev);
err_out0:
	drm_dev_put(dev: ddev);
	return err;
}
678 | |
679 | static void panfrost_remove(struct platform_device *pdev) |
680 | { |
681 | struct panfrost_device *pfdev = platform_get_drvdata(pdev); |
682 | struct drm_device *ddev = pfdev->ddev; |
683 | |
684 | drm_dev_unregister(dev: ddev); |
685 | panfrost_gem_shrinker_cleanup(dev: ddev); |
686 | |
687 | pm_runtime_get_sync(dev: pfdev->dev); |
688 | pm_runtime_disable(dev: pfdev->dev); |
689 | panfrost_device_fini(pfdev); |
690 | pm_runtime_set_suspended(dev: pfdev->dev); |
691 | |
692 | drm_dev_put(dev: ddev); |
693 | } |
694 | |
695 | /* |
696 | * The OPP core wants the supply names to be NULL terminated, but we need the |
697 | * correct num_supplies value for regulator core. Hence, we NULL terminate here |
698 | * and then initialize num_supplies with ARRAY_SIZE - 1. |
699 | */ |
700 | static const char * const default_supplies[] = { "mali" , NULL }; |
701 | static const struct panfrost_compatible default_data = { |
702 | .num_supplies = ARRAY_SIZE(default_supplies) - 1, |
703 | .supply_names = default_supplies, |
704 | .num_pm_domains = 1, /* optional */ |
705 | .pm_domain_names = NULL, |
706 | }; |
707 | |
708 | static const struct panfrost_compatible amlogic_data = { |
709 | .num_supplies = ARRAY_SIZE(default_supplies) - 1, |
710 | .supply_names = default_supplies, |
711 | .vendor_quirk = panfrost_gpu_amlogic_quirk, |
712 | }; |
713 | |
714 | /* |
715 | * The old data with two power supplies for MT8183 is here only to |
716 | * keep retro-compatibility with older devicetrees, as DVFS will |
717 | * not work with this one. |
718 | * |
719 | * On new devicetrees please use the _b variant with a single and |
720 | * coupled regulators instead. |
721 | */ |
722 | static const char * const mediatek_mt8183_supplies[] = { "mali" , "sram" , NULL }; |
723 | static const char * const mediatek_mt8183_pm_domains[] = { "core0" , "core1" , "core2" }; |
724 | static const struct panfrost_compatible mediatek_mt8183_data = { |
725 | .num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies) - 1, |
726 | .supply_names = mediatek_mt8183_supplies, |
727 | .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), |
728 | .pm_domain_names = mediatek_mt8183_pm_domains, |
729 | }; |
730 | |
731 | static const char * const mediatek_mt8183_b_supplies[] = { "mali" , NULL }; |
732 | static const struct panfrost_compatible mediatek_mt8183_b_data = { |
733 | .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1, |
734 | .supply_names = mediatek_mt8183_b_supplies, |
735 | .num_pm_domains = ARRAY_SIZE(mediatek_mt8183_pm_domains), |
736 | .pm_domain_names = mediatek_mt8183_pm_domains, |
737 | }; |
738 | |
739 | static const char * const mediatek_mt8186_pm_domains[] = { "core0" , "core1" }; |
740 | static const struct panfrost_compatible mediatek_mt8186_data = { |
741 | .num_supplies = ARRAY_SIZE(mediatek_mt8183_b_supplies) - 1, |
742 | .supply_names = mediatek_mt8183_b_supplies, |
743 | .num_pm_domains = ARRAY_SIZE(mediatek_mt8186_pm_domains), |
744 | .pm_domain_names = mediatek_mt8186_pm_domains, |
745 | }; |
746 | |
747 | static const char * const mediatek_mt8192_supplies[] = { "mali" , NULL }; |
748 | static const char * const mediatek_mt8192_pm_domains[] = { "core0" , "core1" , "core2" , |
749 | "core3" , "core4" }; |
750 | static const struct panfrost_compatible mediatek_mt8192_data = { |
751 | .num_supplies = ARRAY_SIZE(mediatek_mt8192_supplies) - 1, |
752 | .supply_names = mediatek_mt8192_supplies, |
753 | .num_pm_domains = ARRAY_SIZE(mediatek_mt8192_pm_domains), |
754 | .pm_domain_names = mediatek_mt8192_pm_domains, |
755 | }; |
756 | |
/* OF match table; the per-entry data selects supplies, PM domains and quirks. */
static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali" ,
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali" ,
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604" , .data = &default_data, },
	{ .compatible = "arm,mali-t624" , .data = &default_data, },
	{ .compatible = "arm,mali-t628" , .data = &default_data, },
	{ .compatible = "arm,mali-t720" , .data = &default_data, },
	{ .compatible = "arm,mali-t760" , .data = &default_data, },
	{ .compatible = "arm,mali-t820" , .data = &default_data, },
	{ .compatible = "arm,mali-t830" , .data = &default_data, },
	{ .compatible = "arm,mali-t860" , .data = &default_data, },
	{ .compatible = "arm,mali-t880" , .data = &default_data, },
	{ .compatible = "arm,mali-bifrost" , .data = &default_data, },
	{ .compatible = "arm,mali-valhall-jm" , .data = &default_data, },
	{ .compatible = "mediatek,mt8183-mali" , .data = &mediatek_mt8183_data },
	{ .compatible = "mediatek,mt8183b-mali" , .data = &mediatek_mt8183_b_data },
	{ .compatible = "mediatek,mt8186-mali" , .data = &mediatek_mt8186_data },
	{ .compatible = "mediatek,mt8192-mali" , .data = &mediatek_mt8192_data },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
781 | |
/* Platform driver glue: probe/remove plus PM ops and the OF match table. */
static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove_new	= panfrost_remove,
	.driver		= {
		.name	= "panfrost" ,
		.pm	= pm_ptr(&panfrost_pm_ops),
		.of_match_table = dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers" );
MODULE_DESCRIPTION("Panfrost DRM Driver" );
MODULE_LICENSE("GPL v2" );
796 | |