// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by the DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

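	/*
	 * Heap BOs are backed by one sg_table per 2MB chunk. Only chunks
	 * that were actually faulted in have a populated sgl, so only those
	 * need to be unmapped and freed.
	 */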
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

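/*
 * Look up the mapping of @bo in the MMU context of @priv and take a
 * reference on it. The caller must release it with
 * panfrost_gem_mapping_put().
 */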
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

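/*
 * Unmap the BO from the GPU address space (if it is currently mapped)
 * and give its VA range back to the drm_mm allocator.
 */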
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

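/*
 * kref release callback: tear down the mapping, then drop the references
 * it held on the BO and on the MMU context.
 */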
static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

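/* Must be called with bo->mappings.lock held. */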
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

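/*
 * Called by the DRM core each time a handle to the BO is created in a
 * DRM file. Allocates a GPU VA range in the file's MMU context and, for
 * non-heap BOs, maps the buffer there right away.
 */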
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
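	/*
	 * Non-executable BOs of 2MB or more are aligned to 2MB, which
	 * allows the MMU to use 2MB block mappings where the backing
	 * pages line up.
	 */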
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

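	/*
	 * Heap BOs are not mapped here: their pages are allocated and
	 * mapped on demand by the GPU page fault handler.
	 */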
	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

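/*
 * Called by the DRM core when a handle to the BO is dropped. Removes the
 * mapping for this file's MMU context from the BO and releases our
 * reference; the actual GPU unmap happens once the last reference to the
 * mapping (e.g. from an in-flight job) is gone.
 */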
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

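/*
 * Heap BOs cannot be pinned because their backing pages are faulted in
 * on demand rather than allocated up front.
 */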
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(&bo->base);
}

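/*
 * Report residency and purgeability so the DRM core can account the BO
 * in fdinfo memory stats.
 */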
static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	enum drm_gem_object_status res = 0;

	if (bo->base.pages)
		res |= DRM_GEM_OBJECT_RESIDENT;

	if (bo->base.madv == PANFROST_MADV_DONTNEED)
		res |= DRM_GEM_OBJECT_PURGEABLE;

	return res;
}

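/*
 * Resident set size: for heap BOs only the part that has actually been
 * faulted in is counted.
 */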
static size_t panfrost_gem_rss(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap) {
		return bo->heap_rss_size;
	} else if (bo->base.pages) {
		WARN_ON(bo->heap_rss_size);
		return bo->base.base.size;
	}

	return 0;
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.status = panfrost_gem_status,
	.rss = panfrost_gem_rss,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;

	return &obj->base.base;
}

struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}

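/* BOs imported through a dma-buf are always mapped non-executable. */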
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}