1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (c) 2015 MediaTek Inc. |
4 | */ |
5 | |
6 | #include <linux/dma-buf.h> |
7 | |
8 | #include <drm/drm.h> |
9 | #include <drm/drm_device.h> |
10 | #include <drm/drm_gem.h> |
11 | #include <drm/drm_gem_dma_helper.h> |
12 | #include <drm/drm_prime.h> |
13 | |
14 | #include "mtk_drm_drv.h" |
15 | #include "mtk_drm_gem.h" |
16 | |
17 | static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
18 | |
/*
 * Generic GEM VMA lifecycle hooks (reference counting on fork/close).
 * The actual mmap work is done by mtk_drm_gem_object_mmap() via the
 * object's .mmap callback below.
 */
static const struct vm_operations_struct vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
23 | |
/* GEM object virtual function table installed on every mtk GEM object. */
static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
	.free = mtk_drm_gem_free_object,
	.get_sg_table = mtk_gem_prime_get_sg_table,
	.vmap = mtk_drm_gem_prime_vmap,
	.vunmap = mtk_drm_gem_prime_vunmap,
	.mmap = mtk_drm_gem_object_mmap,
	.vm_ops = &vm_ops,
};
32 | |
33 | static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev, |
34 | unsigned long size) |
35 | { |
36 | struct mtk_drm_gem_obj *mtk_gem_obj; |
37 | int ret; |
38 | |
39 | size = round_up(size, PAGE_SIZE); |
40 | |
41 | mtk_gem_obj = kzalloc(size: sizeof(*mtk_gem_obj), GFP_KERNEL); |
42 | if (!mtk_gem_obj) |
43 | return ERR_PTR(error: -ENOMEM); |
44 | |
45 | mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs; |
46 | |
47 | ret = drm_gem_object_init(dev, obj: &mtk_gem_obj->base, size); |
48 | if (ret < 0) { |
49 | DRM_ERROR("failed to initialize gem object\n" ); |
50 | kfree(objp: mtk_gem_obj); |
51 | return ERR_PTR(error: ret); |
52 | } |
53 | |
54 | return mtk_gem_obj; |
55 | } |
56 | |
57 | struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev, |
58 | size_t size, bool alloc_kmap) |
59 | { |
60 | struct mtk_drm_private *priv = dev->dev_private; |
61 | struct mtk_drm_gem_obj *mtk_gem; |
62 | struct drm_gem_object *obj; |
63 | int ret; |
64 | |
65 | mtk_gem = mtk_drm_gem_init(dev, size); |
66 | if (IS_ERR(ptr: mtk_gem)) |
67 | return ERR_CAST(ptr: mtk_gem); |
68 | |
69 | obj = &mtk_gem->base; |
70 | |
71 | mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE; |
72 | |
73 | if (!alloc_kmap) |
74 | mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING; |
75 | |
76 | mtk_gem->cookie = dma_alloc_attrs(dev: priv->dma_dev, size: obj->size, |
77 | dma_handle: &mtk_gem->dma_addr, GFP_KERNEL, |
78 | attrs: mtk_gem->dma_attrs); |
79 | if (!mtk_gem->cookie) { |
80 | DRM_ERROR("failed to allocate %zx byte dma buffer" , obj->size); |
81 | ret = -ENOMEM; |
82 | goto err_gem_free; |
83 | } |
84 | |
85 | if (alloc_kmap) |
86 | mtk_gem->kvaddr = mtk_gem->cookie; |
87 | |
88 | DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n" , |
89 | mtk_gem->cookie, &mtk_gem->dma_addr, |
90 | size); |
91 | |
92 | return mtk_gem; |
93 | |
94 | err_gem_free: |
95 | drm_gem_object_release(obj); |
96 | kfree(objp: mtk_gem); |
97 | return ERR_PTR(error: ret); |
98 | } |
99 | |
100 | void mtk_drm_gem_free_object(struct drm_gem_object *obj) |
101 | { |
102 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); |
103 | struct mtk_drm_private *priv = obj->dev->dev_private; |
104 | |
105 | if (mtk_gem->sg) |
106 | drm_prime_gem_destroy(obj, sg: mtk_gem->sg); |
107 | else |
108 | dma_free_attrs(dev: priv->dma_dev, size: obj->size, cpu_addr: mtk_gem->cookie, |
109 | dma_handle: mtk_gem->dma_addr, attrs: mtk_gem->dma_attrs); |
110 | |
111 | /* release file pointer to gem object. */ |
112 | drm_gem_object_release(obj); |
113 | |
114 | kfree(objp: mtk_gem); |
115 | } |
116 | |
117 | int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, |
118 | struct drm_mode_create_dumb *args) |
119 | { |
120 | struct mtk_drm_gem_obj *mtk_gem; |
121 | int ret; |
122 | |
123 | args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); |
124 | |
125 | /* |
126 | * Multiply 2 variables of different types, |
127 | * for example: args->size = args->spacing * args->height; |
128 | * may cause coverity issue with unintentional overflow. |
129 | */ |
130 | args->size = args->pitch; |
131 | args->size *= args->height; |
132 | |
133 | mtk_gem = mtk_drm_gem_create(dev, size: args->size, alloc_kmap: false); |
134 | if (IS_ERR(ptr: mtk_gem)) |
135 | return PTR_ERR(ptr: mtk_gem); |
136 | |
137 | /* |
138 | * allocate a id of idr table where the obj is registered |
139 | * and handle has the id what user can see. |
140 | */ |
141 | ret = drm_gem_handle_create(file_priv, obj: &mtk_gem->base, handlep: &args->handle); |
142 | if (ret) |
143 | goto err_handle_create; |
144 | |
145 | /* drop reference from allocate - handle holds it now. */ |
146 | drm_gem_object_put(obj: &mtk_gem->base); |
147 | |
148 | return 0; |
149 | |
150 | err_handle_create: |
151 | mtk_drm_gem_free_object(obj: &mtk_gem->base); |
152 | return ret; |
153 | } |
154 | |
155 | static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, |
156 | struct vm_area_struct *vma) |
157 | |
158 | { |
159 | int ret; |
160 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); |
161 | struct mtk_drm_private *priv = obj->dev->dev_private; |
162 | |
163 | /* |
164 | * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the |
165 | * whole buffer from the start. |
166 | */ |
167 | vma->vm_pgoff = 0; |
168 | |
169 | /* |
170 | * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear |
171 | * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap(). |
172 | */ |
173 | vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); |
174 | vma->vm_page_prot = pgprot_writecombine(prot: vm_get_page_prot(vm_flags: vma->vm_flags)); |
175 | vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); |
176 | |
177 | ret = dma_mmap_attrs(dev: priv->dma_dev, vma, cpu_addr: mtk_gem->cookie, |
178 | dma_addr: mtk_gem->dma_addr, size: obj->size, attrs: mtk_gem->dma_attrs); |
179 | |
180 | return ret; |
181 | } |
182 | |
183 | /* |
184 | * Allocate a sg_table for this GEM object. |
185 | * Note: Both the table's contents, and the sg_table itself must be freed by |
186 | * the caller. |
187 | * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error. |
188 | */ |
189 | struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj) |
190 | { |
191 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); |
192 | struct mtk_drm_private *priv = obj->dev->dev_private; |
193 | struct sg_table *sgt; |
194 | int ret; |
195 | |
196 | sgt = kzalloc(size: sizeof(*sgt), GFP_KERNEL); |
197 | if (!sgt) |
198 | return ERR_PTR(error: -ENOMEM); |
199 | |
200 | ret = dma_get_sgtable_attrs(dev: priv->dma_dev, sgt, cpu_addr: mtk_gem->cookie, |
201 | dma_addr: mtk_gem->dma_addr, size: obj->size, |
202 | attrs: mtk_gem->dma_attrs); |
203 | if (ret) { |
204 | DRM_ERROR("failed to allocate sgt, %d\n" , ret); |
205 | kfree(objp: sgt); |
206 | return ERR_PTR(error: ret); |
207 | } |
208 | |
209 | return sgt; |
210 | } |
211 | |
212 | struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev, |
213 | struct dma_buf_attachment *attach, struct sg_table *sg) |
214 | { |
215 | struct mtk_drm_gem_obj *mtk_gem; |
216 | |
217 | /* check if the entries in the sg_table are contiguous */ |
218 | if (drm_prime_get_contiguous_size(sgt: sg) < attach->dmabuf->size) { |
219 | DRM_ERROR("sg_table is not contiguous" ); |
220 | return ERR_PTR(error: -EINVAL); |
221 | } |
222 | |
223 | mtk_gem = mtk_drm_gem_init(dev, size: attach->dmabuf->size); |
224 | if (IS_ERR(ptr: mtk_gem)) |
225 | return ERR_CAST(ptr: mtk_gem); |
226 | |
227 | mtk_gem->dma_addr = sg_dma_address(sg->sgl); |
228 | mtk_gem->sg = sg; |
229 | |
230 | return &mtk_gem->base; |
231 | } |
232 | |
233 | int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) |
234 | { |
235 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); |
236 | struct sg_table *sgt = NULL; |
237 | unsigned int npages; |
238 | |
239 | if (mtk_gem->kvaddr) |
240 | goto out; |
241 | |
242 | sgt = mtk_gem_prime_get_sg_table(obj); |
243 | if (IS_ERR(ptr: sgt)) |
244 | return PTR_ERR(ptr: sgt); |
245 | |
246 | npages = obj->size >> PAGE_SHIFT; |
247 | mtk_gem->pages = kcalloc(n: npages, size: sizeof(*mtk_gem->pages), GFP_KERNEL); |
248 | if (!mtk_gem->pages) { |
249 | sg_free_table(sgt); |
250 | kfree(objp: sgt); |
251 | return -ENOMEM; |
252 | } |
253 | |
254 | drm_prime_sg_to_page_array(sgt, pages: mtk_gem->pages, max_pages: npages); |
255 | |
256 | mtk_gem->kvaddr = vmap(pages: mtk_gem->pages, count: npages, VM_MAP, |
257 | pgprot_writecombine(PAGE_KERNEL)); |
258 | if (!mtk_gem->kvaddr) { |
259 | sg_free_table(sgt); |
260 | kfree(objp: sgt); |
261 | kfree(objp: mtk_gem->pages); |
262 | return -ENOMEM; |
263 | } |
264 | sg_free_table(sgt); |
265 | kfree(objp: sgt); |
266 | |
267 | out: |
268 | iosys_map_set_vaddr(map, vaddr: mtk_gem->kvaddr); |
269 | |
270 | return 0; |
271 | } |
272 | |
273 | void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, |
274 | struct iosys_map *map) |
275 | { |
276 | struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj); |
277 | void *vaddr = map->vaddr; |
278 | |
279 | if (!mtk_gem->pages) |
280 | return; |
281 | |
282 | vunmap(addr: vaddr); |
283 | mtk_gem->kvaddr = NULL; |
284 | kfree(objp: mtk_gem->pages); |
285 | } |
286 | |