// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"

MODULE_IMPORT_NS(DMA_BUF);

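/*
 * Count the number of contiguous DMA chunks in a mapped scatterlist. A new
 * chunk is counted whenever an entry's DMA address does not continue where
 * the previous chunk ended; for example, entries covering 0x1000-0x1fff and
 * 0x2000-0x2fff are counted as a single chunk.
 */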
static unsigned int sg_dma_count_chunks(struct scatterlist *sgl, unsigned int nents)
{
	dma_addr_t next = ~(dma_addr_t)0;
	unsigned int count = 0, i;
	struct scatterlist *s;

	for_each_sg(sgl, s, nents, i) {
		/* sg_dma_address(s) is only valid for entries that have sg_dma_len(s) != 0. */
		if (!sg_dma_len(s))
			continue;

		if (sg_dma_address(s) != next) {
			next = sg_dma_address(s) + sg_dma_len(s);
			count++;
		}
	}

	return count;
}

static inline unsigned int sgt_dma_count_chunks(struct sg_table *sgt)
{
	return sg_dma_count_chunks(sgt->sgl, sgt->nents);
}

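/* Drop the GEM object reference backing this host1x_bo. */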
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}

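/*
 * Create a mapping of the buffer object for the given device. Imported
 * DMA-BUFs are attached and mapped through their exporter; natively
 * allocated buffers get an SG table (built from their page array or via the
 * DMA API) that is then DMA-mapped for the device.
 */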
static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
					      enum dma_data_direction direction)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct drm_gem_object *gem = &obj->gem;
	struct host1x_bo_mapping *map;
	int err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return ERR_PTR(-ENOMEM);

	kref_init(&map->ref);
	map->bo = host1x_bo_get(bo);
	map->direction = direction;
	map->dev = dev;

	/*
	 * Imported buffers need special treatment to satisfy the semantics of DMA-BUF.
	 */
	if (gem->import_attach) {
		struct dma_buf *buf = gem->import_attach->dmabuf;

		map->attach = dma_buf_attach(buf, dev);
		if (IS_ERR(map->attach)) {
			err = PTR_ERR(map->attach);
			goto free;
		}

		map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
		if (IS_ERR(map->sgt)) {
			dma_buf_detach(buf, map->attach);
			err = PTR_ERR(map->sgt);
			map->sgt = NULL;
			goto free;
		}

		err = sgt_dma_count_chunks(map->sgt);
		map->size = gem->size;

		goto out;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	map->sgt = kzalloc(sizeof(*map->sgt), GFP_KERNEL);
	if (!map->sgt) {
		err = -ENOMEM;
		goto free;
	}

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(map->sgt, obj->pages, obj->num_pages, 0, gem->size,
						GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, map->sgt, obj->vaddr, obj->iova, gem->size);
		if (err < 0)
			goto free;
	}

	err = dma_map_sgtable(dev, map->sgt, direction, 0);
	if (err)
		goto free_sgt;

out:
	/*
	 * If we've manually mapped the buffer object through the IOMMU, make sure to return the
	 * existing IOVA address of our mapping.
	 */
	if (!obj->mm) {
		map->phys = sg_dma_address(map->sgt->sgl);
		map->chunks = err;
	} else {
		map->phys = obj->iova;
		map->chunks = 1;
	}

	map->size = gem->size;

	return map;

free_sgt:
	sg_free_table(map->sgt);
free:
	kfree(map->sgt);
	kfree(map);
	return ERR_PTR(err);
}

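/* Undo tegra_bo_pin(): release the mapping and its buffer reference. */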
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
	if (map->attach) {
		dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
						  map->direction);
		dma_buf_detach(map->attach->dmabuf, map->attach);
	} else {
		dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
		sg_free_table(map->sgt);
		kfree(map->sgt);
	}

	host1x_bo_put(map->bo);
	kfree(map);
}

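/*
 * Obtain a kernel virtual address for the buffer: reuse the existing vaddr
 * for DMA API allocations, ask the exporter to vmap imported DMA-BUFs, or
 * vmap() the page array with write-combining otherwise.
 */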
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = { 0 };
	void *vaddr;
	int ret;

	if (obj->vaddr)
		return obj->vaddr;

	if (obj->gem.import_attach) {
		ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
		if (ret < 0)
			return ERR_PTR(ret);

		return map.vaddr;
	}

	vaddr = vmap(obj->pages, obj->num_pages, VM_MAP,
		     pgprot_writecombine(PAGE_KERNEL));
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

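/* Undo tegra_bo_mmap(); a DMA API vaddr is persistent and needs no unmap. */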
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;

	if (obj->gem.import_attach)
		return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);

	vunmap(addr);
}

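/* Take a GEM object reference on behalf of this host1x_bo. */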
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}

static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};

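/*
 * Reserve a range of I/O virtual addresses from the drm_mm allocator and map
 * the buffer's SG table into the shared IOMMU domain at that address.
 */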
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sgtable(tegra->domain, bo->iova, bo->sgt, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}

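/* Unmap the buffer from the IOMMU domain and release its IOVA range. */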
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}

static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};

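/*
 * Allocate and initialize the GEM and host1x parts of a buffer object. The
 * size is rounded up to a multiple of the page size and a fake mmap offset
 * is allocated for userspace mappings.
 */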
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}

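/* Release the backing storage, either a page array or a DMA API allocation. */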
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}

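/*
 * Back the buffer object with shmem pages, build an SG table for them and
 * DMA-map it for the DRM device.
 */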
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sgtable(drm->dev, bo->sgt, DMA_FROM_DEVICE, 0);
	if (err)
		goto free_sgt;

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}

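/*
 * Allocate backing storage: individual pages mapped through the IOMMU when a
 * domain is available, or contiguous write-combined memory from the DMA API
 * otherwise.
 */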
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}

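/* Create a buffer object of the given size and apply the creation flags. */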
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

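/*
 * Create a buffer object along with a userspace handle for it. Only the
 * handle's reference remains when this returns, so the returned pointer must
 * not be used to transfer ownership.
 */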
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}

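/*
 * Wrap a foreign DMA-BUF in a buffer object: attach to it, map it for DMA
 * and, if an IOMMU domain is available, map the resulting SG table into that
 * domain as well.
 */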
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}

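/*
 * Final cleanup of a buffer object: drop cached mappings, undo the IOMMU
 * mapping and release either the DMA-BUF import or the native storage.
 */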
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct host1x_bo_mapping *mapping, *tmp;
	struct tegra_bo *bo = to_tegra_bo(gem);

	/* remove all mappings of this buffer object from any caches */
	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
		if (mapping->cache)
			host1x_bo_unpin(mapping);
		else
			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
				dev_name(mapping->dev));
	}

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
						  DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}

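/*
 * Implement DRM_IOCTL_MODE_CREATE_DUMB: compute the pitch from width and
 * bits per pixel, align it to the hardware requirement and allocate a buffer
 * of pitch * height bytes with a userspace handle.
 */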
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

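/*
 * Fault handler for mappings of page-backed buffers. Contiguous DMA API
 * allocations are mapped upfront in __tegra_gem_mmap() and get SIGBUS if
 * they ever fault here.
 */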
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}

const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

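/*
 * Set up a userspace mapping: contiguous DMA API buffers are mapped in one
 * go via dma_mmap_wc(), while page-backed buffers are populated lazily by
 * the fault handler above.
 */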
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vm_flags_clear(vma, VM_PFNMAP);
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}

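/* mmap file operation: resolve the GEM object and defer to __tegra_gem_mmap(). */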
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}

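/*
 * DMA-BUF exporter callbacks: each attachment gets its own SG table, built
 * from the page array or via the DMA API, and mapped for the importing
 * device.
 */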
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sgtable(attach->dev, sgt, dir, 0))
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}

static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);

	sg_free_table(sgt);
	kfree(sgt);
}

static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}

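/*
 * Cache maintenance for CPU access: only page-backed buffers need explicit
 * synchronization here, since contiguous buffers come from dma_alloc_wc()
 * and are mapped write-combined.
 */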
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_cpu(drm->dev, bo->sgt, DMA_FROM_DEVICE);

	return 0;
}

static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sgtable_for_device(drm->dev, bo->sgt, DMA_TO_DEVICE);

	return 0;
}

static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}

static int tegra_gem_prime_vmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	void *vaddr;

	vaddr = tegra_bo_mmap(&bo->base);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct iosys_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	tegra_bo_munmap(&bo->base, map->vaddr);
}

static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};

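/* Export a buffer object as a DMA-BUF using the exporter callbacks above. */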
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}

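/*
 * Import a DMA-BUF: re-importing one of our own exports on the same device
 * simply takes another reference on the underlying GEM object.
 */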
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}

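/* Resolve a userspace handle to the host1x_bo embedded in the GEM object. */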
struct host1x_bo *tegra_gem_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}