// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area. With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data. If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

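/*
 * Undo etnaviv_gem_get_pages(): unmap and free the scatter/gather table
 * and hand the backing pages back to the GEM core, marking them dirty so
 * any GPU writes reach the backing store.
 */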
static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

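/*
 * Return the object's page array, populating it on first use via the
 * per-object get_pages() hook and mapping it into a scatter/gather table
 * for DMA. Must be called with etnaviv_obj->lock held; returns an ERR_PTR
 * on failure.
 */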
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		unsigned int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(dev, etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

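/*
 * Configure a userspace mapping according to the BO's caching flags:
 * ETNA_BO_WC and ETNA_BO_UNCACHED get write-combined/uncached page
 * protections, while cached objects are pointed at the shmem file so they
 * use its address_space (see comment below).
 */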
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

static int etnaviv_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_obj->ops->mmap(etnaviv_obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet. Note that vmf_insert_page() is
	 * specifically coded to take care of this, so we don't have to.
	 */
	err = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (err)
		return VM_FAULT_NOPAGE;
	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		return vmf_error(err);
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     pfn, pfn << PAGE_SHIFT);

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

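/*
 * Allocate (or look up) the fake mmap offset for this object. Userspace
 * passes the returned offset to mmap() on the DRM device fd to map the BO.
 */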
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

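/*
 * Walk the object's vram_list and return the mapping that belongs to the
 * given MMU context, or NULL if the object is not mapped there.
 */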
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu_context *context)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->context == context)
			return mapping;
	}

	return NULL;
}

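/*
 * Drop one use count on a mapping obtained from etnaviv_gem_mapping_get()
 * and release the object reference taken there.
 */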
void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put(&etnaviv_obj->base);
}

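/*
 * Look up or create the GPU VA mapping of an object in the given MMU
 * context and bump its use count. A non-zero va requests a specific
 * (softpin) address; an idle mapping at a different address is reaped so
 * the object can be remapped. On success a reference is taken on the
 * object (dropped again by etnaviv_gem_mapping_unreference()); on failure
 * an ERR_PTR is returned.
 */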
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
	u64 va)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us. If the use count is zero, the MMU might be
		 * reaping this object, so take the lock and re-check that
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
			mutex_lock(&mmu_context->lock);
			if (mapping->context == mmu_context) {
				if (va && mapping->iova != va) {
					etnaviv_iommu_reap_mapping(mapping);
					mapping = NULL;
				} else {
					mapping->use += 1;
				}
			} else {
				mapping = NULL;
			}
			mutex_unlock(&mmu_context->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh mapping.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
				    mmu_context->global->memory_base,
				    mapping, va);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

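/*
 * Return a kernel virtual mapping of the object, creating it on first use.
 * The mapping is cached in etnaviv_obj->vaddr and stays in place until the
 * object is released.
 */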
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

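/*
 * Prepare a BO for CPU access: pin its pages if needed, wait for pending
 * GPU work (or just test for it with ETNA_PREP_NOSYNC), and, for cached
 * BOs, sync the pages for CPU access in the requested direction.
 */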
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
					 etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

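/*
 * Finish a CPU access window opened by etnaviv_gem_cpu_prep(): for cached
 * BOs, hand the pages back to the device using the direction recorded at
 * prep time.
 */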
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct drm_etnaviv_timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct dma_resv *robj = obj->resv;
	unsigned long off = drm_vma_node_start(&obj->vma_node);
	int r;

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	r = dma_resv_lock(robj, NULL);
	if (r)
		return;

	dma_resv_describe(robj, m);
	dma_resv_unlock(robj);
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, " ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

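/*
 * GEM free callback: remove the object from the device's GEM list, tear
 * down any remaining (idle) VRAM mappings, then release the backing
 * storage via the per-object release() hook.
 */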
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu_context *context = mapping->context;

		WARN_ON(mapping->use);

		if (context)
			etnaviv_iommu_unmap_gem(context, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	etnaviv_obj->ops->release(etnaviv_obj);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
	.free = etnaviv_gem_free_object,
	.pin = etnaviv_gem_prime_pin,
	.unpin = etnaviv_gem_prime_unpin,
	.get_sg_table = etnaviv_gem_prime_get_sg_table,
	.vmap = etnaviv_gem_prime_vmap,
	.mmap = etnaviv_gem_mmap,
	.vm_ops = &vm_ops,
};

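/*
 * Common allocation path for etnaviv GEM objects: validate the caching
 * flags, allocate and initialise the etnaviv_gem_object and hook up the
 * DRM object_funcs. The caller still has to initialise the GEM object
 * itself (drm_gem_object_init() or drm_gem_private_object_init()).
 */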
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 flags,
	const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;
	(*obj)->funcs = &etnaviv_gem_object_funcs;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, flags, &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	/*
	 * Our buffers are kept pinned, so allocating them from the MOVABLE
	 * zone is a really bad idea, and conflicts with CMA. See comments
	 * above new_inode() why this is required _and_ expected if you're
	 * going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, flags, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

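/*
 * Pin the user pages backing a userptr BO with FOLL_LONGTERM, looping
 * until the whole range is pinned. Only the mm that created the BO may
 * populate it.
 */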
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
	unsigned int gup_flags = FOLL_LONGTERM;

	might_lock_read(&current->mm->mmap_lock);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	if (!userptr->ro)
		gup_flags |= FOLL_WRITE;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = pin_user_pages_fast(ptr, num_pages, gup_flags, pages);
		if (ret < 0) {
			unpin_user_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		unpin_user_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(&etnaviv_obj->base);
	return ret;
}
