/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}
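
/*
 * Pin the backing pages of a BO and, on first use, build and DMA-map the
 * scatter/gather table describing them.  Must be called with
 * etnaviv_obj->lock held.
 */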
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}
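
/*
 * Set up a userspace mapping of the object, honouring the caching mode
 * (write-combined, uncached or cached) requested at creation time.
 */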
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objects to the shmem file, so the CPU
		 * mmap gets/puts pages via the pagecache and stays
		 * coherent with the pages the GPU sees.
		 */
		fput(vma->vm_file);
		get_file(etnaviv_obj->base.filp);
		vma->vm_pgoff = 0;
		vma->vm_file = etnaviv_obj->base.filp;

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}
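
/*
 * drm file mmap entry point: let DRM core set up the VMA, then hand off to
 * the object type's mmap hook (shmem or userptr).
 */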
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}
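
/*
 * Page fault handler: look up the backing page for the faulting address
 * and insert it into the userspace VMA.
 */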
int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or
	 * remove something from beneath our feet.  Holding the object
	 * lock keeps the page array stable while we insert from it.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}
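
/*
 * Grab an extra reference on a GPU mapping: bumps both the mapping use
 * count and the underlying GEM object reference.
 */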
void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}
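
/*
 * Look up, revive or create the mapping of a BO into the given GPU's MMU
 * and return it with its use count raised.
 */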
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping the object, so take the MMU lock and check the
		 * mapping still belongs to this MMU before reviving it.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a fresh one.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another thread
	 * while waiting for the lock.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
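
/* Translate an ETNA_PREP_* access mask into the matching DMA direction. */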
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}
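
/*
 * Prepare a BO for CPU access: wait (or, with ETNA_PREP_NOSYNC, only poll)
 * for pending GPU access to finish, then hand the caches of cached BOs
 * over to the CPU.
 */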
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}
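
/*
 * Finish a CPU access window: give ownership of cached BOs back to the
 * device, syncing in the direction recorded by the matching prep call.
 */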
int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
				       etnaviv_obj->sgt->nents,
				       etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
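
/* Release shmem-backed object resources: the kernel mapping and the pages. */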
static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}
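
/* Add the BO to the device-wide list of GEM objects. */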
void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	if (ret)
		goto fail;

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}
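
/*
 * Pin the userspace pages backing a userptr BO.  Only the process which
 * created the BO is allowed to populate it.
 */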
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_sem);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}
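
/* Unpin and release the user pages backing a userptr BO. */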
static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);
	return ret;
}