// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

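/*
 * Shmem and userptr objects nest their locks differently: the userptr path
 * pins pages (which may take mmap_sem) while holding the object lock, while
 * the fault path takes the object lock with mmap_sem already held.  Distinct
 * lockdep classes keep the two orderings from being flagged against each
 * other.
 */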
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remaining cache lines in the region,
         * that will be invalidated (and partially written back) by
         * the GPU.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents,
                             DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

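/*
 * Lazily attach backing pages and a scatter/gather table to the object.
 * Must be called with etnaviv_obj->lock held; the pages stay attached
 * until the object is released (there is no pin count yet, see
 * etnaviv_gem_put_pages() below).
 */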
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so host mmap's
                 * can get back to the data from swap backed pages.
                 */
                fput(vma->vm_file);
                get_file(etnaviv_obj->base.filp);
                vma->vm_pgoff = 0;
                vma->vm_file = etnaviv_obj->base.filp;

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}

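/*
 * Fault handler: fault in one page of the backing store at a time with
 * vm_insert_page(), attaching the page array on first touch.
 */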
int etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int ret;

        /*
         * Make sure we don't parallel update on a fault, nor move or
         * remove something from beneath our feet.
         */
        ret = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (ret)
                goto out;

        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        ret = vm_insert_page(vma, vmf->address, page);

out:
        switch (ret) {
        case -EAGAIN:
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
                /*
                 * EBUSY is ok: this just means that another thread
                 * already did the job.
                 */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

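/*
 * Look up an existing mapping of the object in the given MMU context;
 * returns NULL if the object has not been mapped there yet.  Called with
 * the object lock held.
 */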
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu *mmu)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->mmu == mmu)
                        return mapping;
        }

        return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        drm_gem_object_get(&etnaviv_obj->base);

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use += 1;
        mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

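/*
 * Find or create the mapping of the object into the GPU's address space.
 * On success the mapping's use count is raised and a reference is taken on
 * the GEM object; both are dropped by etnaviv_gem_mapping_unreference().
 */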
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * behind our back.  If the use count is zero, the MMU might
                 * be reaping this object, so take the MMU lock and re-check
                 * that the MMU still owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&gpu->mmu->lock);
                        if (mapping->mmu == gpu->mmu)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&gpu->mmu->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        mapping->mmu = gpu->mmu;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
                                    mapping);
        if (ret < 0)
                kfree(mapping);
        else
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

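/*
 * Return a kernel virtual mapping of the object, created on first use.
 * The mapping is write-combined and stays in place until the object is
 * released.
 */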
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while the mutex was free.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

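/*
 * Transfer the object to CPU ownership: wait for any GPU access to finish
 * (skipped with ETNA_PREP_NOSYNC) and, for cached objects, sync the pages
 * for the CPU.  Must be paired with a later etnaviv_gem_cpu_fini().
 */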
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
                                                          write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
                                                          write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
                                    etnaviv_obj->sgt->nents,
                                    etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
                                       etnaviv_obj->sgt->nents,
                                       etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %u\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct reservation_object *robj = etnaviv_obj->resv;
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                   etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                   obj->name, kref_read(&obj->refcount),
                   off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu *mmu = mapping->mmu;

                WARN_ON(mapping->use);

                if (mmu)
                        etnaviv_iommu_unmap_gem(mmu, mapping);

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        if (etnaviv_obj->resv == &etnaviv_obj->_resv)
                reservation_object_fini(&etnaviv_obj->_resv);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;
        if (robj) {
                etnaviv_obj->resv = robj;
        } else {
                etnaviv_obj->resv = &etnaviv_obj->_resv;
                reservation_object_init(&etnaviv_obj->_resv);
        }

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret == 0) {
                struct address_space *mapping;

                /*
                 * Our buffers are kept pinned, so allocating them
                 * from the MOVABLE zone is a really bad idea, and
                 * conflicts with CMA. See comments above new_inode()
                 * why this is required _and_ expected if you're
                 * going to pin these pages.
                 */
                mapping = obj->filp->f_mapping;
                mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
                                     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        }

        if (ret)
                goto fail;

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put_unlocked(obj);

        return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

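/*
 * Pin the user pages backing a userptr object.  get_user_pages_fast() may
 * pin fewer pages than asked for, so keep looping until the whole range is
 * covered, releasing everything pinned so far on error.
 */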
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_sem);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = get_user_pages_fast(ptr, num_pages,
                                          !userptr->ro ? FOLL_WRITE : 0,
                                          pages);
                if (ret < 0) {
                        release_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                release_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

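/*
 * Create a buffer object backed by user memory.  Userptr objects are always
 * treated as CPU-cached and cannot be mmap'ed through the DRM file
 * (etnaviv_gem_userptr_mmap_obj() returns -EINVAL).
 */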
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(&etnaviv_obj->base);
        return ret;
}