// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct sg_table *sgt = etnaviv_obj->sgt;

	/*
	 * For non-cached buffers, ensure the new pages are clean
	 * because display controller, GPU, etc. are not coherent:
	 *
	 * WARNING: The DMA API does not support concurrent CPU
	 * and device access to the memory area.  With BIDIRECTIONAL,
	 * we will clean the cache lines which overlap the region,
	 * and invalidate all cache lines (partially) contained in
	 * the region.
	 *
	 * If you have dirty data in the overlapping cache lines,
	 * that will corrupt the GPU-written data.  If you have
	 * written into the remainder of the region, this can
	 * discard those writes.
	 */
	if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
		dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct drm_device *dev = etnaviv_obj->base.dev;
	struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

	if (IS_ERR(p)) {
		dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
		return PTR_ERR(p);
	}

	etnaviv_obj->pages = p;

	return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
		etnaviv_obj->sgt = NULL;
	}
	if (etnaviv_obj->pages) {
		drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
				  true, false);

		etnaviv_obj->pages = NULL;
	}
}

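/*
 * Get the backing pages for this object, allocating them on first use and
 * lazily building the scatter/gather table.  Callers must hold
 * etnaviv_obj->lock; returns the page array or an ERR_PTR on failure.
 */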
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	if (!etnaviv_obj->pages) {
		ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
		if (ret < 0)
			return ERR_PTR(ret);
	}

	if (!etnaviv_obj->sgt) {
		struct drm_device *dev = etnaviv_obj->base.dev;
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
		struct sg_table *sgt;

		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
					    etnaviv_obj->pages, npages);
		if (IS_ERR(sgt)) {
			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
				PTR_ERR(sgt));
			return ERR_CAST(sgt);
		}

		etnaviv_obj->sgt = sgt;

		etnaviv_gem_scatter_map(etnaviv_obj);
	}

	return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	lockdep_assert_held(&etnaviv_obj->lock);
	/* when we start tracking the pin count, then do something here */
}

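/*
 * Set up the page protection bits for a userspace mapping.  The object is
 * mapped with VM_MIXEDMAP (not VM_PFNMAP) so the fault handler below can
 * populate it page by page with vm_insert_page().
 */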
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	pgprot_t vm_page_prot;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	vm_page_prot = vm_get_page_prot(vma->vm_flags);

	if (etnaviv_obj->flags & ETNA_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
	} else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_page_prot);
	} else {
		/*
		 * Shunt off cached objs to shmem file so host system
		 * doesn't include them in dma flushing via the
		 * uncached/writecombine paths above.
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, etnaviv_obj->base.filp);

		vma->vm_page_prot = vm_page_prot;
	}

	return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct etnaviv_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	obj = to_etnaviv_bo(vma->vm_private_data);
	return obj->ops->mmap(obj, vma);
}

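/*
 * Page fault handler: look up the backing page for the faulting address and
 * insert it into the userspace mapping set up by etnaviv_gem_mmap_obj().
 */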
int etnaviv_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct page **pages, *page;
	pgoff_t pgoff;
	int ret;

	/*
	 * Make sure we don't parallel update on a fault, nor move or
	 * remove something from beneath our feet.  Note that
	 * vm_insert_page() is specific to this case.
	 */
	ret = mutex_lock_interruptible(&etnaviv_obj->lock);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = etnaviv_gem_get_pages(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	page = pages[pgoff];

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
	     page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

	ret = vm_insert_page(vma, vmf->address, page);

out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		dev_err(obj->dev->dev, "could not allocate mmap offset\n");
	else
		*offset = drm_vma_node_offset_addr(&obj->vma_node);

	return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
			     struct etnaviv_iommu *mmu)
{
	struct etnaviv_vram_mapping *mapping;

	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
		if (mapping->mmu == mmu)
			return mapping;
	}

	return NULL;
}

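/*
 * Mapping use counting: while a mapping is in use it pins the GEM object,
 * so these helpers take/drop a GEM object reference alongside the use count.
 */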
void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	drm_gem_object_get(&etnaviv_obj->base);

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use += 1;
	mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	mutex_lock(&etnaviv_obj->lock);
	WARN_ON(mapping->use == 0);
	mapping->use -= 1;
	mutex_unlock(&etnaviv_obj->lock);

	drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

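/*
 * Look up (or create) the GPU VM mapping for an object: re-use an active
 * mapping, revive one the MMU reaper has not yet torn down, recycle a
 * reaped mapping, or allocate and map a fresh one.
 */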
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
	struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_vram_mapping *mapping;
	struct page **pages;
	int ret = 0;

	mutex_lock(&etnaviv_obj->lock);
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
		 * beneath us.  If the use count is zero, the MMU might be
		 * reaping this object, so take the MMU lock and re-check
		 * that the mapping still belongs to this MMU before
		 * taking it into use again.
		 */
		if (mapping->use == 0) {
			mutex_lock(&gpu->mmu->lock);
			if (mapping->mmu == gpu->mmu)
				mapping->use += 1;
			else
				mapping = NULL;
			mutex_unlock(&gpu->mmu->lock);
			if (mapping)
				goto out;
		} else {
			mapping->use += 1;
			goto out;
		}
	}

	pages = etnaviv_gem_get_pages(etnaviv_obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out;
	}

	/*
	 * See if we have a reaped vram mapping we can re-use before
	 * allocating a new one.
	 */
	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
	if (!mapping) {
		mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
		if (!mapping) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&mapping->scan_node);
		mapping->object = etnaviv_obj;
	} else {
		list_del(&mapping->obj_node);
	}

	mapping->mmu = gpu->mmu;
	mapping->use = 1;

	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
				    mapping);
	if (ret < 0)
		kfree(mapping);
	else
		list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
	mutex_unlock(&etnaviv_obj->lock);

	if (ret)
		return ERR_PTR(ret);

	/* Take a reference on the object */
	drm_gem_object_get(obj);
	return mapping;
}

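/*
 * Return a kernel virtual address for the object, creating the vmap lazily
 * on first use.  The mapping is cached in ->vaddr until the object is
 * released.
 */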
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->vaddr)
		return etnaviv_obj->vaddr;

	mutex_lock(&etnaviv_obj->lock);
	/*
	 * Need to check again, as we might have raced with another
	 * thread while waiting for the mutex.
	 */
	if (!etnaviv_obj->vaddr)
		etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
	mutex_unlock(&etnaviv_obj->lock);

	return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
	struct page **pages;

	lockdep_assert_held(&obj->lock);

	pages = etnaviv_gem_get_pages(obj);
	if (IS_ERR(pages))
		return NULL;

	return vmap(pages, obj->base.size >> PAGE_SHIFT,
		    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
	if (op & ETNA_PREP_READ)
		return DMA_FROM_DEVICE;
	else if (op & ETNA_PREP_WRITE)
		return DMA_TO_DEVICE;
	else
		return DMA_BIDIRECTIONAL;
}

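/*
 * Prepare an object for CPU access: wait for any GPU work tracked in the
 * reservation object (unless ETNA_PREP_NOSYNC is set) and, for cached
 * objects, sync the pages back to the CPU.
 */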
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
		struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct drm_device *dev = obj->dev;
	bool write = !!(op & ETNA_PREP_WRITE);
	int ret;

	if (!etnaviv_obj->sgt) {
		void *ret;

		mutex_lock(&etnaviv_obj->lock);
		ret = etnaviv_gem_get_pages(etnaviv_obj);
		mutex_unlock(&etnaviv_obj->lock);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
							  write))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
							  write, true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
				    etnaviv_obj->sgt->nents,
				    etnaviv_op_to_dma_dir(op));
		etnaviv_obj->last_cpu_prep_op = op;
	}

	return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	if (etnaviv_obj->flags & ETNA_BO_CACHED) {
		/* fini without a prep is almost certainly a userspace error */
		WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
		dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
			etnaviv_obj->sgt->nents,
			etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
		etnaviv_obj->last_cpu_prep_op = 0;
	}

	return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
	struct timespec *timeout)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
	const char *type, struct seq_file *m)
{
	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		seq_printf(m, "\t%9s: %s %s seq %u\n",
			   type,
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct reservation_object *robj = etnaviv_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned long off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
			etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, etnaviv_obj->vaddr, obj->size);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			etnaviv_gem_describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		etnaviv_gem_describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
	struct seq_file *m)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int count = 0;
	size_t size = 0;

	mutex_lock(&priv->gem_lock);
	list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
		struct drm_gem_object *obj = &etnaviv_obj->base;

		seq_puts(m, "   ");
		etnaviv_gem_describe(obj, m);
		count++;
		size += obj->size;
	}
	mutex_unlock(&priv->gem_lock);

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
	vunmap(etnaviv_obj->vaddr);
	put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
	.get_pages = etnaviv_gem_shmem_get_pages,
	.release = etnaviv_gem_shmem_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
	struct etnaviv_drm_private *priv = obj->dev->dev_private;
	struct etnaviv_vram_mapping *mapping, *tmp;

	/* object should not be active */
	WARN_ON(is_active(etnaviv_obj));

	mutex_lock(&priv->gem_lock);
	list_del(&etnaviv_obj->gem_node);
	mutex_unlock(&priv->gem_lock);

	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
		struct etnaviv_iommu *mmu = mapping->mmu;

		WARN_ON(mapping->use);

		if (mmu)
			etnaviv_iommu_unmap_gem(mmu, mapping);

		list_del(&mapping->obj_node);
		kfree(mapping);
	}

	drm_gem_free_mmap_offset(obj);
	etnaviv_obj->ops->release(etnaviv_obj);
	if (etnaviv_obj->resv == &etnaviv_obj->_resv)
		reservation_object_fini(&etnaviv_obj->_resv);
	drm_gem_object_release(obj);

	kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

	mutex_lock(&priv->gem_lock);
	list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
	mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct drm_gem_object **obj)
{
	struct etnaviv_gem_object *etnaviv_obj;
	unsigned sz = sizeof(*etnaviv_obj);
	bool valid = true;

	/* validate flags */
	switch (flags & ETNA_BO_CACHE_MASK) {
	case ETNA_BO_UNCACHED:
	case ETNA_BO_CACHED:
	case ETNA_BO_WC:
		break;
	default:
		valid = false;
	}

	if (!valid) {
		dev_err(dev->dev, "invalid cache flag: %x\n",
			(flags & ETNA_BO_CACHE_MASK));
		return -EINVAL;
	}

	etnaviv_obj = kzalloc(sz, GFP_KERNEL);
	if (!etnaviv_obj)
		return -ENOMEM;

	etnaviv_obj->flags = flags;
	etnaviv_obj->ops = ops;
	if (robj) {
		etnaviv_obj->resv = robj;
	} else {
		etnaviv_obj->resv = &etnaviv_obj->_resv;
		reservation_object_init(&etnaviv_obj->_resv);
	}

	mutex_init(&etnaviv_obj->lock);
	INIT_LIST_HEAD(&etnaviv_obj->vram_list);

	*obj = &etnaviv_obj->base;

	return 0;
}

int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
	u32 size, u32 flags, u32 *handle)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	size = PAGE_ALIGN(size);

	ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
				   &etnaviv_gem_shmem_ops, &obj);
	if (ret)
		goto fail;

	lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret == 0) {
		struct address_space *mapping;

		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA.  See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping = obj->filp->f_mapping;
		mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	if (ret)
		goto fail;

	etnaviv_gem_obj_add(dev, obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
fail:
	drm_gem_object_put_unlocked(obj);

	return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
	struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
	struct etnaviv_gem_object **res)
{
	struct drm_gem_object *obj;
	int ret;

	ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
	if (ret)
		return ret;

	drm_gem_private_object_init(dev, obj, size);

	*res = to_etnaviv_bo(obj);

	return 0;
}

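/*
 * Pin the user pages backing a userptr object.  get_user_pages_fast() may
 * pin fewer pages than requested, so loop until the whole range is pinned
 * or an error occurs.
 */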
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
	struct page **pvec = NULL;
	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

	might_lock_read(&current->mm->mmap_sem);

	if (userptr->mm != current->mm)
		return -EPERM;

	pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pvec)
		return -ENOMEM;

	do {
		unsigned num_pages = npages - pinned;
		uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
		struct page **pages = pvec + pinned;

		ret = get_user_pages_fast(ptr, num_pages,
					  !userptr->ro ? FOLL_WRITE : 0, pages);
		if (ret < 0) {
			release_pages(pvec, pinned);
			kvfree(pvec);
			return ret;
		}

		pinned += ret;

	} while (pinned < npages);

	etnaviv_obj->pages = pvec;

	return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
	if (etnaviv_obj->sgt) {
		etnaviv_gem_scatterlist_unmap(etnaviv_obj);
		sg_free_table(etnaviv_obj->sgt);
		kfree(etnaviv_obj->sgt);
	}
	if (etnaviv_obj->pages) {
		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

		release_pages(etnaviv_obj->pages, npages);
		kvfree(etnaviv_obj->pages);
	}
}

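/* userptr objects are backed by user memory and cannot be mmapped via GEM */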
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
		struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
	.get_pages = etnaviv_gem_userptr_get_pages,
	.release = etnaviv_gem_userptr_release,
	.vmap = etnaviv_gem_vmap_impl,
	.mmap = etnaviv_gem_userptr_mmap_obj,
};

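/*
 * Create a GEM object wrapping anonymous user memory.  Userptr objects are
 * always CPU-cached, since the pages come from the process address space.
 */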
int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
	uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
	struct etnaviv_gem_object *etnaviv_obj;
	int ret;

	ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
				      &etnaviv_gem_userptr_ops, &etnaviv_obj);
	if (ret)
		return ret;

	lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

	etnaviv_obj->userptr.ptr = ptr;
	etnaviv_obj->userptr.mm = current->mm;
	etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

	etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

	ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&etnaviv_obj->base);
	return ret;
}