#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	return !msm_obj->vram_node;
}

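/* Allocate backing pages from the VRAM carveout; used when there is no IOMMU: */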
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, unmap the pages from the device
		 * before releasing them:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

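/* Get (allocating if necessary) the object's backing pages: */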
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

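/* Fault handler for userspace mmap: faults in a single page at a time. */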
int msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj, so we don't need to hold one here.
	 */
	ret = mutex_lock_interruptible(&msm_obj->lock);
	if (ret)
		goto out;

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

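/* Allocate (if necessary) and return the fake mmap offset for the object: */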
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

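/* Called with msm_obj->lock held: tear down all iova mappings for the object. */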
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
		del_vma(vma);
	}
}

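/* Get (mapping it on first use) the object's iova in the given address space: */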
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return -EBUSY;
	}

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		struct page **pages;

		vma = add_vma(obj, aspace);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto unlock;
		}

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
				obj->size >> PAGE_SHIFT);
		if (ret)
			goto fail;
	}

	*iova = vma->iova;

	mutex_unlock(&msm_obj->lock);
	return 0;

fail:
	del_vma(vma);
unlock:
	mutex_unlock(&msm_obj->lock);
	return ret;
}

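/* Get the iova without mapping; only valid if msm_gem_get_iova() has already
 * been called for this address space.
 */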
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

void msm_gem_put_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	/* Intentionally a no-op for now: iovas are not refcounted, and the
	 * mapping is torn down in put_iova() when the object is freed or
	 * purged.
	 */
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* Increment vmap_count *before* the vmap() call, so the shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.  This
	 * guarantees we won't try to msm_gem_vunmap() this same object from
	 * within the vmap() call while we already hold msm_obj->lock.
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

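/* Update madvise status; returns true if the object has not been purged. */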
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as possible back
	 * to the system, so instruct shmem to drop all of its backing pages,
	 * *now*, and invalidate anything left in the page cache:
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

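/* Wait for fences from other contexts on this object; must be called before
 * msm_gem_move_to_active().
 */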
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = reservation_object_get_list(msm_obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = reservation_object_get_excl(msm_obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
				reservation_object_held(msm_obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	msm_obj->gpu = gpu;
	if (exclusive)
		reservation_object_add_excl_fence(msm_obj->resv, fence);
	else
		reservation_object_add_shared_fence(msm_obj->resv, fence);
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
			true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct reservation_object *robj = msm_obj->resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	list_for_each_entry(vma, &msm_obj->vmas, list)
		seq_printf(m, " %08llx", vma->iova);

	seq_printf(m, " %zu%s\n", obj->size, madv);

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

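/* Convenience method to construct a GEM buffer object and a userspace handle for it: */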
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct reservation_object *resv,
		struct drm_gem_object **obj,
		bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	if (resv) {
		msm_obj->resv = resv;
	} else {
		msm_obj->resv = &msm_obj->_resv;
		reservation_object_init(msm_obj->resv);
	}

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	*obj = &msm_obj->base;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!iommu_present(&platform_bus_type))
		use_vram = true;
	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy:
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
	if (ret)
		goto fail;

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;
		struct msm_gem_object *msm_obj = to_msm_bo(obj);

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	}

	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);
	return obj;

fail:
	drm_gem_object_unreference_unlocked(obj);
	return ERR_PTR(ret);
}

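/* Allocate a buffer object, optionally map it into the given address space,
 * and return a kernel vmap of it:
 */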
static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_iova(obj, aspace, iova);
		if (ret) {
			drm_gem_object_unreference(obj);
			return ERR_PTR(ret);
		}
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_put_iova(obj, aspace);
		drm_gem_object_unreference(obj);
		return ERR_CAST(vaddr);
	}

	if (bo)
		*bo = obj;

	return vaddr;
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}