#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

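/* allocate backing pages from the VRAM carveout, used when no IOMMU is present: */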
static struct page **get_pages_vram(struct drm_gem_object *obj,
		int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = drm_malloc_ab(npages, sizeof(struct page *));
	if (!p)
		return ERR_PTR(-ENOMEM);

	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
			npages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		drm_free_large(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

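/* get backing pages, allocating them and building the sg table on first use: */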
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (iommu_present(&platform_bus_type))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, the pages were DMA-mapped at
		 * get_pages() time, so unmap them before freeing:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (iommu_present(&platform_bus_type))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet:
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

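/* get mmap offset */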
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

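/* Get (mapping into the given address space 'id' if necessary) the
 * buffer's iova.  Expects struct_mutex to be held; see
 * msm_gem_get_iova() for the unlocked wrapper.
 */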
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		struct page **pages = get_pages(obj);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		if (iommu_present(&platform_bus_type)) {
			struct msm_mmu *mmu = priv->mmus[id];
			uint32_t offset;

			if (WARN_ON(!mmu))
				return -EINVAL;

			offset = (uint32_t)mmap_offset(obj);
			ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
					obj->size, IOMMU_READ | IOMMU_WRITE);
			msm_obj->domain[id].iova = offset;
		} else {
			msm_obj->domain[id].iova = physaddr(obj);
		}
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_iova()'.
 */
uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!msm_obj->domain[id].iova);
	return msm_obj->domain[id].iova;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	/* iova reference counting is not implemented yet; mappings are
	 * only torn down when the object is freed, so there is nothing
	 * to do here for now.
	 */
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

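/* setup callback for when bo is no longer busy, i.e. when the fence
 * covering any pending read/write access has been signalled:
 */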
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint32_t fence = msm_gem_fence(msm_obj,
			MSM_PREP_READ | MSM_PREP_WRITE);
	return msm_queue_fence_cb(obj->dev, cb, fence);
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

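/* prepare the buffer for CPU access: unless MSM_PREP_NOSYNC is set,
 * wait (interruptibly, up to the given timeout) for any pending GPU
 * access covered by 'op' to complete:
 */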
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = msm_gem_fence(msm_obj, op);

		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = obj->dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		struct msm_mmu *mmu = priv->mmus[id];
		if (mmu && msm_obj->domain[id].iova) {
			uint32_t offset = msm_obj->domain[id].iova;
			mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
		}
	}

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are
		 * not ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);

	} else {
		vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

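/* convenience method to construct a GEM buffer object, and userspace handle */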
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	unsigned sz;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	sz = sizeof(*msm_obj);
	if (!iommu_present(&platform_bus_type))
		sz += sizeof(struct drm_mm_node);

	msm_obj = kzalloc(sz, GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	if (!iommu_present(&platform_bus_type))
		msm_obj->vram_node = (void *)&msm_obj[1];

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

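/* allocate a new GEM object; caller must hold dev->struct_mutex */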
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	if (iommu_present(&platform_bus_type)) {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
	} else {
		drm_gem_private_object_init(dev, obj, size);
	}

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}