#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"

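/* Allocate backing pages from shmem on first use.  For WC/uncached buffers
 * the new pages are also mapped for DMA so the non-coherent GPU and display
 * controller see clean data.  Called with dev->struct_mutex held.
 */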
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p = drm_gem_get_pages(obj, 0);
		int npages = obj->size >> PAGE_SHIFT;

		if (IS_ERR(p)) {
			dev_err(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			dev_err(dev->dev, "failed to allocate sgt\n");
			return ERR_CAST(msm_obj->sgt);
		}

		msm_obj->pages = p;

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
	}

	return msm_obj->pages;
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, the pages were mapped for DMA in
		 * get_pages(), so unmap them again before releasing:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		drm_gem_put_pages(obj, msm_obj->pages, true, false);
		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct page **p;
	mutex_lock(&dev->struct_mutex);
	p = get_pages(obj);
	mutex_unlock(&dev->struct_mutex);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

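/* Set up vm flags and page protection for a userspace mapping, based on the
 * object's caching flags.  Cached (non-WC, non-uncached) objects are shunted
 * off to the shmem file backing the object.
 */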
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		get_file(obj->filp);
		vma->vm_pgoff = 0;
		vma->vm_file = obj->filp;

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

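/* Page fault handler for userspace mappings: pin the backing pages (under
 * dev->struct_mutex) and insert the faulting page's pfn into the vma.
 */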
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet:
	 */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(msm_obj->pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out_unlock:
	mutex_unlock(&dev->struct_mutex);
out:
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return offset;
}

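/* Helpers to map/unmap an object's sg_table into an IOMMU domain at a given
 * iova: map_range() walks the scatterlist and maps each entry, unwinding any
 * partial mappings on failure; unmap_range() tears the whole mapping down.
 */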
static int map_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_phys(sg) - sg->offset;
		size_t bytes = sg->length + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	/* undo the mappings made before the failing entry: */
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg->length + sg->offset;
		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

static void unmap_range(struct iommu_domain *domain, unsigned int iova,
		struct sg_table *sgt, unsigned int len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg->length + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			break;

		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		da += bytes;
	}
}

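/* Get (and pin) an iova for the object in the given address space.  Should be
 * called with dev->struct_mutex held; the mapping is created on first use and
 * currently stays around until the object is freed.
 */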
int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
		uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (!msm_obj->domain[id].iova) {
		struct msm_drm_private *priv = obj->dev->dev_private;
		uint32_t offset = (uint32_t)mmap_offset(obj);
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
				obj->size, IOMMU_READ | IOMMU_WRITE);
		msm_obj->domain[id].iova = offset;
	}

	if (!ret)
		*iova = msm_obj->domain[id].iova;

	return ret;
}

int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	/* this is safe right now because we don't unmap until the
	 * bo is deleted:
	 */
	if (msm_obj->domain[id].iova) {
		*iova = msm_obj->domain[id].iova;
		return 0;
	}

	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_get_iova_locked(obj, id, iova);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
	/* XXX TODO: iovas are not refcounted yet, so we don't actually unmap
	 * anything here; the mapping is torn down when the object is freed.
	 */
}

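/* Dumb buffer support: allocate a scanout-capable, write-combined buffer and
 * return a userspace handle for it.
 */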
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}

void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages))
			return ERR_CAST(pages);
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return msm_obj->vaddr;
}

void *msm_gem_vaddr(struct drm_gem_object *obj)
{
	void *ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = msm_gem_vaddr_locked(obj);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}

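/* Setup callback for when bo is no longer busy: if the object is still active
 * the callback is put on the fence-callback list and runs once the GPU has
 * retired the object's last fence, otherwise it is scheduled immediately.
 */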
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
		struct msm_fence_cb *cb)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (is_active(msm_obj)) {
		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void msm_gem_move_to_active(struct drm_gem_object *obj,
		struct msm_gpu *gpu, bool write, uint32_t fence)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	msm_obj->gpu = gpu;
	if (write)
		msm_obj->write_fence = fence;
	else
		msm_obj->read_fence = fence;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}

void msm_gem_move_to_inactive(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	msm_obj->gpu = NULL;
	msm_obj->read_fence = 0;
	msm_obj->write_fence = 0;
	list_del_init(&msm_obj->mm_list);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
}

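/* Prepare for CPU access: if the object is still active on the GPU, wait for
 * the relevant fence (writes for MSM_PREP_READ, reads for MSM_PREP_WRITE), or
 * just check without blocking when MSM_PREP_NOSYNC is set.
 */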
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
		struct timespec *timeout)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (is_active(msm_obj)) {
		uint32_t fence = 0;

		if (op & MSM_PREP_READ)
			fence = msm_obj->write_fence;
		if (op & MSM_PREP_WRITE)
			fence = max(fence, msm_obj->read_fence);
		if (op & MSM_PREP_NOSYNC)
			timeout = NULL;

		ret = msm_wait_fence_interruptable(dev, fence, timeout);
	}

	/* TODO cache maintenance */

	return ret;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	uint64_t off = drm_vma_node_start(&obj->vma_node);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			msm_obj->read_fence, msm_obj->write_fence,
			obj->name, obj->refcount.refcount.counter,
			off, msm_obj->vaddr, obj->size);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_printf(m, " ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

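/* Release all resources held by a GEM object: iommu mappings, the mmap
 * offset, backing pages (or imported dma-buf state), and the reservation
 * object.  Called with dev->struct_mutex held.
 */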
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int id;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
		if (msm_obj->domain[id].iova) {
			struct msm_drm_private *priv = obj->dev->dev_private;
			uint32_t offset = (uint32_t)mmap_offset(obj);
			unmap_range(priv->iommus[id], offset,
					msm_obj->sgt, obj->size);
		}
	}

	drm_gem_free_mmap_offset(obj);

	if (obj->import_attach) {
		if (msm_obj->vaddr)
			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours to give back; just free the array we allocated:
		 */
		if (msm_obj->pages)
			drm_free_large(msm_obj->pages);
	} else {
		if (msm_obj->vaddr)
			vunmap(msm_obj->vaddr);
		put_pages(obj);
	}

	if (msm_obj->resv == &msm_obj->_resv)
		reservation_object_fini(msm_obj->resv);

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

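/* Convenience method to construct a GEM buffer object and a userspace handle
 * for it in one go.
 */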
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		dev_err(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;

	msm_obj->resv = &msm_obj->_resv;
	reservation_object_init(msm_obj->resv);

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);

	*obj = &msm_obj->base;

	return 0;
}

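/* Allocate a new (shmem backed) GEM buffer object.  Must be called with
 * dev->struct_mutex held.
 */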
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	struct drm_gem_object *obj = NULL;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		/* struct_mutex is already held, so use the locked unref: */
		drm_gem_object_unreference(obj);

	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		uint32_t size, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	int ret, npages;

	size = PAGE_ALIGN(size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}