1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/export.h>
30#include <linux/dma-buf.h>
31#include <linux/rbtree.h>
32#include <drm/drm_prime.h>
33#include <drm/drm_gem.h>
34#include <drm/drmP.h>
35
36#include "drm_internal.h"
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
/*
 * One (dma_buf, handle) pair tracked per DRM file, so that re-exporting or
 * re-importing the same buffer yields the same handle/fd.  Each member sits
 * in both lookup trees of drm_prime_file_private.
 */
struct drm_prime_member {
	struct dma_buf *dma_buf;	/* holds a ref (taken on add, dropped on remove) */
	uint32_t handle;		/* GEM handle in the owning drm_file */

	struct rb_node dmabuf_rb;	/* node in prime_fpriv->dmabufs, keyed by dma_buf */
	struct rb_node handle_rb;	/* node in prime_fpriv->handles, keyed by handle */
};
85
/*
 * Per-attachment state: caches the DMA-mapped sg_table so repeated maps in
 * the same direction are free.  dir == DMA_NONE means nothing is mapped yet.
 */
struct drm_prime_attachment {
	struct sg_table *sgt;		/* cached mapping, NULL until first map */
	enum dma_data_direction dir;	/* direction of the cached mapping */
};
90
91static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
92 struct dma_buf *dma_buf, uint32_t handle)
93{
94 struct drm_prime_member *member;
95 struct rb_node **p, *rb;
96
97 member = kmalloc(sizeof(*member), GFP_KERNEL);
98 if (!member)
99 return -ENOMEM;
100
101 get_dma_buf(dma_buf);
102 member->dma_buf = dma_buf;
103 member->handle = handle;
104
105 rb = NULL;
106 p = &prime_fpriv->dmabufs.rb_node;
107 while (*p) {
108 struct drm_prime_member *pos;
109
110 rb = *p;
111 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
112 if (dma_buf > pos->dma_buf)
113 p = &rb->rb_right;
114 else
115 p = &rb->rb_left;
116 }
117 rb_link_node(&member->dmabuf_rb, rb, p);
118 rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
119
120 rb = NULL;
121 p = &prime_fpriv->handles.rb_node;
122 while (*p) {
123 struct drm_prime_member *pos;
124
125 rb = *p;
126 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
127 if (handle > pos->handle)
128 p = &rb->rb_right;
129 else
130 p = &rb->rb_left;
131 }
132 rb_link_node(&member->handle_rb, rb, p);
133 rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
134
135 return 0;
136}
137
138static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
139 uint32_t handle)
140{
141 struct rb_node *rb;
142
143 rb = prime_fpriv->handles.rb_node;
144 while (rb) {
145 struct drm_prime_member *member;
146
147 member = rb_entry(rb, struct drm_prime_member, handle_rb);
148 if (member->handle == handle)
149 return member->dma_buf;
150 else if (member->handle < handle)
151 rb = rb->rb_right;
152 else
153 rb = rb->rb_left;
154 }
155
156 return NULL;
157}
158
159static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
160 struct dma_buf *dma_buf,
161 uint32_t *handle)
162{
163 struct rb_node *rb;
164
165 rb = prime_fpriv->dmabufs.rb_node;
166 while (rb) {
167 struct drm_prime_member *member;
168
169 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
170 if (member->dma_buf == dma_buf) {
171 *handle = member->handle;
172 return 0;
173 } else if (member->dma_buf < dma_buf) {
174 rb = rb->rb_right;
175 } else {
176 rb = rb->rb_left;
177 }
178 }
179
180 return -ENOENT;
181}
182
183static int drm_gem_map_attach(struct dma_buf *dma_buf,
184 struct device *target_dev,
185 struct dma_buf_attachment *attach)
186{
187 struct drm_prime_attachment *prime_attach;
188 struct drm_gem_object *obj = dma_buf->priv;
189 struct drm_device *dev = obj->dev;
190
191 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
192 if (!prime_attach)
193 return -ENOMEM;
194
195 prime_attach->dir = DMA_NONE;
196 attach->priv = prime_attach;
197
198 if (!dev->driver->gem_prime_pin)
199 return 0;
200
201 return dev->driver->gem_prime_pin(obj);
202}
203
/*
 * dma_buf ->detach callback: unpin the backing GEM object and tear down
 * the cached sg_table mapping created by drm_gem_map_dma_buf().
 */
static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	/* Nothing else to do if attach() never set up its state. */
	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		/* dir == DMA_NONE means the table was never DMA-mapped. */
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg_attrs(attach->dev, sgt->sgl, sgt->nents,
					   prime_attach->dir,
					   DMA_ATTR_SKIP_CPU_SYNC);
		sg_free_table(sgt);
	}

	/* kfree(NULL) is a no-op, so this is safe when sgt was never set. */
	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}
231
232void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
233 struct dma_buf *dma_buf)
234{
235 struct rb_node *rb;
236
237 rb = prime_fpriv->dmabufs.rb_node;
238 while (rb) {
239 struct drm_prime_member *member;
240
241 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
242 if (member->dma_buf == dma_buf) {
243 rb_erase(&member->handle_rb, &prime_fpriv->handles);
244 rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
245
246 dma_buf_put(dma_buf);
247 kfree(member);
248 return;
249 } else if (member->dma_buf < dma_buf) {
250 rb = rb->rb_right;
251 } else {
252 rb = rb->rb_left;
253 }
254 }
255}
256
/*
 * dma_buf ->map_dma_buf callback: map the GEM object's backing storage for
 * DMA on behalf of the importer.  The resulting sg_table is cached in the
 * attachment, so repeated maps in the same direction return the same table.
 */
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * The first map pins the direction for the lifetime of the
	 * attachment; mapping again with a different direction is refused.
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
				      DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			/* Cache for later calls; freed in map_detach(). */
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}
294
static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/*
	 * Intentionally empty: the mapping is cached in the attachment for
	 * reuse and only torn down in drm_gem_map_detach().
	 */
}
301
302
303
304
305
306
307
308
309
310
311
312
313
/**
 * drm_gem_dmabuf_export - &dma_buf export helper for GEM drivers
 * @dev: DRM device the buffer belongs to
 * @exp_info: export information passed straight to dma_buf_export()
 *
 * Wraps dma_buf_export(): on success it takes one reference on @dev and
 * one on the GEM object (@exp_info->priv); both are dropped again in
 * drm_gem_dmabuf_release().
 *
 * Returns the new dma_buf, or an ERR_PTR on failure.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	/* Keep device and object alive for the lifetime of the dma_buf. */
	drm_dev_get(dev);
	drm_gem_object_get(exp_info->priv);

	return dma_buf;
}
328EXPORT_SYMBOL(drm_gem_dmabuf_export);
329
330
331
332
333
334
335
336
337
338
/**
 * drm_gem_dmabuf_release - &dma_buf release helper for GEM drivers
 * @dma_buf: buffer being released
 *
 * Drops the references on the GEM object and its device that were taken
 * in drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* Drop the reference the export took on the object... */
	drm_gem_object_put_unlocked(obj);

	/* ...and on the device. */
	drm_dev_put(dev);
}
349EXPORT_SYMBOL(drm_gem_dmabuf_release);
350
351static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
352{
353 struct drm_gem_object *obj = dma_buf->priv;
354 struct drm_device *dev = obj->dev;
355
356 return dev->driver->gem_prime_vmap(obj);
357}
358
359static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
360{
361 struct drm_gem_object *obj = dma_buf->priv;
362 struct drm_device *dev = obj->dev;
363
364 dev->driver->gem_prime_vunmap(obj, vaddr);
365}
366
/* Per-page atomic kernel mapping is not supported for GEM PRIME buffers. */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}
372
/* No-op: kmap_atomic always returns NULL, so there is nothing to unmap. */
static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}
/* Per-page kernel mapping is not supported for GEM PRIME buffers. */
static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}
383
/* No-op: kmap always returns NULL, so there is nothing to unmap. */
static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}
389
390static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
391 struct vm_area_struct *vma)
392{
393 struct drm_gem_object *obj = dma_buf->priv;
394 struct drm_device *dev = obj->dev;
395
396 if (!dev->driver->gem_prime_mmap)
397 return -ENOSYS;
398
399 return dev->driver->gem_prime_mmap(obj, vma);
400}
401
/* dma_buf exporter callbacks shared by all GEM-based PRIME exports. */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
449 struct drm_gem_object *obj,
450 int flags)
451{
452 struct dma_buf_export_info exp_info = {
453 .exp_name = KBUILD_MODNAME,
454 .owner = dev->driver->fops->owner,
455 .ops = &drm_gem_prime_dmabuf_ops,
456 .size = obj->size,
457 .flags = flags,
458 .priv = obj,
459 };
460
461 if (dev->driver->gem_prime_res_obj)
462 exp_info.resv = dev->driver->gem_prime_res_obj(obj);
463
464 return drm_gem_dmabuf_export(dev, &exp_info);
465}
466EXPORT_SYMBOL(drm_gem_prime_export);
467
/*
 * Export @obj as a dma_buf and publish it in obj->dma_buf.
 *
 * Caller must hold dev->object_name_lock (see drm_gem_prime_handle_to_fd()),
 * which keeps obj->handle_count and obj->dma_buf stable across the export.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* Refuse to export an object whose last userspace handle is gone:
	 * nobody would be left to clean up the obj->dma_buf cache. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* The export hook reports its own error; nothing was
		 * registered, so there is nothing to undo here. */
		return dmabuf;
	}

	/*
	 * Cache the dma_buf on the object with its own reference.  Callers
	 * need not clean this cache up: the handle_count check above
	 * guarantees a handle still exists, and closing the last handle
	 * releases the cached dma_buf.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}
498
499
500
501
502
503
504
505
506
507
508
509
510
511
/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: DRM device to export the buffer from
 * @file_priv: DRM file-private structure owning @handle
 * @handle: GEM handle of the buffer to export
 * @flags: fd flags such as DRM_CLOEXEC / DRM_RDWR
 * @prime_fd: on success, receives the new file descriptor
 *
 * Exports the GEM object behind @handle as a dma_buf fd, reusing any
 * previously exported/imported dma_buf for the same object so repeated
 * exports agree.  Returns 0 on success or a negative error code.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* Already exported through this file? Reuse the cached dma_buf. */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* The object was itself imported: re-export the original dma_buf. */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* Already exported (possibly via another file): reuse it. */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* Nothing was registered, so only unwind the locking. */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * Add the buffer to the per-file cache so a later fd_to_handle of
	 * the same dma_buf returns this handle.  This must happen under
	 * dev->object_name_lock so a racing gem-close cannot miss removing
	 * the handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * On fd allocation failure we must NOT remove the buffer from the
	 * handle cache: the dma_buf is already linked via obj->dma_buf,
	 * which stays valid as long as a userspace GEM handle exists, and
	 * closing the handle cleans the cache out anyway.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
597EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
598
599
600
601
602
603
604
605
606
607
608
/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: DRM device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to attach the dma-buf to
 *
 * Attaches @attach_dev to @dma_buf, maps it bidirectionally, and asks the
 * driver to wrap the resulting sg_table in a new GEM object.  Importing a
 * buffer this device itself exported short-circuits to the original GEM
 * object with an extra reference.
 *
 * Returns the GEM object or an ERR_PTR on failure.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM on
			 * the same device: just grab another reference.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* Hold a reference for the lifetime of the import attachment;
	 * dropped in drm_prime_gem_destroy() or on the failure paths. */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
664
665
666
667
668
669
670
671
672
/**
 * drm_gem_prime_import - default GEM implementation of the import callback
 * @dev: DRM device to import into
 * @dma_buf: dma-buf object to import
 *
 * Convenience wrapper around drm_gem_prime_import_dev() that attaches the
 * dma-buf to the DRM device's default DMA device (dev->dev).
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
679
680
681
682
683
684
685
686
687
688
689
690
691
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: DRM device to import the buffer into
 * @file_priv: DRM file-private structure receiving the handle
 * @prime_fd: file descriptor of the dma-buf to import
 * @handle: on success, receives the GEM handle of the imported object
 *
 * Imports a dma-buf fd as a GEM handle, reusing an existing handle if this
 * file already imported the same dma_buf.  Returns 0 on success or a
 * negative error code.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	/* Already imported through this file? Hand back the same handle. */
	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
					  dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* Never seen before: ask the driver to wrap it in a GEM object. */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	/* Cache the dma_buf on the object so a later export reuses it. */
	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* Adding the (dma_buf, handle) pair failed; drop the handle again
	 * so userspace never sees a half-registered import. */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
758
759int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
760 struct drm_file *file_priv)
761{
762 struct drm_prime_handle *args = data;
763
764 if (!drm_core_check_feature(dev, DRIVER_PRIME))
765 return -EINVAL;
766
767 if (!dev->driver->prime_handle_to_fd)
768 return -ENOSYS;
769
770
771 if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
772 return -EINVAL;
773
774 return dev->driver->prime_handle_to_fd(dev, file_priv,
775 args->handle, args->flags, &args->fd);
776}
777
778int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
779 struct drm_file *file_priv)
780{
781 struct drm_prime_handle *args = data;
782
783 if (!drm_core_check_feature(dev, DRIVER_PRIME))
784 return -EINVAL;
785
786 if (!dev->driver->prime_fd_to_handle)
787 return -ENOSYS;
788
789 return dev->driver->prime_fd_to_handle(dev, file_priv,
790 args->fd, &args->handle);
791}
792
793
794
795
796
797
798
799
800
801
802struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
803{
804 struct sg_table *sg = NULL;
805 int ret;
806
807 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
808 if (!sg) {
809 ret = -ENOMEM;
810 goto out;
811 }
812
813 ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
814 nr_pages << PAGE_SHIFT, GFP_KERNEL);
815 if (ret)
816 goto out;
817
818 return sg;
819out:
820 kfree(sg);
821 return ERR_PTR(ret);
822}
823EXPORT_SYMBOL(drm_prime_pages_to_sg);
824
825
826
827
828
829
830
831
832
833
834
835int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
836 dma_addr_t *addrs, int max_pages)
837{
838 unsigned count;
839 struct scatterlist *sg;
840 struct page *page;
841 u32 len;
842 int pg_index;
843 dma_addr_t addr;
844
845 pg_index = 0;
846 for_each_sg(sgt->sgl, sg, sgt->nents, count) {
847 len = sg->length;
848 page = sg_page(sg);
849 addr = sg_dma_address(sg);
850
851 while (len > 0) {
852 if (WARN_ON(pg_index >= max_pages))
853 return -1;
854 pages[pg_index] = page;
855 if (addrs)
856 addrs[pg_index] = addr;
857
858 page++;
859 addr += PAGE_SIZE;
860 len -= PAGE_SIZE;
861 pg_index++;
862 }
863 }
864 return 0;
865}
866EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
867
868
869
870
871
872
873
874
875
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object that was created from a dma-buf
 * @sg: the sg_table that was mapped at import time, or NULL
 *
 * Unmaps @sg (if set), detaches from the dma-buf, and drops the reference
 * taken on the dma-buf when the object was imported.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	/* Save the dma_buf pointer before detach releases the attachment. */
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* Drop the reference taken in drm_gem_prime_import_dev(). */
	dma_buf_put(dma_buf);
}
888EXPORT_SYMBOL(drm_prime_gem_destroy);
889
890void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
891{
892 mutex_init(&prime_fpriv->lock);
893 prime_fpriv->dmabufs = RB_ROOT;
894 prime_fpriv->handles = RB_ROOT;
895}
896
/*
 * Per-file PRIME teardown.  By this point every cached pair must have been
 * removed via drm_prime_remove_buf_handle_locked(), so the tree is empty.
 */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* Assert the tree is empty, i.e. all buffers were released. */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
902