1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29#include <linux/export.h>
30#include <linux/dma-buf.h>
31#include <linux/rbtree.h>
32
33#include <drm/drm.h>
34#include <drm/drm_drv.h>
35#include <drm/drm_file.h>
36#include <drm/drm_framebuffer.h>
37#include <drm/drm_gem.h>
38#include <drm/drm_prime.h>
39
40#include "drm_internal.h"
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
/*
 * Per-file tracking of a dma-buf <-> GEM handle pair. Each member is
 * indexed twice, by dma-buf pointer and by GEM handle, so that both
 * lookup directions are O(log n).
 */
struct drm_prime_member {
	struct dma_buf *dma_buf;	/* holds a reference, taken in drm_prime_add_buf_handle() */
	uint32_t handle;

	struct rb_node dmabuf_rb;	/* node in drm_prime_file_private.dmabufs */
	struct rb_node handle_rb;	/* node in drm_prime_file_private.handles */
};
97
98static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
99 struct dma_buf *dma_buf, uint32_t handle)
100{
101 struct drm_prime_member *member;
102 struct rb_node **p, *rb;
103
104 member = kmalloc(sizeof(*member), GFP_KERNEL);
105 if (!member)
106 return -ENOMEM;
107
108 get_dma_buf(dma_buf);
109 member->dma_buf = dma_buf;
110 member->handle = handle;
111
112 rb = NULL;
113 p = &prime_fpriv->dmabufs.rb_node;
114 while (*p) {
115 struct drm_prime_member *pos;
116
117 rb = *p;
118 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
119 if (dma_buf > pos->dma_buf)
120 p = &rb->rb_right;
121 else
122 p = &rb->rb_left;
123 }
124 rb_link_node(&member->dmabuf_rb, rb, p);
125 rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);
126
127 rb = NULL;
128 p = &prime_fpriv->handles.rb_node;
129 while (*p) {
130 struct drm_prime_member *pos;
131
132 rb = *p;
133 pos = rb_entry(rb, struct drm_prime_member, handle_rb);
134 if (handle > pos->handle)
135 p = &rb->rb_right;
136 else
137 p = &rb->rb_left;
138 }
139 rb_link_node(&member->handle_rb, rb, p);
140 rb_insert_color(&member->handle_rb, &prime_fpriv->handles);
141
142 return 0;
143}
144
145static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
146 uint32_t handle)
147{
148 struct rb_node *rb;
149
150 rb = prime_fpriv->handles.rb_node;
151 while (rb) {
152 struct drm_prime_member *member;
153
154 member = rb_entry(rb, struct drm_prime_member, handle_rb);
155 if (member->handle == handle)
156 return member->dma_buf;
157 else if (member->handle < handle)
158 rb = rb->rb_right;
159 else
160 rb = rb->rb_left;
161 }
162
163 return NULL;
164}
165
166static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
167 struct dma_buf *dma_buf,
168 uint32_t *handle)
169{
170 struct rb_node *rb;
171
172 rb = prime_fpriv->dmabufs.rb_node;
173 while (rb) {
174 struct drm_prime_member *member;
175
176 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
177 if (member->dma_buf == dma_buf) {
178 *handle = member->handle;
179 return 0;
180 } else if (member->dma_buf < dma_buf) {
181 rb = rb->rb_right;
182 } else {
183 rb = rb->rb_left;
184 }
185 }
186
187 return -ENOENT;
188}
189
190void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
191 struct dma_buf *dma_buf)
192{
193 struct rb_node *rb;
194
195 rb = prime_fpriv->dmabufs.rb_node;
196 while (rb) {
197 struct drm_prime_member *member;
198
199 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
200 if (member->dma_buf == dma_buf) {
201 rb_erase(&member->handle_rb, &prime_fpriv->handles);
202 rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);
203
204 dma_buf_put(dma_buf);
205 kfree(member);
206 return;
207 } else if (member->dma_buf < dma_buf) {
208 rb = rb->rb_right;
209 } else {
210 rb = rb->rb_left;
211 }
212 }
213}
214
/* Initialize the per-file PRIME state: the lock and both lookup trees. */
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}
221
/*
 * Tear down the per-file PRIME state set up by drm_prime_init_file_private().
 * By this point all members must already have been removed.
 */
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* assert that the lookup tree is empty, i.e. no members leaked */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}
227
228
229
230
231
232
233
234
235
236
237
238
239
/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv), both released again in drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf, or the ERR_PTR from dma_buf_export().
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	/* route mmap on the dmabuf file through the GEM object's address space */
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
257
258
259
260
261
262
263
264
265
266
/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release callback for dma-bufs exported via drm_gem_dmabuf_export().
 * Drops the references on the GEM object and the &drm_device that were taken
 * at export time.
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put(obj);

	/* matches the drm_dev_get() in drm_gem_dmabuf_export() */
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: drm_device to import into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * Imports the dma-buf behind @prime_fd, caching the resulting handle in the
 * per-file lookup trees so repeated imports of the same dma-buf return the
 * same handle. Takes file_priv->prime.lock for the whole operation and
 * dev->object_name_lock around the actual import.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	/* fast path: this dma-buf was imported before, reuse the handle */
	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		/* cache the export pointer and pin the dma-buf for it */
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/*
	 * NOTE(review): no explicit unlock of dev->object_name_lock follows on
	 * this path, so drm_gem_handle_create_tail() is presumably expected to
	 * drop it - confirm against its definition.
	 */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/*
	 * Caching the handle failed: delete the handle again. Note that
	 * prime.lock has already been dropped above.
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
362
363int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
364 struct drm_file *file_priv)
365{
366 struct drm_prime_handle *args = data;
367
368 if (!dev->driver->prime_fd_to_handle)
369 return -ENOSYS;
370
371 return dev->driver->prime_fd_to_handle(dev, file_priv,
372 args->fd, &args->handle);
373}
374
/*
 * Create a dma-buf for @obj and cache it in obj->dma_buf. Called from
 * drm_gem_prime_handle_to_fd() with dev->object_name_lock held, which
 * serializes against a racing gem close dropping the last handle.
 */
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* export failed: nothing was cached, just propagate */
		return dmabuf;
	}

	/*
	 * Cache the export pointer and take a reference for it. The
	 * handle_count check above guarantees a handle still exists, so
	 * the usual handle-close path will clean this cache up again.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}
408
409
410
411
412
413
414
415
416
417
418
419
420
421
/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * Exports the GEM object behind @handle as a dma-buf fd. The dma-buf is
 * cached twice - per-object in obj->dma_buf and per-file in the prime
 * lookup trees - so repeated exports hand back the same dma-buf.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	/* fast path: this handle was exported (or imported) before */
	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	/* object was already exported, reuse the cached dma-buf */
	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/*
		 * Normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref.
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the
	 * import list so we get the correct handle back. We must do this
	 * under the protection of dev->object_name_lock to ensure that a
	 * racing gem close ioctl doesn't miss removing this handle from
	 * the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache, since the
	 * newly created dma-buf is already linked in the global obj->dma_buf
	 * pointer, and that is invariant as long as a userspace gem handle
	 * exists. Closing the handle will clean out the cache anyway, so we
	 * don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
508
509int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
510 struct drm_file *file_priv)
511{
512 struct drm_prime_handle *args = data;
513
514 if (!dev->driver->prime_handle_to_fd)
515 return -ENOSYS;
516
517
518 if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
519 return -EINVAL;
520
521 return dev->driver->prime_handle_to_fd(dev, file_priv,
522 args->handle, args->flags, &args->fd);
523}
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574int drm_gem_map_attach(struct dma_buf *dma_buf,
575 struct dma_buf_attachment *attach)
576{
577 struct drm_gem_object *obj = dma_buf->priv;
578
579 return drm_gem_pin(obj);
580}
581EXPORT_SYMBOL(drm_gem_map_attach);
582
583
584
585
586
587
588
589
590
591
592void drm_gem_map_detach(struct dma_buf *dma_buf,
593 struct dma_buf_attachment *attach)
594{
595 struct drm_gem_object *obj = dma_buf->priv;
596
597 drm_gem_unpin(obj);
598}
599EXPORT_SYMBOL(drm_gem_map_detach);
600
601
602
603
604
605
606
607
608
609
610
611
612
613struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
614 enum dma_data_direction dir)
615{
616 struct drm_gem_object *obj = attach->dmabuf->priv;
617 struct sg_table *sgt;
618 int ret;
619
620 if (WARN_ON(dir == DMA_NONE))
621 return ERR_PTR(-EINVAL);
622
623 if (WARN_ON(!obj->funcs->get_sg_table))
624 return ERR_PTR(-ENOSYS);
625
626 sgt = obj->funcs->get_sg_table(obj);
627 if (IS_ERR(sgt))
628 return sgt;
629
630 ret = dma_map_sgtable(attach->dev, sgt, dir,
631 DMA_ATTR_SKIP_CPU_SYNC);
632 if (ret) {
633 sg_free_table(sgt);
634 kfree(sgt);
635 sgt = ERR_PTR(ret);
636 }
637
638 return sgt;
639}
640EXPORT_SYMBOL(drm_gem_map_dma_buf);
641
642
643
644
645
646
647
648
649
650void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
651 struct sg_table *sgt,
652 enum dma_data_direction dir)
653{
654 if (!sgt)
655 return;
656
657 dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
658 sg_free_table(sgt);
659 kfree(sgt);
660}
661EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
662
663
664
665
666
667
668
669
670
671
672
673
674int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
675{
676 struct drm_gem_object *obj = dma_buf->priv;
677
678 return drm_gem_vmap(obj, map);
679}
680EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
681
682
683
684
685
686
687
688
689
690void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
691{
692 struct drm_gem_object *obj = dma_buf->priv;
693
694 drm_gem_vunmap(obj, map);
695}
696EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
697
698
699
700
701
702
703
704
705
706
707
708
709
/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: virtual address range
 *
 * Sets up a userspace mapping for a PRIME-exported buffer. Adds the fake GEM
 * mmap offset to vma->vm_pgoff and then either calls the object's own mmap
 * callback or falls back to routing the mapping through the driver's regular
 * fops->mmap path. Drivers can use this as their
 * &drm_driver.gem_prime_mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* add the fake mmap offset of the GEM object */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			return ret;
		/* the reference is dropped when the vma is closed */
		vma->vm_private_data = obj;
		drm_gem_object_get(obj);
		return 0;
	}

	/*
	 * Fallback: fake up a struct file + drm_file pair so the driver's
	 * regular fops->mmap path can be reused for this mapping.
	 */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * NOTE(review): only private_data and minor are populated, so this
	 * presumably assumes the driver's mmap path touches nothing else of
	 * the file/drm_file - confirm against the drivers using this helper.
	 */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	/* temporarily authorize the fake drm_file for this vma node */
	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
771{
772 struct drm_gem_object *obj = dma_buf->priv;
773 struct drm_device *dev = obj->dev;
774
775 if (!dev->driver->gem_prime_mmap)
776 return -ENOSYS;
777
778 return dev->driver->gem_prime_mmap(obj, vma);
779}
780EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
781
/*
 * dma-buf exporter ops used for buffers exported via drm_gem_prime_export().
 * Every callback forwards to the matching drm_gem_* helper above; .release
 * also drops the GEM object and device references taken at export time.
 */
static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};
793
794
795
796
797
798
799
800
801
802
803
804
805
806struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
807 struct page **pages, unsigned int nr_pages)
808{
809 struct sg_table *sg;
810 size_t max_segment = 0;
811 int err;
812
813 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
814 if (!sg)
815 return ERR_PTR(-ENOMEM);
816
817 if (dev)
818 max_segment = dma_max_mapping_size(dev->dev);
819 if (max_segment == 0)
820 max_segment = UINT_MAX;
821 err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
822 nr_pages << PAGE_SHIFT,
823 max_segment, GFP_KERNEL);
824 if (err) {
825 kfree(sg);
826 sg = ERR_PTR(err);
827 }
828 return sg;
829}
830EXPORT_SYMBOL(drm_prime_pages_to_sg);
831
832
833
834
835
836
837
838
839
840
841
842unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
843{
844 dma_addr_t expected = sg_dma_address(sgt->sgl);
845 struct scatterlist *sg;
846 unsigned long size = 0;
847 int i;
848
849 for_each_sgtable_dma_sg(sgt, sg, i) {
850 unsigned int len = sg_dma_len(sg);
851
852 if (!len)
853 break;
854 if (sg_dma_address(sg) != expected)
855 break;
856 expected += len;
857 size += len;
858 }
859 return size;
860}
861EXPORT_SYMBOL(drm_prime_get_contiguous_size);
862
863
864
865
866
867
868
869
870
871
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * Default dma-buf export implementation for GEM drivers: fills in a
 * &dma_buf_export_info backed by drm_gem_prime_dmabuf_ops and hands it to
 * drm_gem_dmabuf_export(). Used as the fallback when the object provides no
 * &drm_gem_object_funcs.export of its own (see export_and_register_object()).
 */
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
				     int flags)
{
	struct drm_device *dev = obj->dev;
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* exporter name for debugfs */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv, /* share the object's reservation object */
	};

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to attach the dma-buf to
 *
 * Core of drm_gem_prime_import(); lets drivers attach via a different
 * struct device than &drm_device.dev. Requires the driver to provide a
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns the imported GEM object or an ERR_PTR on failure.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* hold a dma-buf reference for the lifetime of the attachment */
	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	/* recorded so drm_prime_gem_destroy() can undo the attach later */
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);
960
961
962
963
964
965
966
967
968
969
970
971
972
973
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * Simply calls drm_gem_prime_import_dev() with the device's main struct
 * device as the attachment target. This is the default used by
 * drm_gem_prime_fd_to_handle() when the driver sets no import callback.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);
980
981
982
983
984
985
986
987
988
989
990
991
992
993int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
994 struct page **pages,
995 int max_entries)
996{
997 struct sg_page_iter page_iter;
998 struct page **p = pages;
999
1000 for_each_sgtable_page(sgt, &page_iter, 0) {
1001 if (WARN_ON(p - pages >= max_entries))
1002 return -1;
1003 *p++ = sg_page_iter_page(&page_iter);
1004 }
1005 return 0;
1006}
1007EXPORT_SYMBOL(drm_prime_sg_to_page_array);
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
1021 int max_entries)
1022{
1023 struct sg_dma_page_iter dma_iter;
1024 dma_addr_t *a = addrs;
1025
1026 for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
1027 if (WARN_ON(a - addrs >= max_entries))
1028 return -1;
1029 *a++ = sg_page_iter_dma_address(&dma_iter);
1030 }
1031 return 0;
1032}
1033EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
1034
1035
1036
1037
1038
1039
1040
1041
1042
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time (may be NULL)
 *
 * Unmaps, detaches and releases the dma-buf of an imported object.
 * NOTE(review): dereferences obj->import_attach unconditionally, so callers
 * must only use this for objects that were actually imported - confirm.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	/* save the dma-buf pointer before the detach invalidates the attachment */
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference taken at import time (drm_gem_prime_import_dev()) */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
1057