/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <drm/drm_prime.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>

#include "drm_internal.h"

/*
 * DMA-BUF/GEM object references and lifetime overview:
 *
 * On export the dma_buf holds a reference to the exporting GEM object. It
 * takes this reference in the handle_to_fd ioctl, when it first calls
 * .gem_prime_export and stores the exporting GEM object in the dma_buf priv.
 * This reference needs to be released when the final reference to the
 * &dma_buf itself is dropped and its &dma_buf_ops.release function is called.
 * For GEM-based drivers, the dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * On import the importing GEM object holds a reference to the dma_buf (which
 * in turn holds a reference to the exporting GEM object). It takes that
 * reference in the fd_to_handle ioctl: it calls dma_buf_get(), creates an
 * attachment and stores the attachment in the GEM object. When the imported
 * object is later destroyed, the attachment is removed and the reference to
 * the dma_buf is dropped.
 *
 * When all references to the &dma_buf are dropped, i.e. when userspace has
 * closed both handles to the imported GEM object (through the FD_TO_HANDLE
 * ioctl) and closed the file descriptor of the exported dma-buf (from the
 * HANDLE_TO_FD ioctl), and all kernel-internal references are released, the
 * dma_buf destructor runs and calls the release callback, which drops the
 * references taken at export time.
 *
 * Thus the chain of references always flows in one direction, avoiding
 * loops: importing GEM object -> dma_buf -> exporting GEM object.
 *
 * Self-importing: if userspace is using PRIME as a replacement for flink,
 * it will issue a fd_to_handle request for a GEM object that it created
 * itself. Drivers should detect this situation and return back the GEM
 * object from the dma-buf private. PRIME does this automatically for
 * drivers that use the drm_gem_prime_{import,export} helpers.
 */

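/*
 * One entry per cached (dma_buf, handle) pair. The same member is linked
 * into two rbtrees so it can be found either by dma_buf or by GEM handle.
 */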
struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};

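/* Per-attachment state: the cached mapping and the direction it was made in. */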
struct drm_prime_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

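/*
 * Insert a dma_buf/handle pair into both lookup trees. Takes a reference on
 * @dma_buf which is dropped again by drm_prime_remove_buf_handle_locked().
 */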
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}

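/*
 * Look up the cached dma_buf for @handle in the handle-indexed tree.
 * Returns the dma_buf without taking a reference, or NULL if not cached.
 */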
static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}

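/*
 * Reverse lookup: find the GEM handle under which @dma_buf is cached.
 * Returns 0 and stores the handle in @handle, or -ENOENT if not cached.
 */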
static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}

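/*
 * &dma_buf_ops.attach implementation: allocate per-attachment state and pin
 * the object's backing storage via the driver's optional gem_prime_pin hook.
 */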
static int drm_gem_map_attach(struct dma_buf *dma_buf,
			      struct device *target_dev,
			      struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
	if (!prime_attach)
		return -ENOMEM;

	prime_attach->dir = DMA_NONE;
	attach->priv = prime_attach;

	if (!dev->driver->gem_prime_pin)
		return 0;

	return dev->driver->gem_prime_pin(obj);
}

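/*
 * &dma_buf_ops.detach implementation: unpin the object and undo any mapping
 * cached by drm_gem_map_dma_buf().
 */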
static void drm_gem_map_detach(struct dma_buf *dma_buf,
			       struct dma_buf_attachment *attach)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	struct sg_table *sgt;

	if (dev->driver->gem_prime_unpin)
		dev->driver->gem_prime_unpin(obj);

	if (!prime_attach)
		return;

	sgt = prime_attach->sgt;
	if (sgt) {
		if (prime_attach->dir != DMA_NONE)
			dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				     prime_attach->dir);
		sg_free_table(sgt);
	}

	kfree(sgt);
	kfree(prime_attach);
	attach->priv = NULL;
}

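/*
 * Remove the cache entry for @dma_buf and drop the reference taken by
 * drm_prime_add_buf_handle(). The caller must hold prime_fpriv->lock.
 */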
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}

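/*
 * &dma_buf_ops.map_dma_buf implementation: map the object's scatter/gather
 * table into the importer's address space. The result is cached in the
 * attachment, so only the first map per direction does real work.
 */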
static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
					    enum dma_data_direction dir)
{
	struct drm_prime_attachment *prime_attach = attach->priv;
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;

	if (WARN_ON(dir == DMA_NONE || !prime_attach))
		return ERR_PTR(-EINVAL);

	/* return the cached mapping when possible */
	if (prime_attach->dir == dir)
		return prime_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (WARN_ON(prime_attach->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	sgt = obj->dev->driver->gem_prime_get_sg_table(obj);

	if (!IS_ERR(sgt)) {
		if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			prime_attach->sgt = sgt;
			prime_attach->dir = dir;
		}
	}

	return sgt;
}

static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
				  struct sg_table *sgt,
				  enum dma_data_direction dir)
{
	/* nothing to be done here: unmapping is deferred to detach time */
}

/**
 * drm_gem_dmabuf_export - dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_ref(dev);
	drm_gem_object_get(exp_info->priv);

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);

/**
 * drm_gem_dmabuf_release - dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their dma_buf ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put_unlocked(obj);

	drm_dev_unref(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	return dev->driver->gem_prime_vmap(obj);
}

static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	dev->driver->gem_prime_vunmap(obj, vaddr);
}

/*
 * The kmap paths are intentionally stubbed out: the PRIME helpers do not
 * support page-wise CPU access, so these just report failure or do nothing.
 */
static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
					 unsigned long page_num, void *addr)
{

}

static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
				 unsigned long page_num)
{
	return NULL;
}

static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
				  unsigned long page_num, void *addr)
{

}

static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
			       struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.map = drm_gem_dmabuf_kmap,
	.map_atomic = drm_gem_dmabuf_kmap_atomic,
	.unmap = drm_gem_dmabuf_kunmap,
	.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_driver.gem_prime_export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These
 * functions implement dma-buf support in terms of a few lower-level driver
 * callbacks:
 *
 * Export callbacks:
 *
 *  * @gem_prime_pin (optional): prepare a GEM object for exporting
 *  * @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
 *  * @gem_prime_vmap: vmap a buffer exported by your driver
 *  * @gem_prime_vunmap: vunmap a buffer exported by your driver
 *  * @gem_prime_mmap (optional): mmap a buffer exported by your driver
 *
 * Import callback:
 *
 *  * @gem_prime_import_sg_table: produce a GEM object from another driver's
 *    scatter/gather table
 */

/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @dev: drm_device to export from
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
 * This is the implementation of the &drm_driver.gem_prime_export callback
 * for GEM drivers using the PRIME helpers.
 */
struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj,
				     int flags)
{
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME,
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
	};

	if (dev->driver->gem_prime_res_obj)
		exp_info.resv = dev->driver->gem_prime_res_obj(obj);

	return drm_gem_dmabuf_export(dev, &exp_info);
}
EXPORT_SYMBOL(drm_gem_prime_export);
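
/*
 * For illustration only: a minimal sketch of how a driver wires the PRIME
 * helpers into its &drm_driver. All "foo_*" callbacks are hypothetical
 * driver functions, not part of this file:
 *
 *	static struct drm_driver foo_driver = {
 *		.driver_features	   = DRIVER_GEM | DRIVER_PRIME,
 *		.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
 *		.gem_prime_export	   = drm_gem_prime_export,
 *		.gem_prime_import	   = drm_gem_prime_import,
 *		.gem_prime_get_sg_table	   = foo_gem_prime_get_sg_table,
 *		.gem_prime_import_sg_table = foo_gem_prime_import_sg_table,
 *		.gem_prime_vmap		   = foo_gem_prime_vmap,
 *		.gem_prime_vunmap	   = foo_gem_prime_vunmap,
 *		.gem_prime_mmap		   = foo_gem_prime_mmap,
 *	};
 */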
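/*
 * Export @obj through the driver's gem_prime_export hook and publish the
 * result in obj->dma_buf so that repeated exports reuse the same dma-buf.
 */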
static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual exporting from GEM object to a dma-buf is done through the
 * &drm_driver.gem_prime_export driver callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't miss to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put_unlocked(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

/**
 * drm_gem_prime_import_dev - core implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 * @attach_dev: struct device to dma_buf attach
 *
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev
 * for attaching via dma_buf.
 */
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem
			 * increases the refcount on the gem itself instead of
			 * the f_count of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);

	attach = dma_buf_attach(dma_buf, attach_dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;

	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_gem_prime_import_dev);

/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the &drm_driver.gem_prime_import callback
 * for GEM drivers using the PRIME helpers.
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
EXPORT_SYMBOL(drm_gem_prime_import);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to import the buffer into
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which must be used mandatorily by GEM
 * drivers to ensure correct lifetime management of the underlying GEM object.
 * The actual importing of a GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	obj = dev->driver->gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put_unlocked(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

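/* Userspace entry point for the DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl. */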
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}

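/* Userspace entry point for the DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl. */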
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!drm_core_check_feature(dev, DRIVER_PRIME))
		return -EINVAL;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages;
 * the driver is responsible for mapping the pages into the
 * importer's address space for use with dma-buf itself.
 */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg = NULL;
	int ret;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
				nr_pages << PAGE_SHIFT, GFP_KERNEL);
	if (ret)
		goto out;

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
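
/*
 * For illustration only: a driver whose buffers are plain page arrays could
 * implement its &drm_driver.gem_prime_get_sg_table hook as little more than
 * a wrapper around this helper. "struct foo_bo" and foo_bo() are
 * hypothetical:
 *
 *	static struct sg_table *foo_gem_prime_get_sg_table(struct drm_gem_object *obj)
 *	{
 *		struct foo_bo *bo = foo_bo(obj);
 *
 *		return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
 *	}
 */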

/**
 * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array
 * @sgt: scatter-gather table to convert
 * @pages: array of page pointers to store the pages in
 * @addrs: optional array to store the dma bus address of each page
 * @max_pages: size of both the passed-in arrays
 *
 * Exports an sg table into an array of pages and addresses. This is currently
 * required by the TTM driver in order to do correct fault handling.
 */
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
				     dma_addr_t *addrs, int max_pages)
{
	unsigned count;
	struct scatterlist *sg;
	struct page *page;
	u32 len;
	int pg_index;
	dma_addr_t addr;

	pg_index = 0;
	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
		len = sg->length;
		page = sg_page(sg);
		addr = sg_dma_address(sg);

		while (len > 0) {
			if (WARN_ON(pg_index >= max_pages))
				return -1;
			pages[pg_index] = page;
			if (addrs)
				addrs[pg_index] = addr;

			page++;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
			pg_index++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
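
/*
 * For illustration only: an importer that needs a flat page/address view of
 * the table (TTM-style fault handling) might call this helper from its
 * &drm_driver.gem_prime_import_sg_table hook. The "bo" fields here are
 * hypothetical:
 *
 *	npages = bo->size >> PAGE_SHIFT;
 *	ret = drm_prime_sg_to_page_addr_arrays(sgt, bo->pages,
 *					       bo->dma_addrs, npages);
 *	if (ret)
 *		goto err_cleanup;
 */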

/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
 * drm_gem_prime_import() to import dma-bufs.
 */
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
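/* Set up the per-file PRIME state when a DRM file is opened. */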
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}

void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}