/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * A dma-buf wraps exporter-provided memory in a struct dma_buf backed by an
 * anonymous file, so that other devices (importers) can attach to it, map it
 * for DMA, and synchronize CPU and device access to it.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver which polls.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/*
	 * Only support discovering the end of the buffer: SEEK_END with a
	 * zero offset returns the size, SEEK_SET(0) rewinds to the start,
	 * and everything else is rejected.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access,
 * implicit fences (represented in the kernel with &struct dma_fence) can be
 * attached to a &dma_buf. The glue for that is provided by the
 * &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using
 * poll() and related system calls:
 *
 * - Checking for POLLIN, i.e. read access, queries the state of the most
 *   recent write or exclusive fence.
 *
 * - Checking for POLLOUT, i.e. write access, queries the state of all
 *   attached fences, shared and exclusive.
 *
 * Note that this only signals completion of the respective fences, i.e. that
 * the DMA transfers are done. Cache flushing and other preparations needed
 * before CPU access still have to happen separately.
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

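/*
 * Illustrative sketch (assumption: userspace code, not part of this file):
 * an importer can wait for the implicit fences tracked above with poll() on
 * the dma-buf fd. POLLIN waits only for the exclusive fence, POLLOUT for
 * all fences. dmabuf_fd is a hypothetical name for an fd from dma_buf_fd().
 *
 *	struct pollfd pfd = {
 *		.fd	= dmabuf_fd,
 *		.events	= POLLOUT,	// block until safe to write
 *	};
 *
 *	poll(&pfd, 1, -1);
 */
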
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;
	default:
		return -ENOTTY;
	}
}
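
/*
 * Illustrative sketch (assumption: userspace code, not part of this file):
 * CPU access through an mmap()ed dma-buf is bracketed with the SYNC ioctl
 * handled above, which maps onto begin/end_cpu_access in the kernel.
 * dmabuf_fd is a hypothetical fd obtained from the exporter.
 *
 *	struct dma_buf_sync sync = { 0 };
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// begin CPU access
 *
 *	// ... CPU reads/writes through the mmap()ed pointer ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// end CPU access
 */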

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_buf_ioctl,
#endif
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release
			  || !exp_info->ops->map_atomic
			  || !exp_info->ops->map
			  || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
				  exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
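
/*
 * Illustrative sketch (assumption: exporter-side driver code; the my_*
 * names are hypothetical): most exporters fill in a struct
 * dma_buf_export_info via the DEFINE_DMA_BUF_EXPORT_INFO() helper and then
 * hand the buffer to userspace as an fd with dma_buf_fd() below.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;		// hypothetical exporter ops
 *	exp_info.size = my_buffer->size;	// hypothetical backing object
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */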

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase the refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release in
 * turn, and frees the memory allocated for the dmabuf when it was exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns a struct dma_buf_attachment pointer for this attachment.
 * Attachments must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to a newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed by this call.
 *
 * Cleans up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment,
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns an sg_table containing the scatterlist, or ERR_PTR on error.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment().
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps the buffer and might deallocate the
 * associated scatterlist. Is a wrapper for unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
					   direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
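
/*
 * Illustrative sketch (assumption: importer-side driver code; my_fd and
 * my_dev are hypothetical): the lookup/attach/map calls above pair up with
 * their unmap/detach/put counterparts in reverse order.
 *
 *	struct dma_buf *dmabuf = dma_buf_get(my_fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device using the sg_table ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */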

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer
 * object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around before sending
 *   it away. Coherency is handled by bracketing any transactions with calls
 *   to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access(). To support
 *   dma_buf objects residing in highmem, access is page-based: a page is
 *   mapped with dma_buf_kmap() (or the atomic variant), which returns a
 *   kernel virtual address, and unmapped again with dma_buf_kunmap().
 *
 * - For importers that need full compatibility with existing userspace
 *   interfaces which already support mmap'ing buffers; this is handled by
 *   dma_buf_mmap(), which redirects the vma to the dma-buf file.
 *
 * - Direct userspace CPU access through mmap() on the dma-buf fd itself,
 *   with coherency managed from userspace via the DMA_BUF_IOCTL_SYNC ioctl
 *   handled in dma_buf_ioctl() above.
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit fences */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls
 * is it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed; any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->map_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap_atomic)
		dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space.
 * The same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed; any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
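
/*
 * Illustrative sketch (assumption: kernel-side importer code; error handling
 * abbreviated): CPU access in the kernel is bracketed by begin/end and done
 * page by page via the kmap helpers above.
 *
 *	unsigned long i;
 *
 *	if (dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE))
 *		return -EIO;	// hypothetical error handling
 *
 *	for (i = 0; i < dmabuf->size >> PAGE_SHIFT; i++) {
 *		void *vaddr = dma_buf_kmap(dmabuf, i);
 *
 *		// ... read at most PAGE_SIZE bytes at vaddr ...
 *		dma_buf_kunmap(dmabuf, i, vaddr);
 *	}
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */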

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of
 * the dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

/**
 * dma_buf_vmap - Create a virtual mapping for the buffer object into kernel
 * address space. The same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
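
/*
 * Illustrative sketch (assumption: importer code): every dma_buf_vmap() must
 * be paired with a dma_buf_vunmap(); the vmapping_counter above makes nested
 * pairs safe by reusing the existing mapping.
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;	// hypothetical fallback, e.g. a kmap loop
 *
 *	// ... access the whole buffer linearly at vaddr ...
 *
 *	dma_buf_vunmap(dmabuf, vaddr);
 */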

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct reservation_object *robj;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
		   "size", "flags", "mode", "count");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name);

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_debug_show, NULL);
}

static const struct file_operations dma_buf_debug_fops = {
	.open		= dma_buf_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);