/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing it. If this BUG_ON() fires, someone dropped
	 * their reference to the dma-buf while a poll callback on one of
	 * its fences was still pending.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented in the kernel with &struct dma_fence) can be
 * attached to a &dma_buf. The glue for that, and a few related things, is
 * provided in the &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using
 * poll() and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state
 *   of the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state
 *   of all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e.
 * the DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;
	default:
		return -ENOTTY;
	}
}
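
/*
 * Illustrative sketch (not part of this file): userspace is expected to
 * bracket CPU access through an mmap of the dma-buf fd with the sync ioctl
 * handled above. "dmabuf_fd" is a hypothetical fd obtained via dma_buf_fd():
 *
 *	struct dma_buf_sync sync = { 0 };
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	...CPU reads/writes through the mapping...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */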

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_buf_ioctl,
#endif
};

/*
 * is_dma_buf_file - check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for &dma_buf_ops. On failure to
 * allocate the dma_buf object, or on missing mandatory ops, it returns a
 * negative error wrapped in a pointer.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release
		    || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
				  exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
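
/*
 * Illustrative sketch (not part of this file): an exporter typically fills
 * in a dma_buf_export_info and hands the resulting buffer to userspace as
 * an fd. "my_dmabuf_ops" and "my_buffer" are hypothetical driver names:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 *	return fd;
 */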

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release in
 * turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Attachments must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment().
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
					   direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
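
/*
 * Illustrative sketch (not part of this file): a typical importer lifecycle,
 * from fd to a device mapping and back again. "importer_fd" and "my_dev" are
 * hypothetical:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(importer_fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	if (IS_ERR(attach))
 *		goto put_buf;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		goto detach;
 *
 *	...program the device using sgt...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 * detach:
 *	dma_buf_detach(dmabuf, attach);
 * put_buf:
 *	dma_buf_put(dmabuf);
 */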

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer
 * object: the kernel might need to fall back to CPU copies for corner
 * cases, and userspace might need to mmap the buffer, e.g. for software
 * rendering or uploads.
 *
 * Kernel CPU access must be bracketed with dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access(), which give the exporter a chance to flush or
 * invalidate caches and to pin the backing storage. In between, the buffer
 * can be accessed page by page with dma_buf_kmap()/dma_buf_kunmap(), or
 * through a contiguous kernel virtual mapping with
 * dma_buf_vmap()/dma_buf_vunmap().
 *
 * Userspace CPU access is supported by mmap()ing the dma-buf fd directly,
 * or through dma_buf_mmap() for drivers that forward their own mmap offset
 * to the dma-buf. Such access must be bracketed with the DMA_BUF_IOCTL_SYNC
 * ioctl so the exporter gets the same flush and invalidate opportunities.
 */
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit fences attached to the buffer. */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access (read and/or write).
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls
 * is it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access (read and/or write).
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
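
/*
 * Illustrative sketch (not part of this file): kernel CPU access must be
 * bracketed by the two calls above, e.g. for reading the first page:
 *
 *	void *vaddr;
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);
 *	if (vaddr) {
 *		...read through vaddr...
 *		dma_buf_kunmap(dmabuf, 0, vaddr);
 *	}
 *
 *	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */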

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space.
 * The same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * Returns the kernel virtual address of the page, or NULL if the exporter
 * does not provide a &dma_buf_ops.map hook.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	if (!dmabuf->ops->map)
		return NULL;
	return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

/**
 * dma_buf_mmap - Sets up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of
 * the dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
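
/*
 * Illustrative sketch (not part of this file): an importing driver can
 * forward its own mmap file operation to the dma-buf; "my_object" is a
 * hypothetical driver structure holding a dmabuf pointer:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_object *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */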

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
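
/*
 * Illustrative sketch (not part of this file): vmap and vunmap must be
 * paired, typically inside a begin/end_cpu_access bracket:
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		memset(vaddr, 0, dmabuf->size);
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	}
 */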

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct reservation_object *robj;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
		   "size", "flags", "mode", "count");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);
		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name);

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			/* drop the reference taken by dma_fence_get_rcu() */
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);