/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver which should make sure this condition is never hit.
	 *
	 * Moreover, dma-buf is the only holder of poll's refcount, which
	 * means a poll wakeup is a NOP if no one is waiting.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

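/*
 * Illustrative sketch (not part of this file): since only offset 0 with
 * SEEK_SET or SEEK_END is accepted, userspace can discover the size of a
 * dma-buf it received ("dmabuf_fd" is a hypothetical descriptor name):
 *
 *	off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *	lseek(dmabuf_fd, 0, SEEK_SET);
 */
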
static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!fence_get_rcu(fence_excl)) {
				/* force a recalculation */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!fence_add_callback(fence_excl, &dcb->cb,
						       dma_buf_poll_cb)) {
				events &= ~pevents;
				fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			if (!fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a
				 * recalculation
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!fence_add_callback(fence, &dcb->cb,
						dma_buf_poll_cb)) {
				fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

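/*
 * Illustrative sketch (not part of this file): userspace can wait for the
 * implicit fences on a dma-buf by polling its file descriptor; POLLIN means
 * reading is safe, POLLOUT means writing is safe:
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 */
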
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;
	default:
		return -ENOTTY;
	}
}

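/*
 * Illustrative sketch (not part of this file): userspace bracketing CPU
 * access to an mmap()ed dma-buf with the SYNC ioctl:
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ,
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU reads of the mapping ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
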
static const struct file_operations dma_buf_fops = {
	.release = dma_buf_release,
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release
		    || !exp_info->ops->kmap_atomic
		    || !exp_info->ops->kmap
		    || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
				  exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
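
/*
 * Illustrative sketch (not from this file): a typical exporter fills a
 * struct dma_buf_export_info, exports the buffer and hands a file descriptor
 * to userspace. "my_buf" and "my_dmabuf_ops" are hypothetical exporter-side
 * names.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 */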

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
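
/*
 * Illustrative sketch (not from this file): an importer looking up a dma-buf
 * handed in from userspace and dropping its reference when done:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	// ... attach, map and use the buffer ...
 *	dma_buf_put(dmabuf);
 */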

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
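
/*
 * Illustrative sketch (not from this file): an importing driver attaches its
 * device once at setup time and detaches on teardown ("my_dev" is a
 * hypothetical struct device pointer):
 *
 *	struct dma_buf_attachment *attach;
 *
 *	attach = dma_buf_attach(dmabuf, my_dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *	// ... map/unmap the attachment as needed ...
 *	dma_buf_detach(dmabuf, attach);
 */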

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
					   direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
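
/*
 * Illustrative sketch (not from this file): mapping an existing attachment to
 * obtain a device-addressable scatterlist around a DMA transfer:
 *
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	// ... program the device with sgt and run the DMA ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 */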

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit fences attached to the buffer. */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
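
/*
 * Illustrative sketch (not from this file): bracketing an in-kernel CPU read
 * of the buffer contents:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... kmap/vmap the buffer and read it ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */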

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space.
 * The same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
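
/*
 * Illustrative sketch (not from this file): accessing one page of a dma-buf
 * through the non-atomic kmap interface, inside a cpu-access section:
 *
 *	void *vaddr = dma_buf_kmap(dmabuf, 0);	// map page 0
 *
 *	if (vaddr) {
 *		// ... read or write up to PAGE_SIZE bytes at vaddr ...
 *		dma_buf_kunmap(dmabuf, 0, vaddr);
 *	}
 */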

/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
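
/*
 * Illustrative sketch (not from this file): a driver that exposes a
 * dma-buf-backed object through its own mmap handler can forward to
 * dma_buf_mmap() ("my_obj" is a hypothetical driver object holding the
 * dma_buf pointer):
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */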

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
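
/*
 * Illustrative sketch (not from this file): obtaining a linear kernel mapping
 * of the whole buffer when page-by-page kmap access is not practical:
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	// ... access up to dmabuf->size bytes at vaddr ...
 *	dma_buf_vunmap(dmabuf, vaddr);
 */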

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_puts(s, "\t");

			seq_printf(s, "%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_debug_show, NULL);
}

static const struct file_operations dma_buf_debug_fops = {
	.open = dma_buf_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);