// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	mutex_unlock(&dmabuf->lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signalled
	 * before releasing the dma-buf. This is the responsibility of each
	 * driver: an active poll callback here means a fence callback could
	 * still fire after the wait queue below has been freed.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	/*
	 * If the exporter did not supply a reservation object, one was
	 * allocated together with the dma_buf and embedded right after it.
	 */
	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer, but also allow
	 * SEEK_SET so the idiomatic size query pattern keeps working:
	 * lseek(fd, 0, SEEK_END) followed by lseek(fd, 0, SEEK_SET).
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
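
/*
 * Usage sketch (illustrative, userspace side): because only SEEK_END(0)
 * and SEEK_SET(0) are accepted, the size of a dma-buf can be queried with
 * the usual llseek pattern:
 *
 *	off_t size = lseek(fd, 0, SEEK_END);	// returns dmabuf->size
 *	lseek(fd, 0, SEEK_SET);			// rewind; returns 0
 */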

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct
 * dma_fence) can be attached to a &dma_buf. The glue for that and a few
 * related things are provided in the &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using
 * poll() and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state
 *   of the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the
 *   state of all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e.
 * the DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
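
/*
 * Usage sketch (illustrative, userspace side): to block until the buffer
 * is idle, i.e. all shared and exclusive fences have signalled:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
 *	poll(&pfd, 1, -1);
 */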

/**
 * dma_buf_set_name - set a name on a dma_buf for bookkeeping/debugging
 * @dmabuf:	[in]	buffer to name
 * @buf:	[in]	userspace pointer to the new name
 *
 * The name may only be set while the buffer is not attached to any device,
 * and is refused with -EBUSY otherwise.
 *
 * Returns 0 on success, negative error code on failure.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&dmabuf->lock);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}
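
/*
 * Usage sketch (illustrative, userspace side): CPU access through an
 * mmap()ed dma-buf must be bracketed with DMA_BUF_IOCTL_SYNC so the
 * exporter can manage cache coherency:
 *
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... write through the mmap()ed pointer ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
 */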

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	mutex_unlock(&dmabuf->lock);
}

static const struct file_operations dma_buf_fops = {
	.release = dma_buf_release,
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = dma_buf_ioctl,
#endif
	.show_fdinfo = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of
 * operations is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a
 *    private buffer object into a &dma_buf. It then exports that &dma_buf
 *    to userspace as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this
 *    buffer to share with: the fd is converted back to a &dma_buf using
 *    dma_buf_get(), and the importer registers itself with
 *    dma_buf_attach().
 *
 * 3. Once attached, the device can access the buffer by calling
 *    dma_buf_map_attachment() and, when done, releases the mapping again
 *    with dma_buf_unmap_attachment().
 *
 * 4. Importers clean up with dma_buf_detach() and drop their reference
 *    with dma_buf_put().
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in
 * debugging.
 *
 * @exp_info:	[in]	holds all the export-related information provided
 *			by the exporter. See &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On failure to
 * allocate the dma_buf object, or on missing mandatory ops, returns a
 * negative error wrapped in a pointer.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
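
/*
 * Usage sketch (illustrative; "my_ops" and "my_buffer" are hypothetical
 * exporter-side names): wrapping a private buffer object and handing it to
 * userspace as a file descriptor:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_ops;		// must supply map/unmap/release
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 */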

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list;
 * optionally calls attach() of dma_buf_ops to allow device-specific
 * attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns:
 *
 * A pointer to the newly created &dma_buf_attachment on success, or a
 * negative error code wrapped into a pointer on failure. Attachments must
 * be cleaned up by calling dma_buf_detach().
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);

	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments
 * list; optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt)
		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist to be returned; returns
 * ERR_PTR on error. A mapping must be unmapped by using
 * dma_buf_unmap_attachment().
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by
 * dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (attach->sgt == sg_table)
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
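
/*
 * Usage sketch (illustrative; "dev" is the importing device): the full
 * importer-side sequence for a dma-buf fd received from userspace, with
 * error handling elided for brevity:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */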

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma-buf
 * object:
 *
 * - Fallback operations in the kernel, for example when a device is
 *   connected over USB and the kernel needs to shuffle the data around
 *   first before sending it away. Such access is bracketed with
 *   dma_buf_begin_cpu_access() and dma_buf_end_cpu_access(); kernel
 *   mappings are then created with dma_buf_kmap() per page, or with
 *   dma_buf_vmap() for the entire buffer.
 *
 * - Userspace access, where the buffer is mmap()ed through the dma-buf fd
 *   (handled by dma_buf_mmap_internal() above, which calls into the
 *   exporter's &dma_buf_ops.mmap). Userspace must bracket its CPU access
 *   with the DMA_BUF_IOCTL_SYNC ioctl so that the exporter can keep
 *   caches coherent.
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit fencing */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf
 * from the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both
 * calls is it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from
 * the cpu in the kernel context. Calls end_cpu_access to allow
 * exporter-specific actions. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
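
/*
 * Usage sketch (illustrative): kernel CPU access must be bracketed by the
 * two calls above, e.g. for a CPU read-back of device-written data:
 *
 *	int ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... access the buffer via dma_buf_kmap()/dma_buf_vmap() ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */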

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap() and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed; any necessary preparations that might
 * fail need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	if (!dmabuf->ops->map)
		return NULL;
	return dmabuf->ops->map(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->unmap)
		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);

/**
 * dma_buf_mmap - Sets up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within
 *			the dma-buf buffer.
 *
 * This function adjusts the passed-in vma so that it points at the file of
 * the dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap
 * function to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
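
/*
 * Usage sketch (illustrative; "my_obj" is a hypothetical driver object
 * holding an imported dma-buf): a driver can let userspace mmap an
 * imported buffer through its own file by forwarding the vma:
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */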

/**
 * dma_buf_vmap - Create a virtual mapping for the buffer object in kernel
 * address space. The same restrictions as for vmap() and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space. These
 * calls are optional in drivers. The intended use is for mapping objects
 * linear in kernel space for high-use objects; please attempt to use
 * kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
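
/*
 * Usage note (illustrative): vmaps are refcounted, so nested calls are
 * cheap and hand back the cached pointer:
 *
 *	void *a = dma_buf_vmap(dmabuf);	// creates the mapping
 *	void *b = dma_buf_vmap(dmabuf);	// a == b, counter is now 2
 *	dma_buf_vunmap(dmabuf, b);	// counter 1, mapping kept
 *	dma_buf_vunmap(dmabuf, a);	// counter 0, exporter vunmap called
 */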

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct reservation_object *robj;
	struct reservation_object_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);