// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially Arnd Bergmann
 * <arnd@arndb.de>, Rob Clark <rob@ti.com> and Daniel Vetter
 * <daniel@ffwll.ch> for their support in creation and refactoring of
 * this dma-buf sharing mechanism.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	dma_resv_lock(dmabuf->resv, NULL);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	dma_resv_unlock(dmabuf->resv);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver which should make sure this condition is never hit.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/*
	 * only support discovering the end of the buffer, but also allow
	 * SEEK_SET to maintain the idiomatic SEEK_END(0), SEEK_CUR(0)
	 * pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things is
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using
 * poll() and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state
 *   of all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e.
 * the DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
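
/*
 * Example: from userspace, the implicit fence state is observed by polling
 * the dma-buf fd directly. A minimal sketch, assuming "fd" is a dma-buf
 * file descriptor obtained from an exporting driver:
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	// POLLIN fires once the exclusive (write) fence has signalled,
 *	// POLLOUT once all shared and exclusive fences have signalled.
 *	if (poll(&pfd, 1, -1) < 0)
 *		perror("poll");
 */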

/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}
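
/*
 * Example: userspace brackets CPU access to an mmap()ed dma-buf with the
 * SYNC ioctl. A minimal sketch, assuming "fd" is a dma-buf file descriptor
 * and "map" its mmap()ed view:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	struct dma_buf_sync sync = { 0 };
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	// -> dma_buf_begin_cpu_access()
 *
 *	memcpy(map, src, len);			// CPU touches the buffer
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);	// -> dma_buf_end_cpu_access()
 */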

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	dma_resv_lock(dmabuf->resv, NULL);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	dma_resv_unlock(dmabuf->resv);
}

static const struct file_operations dma_buf_fops = {
	.release = dma_buf_release,
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.show_fdinfo = dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of
 * operations is fairly simple:
 *
 * 1. The exporter defines his exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops, or error in allocating struct dma_buf, will return negative error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    exp_info->ops->dynamic_mapping))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
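
/*
 * Example: a minimal exporter. The ops structure and the backing object
 * ("my_buffer") are hypothetical placeholders; a real exporter must at
 * least implement map_dma_buf, unmap_dma_buf and release:
 *
 *	static const struct dma_buf_ops my_dmabuf_ops = {
 *		.map_dma_buf	= my_map_dma_buf,
 *		.unmap_dma_buf	= my_unmap_dma_buf,
 *		.release	= my_release,
 *	};
 *
 *	struct dma_buf *my_export(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dmabuf_ops;
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_CLOEXEC;
 *		exp_info.priv = buf;
 *
 *		return dma_buf_export(&exp_info);
 *	}
 */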

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
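
/*
 * Example: handing the exported buffer to userspace, continuing the
 * hypothetical exporter sketch above. On failure the reference is dropped
 * with dma_buf_put():
 *
 *	int fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *
 *	if (fd < 0) {
 *		dma_buf_put(dmabuf);
 *		return fd;
 *	}
 *	// fd can now be handed to userspace, e.g. through a driver ioctl.
 */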

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
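
/*
 * Example: the importer side of the lifecycle. A sketch, assuming "fd"
 * arrived from userspace (e.g. via a driver ioctl):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	// ... attach and map the buffer, see dma_buf_attach() and
 *	// dma_buf_map_attachment() below ...
 *
 *	dma_buf_put(dmabuf);	// drop the reference taken by dma_buf_get()
 */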

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list;
 * optionally calls attach() of dma_buf_ops to allow device-specific attach
 * functionality
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @dynamic_mapping:	[in]	calling convention for map/unmap
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       bool dynamic_mapping)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	attach->dynamic_mapping = dynamic_mapping;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/*
	 * When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unlock;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a
 * static mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, false);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts
 * of time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
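
/*
 * Example: a non-dynamic importer mapping a buffer for device DMA. A
 * sketch, assuming "dmabuf" came from dma_buf_get() and "dev" is the
 * importing device:
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	// program the device with the DMA addresses found in sgt->sgl ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */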

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer
 * object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is provided through dma_buf_vmap() and dma_buf_vunmap().
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. The mapping is reference counted, so
 *   nested calls return the same address.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed
 *   in many processing pipelines (e.g. feeding a software rendered image into
 *   a hardware pipeline, thumbnail creation, snapshots, ...). Drivers can
 *   forward such requests to the exporter using dma_buf_mmap().
 *
 * - Userspace can also mmap() the dma-buf fd directly, which ends up in
 *   dma_buf_mmap_internal() and likewise calls the exporter's
 *   &dma_buf_ops.mmap callback.
 *
 * Because existing importing subsystems might presume coherent mappings for
 * userspace, the exporter needs to set up a coherent mapping. If that's not
 * possible, it needs to fake coherency by manually shooting down ptes when
 * leaving the cpu domain and flushing caches at fault time. There is no way
 * to communicate this through a userspace mapping, so every exporter needs
 * to be able to handle this.
 *
 * To bracket CPU access from userspace, the DMA_BUF_IOCTL_SYNC ioctl is
 * provided. It must be called before and after any CPU access of the
 * mapping, using DMA_BUF_SYNC_START and DMA_BUF_SYNC_END respectively, with
 * the access direction encoded in the flags. This is really important on
 * architectures with non-coherent caches.
 */
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit fences attached to the buffer. */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from
 * the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations. Coherency is only guaranteed for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls
 * is it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/*
	 * Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
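
/*
 * Example: kernel-internal CPU access bracketed by the begin/end calls.
 * A sketch, assuming "dmabuf" is a valid &dma_buf the caller holds a
 * reference on:
 *
 *	int err = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *	if (err)
 *		return err;
 *
 *	// CPU reads of the backing storage are coherent here, e.g.
 *	// through a dma_buf_vmap() mapping (see below).
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */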

/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
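
/*
 * Example: a driver forwarding its own mmap fop to the exporter through
 * dma_buf_mmap(). A sketch, where my_object_from_vma() is a hypothetical
 * lookup helper of the calling driver:
 *
 *	static int my_driver_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_object *obj = my_object_from_vma(file, vma);
 *
 *		// map the whole buffer, starting at page 0
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */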

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
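
/*
 * Example: mapping a whole buffer into the kernel for a CPU fallback path.
 * A sketch combining the CPU-access bracket with vmap; error handling
 * beyond the vmap failure is omitted:
 *
 *	void *vaddr;
 *
 *	if (dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL))
 *		return -EIO;
 *
 *	vaddr = dma_buf_vmap(dmabuf);
 *	if (vaddr) {
 *		memset(vaddr, 0, dmabuf->size);	// CPU touches the buffer
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	}
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
 */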

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);