// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially to Arnd Bergmann
 * <arnd@arndb.de>, Rob Clark <rob@ti.com> and Daniel Vetter
 * <daniel@ffwll.ch> for their support in creation and refinement of this
 * idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it usually means that a fence callback
	 * registered by dma_buf_poll() was still pending when the last
	 * reference to the buffer was dropped, i.e. there is a file
	 * reference imbalance somewhere around dma_buf_poll() /
	 * dma_buf_poll_cb().
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	/* If the reservation object was allocated together with the dma_buf
	 * (no exporter-provided resv), it sits right behind the struct and
	 * must be finalized here.
	 */
	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer, i.e. SEEK_END
	 * with a zero offset, plus SEEK_SET(0) to rewind afterwards.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

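/*
 * Illustrative sketch (not part of this file): from userspace the only
 * supported use of llseek on a dma-buf file descriptor is discovering the
 * buffer size; "dmabuf_fd" below is a hypothetical descriptor handed out
 * by an exporter:
 *
 *	off_t size = lseek(dmabuf_fd, 0, SEEK_END);	// returns buffer size
 *	lseek(dmabuf_fd, 0, SEEK_SET);			// rewind to offset 0
 */
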
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = dma_resv_excl_fence(resv);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

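/*
 * Illustrative sketch (not part of this file): userspace can block on the
 * implicit fences of a hypothetical "dmabuf_fd" with poll(2); POLLIN waits
 * on the exclusive fence only, POLLOUT on all attached fences:
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once the exclusive fence signalled
 */
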
/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same piece of
 * memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in]	dmabuf buffer that will be renamed.
 * @buf:    [in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

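/*
 * Illustrative sketch (not part of this file): userspace is expected to
 * bracket CPU access through an mmap()ed dma-buf with DMA_BUF_IOCTL_SYNC;
 * "dmabuf_fd" is a hypothetical descriptor handed out by an exporter:
 *
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU writes through the mapping ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
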
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of
 * operations is fairly simple:
 *
 * 1. The exporter defines his exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: first the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(), then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or error in allocating struct dma_buf, will return negative
 * error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	ret = dma_buf_stats_setup(dmabuf);
	if (ret)
		goto err_sysfs;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_sysfs:
	/*
	 * Set file->f_path.dentry->d_fsdata to NULL so that when
	 * dma_buf_release() gets invoked by dentry_ops, it exits
	 * early before calling the release() dma_buf op.
	 */
	file->f_path.dentry->d_fsdata = NULL;
	fput(file);
err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

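/*
 * Illustrative sketch (not part of this file): a minimal exporter would
 * typically fill in a &dma_buf_export_info via DEFINE_DMA_BUF_EXPORT_INFO()
 * and call dma_buf_export(); "my_dmabuf_ops" and "my_buffer" are
 * hypothetical driver state:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_RDWR | O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 */
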
/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

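/*
 * Illustrative sketch (not part of this file): an importer that is handed a
 * file descriptor (the hypothetical "fd" below, received e.g. through an
 * ioctl) resolves it and drops its reference again like this:
 *
 *	struct dma_buf *dmabuf;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *	// ... attach and map, see dma_buf_dynamic_attach() below ...
 *	dma_buf_put(dmabuf);	// drop the reference taken by dma_buf_get()
 */
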
static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

	if (!IS_ERR_OR_NULL(sg_table))
		mangle_sg_table(sg_table);

	return sg_table;
}

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Dynamic importers must supply @importer_ops with a &move_notify callback;
 * if either side cannot handle dynamic mappings, a bidirectional mapping is
 * created and cached here at attach time.
 *
 * Returns:
 *
 * A pointer to a newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a
 * static mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* undo the page-pointer mangling applied in __map_dma_buf() */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dmabuf->ops->unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_pin);

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * the backing storage again.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unpin);

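/*
 * Illustrative sketch (not part of this file): a dynamic importer pins the
 * buffer for something long-lived like scanout, holding the reservation
 * lock as both calls require ("attach" is a hypothetical attachment
 * created with dma_buf_dynamic_attach()):
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	ret = dma_buf_pin(attach);
 *	dma_resv_unlock(attach->dmabuf->resv);
 *	// ... backing storage stays resident until unpinned ...
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	dma_buf_unpin(attach);
 *	dma_resv_unlock(attach->dmabuf->resv);
 */
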
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts
 * of time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

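/*
 * Illustrative sketch (not part of this file): a static importer attaches,
 * maps, and tears everything down again ("dev" is a hypothetical struct
 * device owned by the importing driver):
 *
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	if (IS_ERR(attach))
 *		return PTR_ERR(attach);
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(dmabuf, attach);
 *		return PTR_ERR(sgt);
 *	}
 *	// ... program the device DMA using the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 */
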
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	/* cached mappings are kept until the attachment is destroyed */
	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_move_notify);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal accesses need the entire buffer, the
 *   dma_buf_vmap() / dma_buf_vunmap() interface maps it linearly into the
 *   kernel address space.
 *
 * - For userspace mappings through an importing driver's own device interface
 *   there is dma_buf_mmap(), which redirects an mmap on a driver file
 *   descriptor to the underlying dma-buf.
 *
 * - Userspace can also mmap the dma-buf file descriptor directly. CPU access
 *   must then be bracketed with the DMA_BUF_IOCTL_SYNC ioctl, which maps onto
 *   dma_buf_begin_cpu_access() and dma_buf_end_cpu_access() below, so that
 *   caches are flushed and any outstanding implicit fences are waited upon.
 */

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

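/*
 * Illustrative sketch (not part of this file): kernel code brackets CPU
 * reads of buffer contents with the two calls above:
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *	// ... CPU reads, e.g. through a dma_buf_vmap() mapping ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */
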
/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

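/*
 * Illustrative sketch (not part of this file): a driver exposing a buffer
 * through its own character device can forward its mmap file operation to
 * the dma-buf ("struct my_obj" is hypothetical driver state):
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */
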
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_map ptr;
	int ret = 0;

	dma_buf_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

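/*
 * Illustrative sketch (not part of this file): a kernel user maps the whole
 * buffer linearly, accesses it, and unmaps it again:
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *	// access via map.vaddr (or map.vaddr_iomem when map.is_iomem is set)
 *	dma_buf_vunmap(dmabuf, &map);
 */
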
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");

		robj = buf_obj->resv;
		fence = dma_resv_excl_fence(robj);
		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");

		fobj = rcu_dereference_protected(robj->fence,
						 dma_resv_held(robj));
		shared_count = fobj ? fobj->shared_count : 0;
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference_protected(fobj->shared[i],
							  dma_resv_held(robj));
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		}

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);