// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 */
14#include <linux/fs.h>
15#include <linux/slab.h>
16#include <linux/dma-buf.h>
17#include <linux/dma-fence.h>
18#include <linux/anon_inodes.h>
19#include <linux/export.h>
20#include <linux/debugfs.h>
21#include <linux/module.h>
22#include <linux/seq_file.h>
23#include <linux/poll.h>
24#include <linux/dma-resv.h>
25#include <linux/mm.h>
26#include <linux/mount.h>
27#include <linux/pseudo_fs.h>
28
29#include <uapi/linux/dma-buf.h>
30#include <uapi/linux/magic.h>
31
32static inline int is_dma_buf_file(struct file *);
33
/*
 * Global accounting list of every exported dma-buf. Entries are added
 * in dma_buf_export() and removed in dma_buf_file_release(); the list
 * is walked by the debugfs "bufinfo" dump.
 */
struct dma_buf_list {
	struct list_head head;	/* chain of dma_buf.list_node entries */
	struct mutex lock;	/* protects @head */
};

static struct dma_buf_list db_list;
40
41static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
42{
43 struct dma_buf *dmabuf;
44 char name[DMA_BUF_NAME_LEN];
45 size_t ret = 0;
46
47 dmabuf = dentry->d_fsdata;
48 spin_lock(&dmabuf->name_lock);
49 if (dmabuf->name)
50 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
51 spin_unlock(&dmabuf->name_lock);
52
53 return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
54 dentry->d_name.name, ret > 0 ? name : "");
55}
56
/*
 * Final teardown of a dma_buf, invoked from the dentry's ->d_release
 * once the last reference to the backing file is gone.
 */
static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	/* no kernel vmap may be outstanding at this point */
	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operation to the buffer: a
	 * poll callback is still armed and would dereference freed memory
	 * once its fence signals.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	/* let the exporter drop its backing storage first ... */
	dmabuf->ops->release(dmabuf);

	/*
	 * ... then tear down the reservation object, but only if it is the
	 * one embedded directly behind the dma_buf (see dma_buf_export()).
	 */
	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}
86
87static int dma_buf_file_release(struct inode *inode, struct file *file)
88{
89 struct dma_buf *dmabuf;
90
91 if (!is_dma_buf_file(file))
92 return -EINVAL;
93
94 dmabuf = file->private_data;
95
96 mutex_lock(&db_list.lock);
97 list_del(&dmabuf->list_node);
98 mutex_unlock(&db_list.lock);
99
100 return 0;
101}
102
/* Dentry callbacks for files living on the dma-buf pseudo filesystem. */
static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};
107
/* Internal mount of the "dmabuf" pseudo filesystem, set up at boot. */
static struct vfsmount *dma_buf_mnt;

/* Set up the pseudo-fs superblock context and hook in our dentry ops. */
static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};
126
127static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
128{
129 struct dma_buf *dmabuf;
130
131 if (!is_dma_buf_file(file))
132 return -EINVAL;
133
134 dmabuf = file->private_data;
135
136
137 if (!dmabuf->ops->mmap)
138 return -EINVAL;
139
140
141 if (vma->vm_pgoff + vma_pages(vma) >
142 dmabuf->size >> PAGE_SHIFT)
143 return -EINVAL;
144
145 return dmabuf->ops->mmap(dmabuf, vma);
146}
147
148static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
149{
150 struct dma_buf *dmabuf;
151 loff_t base;
152
153 if (!is_dma_buf_file(file))
154 return -EBADF;
155
156 dmabuf = file->private_data;
157
158
159
160
161 if (whence == SEEK_END)
162 base = dmabuf->size;
163 else if (whence == SEEK_SET)
164 base = 0;
165 else
166 return -EINVAL;
167
168 if (offset != 0)
169 return -EINVAL;
170
171 return base + offset;
172}
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
/*
 * Fence completion callback for poll support: wake any waiters for the
 * events this slot was armed with and mark the slot inactive so
 * dma_buf_poll() can re-arm it.
 *
 * Runs from fence signalling context (possibly hard irq), hence the
 * irqsave locking. Also called directly with fence == NULL from
 * dma_buf_poll() error paths to force a wakeup/recheck.
 */
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
206
/*
 * ->poll for dma-buf files: EPOLLIN becomes ready once the exclusive
 * fence signals, EPOLLOUT once all fences (shared and exclusive) have
 * signalled. The fence pointers are snapshotted under RCU and validated
 * against the reservation object's seqcount.
 */
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		/* with no shared fences, the exclusive one gates writes too */
		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			/* callback already armed; just extend its event mask */
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* fence is going away: force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * fence already signalled: no callback was
				 * queued, so wake up any other waiters
				 * ourselves.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* only arm a new callback if the slot is not already active */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* all fences signalled: no callback queued, wake up waiters */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/*
 * dma_buf_set_name - give a dma-buf a user-visible name, shown in
 * debugfs "bufinfo" and in /proc fdinfo to help identify the buffer.
 * @dmabuf:	[in]	buffer to name
 * @buf:	[in]	userspace pointer to a NUL-terminated string
 *			(at most DMA_BUF_NAME_LEN bytes are used)
 *
 * The name may only be changed while the buffer has no attachments;
 * otherwise -EBUSY is returned. Returns 0 on success or a negative
 * errno.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* the resv lock serializes against attach/detach */
	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	/* name_lock guards readers that cannot take the resv lock */
	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

out_unlock:
	dma_resv_unlock(dmabuf->resv);
	return ret;
}
364
365static long dma_buf_ioctl(struct file *file,
366 unsigned int cmd, unsigned long arg)
367{
368 struct dma_buf *dmabuf;
369 struct dma_buf_sync sync;
370 enum dma_data_direction direction;
371 int ret;
372
373 dmabuf = file->private_data;
374
375 switch (cmd) {
376 case DMA_BUF_IOCTL_SYNC:
377 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
378 return -EFAULT;
379
380 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
381 return -EINVAL;
382
383 switch (sync.flags & DMA_BUF_SYNC_RW) {
384 case DMA_BUF_SYNC_READ:
385 direction = DMA_FROM_DEVICE;
386 break;
387 case DMA_BUF_SYNC_WRITE:
388 direction = DMA_TO_DEVICE;
389 break;
390 case DMA_BUF_SYNC_RW:
391 direction = DMA_BIDIRECTIONAL;
392 break;
393 default:
394 return -EINVAL;
395 }
396
397 if (sync.flags & DMA_BUF_SYNC_END)
398 ret = dma_buf_end_cpu_access(dmabuf, direction);
399 else
400 ret = dma_buf_begin_cpu_access(dmabuf, direction);
401
402 return ret;
403
404 case DMA_BUF_SET_NAME_A:
405 case DMA_BUF_SET_NAME_B:
406 return dma_buf_set_name(dmabuf, (const char __user *)arg);
407
408 default:
409 return -ENOTTY;
410 }
411}
412
/* ->show_fdinfo: expose size, refcount and names via /proc/<pid>/fdinfo. */
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}
426
/* File operations backing the dma-buf fds handed out to userspace. */
static const struct file_operations dma_buf_fops = {
	.release = dma_buf_file_release,
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.show_fdinfo = dma_buf_show_fdinfo,
};
436
/*
 * is_dma_buf_file - check if struct file* is associated with dma_buf
 * (identified by its file operations pointer).
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}
444
/*
 * Allocate an anonymous inode plus file on the dma-buf pseudo
 * filesystem for @dmabuf. The inode size mirrors the buffer size so
 * stat() reports something sensible. Returns the new file or an
 * ERR_PTR.
 */
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	/* only access mode and nonblock are meaningful for a dma-buf fd */
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	/* the dentry carries the dma_buf for ->d_dname / ->d_release */
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;	/* propagates the ERR_PTR from alloc_file_pseudo() */
}
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * The exporter must supply priv and an ops table with at least
 * map_dma_buf, unmap_dma_buf and release. A module reference on
 * @exp_info->owner is held for the lifetime of the buffer.
 *
 * Returns, on success, a newly created struct dma_buf object, which
 * wraps the supplied private data and operations; on failure an
 * ERR_PTR.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	/* cached sg_tables make no sense for dynamic (pin/unpin) exporters */
	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	/* pin and unpin must be provided as a pair */
	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	/* use the embedded reservation object unless the exporter has one */
	if (!resv) {
		resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
599
600
601
602
603
604
605
606
607int dma_buf_fd(struct dma_buf *dmabuf, int flags)
608{
609 int fd;
610
611 if (!dmabuf || !dmabuf->file)
612 return -EINVAL;
613
614 fd = get_unused_fd_flags(flags);
615 if (fd < 0)
616 return fd;
617
618 fd_install(fd, dmabuf->file);
619
620 return fd;
621}
622EXPORT_SYMBOL_GPL(dma_buf_fd);
623
624
625
626
627
628
629
630
631
632struct dma_buf *dma_buf_get(int fd)
633{
634 struct file *file;
635
636 file = fget(fd);
637
638 if (!file)
639 return ERR_PTR(-EBADF);
640
641 if (!is_dma_buf_file(file)) {
642 fput(file);
643 return ERR_PTR(-EINVAL);
644 }
645
646 return file->private_data;
647}
648EXPORT_SYMBOL_GPL(dma_buf_get);
649
650
651
652
653
654
655
656
657
658
659
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput(). If this drops the
 * last reference, the file's ->release and the dentry's ->d_release
 * eventually free the buffer.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
668
/*
 * XOR-scramble the page pointers of an sg_table when CONFIG_DMABUF_DEBUG
 * is set, so importers that illegally touch the underlying struct pages
 * (instead of using only the DMA addresses) crash early. The XOR is its
 * own inverse, so calling this again on unmap restores the table.
 */
static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* flip the high bits but preserve the low SG_ chain/end markers */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}
684static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
685 enum dma_data_direction direction)
686{
687 struct sg_table *sg_table;
688
689 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
690
691 if (!IS_ERR_OR_NULL(sg_table))
692 mangle_sg_table(sg_table);
693
694 return sg_table;
695}
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns a pointer to the newly created &dma_buf_attachment on
 * success, or an error pointer on failure. Attachments must be cleaned
 * up by calling dma_buf_detach().
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	/* a dynamic importer must be able to receive move notifications */
	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here once, to avoid issues with
	 * the reservation object lock later on.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);

	/* attach->sgt is still NULL here, so detach only unlinks the node
	 * and calls the exporter's ->detach before freeing @attach. */
	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
799
800
801
802
803
804
805
806
807
/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use
 * a static mapping (no importer ops, no move notifications).
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
814
815static void __unmap_dma_buf(struct dma_buf_attachment *attach,
816 struct sg_table *sg_table,
817 enum dma_data_direction direction)
818{
819
820 mangle_sg_table(sg_table);
821
822 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
823}
824
825
826
827
828
829
830
831
832
833
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 * Any cached sg_table is unmapped first (unpinning the buffer for
 * dynamic exporters) before the attachment is unlinked and freed.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt) {
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_lock(attach->dmabuf->resv, NULL);

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dmabuf->ops->unpin(attach);
			dma_resv_unlock(attach->dmabuf->resv);
		}
	}

	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875int dma_buf_pin(struct dma_buf_attachment *attach)
876{
877 struct dma_buf *dmabuf = attach->dmabuf;
878 int ret = 0;
879
880 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
881
882 dma_resv_assert_held(dmabuf->resv);
883
884 if (dmabuf->ops->pin)
885 ret = dmabuf->ops->pin(attach);
886
887 return ret;
888}
889EXPORT_SYMBOL_GPL(dma_buf_pin);
890
891
892
893
894
895
896
897
898
899void dma_buf_unpin(struct dma_buf_attachment *attach)
900{
901 struct dma_buf *dmabuf = attach->dmabuf;
902
903 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
904
905 dma_resv_assert_held(dmabuf->resv);
906
907 if (dmabuf->ops->unpin)
908 dmabuf->ops->unpin(attach);
909}
910EXPORT_SYMBOL_GPL(dma_buf_unpin);
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table on success, an ERR_PTR on error. A mapping must
 * be unmapped again with dma_buf_unmap_attachment(). For dynamic
 * attachments the caller must hold the reservation lock.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		/* without move_notify the buffer must stay pinned while
		 * mapped */
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	/* balance the pin taken above on failure */
	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	/* exporters are expected to hand out page-aligned DMA chunks */
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
/**
 * dma_buf_unmap_attachment - unmaps the buffer; might deallocate the
 * scatterlist associated. Is a wrapper for unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by
 * dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);

	/* cached mappings stay in place until dma_buf_detach() */
	if (attach->sgt == sg_table)
		return;

	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);

	__unmap_dma_buf(attach, sg_table, direction);

	/* balance the pin taken in dma_buf_map_attachment() */
	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044void dma_buf_move_notify(struct dma_buf *dmabuf)
1045{
1046 struct dma_buf_attachment *attach;
1047
1048 dma_resv_assert_held(dmabuf->resv);
1049
1050 list_for_each_entry(attach, &dmabuf->attachments, node)
1051 if (attach->importer_ops)
1052 attach->importer_ops->move_notify(attach);
1053}
1054EXPORT_SYMBOL_GPL(dma_buf_move_notify);
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
/*
 * Wait for the implicit fences on the reservation object before
 * granting CPU access: write access (DMA_TO_DEVICE / DMA_BIDIRECTIONAL)
 * waits for all fences, read access only for the exclusive fence.
 *
 * Returns 0 on success or a negative error (e.g. -ERESTARTSYS) from the
 * interruptible wait.
 */
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf
 * from the cpu in the kernel context. Calls begin_cpu_access to allow
 * exporter-specific preparations.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both
 * calls is it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1214 enum dma_data_direction direction)
1215{
1216 int ret = 0;
1217
1218 WARN_ON(!dmabuf);
1219
1220 might_lock(&dmabuf->resv->lock.base);
1221
1222 if (dmabuf->ops->end_cpu_access)
1223 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1224
1225 return ret;
1226}
1227EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within
 *			the dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file
 * of the dma_buf operation, sets the starting pgoff, bounds-checks the
 * size, and then calls the exporter's mmap function to set up the
 * mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma so the fault handlers see the dma-buf file */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * Mappings are refcounted: repeated calls return the existing mapping
 * and bump the counter; each call must be balanced with
 * dma_buf_vunmap().
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct dma_buf_map ptr;
	int ret = 0;

	dma_buf_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		/* reuse the existing mapping, just bump the refcount */
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
1324
1325
1326
1327
1328
1329
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 *
 * The exporter's ->vunmap is only called once the mapping refcount
 * drops to zero.
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	/* the caller must hand back exactly the mapping it was given */
	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
1348
1349#ifdef CONFIG_DEBUG_FS
/*
 * debugfs "bufinfo" dump: one line per exported dma-buf (size, file
 * flags/mode, refcount, exporter name, inode, user name), followed by
 * its fences and attached devices, then global totals.
 */
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	unsigned seq;
	int count = 0, attach_count, shared_count, i;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name,
				file_inode(buf_obj->file)->i_ino,
				buf_obj->name ?: "");

		robj = buf_obj->resv;
		/* snapshot the fences; retry if the seqcount moved under us */
		while (true) {
			seq = read_seqcount_begin(&robj->seq);
			rcu_read_lock();
			fobj = rcu_dereference(robj->fence);
			shared_count = fobj ? fobj->shared_count : 0;
			fence = rcu_dereference(robj->fence_excl);
			if (!read_seqcount_retry(&robj->seq, seq))
				break;
			rcu_read_unlock();
		}

		if (fence)
			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			/* skip fences that are already being destroyed */
			if (!dma_fence_get_rcu(fence))
				continue;
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		}
		rcu_read_unlock();

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;

error_unlock:
	mutex_unlock(&db_list.lock);
	return ret;
}
1439
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

/* Root of our debugfs directory (/sys/kernel/debug/dma_buf). */
static struct dentry *dma_buf_debugfs_dir;
1443
1444static int dma_buf_init_debugfs(void)
1445{
1446 struct dentry *d;
1447 int err = 0;
1448
1449 d = debugfs_create_dir("dma_buf", NULL);
1450 if (IS_ERR(d))
1451 return PTR_ERR(d);
1452
1453 dma_buf_debugfs_dir = d;
1454
1455 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1456 NULL, &dma_buf_debug_fops);
1457 if (IS_ERR(d)) {
1458 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1459 debugfs_remove_recursive(dma_buf_debugfs_dir);
1460 dma_buf_debugfs_dir = NULL;
1461 err = PTR_ERR(d);
1462 }
1463
1464 return err;
1465}
1466
/* Tear down the debugfs directory created by dma_buf_init_debugfs(). */
static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
1471#else
/* !CONFIG_DEBUG_FS stubs: debugfs support compiled out. */
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
1479#endif
1480
1481static int __init dma_buf_init(void)
1482{
1483 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1484 if (IS_ERR(dma_buf_mnt))
1485 return PTR_ERR(dma_buf_mnt);
1486
1487 mutex_init(&db_list.lock);
1488 INIT_LIST_HEAD(&db_list.head);
1489 dma_buf_init_debugfs();
1490 return 0;
1491}
1492subsys_initcall(dma_buf_init);
1493
/* Module unload: remove debugfs entries and unmount the pseudo fs. */
static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
}
__exitcall(dma_buf_deinit);
1500