// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially Arnd Bergmann
 * <arnd@arndb.de>, Rob Clark <rob@ti.com> and Daniel Vetter
 * <daniel@ffwll.ch> for their support in creation and refactoring of
 * this idea.
 */

14#include <linux/fs.h>
15#include <linux/slab.h>
16#include <linux/dma-buf.h>
17#include <linux/dma-fence.h>
18#include <linux/anon_inodes.h>
19#include <linux/export.h>
20#include <linux/debugfs.h>
21#include <linux/module.h>
22#include <linux/seq_file.h>
23#include <linux/poll.h>
24#include <linux/dma-resv.h>
25#include <linux/mm.h>
26#include <linux/mount.h>
27#include <linux/pseudo_fs.h>
28
29#include <uapi/linux/dma-buf.h>
30#include <uapi/linux/magic.h>
31
32#include "dma-buf-sysfs-stats.h"
33
34static inline int is_dma_buf_file(struct file *);
35
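/*
 * Global list of all exported dma-bufs, protected by db_list.lock. Entries are
 * added in dma_buf_export() and removed in dma_buf_file_release(); the list is
 * only walked by the debugfs "bufinfo" dump below.
 */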
36struct dma_buf_list {
37 struct list_head head;
38 struct mutex lock;
39};
40
41static struct dma_buf_list db_list;
42
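/*
 * ->d_dname for the dmabuf pseudo filesystem: paths shown in /proc/<pid>/maps
 * and /proc/<pid>/fd read "/dmabuf:<name>", where <name> is the optional name
 * set through DMA_BUF_SET_NAME.
 */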
43static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
44{
45 struct dma_buf *dmabuf;
46 char name[DMA_BUF_NAME_LEN];
47 size_t ret = 0;
48
49 dmabuf = dentry->d_fsdata;
50 spin_lock(&dmabuf->name_lock);
51 if (dmabuf->name)
52 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
53 spin_unlock(&dmabuf->name_lock);
54
55 return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
56 dentry->d_name.name, ret > 0 ? name : "");
57}
58
59static void dma_buf_release(struct dentry *dentry)
60{
61 struct dma_buf *dmabuf;
62
63 dmabuf = dentry->d_fsdata;
64 if (unlikely(!dmabuf))
65 return;
66
67 BUG_ON(dmabuf->vmapping_counter);
68
	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
74 BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
75
76 dma_buf_stats_teardown(dmabuf);
77 dmabuf->ops->release(dmabuf);
78
79 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
80 dma_resv_fini(dmabuf->resv);
81
82 WARN_ON(!list_empty(&dmabuf->attachments));
83 module_put(dmabuf->owner);
84 kfree(dmabuf->name);
85 kfree(dmabuf);
86}
87
88static int dma_buf_file_release(struct inode *inode, struct file *file)
89{
90 struct dma_buf *dmabuf;
91
92 if (!is_dma_buf_file(file))
93 return -EINVAL;
94
95 dmabuf = file->private_data;
96
97 mutex_lock(&db_list.lock);
98 list_del(&dmabuf->list_node);
99 mutex_unlock(&db_list.lock);
100
101 return 0;
102}
103
104static const struct dentry_operations dma_buf_dentry_ops = {
105 .d_dname = dmabuffs_dname,
106 .d_release = dma_buf_release,
107};
108
109static struct vfsmount *dma_buf_mnt;
110
111static int dma_buf_fs_init_context(struct fs_context *fc)
112{
113 struct pseudo_fs_context *ctx;
114
115 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
116 if (!ctx)
117 return -ENOMEM;
118 ctx->dops = &dma_buf_dentry_ops;
119 return 0;
120}
121
122static struct file_system_type dma_buf_fs_type = {
123 .name = "dmabuf",
124 .init_fs_context = dma_buf_fs_init_context,
125 .kill_sb = kill_anon_super,
126};
127
128static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
129{
130 struct dma_buf *dmabuf;
131
132 if (!is_dma_buf_file(file))
133 return -EINVAL;
134
135 dmabuf = file->private_data;
136
	/* check if buffer supports mmap */
138 if (!dmabuf->ops->mmap)
139 return -EINVAL;
140
	/* check for overflowing the buffer's size */
142 if (vma->vm_pgoff + vma_pages(vma) >
143 dmabuf->size >> PAGE_SHIFT)
144 return -EINVAL;
145
146 return dmabuf->ops->mmap(dmabuf, vma);
147}
148
149static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
150{
151 struct dma_buf *dmabuf;
152 loff_t base;
153
154 if (!is_dma_buf_file(file))
155 return -EBADF;
156
157 dmabuf = file->private_data;
158
	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
162 if (whence == SEEK_END)
163 base = dmabuf->size;
164 else if (whence == SEEK_SET)
165 base = 0;
166 else
167 return -EINVAL;
168
169 if (offset != 0)
170 return -EINVAL;
171
172 return base + offset;
173}
174
/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */
197static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
198{
199 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
200 struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
201 unsigned long flags;
202
203 spin_lock_irqsave(&dcb->poll->lock, flags);
204 wake_up_locked_poll(dcb->poll, dcb->active);
205 dcb->active = 0;
206 spin_unlock_irqrestore(&dcb->poll->lock, flags);
207 dma_fence_put(fence);
208
209 fput(dmabuf->file);
210}
211
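/*
 * Install dma_buf_poll_cb() on the first unsignalled fence selected by @write
 * from @resv. Returns true if a callback was installed, false if all selected
 * fences have already signalled.
 */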
212static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
213 struct dma_buf_poll_cb_t *dcb)
214{
215 struct dma_resv_iter cursor;
216 struct dma_fence *fence;
217 int r;
218
219 dma_resv_for_each_fence(&cursor, resv, write, fence) {
220 dma_fence_get(fence);
221 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
222 if (!r)
223 return true;
224 dma_fence_put(fence);
225 }
226
227 return false;
228}
229
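/*
 * poll() support: EPOLLIN waits for the write/exclusive fences, EPOLLOUT waits
 * for all fences attached to the buffer's reservation object. See the
 * "implicit fence polling" DOC comment above.
 */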
230static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
231{
232 struct dma_buf *dmabuf;
233 struct dma_resv *resv;
234 __poll_t events;
235
236 dmabuf = file->private_data;
237 if (!dmabuf || !dmabuf->resv)
238 return EPOLLERR;
239
240 resv = dmabuf->resv;
241
242 poll_wait(file, &dmabuf->poll, poll);
243
244 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
245 if (!events)
246 return 0;
247
248 dma_resv_lock(resv, NULL);
249
250 if (events & EPOLLOUT) {
251 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
252
253
254 spin_lock_irq(&dmabuf->poll.lock);
255 if (dcb->active)
256 events &= ~EPOLLOUT;
257 else
258 dcb->active = EPOLLOUT;
259 spin_unlock_irq(&dmabuf->poll.lock);
260
261 if (events & EPOLLOUT) {
262
263 get_file(dmabuf->file);
264
265 if (!dma_buf_poll_add_cb(resv, true, dcb))
266
267 dma_buf_poll_cb(NULL, &dcb->cb);
268 else
269 events &= ~EPOLLOUT;
270 }
271 }
272
273 if (events & EPOLLIN) {
274 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
275
276
277 spin_lock_irq(&dmabuf->poll.lock);
278 if (dcb->active)
279 events &= ~EPOLLIN;
280 else
281 dcb->active = EPOLLIN;
282 spin_unlock_irq(&dmabuf->poll.lock);
283
284 if (events & EPOLLIN) {
285
286 get_file(dmabuf->file);
287
288 if (!dma_buf_poll_add_cb(resv, false, dcb))
289
290 dma_buf_poll_cb(NULL, &dcb->cb);
291 else
292 events &= ~EPOLLIN;
293 }
294 }
295
296 dma_resv_unlock(resv);
297 return events;
298}
299
/**
 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 * It could support changing the name of the dma-buf if the same piece of
 * memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success.
 */
313static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
314{
315 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
316
317 if (IS_ERR(name))
318 return PTR_ERR(name);
319
320 spin_lock(&dmabuf->name_lock);
321 kfree(dmabuf->name);
322 dmabuf->name = name;
323 spin_unlock(&dmabuf->name_lock);
324
325 return 0;
326}
327
328static long dma_buf_ioctl(struct file *file,
329 unsigned int cmd, unsigned long arg)
330{
331 struct dma_buf *dmabuf;
332 struct dma_buf_sync sync;
333 enum dma_data_direction direction;
334 int ret;
335
336 dmabuf = file->private_data;
337
338 switch (cmd) {
339 case DMA_BUF_IOCTL_SYNC:
340 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
341 return -EFAULT;
342
343 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
344 return -EINVAL;
345
346 switch (sync.flags & DMA_BUF_SYNC_RW) {
347 case DMA_BUF_SYNC_READ:
348 direction = DMA_FROM_DEVICE;
349 break;
350 case DMA_BUF_SYNC_WRITE:
351 direction = DMA_TO_DEVICE;
352 break;
353 case DMA_BUF_SYNC_RW:
354 direction = DMA_BIDIRECTIONAL;
355 break;
356 default:
357 return -EINVAL;
358 }
359
360 if (sync.flags & DMA_BUF_SYNC_END)
361 ret = dma_buf_end_cpu_access(dmabuf, direction);
362 else
363 ret = dma_buf_begin_cpu_access(dmabuf, direction);
364
365 return ret;
366
367 case DMA_BUF_SET_NAME_A:
368 case DMA_BUF_SET_NAME_B:
369 return dma_buf_set_name(dmabuf, (const char __user *)arg);
370
371 default:
372 return -ENOTTY;
373 }
374}
375
376static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
377{
378 struct dma_buf *dmabuf = file->private_data;
379
380 seq_printf(m, "size:\t%zu\n", dmabuf->size);
381
382 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
383 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
384 spin_lock(&dmabuf->name_lock);
385 if (dmabuf->name)
386 seq_printf(m, "name:\t%s\n", dmabuf->name);
387 spin_unlock(&dmabuf->name_lock);
388}
389
390static const struct file_operations dma_buf_fops = {
391 .release = dma_buf_file_release,
392 .mmap = dma_buf_mmap_internal,
393 .llseek = dma_buf_llseek,
394 .poll = dma_buf_poll,
395 .unlocked_ioctl = dma_buf_ioctl,
396 .compat_ioctl = compat_ptr_ioctl,
397 .show_fdinfo = dma_buf_show_fdinfo,
398};
399
/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
403static inline int is_dma_buf_file(struct file *file)
404{
405 return file->f_op == &dma_buf_fops;
406}
407
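/*
 * Allocate the anonymous inode and struct file backing a new dma-buf on the
 * internal "dmabuf" pseudo filesystem. The inode number comes from a dma-buf
 * private counter so that it is unique and usable by the sysfs stats code.
 */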
408static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
409{
410 static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
411 struct file *file;
412 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
413
414 if (IS_ERR(inode))
415 return ERR_CAST(inode);
416
417 inode->i_size = dmabuf->size;
418 inode_set_bytes(inode, dmabuf->size);
419
	/*
	 * The ->i_ino acquired from get_next_ino() is not unique thus
	 * not suitable for using it as dentry name by dmabuf stats.
	 * Override ->i_ino with its unique and monotonically increasing
	 * dma-buf specific counter.
	 */
426 inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
427 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
428 flags, &dma_buf_fops);
429 if (IS_ERR(file))
430 goto err_alloc_file;
431 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
432 file->private_data = dmabuf;
433 file->f_path.dentry->d_fsdata = dmabuf;
434
435 return file;
436
437err_alloc_file:
438 iput(inode);
439 return file;
440}
441
/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter sets up &struct dma_buf_export_info and calls
 *    dma_buf_export() to wrap a private buffer object into a &dma_buf. It then
 *    exports that &dma_buf to userspace as a file descriptor by calling
 *    dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: importers call dma_buf_get() to obtain the &dma_buf from
 *    the fd and dma_buf_attach() or dma_buf_dynamic_attach() to attach the
 *    buffer to the importing device.
 *
 * 3. Once attached, importers request the actual device mapping with
 *    dma_buf_map_attachment() and release it again with
 *    dma_buf_unmap_attachment(). When the buffer is no longer needed the
 *    attachment is removed with dma_buf_detach() and the reference dropped
 *    with dma_buf_put().
 */

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops, or error in allocating struct dma_buf, will return negative
 * error.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
491struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
492{
493 struct dma_buf *dmabuf;
494 struct dma_resv *resv = exp_info->resv;
495 struct file *file;
496 size_t alloc_size = sizeof(struct dma_buf);
497 int ret;
498
499 if (!exp_info->resv)
500 alloc_size += sizeof(struct dma_resv);
501 else
		/* prevent &dma_buf[1] == dma_buf->resv */
503 alloc_size += 1;
504
505 if (WARN_ON(!exp_info->priv
506 || !exp_info->ops
507 || !exp_info->ops->map_dma_buf
508 || !exp_info->ops->unmap_dma_buf
509 || !exp_info->ops->release)) {
510 return ERR_PTR(-EINVAL);
511 }
512
513 if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
514 (exp_info->ops->pin || exp_info->ops->unpin)))
515 return ERR_PTR(-EINVAL);
516
517 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
518 return ERR_PTR(-EINVAL);
519
520 if (!try_module_get(exp_info->owner))
521 return ERR_PTR(-ENOENT);
522
523 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
524 if (!dmabuf) {
525 ret = -ENOMEM;
526 goto err_module;
527 }
528
529 dmabuf->priv = exp_info->priv;
530 dmabuf->ops = exp_info->ops;
531 dmabuf->size = exp_info->size;
532 dmabuf->exp_name = exp_info->exp_name;
533 dmabuf->owner = exp_info->owner;
534 spin_lock_init(&dmabuf->name_lock);
535 init_waitqueue_head(&dmabuf->poll);
536 dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
537 dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
538
539 if (!resv) {
540 resv = (struct dma_resv *)&dmabuf[1];
541 dma_resv_init(resv);
542 }
543 dmabuf->resv = resv;
544
545 file = dma_buf_getfile(dmabuf, exp_info->flags);
546 if (IS_ERR(file)) {
547 ret = PTR_ERR(file);
548 goto err_dmabuf;
549 }
550
551 file->f_mode |= FMODE_LSEEK;
552 dmabuf->file = file;
553
554 mutex_init(&dmabuf->lock);
555 INIT_LIST_HEAD(&dmabuf->attachments);
556
557 mutex_lock(&db_list.lock);
558 list_add(&dmabuf->list_node, &db_list.head);
559 mutex_unlock(&db_list.lock);
560
561 ret = dma_buf_stats_setup(dmabuf);
562 if (ret)
563 goto err_sysfs;
564
565 return dmabuf;
566
567err_sysfs:
	/*
	 * Set file->f_path.dentry->d_fsdata to NULL so that when
	 * dma_buf_release() gets invoked by dentry_ops, it exits
	 * early before calling the release() dma_buf op.
	 */
573 file->f_path.dentry->d_fsdata = NULL;
574 fput(file);
575err_dmabuf:
576 kfree(dmabuf);
577err_module:
578 module_put(exp_info->owner);
579 return ERR_PTR(ret);
580}
581EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
582
/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
590int dma_buf_fd(struct dma_buf *dmabuf, int flags)
591{
592 int fd;
593
594 if (!dmabuf || !dmabuf->file)
595 return -EINVAL;
596
597 fd = get_unused_fd_flags(flags);
598 if (fd < 0)
599 return fd;
600
601 fd_install(fd, dmabuf->file);
602
603 return fd;
604}
605EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
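
/*
 * Illustrative exporter sketch (not taken from this file): &my_dmabuf_ops,
 * my_buffer and size are placeholders, the helpers and the exp_info fields are
 * the real API.
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = size;
 *	exp_info.flags = O_CLOEXEC | O_RDWR;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);
 */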

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
615struct dma_buf *dma_buf_get(int fd)
616{
617 struct file *file;
618
619 file = fget(fd);
620
621 if (!file)
622 return ERR_PTR(-EBADF);
623
624 if (!is_dma_buf_file(file)) {
625 fput(file);
626 return ERR_PTR(-EINVAL);
627 }
628
629 return file->private_data;
630}
631EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
632
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
643void dma_buf_put(struct dma_buf *dmabuf)
644{
645 if (WARN_ON(!dmabuf || !dmabuf->file))
646 return;
647
648 fput(dmabuf->file);
649}
650EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
651
652static void mangle_sg_table(struct sg_table *sg_table)
653{
654#ifdef CONFIG_DMABUF_DEBUG
655 int i;
656 struct scatterlist *sg;
657
	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
662 for_each_sgtable_sg(sg_table, sg, i)
663 sg->page_link ^= ~0xffUL;
#endif /* CONFIG_DMABUF_DEBUG */
}

static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
668 enum dma_data_direction direction)
669{
670 struct sg_table *sg_table;
671
672 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
673
674 if (!IS_ERR_OR_NULL(sg_table))
675 mangle_sg_table(sg_table);
676
677 return sg_table;
678}
679
/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
702struct dma_buf_attachment *
703dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
704 const struct dma_buf_attach_ops *importer_ops,
705 void *importer_priv)
706{
707 struct dma_buf_attachment *attach;
708 int ret;
709
710 if (WARN_ON(!dmabuf || !dev))
711 return ERR_PTR(-EINVAL);
712
713 if (WARN_ON(importer_ops && !importer_ops->move_notify))
714 return ERR_PTR(-EINVAL);
715
716 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
717 if (!attach)
718 return ERR_PTR(-ENOMEM);
719
720 attach->dev = dev;
721 attach->dmabuf = dmabuf;
722 if (importer_ops)
723 attach->peer2peer = importer_ops->allow_peer2peer;
724 attach->importer_ops = importer_ops;
725 attach->importer_priv = importer_priv;
726
727 if (dmabuf->ops->attach) {
728 ret = dmabuf->ops->attach(dmabuf, attach);
729 if (ret)
730 goto err_attach;
731 }
732 dma_resv_lock(dmabuf->resv, NULL);
733 list_add(&attach->node, &dmabuf->attachments);
734 dma_resv_unlock(dmabuf->resv);
735
	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
740 if (dma_buf_attachment_is_dynamic(attach) !=
741 dma_buf_is_dynamic(dmabuf)) {
742 struct sg_table *sgt;
743
744 if (dma_buf_is_dynamic(attach->dmabuf)) {
745 dma_resv_lock(attach->dmabuf->resv, NULL);
746 ret = dmabuf->ops->pin(attach);
747 if (ret)
748 goto err_unlock;
749 }
750
751 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
752 if (!sgt)
753 sgt = ERR_PTR(-ENOMEM);
754 if (IS_ERR(sgt)) {
755 ret = PTR_ERR(sgt);
756 goto err_unpin;
757 }
758 if (dma_buf_is_dynamic(attach->dmabuf))
759 dma_resv_unlock(attach->dmabuf->resv);
760 attach->sgt = sgt;
761 attach->dir = DMA_BIDIRECTIONAL;
762 }
763
764 return attach;
765
766err_attach:
767 kfree(attach);
768 return ERR_PTR(ret);
769
770err_unpin:
771 if (dma_buf_is_dynamic(attach->dmabuf))
772 dmabuf->ops->unpin(attach);
773
774err_unlock:
775 if (dma_buf_is_dynamic(attach->dmabuf))
776 dma_resv_unlock(attach->dmabuf->resv);
777
778 dma_buf_detach(dmabuf, attach);
779 return ERR_PTR(ret);
780}
781EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
782
/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a
 * static mapping.
 */
791struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
792 struct device *dev)
793{
794 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
795}
796EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
797
798static void __unmap_dma_buf(struct dma_buf_attachment *attach,
799 struct sg_table *sg_table,
800 enum dma_data_direction direction)
801{
	/* the mangling is an XOR, so applying it again undoes it before unmap */
803 mangle_sg_table(sg_table);
804
805 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
806}
807
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
817void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
818{
819 if (WARN_ON(!dmabuf || !attach))
820 return;
821
822 if (attach->sgt) {
823 if (dma_buf_is_dynamic(attach->dmabuf))
824 dma_resv_lock(attach->dmabuf->resv, NULL);
825
826 __unmap_dma_buf(attach, attach->sgt, attach->dir);
827
828 if (dma_buf_is_dynamic(attach->dmabuf)) {
829 dmabuf->ops->unpin(attach);
830 dma_resv_unlock(attach->dmabuf->resv);
831 }
832 }
833
834 dma_resv_lock(dmabuf->resv, NULL);
835 list_del(&attach->node);
836 dma_resv_unlock(dmabuf->resv);
837 if (dmabuf->ops->detach)
838 dmabuf->ops->detach(dmabuf, attach);
839
840 kfree(attach);
841}
842EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
843
/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout, not for temporary pin
 * operations. It is not permitted to allow userspace to pin arbitrary amounts
 * of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
858int dma_buf_pin(struct dma_buf_attachment *attach)
859{
860 struct dma_buf *dmabuf = attach->dmabuf;
861 int ret = 0;
862
863 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
864
865 dma_resv_assert_held(dmabuf->resv);
866
867 if (dmabuf->ops->pin)
868 ret = dmabuf->ops->pin(attach);
869
870 return ret;
871}
872EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
873
/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mappings of @attach again and inform importers through
 * &dma_buf_attach_ops.move_notify.
 */
882void dma_buf_unpin(struct dma_buf_attachment *attach)
883{
884 struct dma_buf *dmabuf = attach->dmabuf;
885
886 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
887
888 dma_resv_assert_held(dmabuf->resv);
889
890 if (dmabuf->ops->unpin)
891 dmabuf->ops->unpin(attach);
892}
893EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
894
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts
 * of time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
916struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
917 enum dma_data_direction direction)
918{
919 struct sg_table *sg_table;
920 int r;
921
922 might_sleep();
923
924 if (WARN_ON(!attach || !attach->dmabuf))
925 return ERR_PTR(-EINVAL);
926
927 if (dma_buf_attachment_is_dynamic(attach))
928 dma_resv_assert_held(attach->dmabuf->resv);
929
930 if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
935 if (attach->dir != direction &&
936 attach->dir != DMA_BIDIRECTIONAL)
937 return ERR_PTR(-EBUSY);
938
939 return attach->sgt;
940 }
941
942 if (dma_buf_is_dynamic(attach->dmabuf)) {
943 dma_resv_assert_held(attach->dmabuf->resv);
944 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
945 r = attach->dmabuf->ops->pin(attach);
946 if (r)
947 return ERR_PTR(r);
948 }
949 }
950
951 sg_table = __map_dma_buf(attach, direction);
952 if (!sg_table)
953 sg_table = ERR_PTR(-ENOMEM);
954
955 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
956 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
957 attach->dmabuf->ops->unpin(attach);
958
959 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
960 attach->sgt = sg_table;
961 attach->dir = direction;
962 }
963
964#ifdef CONFIG_DMA_API_DEBUG
965 if (!IS_ERR(sg_table)) {
966 struct scatterlist *sg;
967 u64 addr;
968 int len;
969 int i;
970
971 for_each_sgtable_dma_sg(sg_table, sg, i) {
972 addr = sg_dma_address(sg);
973 len = sg_dma_len(sg);
974 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
975 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
976 __func__, addr, len);
977 }
978 }
979 }
980#endif
981 return sg_table;
982}
983EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
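
/*
 * Illustrative importer sketch (not taken from this file): fd and dev are
 * placeholders, the helpers are the real API. This is the static-mapping path
 * using dma_buf_attach(); error unwinding is elided.
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */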

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
995void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
996 struct sg_table *sg_table,
997 enum dma_data_direction direction)
998{
999 might_sleep();
1000
1001 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1002 return;
1003
1004 if (dma_buf_attachment_is_dynamic(attach))
1005 dma_resv_assert_held(attach->dmabuf->resv);
1006
1007 if (attach->sgt == sg_table)
1008 return;
1009
1010 if (dma_buf_is_dynamic(attach->dmabuf))
1011 dma_resv_assert_held(attach->dmabuf->resv);
1012
1013 __unmap_dma_buf(attach, sg_table, direction);
1014
1015 if (dma_buf_is_dynamic(attach->dmabuf) &&
1016 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1017 dma_buf_unpin(attach);
1018}
1019EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1020
/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
1029void dma_buf_move_notify(struct dma_buf *dmabuf)
1030{
1031 struct dma_buf_attachment *attach;
1032
1033 dma_resv_assert_held(dmabuf->resv);
1034
1035 list_for_each_entry(attach, &dmabuf->attachments, node)
1036 if (attach->importer_ops)
1037 attach->importer_ops->move_notify(attach);
1038}
1039EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1040
/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a vmap
 *   interface is provided with dma_buf_vmap() and dma_buf_vunmap(). The dma-buf
 *   layer keeps a reference count for all vmap access and calls down into the
 *   exporter's vmap callback only when no vmapping exists, and unmaps it again
 *   only once the last user is gone. Protection against concurrent vmap/vunmap
 *   calls is provided by the &dma_buf.lock mutex.
 *
 * - For userspace access the buffer can be mmap'ed, either directly on the
 *   dma-buf file descriptor through the &dma_buf_ops.mmap callback or forwarded
 *   from a driver fd with dma_buf_mmap(). Userspace CPU access must be
 *   bracketed with the DMA_BUF_IOCTL_SYNC ioctl (DMA_BUF_SYNC_START before and
 *   DMA_BUF_SYNC_END after the access) so that the kernel and the exporter can
 *   flush or invalidate caches as needed.
 */
1126static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1127 enum dma_data_direction direction)
1128{
1129 bool write = (direction == DMA_BIDIRECTIONAL ||
1130 direction == DMA_TO_DEVICE);
1131 struct dma_resv *resv = dmabuf->resv;
1132 long ret;
1133
	/* Wait on any implicit rendering fences */
1135 ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
1136 if (ret < 0)
1137 return ret;
1138
1139 return 0;
1140}
1141
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any pending fences, to make sure that DMA
 * operations have completed before the CPU starts to access the buffer.
 *
 * Can return negative error values, returns 0 on success.
 */
1161int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1162 enum dma_data_direction direction)
1163{
1164 int ret = 0;
1165
1166 if (WARN_ON(!dmabuf))
1167 return -EINVAL;
1168
1169 might_lock(&dmabuf->resv->lock.base);
1170
1171 if (dmabuf->ops->begin_cpu_access)
1172 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1173
	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
1178 if (ret == 0)
1179 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1180
1181 return ret;
1182}
1183EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1184
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
1197int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1198 enum dma_data_direction direction)
1199{
1200 int ret = 0;
1201
1202 WARN_ON(!dmabuf);
1203
1204 might_lock(&dmabuf->resv->lock.base);
1205
1206 if (dmabuf->ops->end_cpu_access)
1207 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1208
1209 return ret;
1210}
1211EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
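
/*
 * Illustrative userspace sketch (not part of the in-kernel API): CPU access to
 * an mmap'ed dma-buf fd is bracketed with DMA_BUF_IOCTL_SYNC so that the
 * begin/end_cpu_access callbacks above run in the exporter. dmabuf_fd and the
 * access in the middle are placeholders.
 *
 *	struct dma_buf_sync sync = { 0 };
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	// ... read or write the mmap'ed buffer ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */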

/**
 * dma_buf_mmap - Setup up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporters mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
1228int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1229 unsigned long pgoff)
1230{
1231 if (WARN_ON(!dmabuf || !vma))
1232 return -EINVAL;
1233
	/* check if buffer supports mmap */
1235 if (!dmabuf->ops->mmap)
1236 return -EINVAL;
1237
	/* check for offset overflow */
1239 if (pgoff + vma_pages(vma) < pgoff)
1240 return -EOVERFLOW;
1241
	/* check for overflowing the buffer's size */
1243 if (pgoff + vma_pages(vma) >
1244 dmabuf->size >> PAGE_SHIFT)
1245 return -EINVAL;
1246
	/* readjust the vma */
1248 vma_set_file(vma, dmabuf->file);
1249 vma->vm_pgoff = pgoff;
1250
1251 return dmabuf->ops->mmap(dmabuf, vma);
1252}
1253EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1254
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
1271int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1272{
1273 struct iosys_map ptr;
1274 int ret = 0;
1275
1276 iosys_map_clear(map);
1277
1278 if (WARN_ON(!dmabuf))
1279 return -EINVAL;
1280
1281 if (!dmabuf->ops->vmap)
1282 return -EINVAL;
1283
1284 mutex_lock(&dmabuf->lock);
1285 if (dmabuf->vmapping_counter) {
1286 dmabuf->vmapping_counter++;
1287 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1288 *map = dmabuf->vmap_ptr;
1289 goto out_unlock;
1290 }
1291
1292 BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1293
1294 ret = dmabuf->ops->vmap(dmabuf, &ptr);
1295 if (WARN_ON_ONCE(ret))
1296 goto out_unlock;
1297
1298 dmabuf->vmap_ptr = ptr;
1299 dmabuf->vmapping_counter = 1;
1300
1301 *map = dmabuf->vmap_ptr;
1302
1303out_unlock:
1304 mutex_unlock(&dmabuf->lock);
1305 return ret;
1306}
1307EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1308
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
1314void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1315{
1316 if (WARN_ON(!dmabuf))
1317 return;
1318
1319 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1320 BUG_ON(dmabuf->vmapping_counter == 0);
1321 BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1322
1323 mutex_lock(&dmabuf->lock);
1324 if (--dmabuf->vmapping_counter == 0) {
1325 if (dmabuf->ops->vunmap)
1326 dmabuf->ops->vunmap(dmabuf, map);
1327 iosys_map_clear(&dmabuf->vmap_ptr);
1328 }
1329 mutex_unlock(&dmabuf->lock);
1330}
1331EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1332
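/*
 * debugfs support: <debugfs>/dma_buf/bufinfo dumps every exported dma-buf on
 * db_list together with its reservation object state and the list of attached
 * devices.
 */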
1333#ifdef CONFIG_DEBUG_FS
1334static int dma_buf_debug_show(struct seq_file *s, void *unused)
1335{
1336 struct dma_buf *buf_obj;
1337 struct dma_buf_attachment *attach_obj;
1338 int count = 0, attach_count;
1339 size_t size = 0;
1340 int ret;
1341
1342 ret = mutex_lock_interruptible(&db_list.lock);
1343
1344 if (ret)
1345 return ret;
1346
1347 seq_puts(s, "\nDma-buf Objects:\n");
1348 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1349 "size", "flags", "mode", "count", "ino");
1350
1351 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1352
1353 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1354 if (ret)
1355 goto error_unlock;
1356
1357
1358 spin_lock(&buf_obj->name_lock);
1359 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1360 buf_obj->size,
1361 buf_obj->file->f_flags, buf_obj->file->f_mode,
1362 file_count(buf_obj->file),
1363 buf_obj->exp_name,
1364 file_inode(buf_obj->file)->i_ino,
1365 buf_obj->name ?: "");
1366 spin_unlock(&buf_obj->name_lock);
1367
1368 dma_resv_describe(buf_obj->resv, s);
1369
1370 seq_puts(s, "\tAttached Devices:\n");
1371 attach_count = 0;
1372
1373 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1374 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1375 attach_count++;
1376 }
1377 dma_resv_unlock(buf_obj->resv);
1378
1379 seq_printf(s, "Total %d devices attached\n\n",
1380 attach_count);
1381
1382 count++;
1383 size += buf_obj->size;
1384 }
1385
1386 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1387
1388 mutex_unlock(&db_list.lock);
1389 return 0;
1390
1391error_unlock:
1392 mutex_unlock(&db_list.lock);
1393 return ret;
1394}
1395
1396DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1397
1398static struct dentry *dma_buf_debugfs_dir;
1399
1400static int dma_buf_init_debugfs(void)
1401{
1402 struct dentry *d;
1403 int err = 0;
1404
1405 d = debugfs_create_dir("dma_buf", NULL);
1406 if (IS_ERR(d))
1407 return PTR_ERR(d);
1408
1409 dma_buf_debugfs_dir = d;
1410
1411 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1412 NULL, &dma_buf_debug_fops);
1413 if (IS_ERR(d)) {
1414 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1415 debugfs_remove_recursive(dma_buf_debugfs_dir);
1416 dma_buf_debugfs_dir = NULL;
1417 err = PTR_ERR(d);
1418 }
1419
1420 return err;
1421}
1422
1423static void dma_buf_uninit_debugfs(void)
1424{
1425 debugfs_remove_recursive(dma_buf_debugfs_dir);
1426}
1427#else
1428static inline int dma_buf_init_debugfs(void)
1429{
1430 return 0;
1431}
1432static inline void dma_buf_uninit_debugfs(void)
1433{
1434}
1435#endif
1436
1437static int __init dma_buf_init(void)
1438{
1439 int ret;
1440
1441 ret = dma_buf_init_sysfs_statistics();
1442 if (ret)
1443 return ret;
1444
1445 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1446 if (IS_ERR(dma_buf_mnt))
1447 return PTR_ERR(dma_buf_mnt);
1448
1449 mutex_init(&db_list.lock);
1450 INIT_LIST_HEAD(&db_list.head);
1451 dma_buf_init_debugfs();
1452 return 0;
1453}
1454subsys_initcall(dma_buf_init);
1455
1456static void __exit dma_buf_deinit(void)
1457{
1458 dma_buf_uninit_debugfs();
1459 kern_unmount(dma_buf_mnt);
1460 dma_buf_uninit_sysfs_statistics();
1461}
1462__exitcall(dma_buf_deinit);
1463