/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
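
/*
 * Worked example (addresses are illustrative): with DMA segments mapped at
 * (addr 0x1000, len 0x1000) and (addr 0x2000, len 0x1000), the walk above
 * finds the second segment starting exactly at expected = 0x1000 + 0x1000
 * and returns 0x2000. If the second segment instead started at 0x4000, the
 * walk would stop after the first segment and return 0x1000. Only the
 * leading contiguous span of the mapping is ever counted.
 */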

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf_map map;
	int ret;

	if (!buf->vaddr && buf->db_attach) {
		ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = ret ? NULL : map.vaddr;
	}

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
			     buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}
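
/*
 * A minimal sketch of the userspace side of this mapping, assuming a V4L2
 * capture node whose buffers were allocated with V4L2_MEMORY_MMAP (the fd
 * and buffer index below are illustrative, not part of this file):
 *
 *	struct v4l2_buffer b = {
 *		.type	= V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory	= V4L2_MEMORY_MMAP,
 *		.index	= 0,
 *	};
 *	ioctl(fd, VIDIOC_QUERYBUF, &b);
 *	void *p = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, b.m.offset);
 *
 * The vb2 core resolves b.m.offset back to the buffer and calls this
 * handler, where dma_mmap_attrs() inserts the coherent pages into the VMA.
 */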

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * mapping to the client with new direction, no cache sync
	 * needed, see comment in vb2_dc_dmabuf_ops_detach()
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
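
/*
 * Note on the exporter life cycle: the vb2 core invokes the ->get_dmabuf
 * memop while handling VIDIOC_EXPBUF and turns the returned dma_buf into a
 * file descriptor with dma_buf_fd(). The reference taken above is dropped
 * again in vb2_dc_dmabuf_ops_release() once the last user of that dma_buf
 * closes it, which may in turn free the buffer via vb2_dc_put().
 */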

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
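
/*
 * A minimal sketch of how a capture driver plugs these memops into its vb2
 * queue (the dev/pdev names are illustrative, not part of this file; a
 * real driver also fills q->ops, q->buf_struct_size and the locking fields
 * before calling vb2_queue_init()):
 *
 *	struct vb2_queue *q = &dev->queue;
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;
 *	ret = vb2_queue_init(q);
 */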

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * IOMMU address space.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
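
/*
 * Typical use, assuming an IOMMU-capable platform device (the probe
 * function below is an illustrative sketch):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */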

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");