/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};
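
/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

/*
 * vb2_dc_get_contiguous_size() - return the length of the initial
 * DMA-contiguous run of @sgt, i.e. how many bytes from the start of the
 * table are mapped at consecutive DMA addresses. Callers compare the
 * result against the buffer size to verify the mapping is contiguous.
 */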
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}
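
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
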
static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (!sgt)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}
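
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
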
static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}
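
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
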
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * mapping to the client with new direction, no cache sync
	 * needed see comment in vb2_dc_dmabuf_ops_detach()
	 */
	sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
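
/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/
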
static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}
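
/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
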
static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}
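
/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/
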
const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
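
/*
 * Illustrative wiring in a driver (a sketch, not part of this file): a
 * vb2-based driver selects this allocator by pointing its queue at these
 * memops before calling vb2_queue_init(), e.g.:
 *
 *	q->dev = &pdev->dev;
 *	q->mem_ops = &vb2_dma_contig_memops;
 */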
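
/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size parameter to
 * let DMA-mapping map a buffer as a single chunk in DMA address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when an
 * IOMMU is available and enabled).
 * Ideally, this function should be called before the buffer is
 * allocated.
 */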
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
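
/*
 * Example call site (a sketch, assuming a platform device whose
 * dma_parms are already set up): drivers typically raise the limit in
 * probe(), before any buffer is allocated:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */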

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");