/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);

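/*
 * Allocate the buffer's pages, preferring higher-order allocations so the
 * resulting scatterlist stays short. Start at the order covering the
 * remaining size, fall back order by order when the allocator fails, and
 * split whatever finally succeeds into individual pages. On failure all
 * pages allocated so far are freed and -ENOMEM is returned.
 */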
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

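/*
 * MMAP memory model: allocate the pages, build an sg_table from them and
 * map it for DMA. The returned buffer is the private cookie passed to the
 * remaining memops.
 */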
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * Allocate the array of struct page pointers up front; the pages
	 * themselves are allocated (and compacted into higher-order chunks
	 * where possible) by vb2_dma_sg_alloc_compacted() below.
	 */
	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;
154
155 buf->handler.refcount = &buf->refcount;
156 buf->handler.put = vb2_dma_sg_put;
157 buf->handler.arg = buf;
158 buf->vb = vb;
159
160 refcount_set(&buf->refcount, 1);
161
162 dprintk(1, "%s: Allocated buffer of %d pages\n",
163 __func__, buf->num_pages);
164 return buf;
165
166fail_map:
167 put_device(buf->dev);
168 sg_free_table(buf->dma_sgt);
169fail_table_alloc:
170 num_pages = buf->num_pages;
171 while (num_pages--)
172 __free_page(buf->pages[num_pages]);
173fail_pages_alloc:
174 kvfree(buf->pages);
175fail_pages_array_alloc:
176 kfree(buf);
177 return ERR_PTR(-ENOMEM);
178}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

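/*
 * Cache synchronisation for the MMAP/USERPTR models: sync to the device
 * before it accesses the buffer (prepare) and back to the CPU afterwards
 * (finish), unless the queue opted out via skip_cache_sync_on_*.
 */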
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

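/*
 * USERPTR memory model: pin the userspace pages through a frame vector and
 * build an sg_table from them. The sub-page offset of the user pointer is
 * recorded so non page-aligned buffers work.
 */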
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

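/*
 * Return a kernel virtual address for the buffer, creating the mapping on
 * first use: dma_buf_vmap() for DMABUF buffers, vm_map_ram() otherwise.
 */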
static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct iosys_map map;
	int ret;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

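/*
 * Map the buffer's pages into a userspace VMA; vb2_common_vm_ops ties the
 * buffer refcount to the lifetime of the mapping.
 */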
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

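/*
 * Each attachment receives its own copy of the buffer's scatterlist, made
 * at attach time and mapped lazily in the map_dma_buf op, so multiple
 * importers can map the buffer independently.
 */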
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
				      struct iosys_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

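/*
 * Export the buffer as a dma-buf. The exported buffer holds an extra
 * reference on the vb2 buffer, dropped in the dma-buf release op.
 */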
static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

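/*
 * Importer side: map the attached dma-buf and adopt the sg_table it
 * provides as this buffer's scatterlist.
 */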
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

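/*
 * Attach to a dma-buf exported by another device. No mapping is done here;
 * that happens in vb2_dma_sg_map_dmabuf() when the buffer is first used.
 */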
static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

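/*
 * The cookie of a dma-sg buffer is its sg_table; drivers retrieve it via
 * vb2_dma_sg_plane_desc().
 */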
static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);