1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
25#include <media/videobuf2-core.h>
26
/* Module-wide debug verbosity; higher values enable more verbose logging */
static int debug;
module_param(debug, int, 0644);

/* Print only when the module's debug level is at or above @level */
#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vb2: " fmt, ## arg); \
	} while (0)

/* Invoke an optional memory-allocator op; evaluates to 0 if the op is unset */
#define call_memop(q, op, args...) \
	(((q)->mem_ops->op) ? \
		((q)->mem_ops->op(args)) : 0)

/* Invoke an optional queue op; evaluates to 0 if the op is unset */
#define call_qop(q, op, args...) \
	(((q)->ops->op) ? ((q)->ops->op(args)) : 0)

/*
 * Buffer flags that vb2 maintains itself; these are masked off any
 * flags coming in from userspace before merging vb2's own state.
 */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
47
48
49
50
51static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
52{
53 struct vb2_queue *q = vb->vb2_queue;
54 void *mem_priv;
55 int plane;
56
57
58
59
60
61 for (plane = 0; plane < vb->num_planes; ++plane) {
62 unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
63
64 mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
65 size, q->gfp_flags);
66 if (IS_ERR_OR_NULL(mem_priv))
67 goto free;
68
69
70 vb->planes[plane].mem_priv = mem_priv;
71 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
72 }
73
74 return 0;
75free:
76
77 for (; plane > 0; --plane) {
78 call_memop(q, put, vb->planes[plane - 1].mem_priv);
79 vb->planes[plane - 1].mem_priv = NULL;
80 }
81
82 return -ENOMEM;
83}
84
85
86
87
88static void __vb2_buf_mem_free(struct vb2_buffer *vb)
89{
90 struct vb2_queue *q = vb->vb2_queue;
91 unsigned int plane;
92
93 for (plane = 0; plane < vb->num_planes; ++plane) {
94 call_memop(q, put, vb->planes[plane].mem_priv);
95 vb->planes[plane].mem_priv = NULL;
96 dprintk(3, "Freed plane %d of buffer %d\n", plane,
97 vb->v4l2_buf.index);
98 }
99}
100
101
102
103
104
105static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
106{
107 struct vb2_queue *q = vb->vb2_queue;
108 unsigned int plane;
109
110 for (plane = 0; plane < vb->num_planes; ++plane) {
111 if (vb->planes[plane].mem_priv)
112 call_memop(q, put_userptr, vb->planes[plane].mem_priv);
113 vb->planes[plane].mem_priv = NULL;
114 }
115}
116
117
118
119
120
121static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
122{
123 if (!p->mem_priv)
124 return;
125
126 if (p->dbuf_mapped)
127 call_memop(q, unmap_dmabuf, p->mem_priv);
128
129 call_memop(q, detach_dmabuf, p->mem_priv);
130 dma_buf_put(p->dbuf);
131 memset(p, 0, sizeof(*p));
132}
133
134
135
136
137
138static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
139{
140 struct vb2_queue *q = vb->vb2_queue;
141 unsigned int plane;
142
143 for (plane = 0; plane < vb->num_planes; ++plane)
144 __vb2_plane_dmabuf_put(q, &vb->planes[plane]);
145}
146
147
148
149
150
/*
 * __setup_offsets() - assign unique, page-aligned mmap "cookies"
 * (mem_offset) to every plane of the @n buffers most recently appended to
 * the queue, continuing after the offsets of any pre-existing buffers.
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	/* Resume numbering after the last plane of the last existing buffer */
	if (q->num_buffers) {
		struct v4l2_plane *p;
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			/* Advance past this plane, keeping page alignment */
			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}
183
184
185
186
187
188
189
190
/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP)
 * video memory and plane structures for all of them, then initialize the
 * buffers. Returns the number of buffers successfully allocated, which may
 * be fewer than @num_buffers if an allocation fails part-way through.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate the (possibly driver-extended) buffer struct */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* For multiplanar buffers, length holds the plane count */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * stops the allocation loop.
			 */
			ret = call_qop(q, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	/* 'buffer' now counts the successfully allocated buffers */
	__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}
251
252
253
254
255static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
256{
257 unsigned int buffer;
258 struct vb2_buffer *vb;
259
260 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
261 ++buffer) {
262 vb = q->bufs[buffer];
263 if (!vb)
264 continue;
265
266
267 if (q->memory == V4L2_MEMORY_MMAP)
268 __vb2_buf_mem_free(vb);
269 else if (q->memory == V4L2_MEMORY_DMABUF)
270 __vb2_buf_dmabuf_put(vb);
271 else
272 __vb2_buf_userptr_put(vb);
273 }
274}
275
276
277
278
279
280
/*
 * __vb2_queue_free() - free @buffers buffers from the end of the queue:
 * run the driver's per-buffer cleanup, release video memory, free the
 * buffer structs, and reset queue bookkeeping.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/* Call driver-provided cleanup function for each buffer, if given */
	if (q->ops->buf_cleanup) {
		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
		     ++buffer) {
			if (NULL == q->bufs[buffer])
				continue;
			q->ops->buf_cleanup(q->bufs[buffer]);
		}
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

	/* Free the vb2 buffer structures themselves */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	/* Once empty, forget the memory model so a new one can be chosen */
	if (!q->num_buffers)
		q->memory = 0;
	INIT_LIST_HEAD(&q->queued_list);
}
310
311
312
313
314
315static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
316{
317 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
318 return 0;
319
320
321 if (NULL == b->m.planes) {
322 dprintk(1, "Multi-planar buffer passed but "
323 "planes array not provided\n");
324 return -EINVAL;
325 }
326
327 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
328 dprintk(1, "Incorrect planes array length, "
329 "expected %d, got %d\n", vb->num_planes, b->length);
330 return -EINVAL;
331 }
332
333 return 0;
334}
335
336
337
338
339
340static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
341{
342 unsigned int plane;
343 for (plane = 0; plane < vb->num_planes; ++plane) {
344 void *mem_priv = vb->planes[plane].mem_priv;
345
346
347
348
349
350
351 if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
352 return true;
353 }
354 return false;
355}
356
357
358
359
360
361static bool __buffers_in_use(struct vb2_queue *q)
362{
363 unsigned int buffer;
364 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
365 if (__buffer_in_use(q, q->bufs[buffer]))
366 return true;
367 }
368 return false;
369}
370
371
372
373
374
/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to
 * be returned to userspace: plane/offset data plus vb2-maintained flags.
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Copy back data such as timestamp, flags, etc. (up to the union) */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		/*
		 * Fill in plane-related data; the caller has already verified
		 * (via __verify_planes_array) that b->m.planes is valid and
		 * large enough.
		 */
		b->length = vb->num_planes;
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * Single-planar: vb2 keeps plane info in v4l2_planes[0], but
		 * userspace expects it in the top-level v4l2_buffer fields.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
		else if (q->memory == V4L2_MEMORY_DMABUF)
			b->m.fd = vb->v4l2_planes[0].m.fd;
	}

	/*
	 * Clear any buffer-state-related flags and merge in vb2's own view
	 * of the buffer state.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_type;

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through: an errored buffer is also done */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing extra to set */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
}
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
450{
451 struct vb2_buffer *vb;
452 int ret;
453
454 if (b->type != q->type) {
455 dprintk(1, "querybuf: wrong buffer type\n");
456 return -EINVAL;
457 }
458
459 if (b->index >= q->num_buffers) {
460 dprintk(1, "querybuf: buffer index out of range\n");
461 return -EINVAL;
462 }
463 vb = q->bufs[b->index];
464 ret = __verify_planes_array(vb, b);
465 if (!ret)
466 __fill_v4l2_buffer(vb, b);
467 return ret;
468}
469EXPORT_SYMBOL(vb2_querybuf);
470
471
472
473
474
475static int __verify_userptr_ops(struct vb2_queue *q)
476{
477 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
478 !q->mem_ops->put_userptr)
479 return -EINVAL;
480
481 return 0;
482}
483
484
485
486
487
488static int __verify_mmap_ops(struct vb2_queue *q)
489{
490 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
491 !q->mem_ops->put || !q->mem_ops->mmap)
492 return -EINVAL;
493
494 return 0;
495}
496
497
498
499
500
501static int __verify_dmabuf_ops(struct vb2_queue *q)
502{
503 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
504 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
505 !q->mem_ops->unmap_dmabuf)
506 return -EINVAL;
507
508 return 0;
509}
510
511
512
513
514
/*
 * __verify_memory_type() - validate the memory model and buffer type of a
 * REQBUFS/CREATE_BUFS request against the queue's capabilities.
 */
static int __verify_memory_type(struct vb2_queue *q,
		enum v4l2_memory memory, enum v4l2_buf_type type)
{
	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
	    memory != V4L2_MEMORY_DMABUF) {
		dprintk(1, "reqbufs: unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "reqbufs: requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for the given memory model
	 * are available.
	 */
	if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * The busy test is deliberately last: vb2_create_bufs() with
	 * count == 0 only wants the type/memory validation above and
	 * treats -EBUSY from here as success.
	 */
	if (q->fileio) {
		dprintk(1, "reqbufs: file io in progress\n");
		return -EBUSY;
	}
	return 0;
}
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
/*
 * __reqbufs() - implement VIDIOC_REQBUFS for the queue.
 *
 * Negotiates buffer/plane counts with the driver via queue_setup(), then
 * allocates vb2 buffer structures (and, for MMAP, video memory).
 * If req->count is 0, all buffers are freed instead. If buffers were
 * allocated previously and the queue is not busy, memory is reallocated.
 * The return values are intended to be returned directly from the driver's
 * vidioc_reqbufs handler.
 */
static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	int ret;

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming active\n");
		return -EBUSY;
	}

	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
		/*
		 * We already have buffers allocated, so first check whether
		 * they are in use; MMAP buffers mapped by userspace cannot
		 * be freed.
		 */
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			dprintk(1, "reqbufs: memory in use, cannot free\n");
			return -EBUSY;
		}

		__vb2_queue_free(q, q->num_buffers);

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * the driver's queue_setup() or allocating anything.
		 */
		if (req->count == 0)
			return 0;
	}

	/*
	 * Clamp the requested count and start from clean per-plane state.
	 */
	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = req->memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires,
	 * and the size and allocator context of each plane.
	 */
	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
		       q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
	if (ret == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	allocated_buffers = ret;

	/*
	 * If fewer buffers were allocated than requested, re-ask the driver
	 * whether it can work with that number.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		/* Driver asked for even more than we managed to allocate */
		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted the smaller number of
		 * buffers, or queue_setup() returned an error.
		 */
	}

	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Report the number of successfully allocated buffers back to
	 * userspace.
	 */
	req->count = allocated_buffers;

	return 0;
}
672
673
674
675
676
677
678
679int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
680{
681 int ret = __verify_memory_type(q, req->memory, req->type);
682
683 return ret ? ret : __reqbufs(q, req);
684}
685EXPORT_SYMBOL_GPL(vb2_reqbufs);
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
703{
704 unsigned int num_planes = 0, num_buffers, allocated_buffers;
705 int ret;
706
707 if (q->num_buffers == VIDEO_MAX_FRAME) {
708 dprintk(1, "%s(): maximum number of buffers already allocated\n",
709 __func__);
710 return -ENOBUFS;
711 }
712
713 if (!q->num_buffers) {
714 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
715 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
716 q->memory = create->memory;
717 }
718
719 num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
720
721
722
723
724
725 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
726 &num_planes, q->plane_sizes, q->alloc_ctx);
727 if (ret)
728 return ret;
729
730
731 ret = __vb2_queue_alloc(q, create->memory, num_buffers,
732 num_planes);
733 if (ret == 0) {
734 dprintk(1, "Memory allocation failed\n");
735 return -ENOMEM;
736 }
737
738 allocated_buffers = ret;
739
740
741
742
743 if (ret < num_buffers) {
744 num_buffers = ret;
745
746
747
748
749
750 ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
751 &num_planes, q->plane_sizes, q->alloc_ctx);
752
753 if (!ret && allocated_buffers < num_buffers)
754 ret = -ENOMEM;
755
756
757
758
759
760 }
761
762 q->num_buffers += allocated_buffers;
763
764 if (ret < 0) {
765 __vb2_queue_free(q, allocated_buffers);
766 return -ENOMEM;
767 }
768
769
770
771
772
773 create->count = allocated_buffers;
774
775 return 0;
776}
777
778
779
780
781
782
783
784
785int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
786{
787 int ret = __verify_memory_type(q, create->memory, create->format.type);
788
789 create->index = q->num_buffers;
790 if (create->count == 0)
791 return ret != -EBUSY ? ret : 0;
792 return ret ? ret : __create_bufs(q, create);
793}
794EXPORT_SYMBOL_GPL(vb2_create_bufs);
795
796
797
798
799
800
801
802
803
804void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
805{
806 struct vb2_queue *q = vb->vb2_queue;
807
808 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
809 return NULL;
810
811 return call_memop(q, vaddr, vb->planes[plane_no].mem_priv);
812
813}
814EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
815
816
817
818
819
820
821
822
823
824
825
826
827void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
828{
829 struct vb2_queue *q = vb->vb2_queue;
830
831 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
832 return NULL;
833
834 return call_memop(q, cookie, vb->planes[plane_no].mem_priv);
835}
836EXPORT_SYMBOL_GPL(vb2_plane_cookie);
837
838
839
840
841
842
843
844
845
846
847
848
849
/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is
 * finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished
 *		successfully or VB2_BUF_STATE_ERROR if it finished with error
 *
 * Should be called by the driver after a hardware operation on a buffer is
 * finished and the buffer may be returned to userspace. Only buffers that
 * were previously handed to the driver via buf_queue (state ACTIVE) may be
 * passed here; anything else is silently ignored, as is an invalid @state.
 * May be called from interrupt context (uses the irqsave spinlock variant).
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	if (vb->state != VB2_BUF_STATE_ACTIVE)
		return;

	if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
		return;

	dprintk(4, "Done processing on buffer %d, state: %d\n",
			vb->v4l2_buf.index, state);

	/* Let the allocator sync the buffers for CPU access ("finish" op) */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_memop(q, finish, vb->planes[plane].mem_priv);

	/* Add the buffer to the done buffers list under done_lock */
	spin_lock_irqsave(&q->done_lock, flags);
	vb->state = state;
	list_add_tail(&vb->done_entry, &q->done_list);
	atomic_dec(&q->queued_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* Inform any processes that may be waiting for buffers */
	wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
880
881
882
883
884
885
/*
 * __fill_vb2_buffer() - fill a vb2_buffer's plane information (into
 * @v4l2_planes) from the v4l2_buffer passed in by userspace, and copy the
 * userspace-controlled buffer fields (field, timestamp, flags).
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/* Fill in driver-relevant information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].bytesused =
					b->m.planes[plane].bytesused;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}
	} else {
		/*
		 * Single-plane buffers do not use planes array, so fill in
		 * relevant v4l2_buffer struct fields into plane 0 instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			v4l2_planes[0].bytesused = b->bytesused;
			v4l2_planes[0].data_offset = 0;
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
			v4l2_planes[0].data_offset = 0;
		}

	}

	vb->v4l2_buf.field = b->field;
	vb->v4l2_buf.timestamp = b->timestamp;
	/* Userspace may not set vb2-owned flags; mask them off */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
}
953
954
955
956
/*
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer: (re)acquire userspace
 * memory for any plane whose address or length changed since the last qbuf.
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture buffers are written to by the device */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);

	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified (unchanged mapping) */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "qbuf: userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);

		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;

		/* Acquire each plane's memory */
		mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "qbuf: failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Call driver-specific initialization on the newly acquired buffer,
	 * if provided.
	 */
	ret = call_qop(q, buf_init, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer initialization failed\n");
		goto err;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}
1036
1037
1038
1039
/*
 * __qbuf_mmap() - handle qbuf of an MMAP buffer. Memory was allocated at
 * REQBUFS time, so only the plane information needs refreshing.
 */
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	__fill_vb2_buffer(vb, b, vb->v4l2_planes);
	return 0;
}
1045
1046
1047
1048
/*
 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer: attach and map the
 * dma-buf for each plane, reusing the previous attachment when the same
 * dma-buf with the same length is queued again.
 */
static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture buffers are written to by the device */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);

	/* Verify and copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < planes[plane].data_offset +
		    q->plane_sizes[plane]) {
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified (same dbuf + length) */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->v4l2_planes[plane].length == planes[plane].length) {
			/* drop the extra reference taken by dma_buf_get() */
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(1, "qbuf: buffer for plane %d changed\n", plane);

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(q, &vb->planes[plane]);
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
			dbuf, planes[plane].length, write);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "qbuf: failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		/* dbuf reference is now owned by the plane */
		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with  dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Call driver-specific initialization on the newly acquired buffer,
	 * if provided.
	 */
	ret = call_qop(q, buf_init, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer initialization failed\n");
		goto err;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
1146
1147
1148
1149
/*
 * __enqueue_in_driver() - hand a buffer over to the driver for processing:
 * mark it ACTIVE, bump the queued counter, sync the planes for device
 * access, then call the driver's buf_queue callback.
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->queued_count);

	/* sync buffers for device access ("prepare" memop) */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_memop(q, prepare, vb->planes[plane].mem_priv);

	q->ops->buf_queue(vb);
}
1164
1165static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1166{
1167 struct vb2_queue *q = vb->vb2_queue;
1168 int ret;
1169
1170 switch (q->memory) {
1171 case V4L2_MEMORY_MMAP:
1172 ret = __qbuf_mmap(vb, b);
1173 break;
1174 case V4L2_MEMORY_USERPTR:
1175 ret = __qbuf_userptr(vb, b);
1176 break;
1177 case V4L2_MEMORY_DMABUF:
1178 ret = __qbuf_dmabuf(vb, b);
1179 break;
1180 default:
1181 WARN(1, "Invalid queue type\n");
1182 ret = -EINVAL;
1183 }
1184
1185 if (!ret)
1186 ret = call_qop(q, buf_prepare, vb);
1187 if (ret)
1188 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
1189 else
1190 vb->state = VB2_BUF_STATE_PREPARED;
1191
1192 return ret;
1193}
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
/**
 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the
 * kernel
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_prepare_buf
 *		handler in driver
 *
 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
 * Verifies the passed buffer and calls the buf_prepare callback in the
 * driver (if provided), in which driver-specific buffer initialization can
 * be performed. The return values are intended to be returned directly
 * from the driver's vidioc_prepare_buf handler.
 */
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->fileio) {
		dprintk(1, "%s(): file io in progress\n", __func__);
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "%s(): invalid buffer type\n", __func__);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "%s(): buffer index out of range\n", __func__);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	if (NULL == vb) {
		/* Should never happen; buffers are allocated contiguously */
		dprintk(1, "%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(1, "%s(): invalid memory type\n", __func__);
		return -EINVAL;
	}

	/* Only a dequeued buffer may be prepared */
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
		return -EINVAL;
	}
	ret = __verify_planes_array(vb, b);
	if (ret < 0)
		return ret;
	ret = __buf_prepare(vb, b);
	if (ret < 0)
		return ret;

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1277{
1278 struct rw_semaphore *mmap_sem = NULL;
1279 struct vb2_buffer *vb;
1280 int ret = 0;
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298 if (q->memory == V4L2_MEMORY_USERPTR) {
1299 mmap_sem = ¤t->mm->mmap_sem;
1300 call_qop(q, wait_prepare, q);
1301 down_read(mmap_sem);
1302 call_qop(q, wait_finish, q);
1303 }
1304
1305 if (q->fileio) {
1306 dprintk(1, "qbuf: file io in progress\n");
1307 ret = -EBUSY;
1308 goto unlock;
1309 }
1310
1311 if (b->type != q->type) {
1312 dprintk(1, "qbuf: invalid buffer type\n");
1313 ret = -EINVAL;
1314 goto unlock;
1315 }
1316
1317 if (b->index >= q->num_buffers) {
1318 dprintk(1, "qbuf: buffer index out of range\n");
1319 ret = -EINVAL;
1320 goto unlock;
1321 }
1322
1323 vb = q->bufs[b->index];
1324 if (NULL == vb) {
1325
1326 dprintk(1, "qbuf: buffer is NULL\n");
1327 ret = -EINVAL;
1328 goto unlock;
1329 }
1330
1331 if (b->memory != q->memory) {
1332 dprintk(1, "qbuf: invalid memory type\n");
1333 ret = -EINVAL;
1334 goto unlock;
1335 }
1336 ret = __verify_planes_array(vb, b);
1337 if (ret)
1338 goto unlock;
1339
1340 switch (vb->state) {
1341 case VB2_BUF_STATE_DEQUEUED:
1342 ret = __buf_prepare(vb, b);
1343 if (ret)
1344 goto unlock;
1345 case VB2_BUF_STATE_PREPARED:
1346 break;
1347 default:
1348 dprintk(1, "qbuf: buffer already in use\n");
1349 ret = -EINVAL;
1350 goto unlock;
1351 }
1352
1353
1354
1355
1356
1357 list_add_tail(&vb->queued_entry, &q->queued_list);
1358 vb->state = VB2_BUF_STATE_QUEUED;
1359
1360
1361
1362
1363
1364 if (q->streaming)
1365 __enqueue_in_driver(vb);
1366
1367
1368 __fill_v4l2_buffer(vb, b);
1369
1370 dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
1371unlock:
1372 if (mmap_sem)
1373 up_read(mmap_sem);
1374 return ret;
1375}
1376EXPORT_SYMBOL_GPL(vb2_qbuf);
1377
1378
1379
1380
1381
1382
1383
/*
 * __vb2_wait_for_done_vb() - wait for a buffer to become available for
 * dequeuing. Will sleep if required for nonblocking == false. The driver's
 * lock is dropped around the sleep via wait_prepare/wait_finish.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on done_list are performed under the done_lock
	 * spinlock. However, buffers may be removed from it and returned to
	 * userspace only while holding both the driver's lock and done_lock.
	 * So as long as we hold the driver's lock, a successful
	 * list_empty() check here stays valid.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/* Found a buffer that we were waiting for */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking; release the driver's lock
		 * so qbuf/streamoff can run from another thread while we
		 * sleep.
		 */
		call_qop(q, wait_prepare, q);

		/* All locks have been released; it is safe to sleep now */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * Reacquire the locks; both loop conditions are reevaluated
		 * on the next iteration after the wakeup.
		 */
		call_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "Sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
1442
1443
1444
1445
1446
1447
/*
 * __vb2_get_done_vb() - get a buffer ready for dequeuing. Will sleep if
 * required, for nonblocking == false.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
				struct v4l2_buffer *b, int nonblocking)
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * The driver's lock has been held since we last verified that
	 * done_list is not empty, so no further list_empty() check is
	 * needed before taking the first entry.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if the caller's v4l2_buffer
	 * can hold all its planes; otherwise leave it queued and fail.
	 */
	ret = __verify_planes_array(*vb, b);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
/**
 * vb2_wait_for_all_buffers() - wait until all buffers queued to the driver
 * are given back to vb2 via vb2_buffer_done()
 * @q:		videobuf2 queue
 *
 * This function is intended to be called with all locks taken: no buffers
 * can be queued to the driver while it sleeps. Returns -EINVAL when the
 * queue is not streaming (the wait could never finish otherwise).
 */
int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
	if (!q->streaming) {
		dprintk(1, "Streaming off, will not wait for buffers\n");
		return -EINVAL;
	}

	/* queued_count drops to 0 once every buffer was "done" */
	wait_event(q->done_wq, !atomic_read(&q->queued_count));
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1499
1500
1501
1502
/*
 * __vb2_dqbuf() - bring the buffer back to the DEQUEUED state; for DMABUF
 * buffers this also unmaps the planes so the exporter can reclaim access.
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int i;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	/* Unmap DMABUF buffers; they were mapped while queued (qbuf) */
	if (q->memory == V4L2_MEMORY_DMABUF)
		for (i = 0; i < vb->num_planes; ++i) {
			if (!vb->planes[i].dbuf_mapped)
				continue;
			call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
			vb->planes[i].dbuf_mapped = 0;
		}
}
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
/**
 * vb2_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_dqbuf
 *		handler in driver
 * @nonblocking: if true, this call will not sleep waiting for a buffer if
 *		 no buffers ready for dequeuing are present
 *
 * Should be called from vidioc_dqbuf ioctl handler of a driver.
 * Verifies the passed buffer, waits (unless @nonblocking) for a finished
 * buffer, calls the driver's buf_finish callback, and fills @b with the
 * buffer information for userspace. The return values are intended to be
 * returned directly from the driver's vidioc_dqbuf handler.
 */
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (q->fileio) {
		dprintk(1, "dqbuf: file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "dqbuf: invalid buffer type\n");
		return -EINVAL;
	}
	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
	if (ret < 0)
		return ret;

	/* Driver-specific per-dequeue processing, if provided */
	ret = call_qop(q, buf_finish, vb);
	if (ret) {
		dprintk(1, "dqbuf: buffer finish failed\n");
		return ret;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "dqbuf: Returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "dqbuf: Returning done buffer with errors\n");
		break;
	default:
		/* __vb2_get_done_vb() only pulls DONE/ERROR buffers */
		dprintk(1, "dqbuf: Invalid buffer state\n");
		return -EINVAL;
	}

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
1594
1595
1596
1597
1598
1599
1600
/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell the driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->streaming)
		call_qop(q, stop_streaming, q);
	q->streaming = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->queued_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use (back to DEQUEUED state).
	 */
	for (i = 0; i < q->num_buffers; ++i)
		__vb2_dqbuf(q->bufs[i]);
}
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
/**
 * vb2_streamon - start streaming
 * @q:		videobuf2 queue
 * @type:	type argument passed from userspace to vidioc_streamon handler
 *
 * Should be called from vidioc_streamon handler of a driver.
 * Verifies the request, passes any previously queued buffers to the driver
 * and starts streaming via the driver's start_streaming callback. The
 * return values are intended to be returned directly from the driver's
 * vidioc_streamon handler.
 */
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->fileio) {
		dprintk(1, "streamon: file io in progress\n");
		return -EBUSY;
	}

	if (type != q->type) {
		dprintk(1, "streamon: invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(1, "streamon: already streaming\n");
		return -EBUSY;
	}

	/*
	 * If any buffers were queued before streamon, pass them to the
	 * driver now.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/*
	 * Let the driver know that streaming state has been flipped on.
	 */
	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
	if (ret) {
		dprintk(1, "streamon: driver refused to start streaming\n");
		/* Roll back: reclaim buffers given to the driver above */
		__vb2_queue_cancel(q);
		return ret;
	}

	q->streaming = 1;

	dprintk(3, "Streamon successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_streamon);
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
1706{
1707 if (q->fileio) {
1708 dprintk(1, "streamoff: file io in progress\n");
1709 return -EBUSY;
1710 }
1711
1712 if (type != q->type) {
1713 dprintk(1, "streamoff: invalid stream type\n");
1714 return -EINVAL;
1715 }
1716
1717 if (!q->streaming) {
1718 dprintk(1, "streamoff: not streaming\n");
1719 return -EINVAL;
1720 }
1721
1722
1723
1724
1725
1726 __vb2_queue_cancel(q);
1727
1728 dprintk(3, "Streamoff successful\n");
1729 return 0;
1730}
1731EXPORT_SYMBOL_GPL(vb2_streamoff);
1732
1733
1734
1735
1736static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
1737 unsigned int *_buffer, unsigned int *_plane)
1738{
1739 struct vb2_buffer *vb;
1740 unsigned int buffer, plane;
1741
1742
1743
1744
1745
1746
1747 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
1748 vb = q->bufs[buffer];
1749
1750 for (plane = 0; plane < vb->num_planes; ++plane) {
1751 if (vb->v4l2_planes[plane].m.mem_offset == off) {
1752 *_buffer = buffer;
1753 *_plane = plane;
1754 return 0;
1755 }
1756 }
1757 }
1758
1759 return -EINVAL;
1760}
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
1772{
1773 struct vb2_buffer *vb = NULL;
1774 struct vb2_plane *vb_plane;
1775 int ret;
1776 struct dma_buf *dbuf;
1777
1778 if (q->memory != V4L2_MEMORY_MMAP) {
1779 dprintk(1, "Queue is not currently set up for mmap\n");
1780 return -EINVAL;
1781 }
1782
1783 if (!q->mem_ops->get_dmabuf) {
1784 dprintk(1, "Queue does not support DMA buffer exporting\n");
1785 return -EINVAL;
1786 }
1787
1788 if (eb->flags & ~O_CLOEXEC) {
1789 dprintk(1, "Queue does support only O_CLOEXEC flag\n");
1790 return -EINVAL;
1791 }
1792
1793 if (eb->type != q->type) {
1794 dprintk(1, "qbuf: invalid buffer type\n");
1795 return -EINVAL;
1796 }
1797
1798 if (eb->index >= q->num_buffers) {
1799 dprintk(1, "buffer index out of range\n");
1800 return -EINVAL;
1801 }
1802
1803 vb = q->bufs[eb->index];
1804
1805 if (eb->plane >= vb->num_planes) {
1806 dprintk(1, "buffer plane out of range\n");
1807 return -EINVAL;
1808 }
1809
1810 vb_plane = &vb->planes[eb->plane];
1811
1812 dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
1813 if (IS_ERR_OR_NULL(dbuf)) {
1814 dprintk(1, "Failed to export buffer %d, plane %d\n",
1815 eb->index, eb->plane);
1816 return -EINVAL;
1817 }
1818
1819 ret = dma_buf_fd(dbuf, eb->flags);
1820 if (ret < 0) {
1821 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
1822 eb->index, eb->plane, ret);
1823 dma_buf_put(dbuf);
1824 return ret;
1825 }
1826
1827 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
1828 eb->index, eb->plane, ret);
1829 eb->fd = ret;
1830
1831 return 0;
1832}
1833EXPORT_SYMBOL_GPL(vb2_expbuf);
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
1855{
1856 unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
1857 struct vb2_buffer *vb;
1858 unsigned int buffer, plane;
1859 int ret;
1860 unsigned long length;
1861
1862 if (q->memory != V4L2_MEMORY_MMAP) {
1863 dprintk(1, "Queue is not currently set up for mmap\n");
1864 return -EINVAL;
1865 }
1866
1867
1868
1869
1870 if (!(vma->vm_flags & VM_SHARED)) {
1871 dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
1872 return -EINVAL;
1873 }
1874 if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1875 if (!(vma->vm_flags & VM_WRITE)) {
1876 dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
1877 return -EINVAL;
1878 }
1879 } else {
1880 if (!(vma->vm_flags & VM_READ)) {
1881 dprintk(1, "Invalid vma flags, VM_READ needed\n");
1882 return -EINVAL;
1883 }
1884 }
1885
1886
1887
1888
1889 ret = __find_plane_by_offset(q, off, &buffer, &plane);
1890 if (ret)
1891 return ret;
1892
1893 vb = q->bufs[buffer];
1894
1895
1896
1897
1898
1899
1900 length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
1901 if (length < (vma->vm_end - vma->vm_start)) {
1902 dprintk(1,
1903 "MMAP invalid, as it would overflow buffer length\n");
1904 return -EINVAL;
1905 }
1906
1907 ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
1908 if (ret)
1909 return ret;
1910
1911 dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
1912 return 0;
1913}
1914EXPORT_SYMBOL_GPL(vb2_mmap);
1915
1916#ifndef CONFIG_MMU
/*
 * vb2_get_unmapped_area() - NOMMU variant of buffer mapping: returns the
 * kernel virtual address of the plane selected by the pgoff "cookie", so
 * userspace can access the buffer directly (no MMU to remap it).
 *
 * NOTE(review): the -EINVAL error codes are returned through an unsigned
 * long; callers are presumably expected to use IS_ERR_VALUE()-style
 * checking — confirm against the fops contract.
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	/* Only vb2-allocated (MMAP) buffers can be handed out this way. */
	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	return (unsigned long)vb2_plane_vaddr(vb, plane);
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
1945#endif
1946
1947static int __vb2_init_fileio(struct vb2_queue *q, int read);
1948static int __vb2_cleanup_fileio(struct vb2_queue *q);
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
/**
 * vb2_poll() - implements the poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * For CAPTURE queues the descriptor is reported readable when a buffer is
 * ready to be dequeued; for OUTPUT queues it is reported writable.  If the
 * driver uses struct v4l2_fh, pending V4L2 events are reported via POLLPRI.
 * May also start the read/write file io emulator when no buffers have been
 * requested through the streaming API yet.
 *
 * The return values from this function are intended to be directly returned
 * from the poll handler in the driver.
 */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned int res = 0;
	unsigned long flags;

	/* Report pending events, or register for event wakeups. */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	/* Nothing more to do if the caller isn't interested in buffer io. */
	if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
		return res;
	if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
		return res;

	/*
	 * Start the file io emulator only if the streaming API has not been
	 * used yet (no buffers allocated, no emulator running).
	 */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return res | POLLERR;
		}
		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return res | POLLERR;
			/*
			 * An OUTPUT queue with freshly allocated buffers can
			 * always be written to immediately.
			 */
			return res | POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if no buffer has been queued yet.
	 */
	if (list_empty(&q->queued_list))
		return res | POLLERR;

	if (list_empty(&q->done_list))
		poll_wait(file, &q->done_wq, wait);

	/*
	 * Peek at the first buffer available for dequeuing, under the done
	 * lock since completion runs in interrupt context.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
				res | POLLOUT | POLLWRNORM :
				res | POLLIN | POLLRDNORM;
	}
	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
/**
 * vb2_queue_init() - initialize a videobuf2 queue
 * @q:		videobuf2 queue; this structure should be allocated in driver
 *
 * The driver is responsible for clearing the structure and filling in the
 * ops, mem_ops, type, io_modes and timestamp_type fields before calling
 * this function.
 */
int vb2_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check: refuse a queue missing any mandatory field/callback.
	 * Order matters — !q must be checked before any q-> dereference.
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(!q->ops) ||
	    WARN_ON(!q->mem_ops) ||
	    WARN_ON(!q->type) ||
	    WARN_ON(!q->io_modes) ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue) ||
	    WARN_ON(q->timestamp_type & ~V4L2_BUF_FLAG_TIMESTAMP_MASK))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON(q->timestamp_type == V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	init_waitqueue_head(&q->done_wq);

	/* Fall back to the plain vb2_buffer if the driver set no size. */
	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
2079
2080
2081
2082
2083
2084
2085
2086
2087
/**
 * vb2_queue_release() - stop streaming, release the queue and free memory
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs cleanup in the queue. Should be
 * called by the driver after it is done with the queue, but before the queue
 * structure itself is freed.  Ordering matters: tear down the file io
 * emulator first, then cancel streaming, then free all buffers.
 */
void vb2_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	__vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
2095
2096
2097
2098
2099
2100
2101
2102
/*
 * struct vb2_fileio_buf - per-buffer context for the file io emulator
 * (the read()/write() compatibility layer built on top of the streaming
 * API).
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of the buffer's plane 0 */
	unsigned int size;	/* usable payload size, in bytes */
	unsigned int pos;	/* current read/write offset within the buffer */
	unsigned int queued:1;	/* set while the buffer is queued to the driver */
};
2109
2110
2111
2112
2113
2114
2115
2116
2117
/*
 * struct vb2_fileio_data - per-queue context for the file io emulator;
 * allocated on the first read()/write()/poll() and freed on cleanup.
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;	/* scratch REQBUFS argument (reused for freeing) */
	struct v4l2_buffer b;		/* scratch QBUF/DQBUF argument */
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];	/* per-buffer emulator state */
	unsigned int index;		/* buffer used for the next read/write */
	unsigned int q_count;		/* total buffers queued so far */
	unsigned int dq_count;		/* total buffers dequeued so far */
	unsigned int flags;		/* copy of the queue's io_flags (VB2_FILEIO_*) */
};
2127
2128
2129
2130
2131
2132
/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 *
 * Allocates the emulator context, requests MMAP buffers from the driver and
 * maps them into kernel space.  For read mode all buffers are additionally
 * pre-queued and streaming is started.  Returns 0 on success or a negative
 * errno; on failure all intermediate allocations are rolled back.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * The caller must only get here when the matching io mode
	 * (VB2_READ/VB2_WRITE) is advertised by the queue.
	 */
	if ((read && !(q->io_modes & VB2_READ)) ||
	   (!read && !(q->io_modes & VB2_WRITE)))
		BUG();

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Request exactly one buffer; the emulator cycles through
	 * q->num_buffers, so this is the only place that fixes the count.
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	/*
	 * Request buffers with MMAP memory type, forcing the driver to
	 * allocate the buffers by itself.
	 */
	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	ret = vb2_reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	/*
	 * The emulator only copies a single contiguous region, so
	 * multi-plane buffers are not supported.
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Cache the kernel address and size of each buffer's plane 0.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre-queuing of all buffers (capture fills them
	 * before userspace ever sees them); write mode queues on demand.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;
			memset(b, 0, sizeof(*b));
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}

		/*
		 * Start streaming.
		 */
		ret = vb2_streamon(q, q->type);
		if (ret)
			goto err_reqbufs;
	}

	q->fileio = fileio;

	return ret;

err_reqbufs:
	/* Freeing is done by re-requesting zero buffers. */
	fileio->req.count = 0;
	vb2_reqbufs(q, &fileio->req);

err_kfree:
	kfree(fileio);
	return ret;
}
2243
2244
2245
2246
2247
2248static int __vb2_cleanup_fileio(struct vb2_queue *q)
2249{
2250 struct vb2_fileio_data *fileio = q->fileio;
2251
2252 if (fileio) {
2253
2254
2255
2256
2257 q->fileio = NULL;
2258
2259 vb2_streamoff(q, q->type);
2260 fileio->req.count = 0;
2261 vb2_reqbufs(q, &fileio->req);
2262 kfree(fileio);
2263 dprintk(3, "file io emulator closed\n");
2264 }
2265 return 0;
2266}
2267
2268
2269
2270
2271
2272
2273
2274
2275
2276
2277static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2278 loff_t *ppos, int nonblock, int read)
2279{
2280 struct vb2_fileio_data *fileio;
2281 struct vb2_fileio_buf *buf;
2282 int ret, index;
2283
2284 dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
2285 read ? "read" : "write", (long)*ppos, count,
2286 nonblock ? "non" : "");
2287
2288 if (!data)
2289 return -EINVAL;
2290
2291
2292
2293
2294 if (!q->fileio) {
2295 ret = __vb2_init_fileio(q, read);
2296 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
2297 if (ret)
2298 return ret;
2299 }
2300 fileio = q->fileio;
2301
2302
2303
2304
2305
2306 q->fileio = NULL;
2307
2308 index = fileio->index;
2309 buf = &fileio->bufs[index];
2310
2311
2312
2313
2314 if (buf->queued) {
2315 struct vb2_buffer *vb;
2316
2317
2318
2319
2320 memset(&fileio->b, 0, sizeof(fileio->b));
2321 fileio->b.type = q->type;
2322 fileio->b.memory = q->memory;
2323 fileio->b.index = index;
2324 ret = vb2_dqbuf(q, &fileio->b, nonblock);
2325 dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
2326 if (ret)
2327 goto end;
2328 fileio->dq_count += 1;
2329
2330
2331
2332
2333 vb = q->bufs[index];
2334 buf->size = vb2_get_plane_payload(vb, 0);
2335 buf->queued = 0;
2336 }
2337
2338
2339
2340
2341 if (buf->pos + count > buf->size) {
2342 count = buf->size - buf->pos;
2343 dprintk(5, "reducing read count: %zd\n", count);
2344 }
2345
2346
2347
2348
2349 dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
2350 count, index, buf->pos);
2351 if (read)
2352 ret = copy_to_user(data, buf->vaddr + buf->pos, count);
2353 else
2354 ret = copy_from_user(buf->vaddr + buf->pos, data, count);
2355 if (ret) {
2356 dprintk(3, "file io: error copying data\n");
2357 ret = -EFAULT;
2358 goto end;
2359 }
2360
2361
2362
2363
2364 buf->pos += count;
2365 *ppos += count;
2366
2367
2368
2369
2370 if (buf->pos == buf->size ||
2371 (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
2372
2373
2374
2375 if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
2376 fileio->dq_count == 1) {
2377 dprintk(3, "file io: read limit reached\n");
2378
2379
2380
2381 q->fileio = fileio;
2382 return __vb2_cleanup_fileio(q);
2383 }
2384
2385
2386
2387
2388 memset(&fileio->b, 0, sizeof(fileio->b));
2389 fileio->b.type = q->type;
2390 fileio->b.memory = q->memory;
2391 fileio->b.index = index;
2392 fileio->b.bytesused = buf->pos;
2393 ret = vb2_qbuf(q, &fileio->b);
2394 dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
2395 if (ret)
2396 goto end;
2397
2398
2399
2400
2401 buf->pos = 0;
2402 buf->queued = 1;
2403 buf->size = q->bufs[0]->v4l2_planes[0].length;
2404 fileio->q_count += 1;
2405
2406
2407
2408
2409 fileio->index = (index + 1) % q->num_buffers;
2410
2411
2412
2413
2414 if (!read && !q->streaming) {
2415 ret = vb2_streamon(q, q->type);
2416 if (ret)
2417 goto end;
2418 }
2419 }
2420
2421
2422
2423
2424 if (ret == 0)
2425 ret = count;
2426end:
2427
2428
2429
2430 q->fileio = fileio;
2431 return ret;
2432}
2433
/*
 * vb2_read() - read() syscall emulation on top of the streaming API;
 * delegates to __vb2_perform_fileio() in read mode.
 */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);
2440
/*
 * vb2_write() - write() syscall emulation on top of the streaming API;
 * delegates to __vb2_perform_fileio() in write mode.
 */
size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2459{
2460 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2461}
2462
2463
2464
2465int vb2_ioctl_reqbufs(struct file *file, void *priv,
2466 struct v4l2_requestbuffers *p)
2467{
2468 struct video_device *vdev = video_devdata(file);
2469 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2470
2471 if (res)
2472 return res;
2473 if (vb2_queue_is_busy(vdev, file))
2474 return -EBUSY;
2475 res = __reqbufs(vdev->queue, p);
2476
2477
2478 if (res == 0)
2479 vdev->queue->owner = p->count ? file->private_data : NULL;
2480 return res;
2481}
2482EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2483
/* CREATE_BUFS ioctl helper. */
int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);

	/* Report where the new buffers would start. */
	p->index = vdev->queue->num_buffers;
	/*
	 * count == 0 is a pure validity probe: report any verification
	 * failure except -EBUSY, which only matters when actually creating.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = __create_bufs(vdev->queue, p);
	/* Creating buffers makes this filehandle the queue owner. */
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
2505
2506int vb2_ioctl_prepare_buf(struct file *file, void *priv,
2507 struct v4l2_buffer *p)
2508{
2509 struct video_device *vdev = video_devdata(file);
2510
2511 if (vb2_queue_is_busy(vdev, file))
2512 return -EBUSY;
2513 return vb2_prepare_buf(vdev->queue, p);
2514}
2515EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
2516
/* QUERYBUF ioctl helper. */
int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(): querying is read-only. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
2525
2526int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2527{
2528 struct video_device *vdev = video_devdata(file);
2529
2530 if (vb2_queue_is_busy(vdev, file))
2531 return -EBUSY;
2532 return vb2_qbuf(vdev->queue, p);
2533}
2534EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
2535
2536int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
2537{
2538 struct video_device *vdev = video_devdata(file);
2539
2540 if (vb2_queue_is_busy(vdev, file))
2541 return -EBUSY;
2542 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
2543}
2544EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
2545
2546int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
2547{
2548 struct video_device *vdev = video_devdata(file);
2549
2550 if (vb2_queue_is_busy(vdev, file))
2551 return -EBUSY;
2552 return vb2_streamon(vdev->queue, i);
2553}
2554EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
2555
2556int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
2557{
2558 struct video_device *vdev = video_devdata(file);
2559
2560 if (vb2_queue_is_busy(vdev, file))
2561 return -EBUSY;
2562 return vb2_streamoff(vdev->queue, i);
2563}
2564EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
2565
2566int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
2567{
2568 struct video_device *vdev = video_devdata(file);
2569
2570 if (vb2_queue_is_busy(vdev, file))
2571 return -EBUSY;
2572 return vb2_expbuf(vdev->queue, p);
2573}
2574EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
2575
2576
2577
/* v4l2 file operation helper: delegate mmap to the vb2 queue. */
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
2585
/* v4l2 file operation helper: release filehandle, tearing the queue down
 * first if this filehandle owns it. */
int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	/*
	 * Only the owner may take the queue down with it; other filehandles
	 * just release their v4l2_fh.
	 */
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);
2597
2598ssize_t vb2_fop_write(struct file *file, char __user *buf,
2599 size_t count, loff_t *ppos)
2600{
2601 struct video_device *vdev = video_devdata(file);
2602 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2603 int err = -EBUSY;
2604
2605 if (lock && mutex_lock_interruptible(lock))
2606 return -ERESTARTSYS;
2607 if (vb2_queue_is_busy(vdev, file))
2608 goto exit;
2609 err = vb2_write(vdev->queue, buf, count, ppos,
2610 file->f_flags & O_NONBLOCK);
2611 if (vdev->queue->fileio)
2612 vdev->queue->owner = file->private_data;
2613exit:
2614 if (lock)
2615 mutex_unlock(lock);
2616 return err;
2617}
2618EXPORT_SYMBOL_GPL(vb2_fop_write);
2619
2620ssize_t vb2_fop_read(struct file *file, char __user *buf,
2621 size_t count, loff_t *ppos)
2622{
2623 struct video_device *vdev = video_devdata(file);
2624 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
2625 int err = -EBUSY;
2626
2627 if (lock && mutex_lock_interruptible(lock))
2628 return -ERESTARTSYS;
2629 if (vb2_queue_is_busy(vdev, file))
2630 goto exit;
2631 err = vb2_read(vdev->queue, buf, count, ppos,
2632 file->f_flags & O_NONBLOCK);
2633 if (vdev->queue->fileio)
2634 vdev->queue->owner = file->private_data;
2635exit:
2636 if (lock)
2637 mutex_unlock(lock);
2638 return err;
2639}
2640EXPORT_SYMBOL_GPL(vb2_fop_read);
2641
/* v4l2 file operation helper: poll, taking the lock only when vb2_poll()
 * may mutate queue state by starting the file io emulator. */
unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	unsigned long req_events = poll_requested_events(wait);
	unsigned res;
	void *fileio;
	bool must_lock = false;

	/*
	 * vb2_poll() can start the read/write emulator when no buffers have
	 * been requested yet; that changes queue state, so such a poll must
	 * run under the lock. Mirror vb2_poll()'s own start conditions here.
	 */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM)))
			must_lock = true;
		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM)))
			must_lock = true;
	}

	/* A lock is expected whenever fileio may be started. */
	WARN_ON(must_lock && !lock);

	if (must_lock && lock && mutex_lock_interruptible(lock))
		return POLLERR;

	/* Snapshot to detect whether vb2_poll() started the emulator. */
	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then this filehandle owns the queue now. */
	if (must_lock && !fileio && q->fileio)
		q->owner = file->private_data;
	if (must_lock && lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);
2682
2683#ifndef CONFIG_MMU
/* v4l2 file operation helper (NOMMU): delegate to the vb2 queue. */
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
2692#endif
2693
2694
2695
/*
 * vb2_ops_wait_prepare() - generic wait_prepare callback: drop the queue
 * lock before vb2 sleeps waiting for a buffer, so other file operations
 * are not blocked meanwhile. Requires vq->lock to be set by the driver.
 */
void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
2701
/*
 * vb2_ops_wait_finish() - generic wait_finish callback: re-take the queue
 * lock after vb2 wakes from waiting for a buffer. Pairs with
 * vb2_ops_wait_prepare().
 */
void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
2707
2708MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
2709MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
2710MODULE_LICENSE("GPL");
2711