1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
22#include <media/videobuf2-core.h>
23
/* Module debug level; messages at or below this level are printed */
static int debug;
module_param(debug, int, 0644);

/* Print a debug message when the module debug level is >= @level */
#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2: " fmt, ## arg);		\
	} while (0)

/*
 * Invoke an optional memory op; evaluates to 0 when the op is not set.
 * NOTE(review): the 'plane' argument is currently unused by this macro.
 */
#define call_memop(q, plane, op, args...)				\
	(((q)->mem_ops->op) ?						\
		((q)->mem_ops->op(args)) : 0)

/* Invoke an optional queue op; evaluates to 0 when the op is not set */
#define call_qop(q, op, args...)					\
	(((q)->ops->op) ? ((q)->ops->op(args)) : 0)

/* v4l2_buffer flags that reflect internal buffer state managed by vb2 */
#define V4L2_BUFFER_STATE_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED)
43
44
45
46
47static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
48{
49 struct vb2_queue *q = vb->vb2_queue;
50 void *mem_priv;
51 int plane;
52
53
54 for (plane = 0; plane < vb->num_planes; ++plane) {
55 mem_priv = call_memop(q, plane, alloc, q->alloc_ctx[plane],
56 q->plane_sizes[plane]);
57 if (IS_ERR_OR_NULL(mem_priv))
58 goto free;
59
60
61 vb->planes[plane].mem_priv = mem_priv;
62 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
63 }
64
65 return 0;
66free:
67
68 for (; plane > 0; --plane)
69 call_memop(q, plane, put, vb->planes[plane - 1].mem_priv);
70
71 return -ENOMEM;
72}
73
74
75
76
77static void __vb2_buf_mem_free(struct vb2_buffer *vb)
78{
79 struct vb2_queue *q = vb->vb2_queue;
80 unsigned int plane;
81
82 for (plane = 0; plane < vb->num_planes; ++plane) {
83 call_memop(q, plane, put, vb->planes[plane].mem_priv);
84 vb->planes[plane].mem_priv = NULL;
85 dprintk(3, "Freed plane %d of buffer %d\n",
86 plane, vb->v4l2_buf.index);
87 }
88}
89
90
91
92
93
94static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
95{
96 struct vb2_queue *q = vb->vb2_queue;
97 unsigned int plane;
98
99 for (plane = 0; plane < vb->num_planes; ++plane) {
100 void *mem_priv = vb->planes[plane].mem_priv;
101
102 if (mem_priv) {
103 call_memop(q, plane, put_userptr, mem_priv);
104 vb->planes[plane].mem_priv = NULL;
105 }
106 }
107}
108
109
110
111
112
/*
 * __setup_offsets() - assign a unique, page-aligned mem_offset ("cookie"
 * returned to userspace for mmap) to every plane of the @n most recently
 * added buffers, continuing after the offsets of any pre-existing buffers.
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		struct v4l2_plane *p;
		/* Continue after the last plane of the last existing buffer */
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			/* Keep every plane's offset distinct and page-aligned */
			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}
145
146
147
148
149
150
151
152
/*
 * __vb2_queue_alloc() - allocate vb2 buffer structures and (for MMAP)
 * video memory for num_buffers buffers on the queue.
 *
 * Returns the number of buffers successfully allocated, which may be
 * smaller than @num_buffers if an allocation fails part-way; buffers
 * allocated so far are kept and registered on the queue.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate the driver-sized buffer struct (zeroed) */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* For multiplanar buffers, length carries the plane count */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * aborts the allocation loop.
			 */
			ret = call_qop(q, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	/* Hand out unique mmap cookies for whatever was allocated */
	__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}
213
214
215
216
217static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
218{
219 unsigned int buffer;
220 struct vb2_buffer *vb;
221
222 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
223 ++buffer) {
224 vb = q->bufs[buffer];
225 if (!vb)
226 continue;
227
228
229 if (q->memory == V4L2_MEMORY_MMAP)
230 __vb2_buf_mem_free(vb);
231 else
232 __vb2_buf_userptr_put(vb);
233 }
234}
235
236
237
238
239
240
/*
 * __vb2_queue_free() - free @buffers buffers from the end of the queue:
 * run the driver's buf_cleanup callback, release the video memory, free
 * the vb2_buffer structs, and update queue bookkeeping.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/* Call driver-provided cleanup function for each buffer, if any */
	if (q->ops->buf_cleanup) {
		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
		     ++buffer) {
			if (NULL == q->bufs[buffer])
				continue;
			q->ops->buf_cleanup(q->bufs[buffer]);
		}
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

	/* Free the vb2 buffer structures themselves */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	/* With no buffers left, the memory type becomes negotiable again */
	if (!q->num_buffers)
		q->memory = 0;
	INIT_LIST_HEAD(&q->queued_list);
}
270
271
272
273
274
275static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
276{
277
278 if (NULL == b->m.planes) {
279 dprintk(1, "Multi-planar buffer passed but "
280 "planes array not provided\n");
281 return -EINVAL;
282 }
283
284 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
285 dprintk(1, "Incorrect planes array length, "
286 "expected %d, got %d\n", vb->num_planes, b->length);
287 return -EINVAL;
288 }
289
290 return 0;
291}
292
293
294
295
296
297static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
298{
299 unsigned int plane;
300 for (plane = 0; plane < vb->num_planes; ++plane) {
301 void *mem_priv = vb->planes[plane].mem_priv;
302
303
304
305
306
307
308 if (mem_priv && call_memop(q, plane, num_users, mem_priv) > 1)
309 return true;
310 }
311 return false;
312}
313
314
315
316
317
318static bool __buffers_in_use(struct vb2_queue *q)
319{
320 unsigned int buffer;
321 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
322 if (__buffer_in_use(q, q->bufs[buffer]))
323 return true;
324 }
325 return false;
326}
327
328
329
330
331
/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information
 * to be returned to userspace for this vb2_buffer.
 */
static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	int ret;

	/* Copy back data such as timestamp, flags, input, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->input = vb->v4l2_buf.input;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		ret = __verify_planes_array(vb, b);
		if (ret)
			return ret;

		/*
		 * Fill in plane-related data if userspace provided an
		 * array for it; the array was validated above.
		 */
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * Internally vb2 keeps even single-planar data in
		 * v4l2_planes[0]; userspace expects it in the v4l2_buffer
		 * fields instead.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
	}

	/*
	 * Clear any state flags; they are re-derived from vb->state below.
	 */
	b->flags &= ~V4L2_BUFFER_STATE_FLAGS;

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through: an errored buffer is also done */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing: dequeued buffers carry no state flag */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	return 0;
}
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
410{
411 struct vb2_buffer *vb;
412
413 if (b->type != q->type) {
414 dprintk(1, "querybuf: wrong buffer type\n");
415 return -EINVAL;
416 }
417
418 if (b->index >= q->num_buffers) {
419 dprintk(1, "querybuf: buffer index out of range\n");
420 return -EINVAL;
421 }
422 vb = q->bufs[b->index];
423
424 return __fill_v4l2_buffer(vb, b);
425}
426EXPORT_SYMBOL(vb2_querybuf);
427
428
429
430
431
432static int __verify_userptr_ops(struct vb2_queue *q)
433{
434 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
435 !q->mem_ops->put_userptr)
436 return -EINVAL;
437
438 return 0;
439}
440
441
442
443
444
445static int __verify_mmap_ops(struct vb2_queue *q)
446{
447 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
448 !q->mem_ops->put || !q->mem_ops->mmap)
449 return -EINVAL;
450
451 return 0;
452}
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
/**
 * vb2_reqbufs() - Initiate streaming by allocating or freeing buffers
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to the vidioc_reqbufs handler
 *		in the driver
 *
 * Should be called from the vidioc_reqbufs ioctl handler of a driver.
 * Verifies the request, frees any previously allocated buffers (if
 * possible), negotiates the buffer/plane counts with the driver's
 * queue_setup callback and allocates the buffers. On success req->count
 * is updated with the number of buffers actually allocated.
 */
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	int ret = 0;

	if (q->fileio) {
		dprintk(1, "reqbufs: file io in progress\n");
		return -EBUSY;
	}

	if (req->memory != V4L2_MEMORY_MMAP
			&& req->memory != V4L2_MEMORY_USERPTR) {
		dprintk(1, "reqbufs: unsupported memory type\n");
		return -EINVAL;
	}

	if (req->type != q->type) {
		dprintk(1, "reqbufs: requested type is incorrect\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming active\n");
		return -EBUSY;
	}

	/*
	 * Make sure all the required memory ops for the given memory type
	 * are available.
	 */
	if (req->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (req->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
		/*
		 * We already have buffers allocated (or the memory type is
		 * changing), so first check that they are not in use and
		 * can be freed.
		 */
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			dprintk(1, "reqbufs: memory in use, cannot free\n");
			return -EBUSY;
		}

		__vb2_queue_free(q, q->num_buffers);

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * the driver's queue_setup() callback or allocating anything.
		 */
		if (req->count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = req->memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it
	 * requires; all buffers and their planes have the same size.
	 */
	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
		       q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
	if (ret == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	allocated_buffers = ret;

	/*
	 * If fewer buffers were allocated than requested, re-negotiate
	 * with the driver whether it can work with that many.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of
		 * buffers, or .queue_setup() returned an error.
		 */
	}

	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	req->count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
/**
 * vb2_create_bufs() - Allocate additional buffers on an initialized queue
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to the
 *		vidioc_create_bufs handler in the driver
 *
 * Should be called from the vidioc_create_bufs ioctl handler of a driver.
 * Negotiates the plane setup with the driver for the format in @create
 * and appends the new buffers after the existing ones. On success
 * create->index holds the index of the first new buffer and
 * create->count the number of buffers actually allocated.
 */
int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	int ret = 0;

	if (q->fileio) {
		dprintk(1, "%s(): file io in progress\n", __func__);
		return -EBUSY;
	}

	if (create->memory != V4L2_MEMORY_MMAP
			&& create->memory != V4L2_MEMORY_USERPTR) {
		dprintk(1, "%s(): unsupported memory type\n", __func__);
		return -EINVAL;
	}

	if (create->format.type != q->type) {
		dprintk(1, "%s(): requested type is incorrect\n", __func__);
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for the given memory type
	 * are available.
	 */
	if (create->memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "%s(): MMAP for current setup unsupported\n", __func__);
		return -EINVAL;
	}

	if (create->memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "%s(): USERPTR for current setup unsupported\n", __func__);
		return -EINVAL;
	}

	if (q->num_buffers == VIDEO_MAX_FRAME) {
		dprintk(1, "%s(): maximum number of buffers already allocated\n",
			__func__);
		return -ENOBUFS;
	}

	/* New buffers are appended; report the index of the first one */
	create->index = q->num_buffers;

	/* First allocation on this queue fixes the memory type */
	if (!q->num_buffers) {
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = create->memory;
	}

	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);

	/*
	 * Ask the driver whether the requested number of buffers, planes
	 * per buffer and their sizes are acceptable for this format.
	 */
	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
		       &num_planes, q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	ret = __vb2_queue_alloc(q, create->memory, num_buffers,
				num_planes);
	if (ret < 0) {
		dprintk(1, "Memory allocation failed with error: %d\n", ret);
		return ret;
	}

	allocated_buffers = ret;

	/*
	 * If fewer buffers were allocated than requested, re-negotiate
	 * with the driver whether it can work with that many.
	 */
	if (ret < num_buffers) {
		num_buffers = ret;

		/*
		 * num_buffers now holds the total number of buffers that
		 * were actually allocated in this call.
		 */
		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of
		 * buffers, or .queue_setup() returned an error.
		 */
	}

	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	create->count = allocated_buffers;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);
721
722
723
724
725
726
727
728
729
730void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
731{
732 struct vb2_queue *q = vb->vb2_queue;
733
734 if (plane_no > vb->num_planes)
735 return NULL;
736
737 return call_memop(q, plane_no, vaddr, vb->planes[plane_no].mem_priv);
738
739}
740EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
741
742
743
744
745
746
747
748
749
750
751
752
753void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
754{
755 struct vb2_queue *q = vb->vb2_queue;
756
757 if (plane_no > vb->num_planes)
758 return NULL;
759
760 return call_memop(q, plane_no, cookie, vb->planes[plane_no].mem_priv);
761}
762EXPORT_SYMBOL_GPL(vb2_plane_cookie);
763
764
765
766
767
768
769
770
771
772
773
774
775
/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer
 * is finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished
 *		successfully or VB2_BUF_STATE_ERROR if it failed
 *
 * Drivers call this after finishing work on a buffer previously handed
 * to them by buf_queue. The buffer is moved to the done list under
 * done_lock, so this may be called from interrupt context.
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;

	/* Only buffers currently owned by the driver can be completed */
	if (vb->state != VB2_BUF_STATE_ACTIVE)
		return;

	if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR)
		return;

	dprintk(4, "Done processing on buffer %d, state: %d\n",
			vb->v4l2_buf.index, vb->state);

	/* Add the buffer to the done buffers list */
	spin_lock_irqsave(&q->done_lock, flags);
	vb->state = state;
	list_add_tail(&vb->done_entry, &q->done_list);
	atomic_dec(&q->queued_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* Inform any processes that may be waiting for buffers */
	wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
801
802
803
804
805
/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * struct v4l2_buffer by the userspace. Plane data is written to the
 * caller-supplied @v4l2_planes array (not vb->v4l2_planes directly) so
 * a later failure can leave the buffer's own state untouched.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;
	int ret;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/*
		 * Verify that the userspace gave us a valid array for
		 * plane information.
		 */
		ret = __verify_planes_array(vb, b);
		if (ret)
			return ret;

		/* Fill in userspace-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Iterates over vb->num_planes; would have to go up
			 * to b->length if the API ever accepts a variable
			 * number of planes per buffer.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].bytesused =
					b->m.planes[plane].bytesused;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use the planes array in the
		 * userspace API, so take the relevant fields from the
		 * v4l2_buffer struct itself. Internally vb2 still keeps
		 * them in v4l2_planes[0] for simplicity.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type))
			v4l2_planes[0].bytesused = b->bytesused;

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}
	}

	vb->v4l2_buf.field = b->field;
	vb->v4l2_buf.timestamp = b->timestamp;
	vb->v4l2_buf.input = b->input;
	/* Buffer state flags are owned by vb2, never taken from userspace */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_STATE_FLAGS;

	return 0;
}
866
867
868
869
/*
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer: (re)acquire the
 * userspace memory for any plane whose address or length changed since
 * the last qbuf, rolling everything back on failure.
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture-type queues are written to by the device */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);

	/* Verify and copy relevant information provided by the userspace */
	ret = __fill_vb2_buffer(vb, b, planes);
	if (ret)
		return ret;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if it is unchanged from the previous qbuf */
		if (vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "qbuf: userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv)
			call_memop(q, plane, put_userptr,
					vb->planes[plane].mem_priv);

		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;

		/* Acquire each plane's memory */
		if (q->mem_ops->get_userptr) {
			mem_priv = q->mem_ops->get_userptr(q->alloc_ctx[plane],
							planes[plane].m.userptr,
							planes[plane].length,
							write);
			if (IS_ERR(mem_priv)) {
				dprintk(1, "qbuf: failed acquiring userspace "
						"memory for plane %d\n", plane);
				ret = PTR_ERR(mem_priv);
				goto err;
			}
			vb->planes[plane].mem_priv = mem_priv;
		}
	}

	/*
	 * Call driver-specific initialization on the newly acquired buffer,
	 * if provided.
	 */
	ret = call_qop(q, buf_init, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer initialization failed\n");
		goto err;
	}

	/*
	 * Now that everything is in order, copy the plane information
	 * provided by userspace into the buffer itself.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	return 0;
err:
	/* On error, release every plane acquired so far and reset them */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_memop(q, plane, put_userptr,
					vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}
955
956
957
958
/*
 * __qbuf_mmap() - handle qbuf of an MMAP buffer: memory is already
 * allocated, so only userspace-provided information needs copying.
 */
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	return __fill_vb2_buffer(vb, b, vb->v4l2_planes);
}
963
964
965
966
/*
 * __enqueue_in_driver() - enqueue a vb2_buffer in the driver for
 * processing. State and the queued counter are updated BEFORE the
 * driver callback runs, since the driver may complete the buffer
 * (vb2_buffer_done) from within buf_queue.
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->queued_count);
	q->ops->buf_queue(vb);
}
975
976static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
977{
978 struct vb2_queue *q = vb->vb2_queue;
979 int ret;
980
981 switch (q->memory) {
982 case V4L2_MEMORY_MMAP:
983 ret = __qbuf_mmap(vb, b);
984 break;
985 case V4L2_MEMORY_USERPTR:
986 ret = __qbuf_userptr(vb, b);
987 break;
988 default:
989 WARN(1, "Invalid queue type\n");
990 ret = -EINVAL;
991 }
992
993 if (!ret)
994 ret = call_qop(q, buf_prepare, vb);
995 if (ret)
996 dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
997 else
998 vb->state = VB2_BUF_STATE_PREPARED;
999
1000 return ret;
1001}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
/**
 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the
 * kernel
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to the
 *		vidioc_prepare_buf handler in the driver
 *
 * Should be called from the vidioc_prepare_buf ioctl handler of a driver.
 * Verifies the passed buffer, runs the buffer preparation path (including
 * the driver's buf_prepare callback) and fills @b with current buffer
 * information for userspace.
 */
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->fileio) {
		dprintk(1, "%s(): file io in progress\n", __func__);
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "%s(): invalid buffer type\n", __func__);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "%s(): buffer index out of range\n", __func__);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	if (NULL == vb) {
		/* Should never happen for a valid index */
		dprintk(1, "%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(1, "%s(): invalid memory type\n", __func__);
		return -EINVAL;
	}

	/* Only a dequeued (userspace-owned) buffer may be prepared */
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
		return -EINVAL;
	}

	ret = __buf_prepare(vb, b);
	if (ret < 0)
		return ret;

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
/**
 * vb2_qbuf() - Queue a buffer from userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to the vidioc_qbuf
 *		handler in the driver
 *
 * Should be called from the vidioc_qbuf ioctl handler of a driver.
 * Verifies the passed buffer, prepares it if it was not prepared before,
 * adds it to the list of buffers ready to be dequeued by the driver and
 * (if streaming is on) hands it to the driver immediately. Fills @b
 * with current buffer information for userspace.
 */
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->fileio) {
		dprintk(1, "qbuf: file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "qbuf: invalid buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "qbuf: buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	if (NULL == vb) {
		/* Should never happen for a valid index */
		dprintk(1, "qbuf: buffer is NULL\n");
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(1, "qbuf: invalid memory type\n");
		return -EINVAL;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		ret = __buf_prepare(vb, b);
		if (ret)
			return ret;
		/* fall through: the buffer is now prepared */
	case VB2_BUF_STATE_PREPARED:
		break;
	default:
		dprintk(1, "qbuf: buffer already in use\n");
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list; a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	vb->state = VB2_BUF_STATE_QUEUED;

	/*
	 * If already streaming, give the buffer to the driver for
	 * processing. If not, the buffer will be given to the driver on
	 * the next streamon call.
	 */
	if (q->streaming)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
1147
1148
1149
1150
1151
1152
1153
/*
 * __vb2_wait_for_done_vb() - wait for a buffer to become available on
 * the done list for dequeuing.
 *
 * Will sleep if required for nonblocking == false. The driver's
 * wait_prepare/wait_finish callbacks are used to drop and reacquire any
 * driver lock around the sleep, so qbuf/vb2_buffer_done can make
 * progress while we wait.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from the
	 * list only in dqbuf, and dqbuf calls are serialized by the
	 * caller, so a non-empty list seen here cannot become empty
	 * before we take the lock.
	 */
	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for;
			 * see the lockless-check comment above.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
					"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. The driver releases any
		 * lock it holds in wait_prepare so other ioctls can run.
		 */
		call_qop(q, wait_prepare, q);

		/*
		 * All locks have been released; it is safe to sleep now.
		 */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * Reacquire driver locks before re-checking the list in
		 * the next loop iteration.
		 */
		call_qop(q, wait_finish, q);
		if (ret)
			return ret;
	}
	return 0;
}
1210
1211
1212
1213
1214
1215
/*
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required for nonblocking == false. On success *vb points
 * to the first buffer on the done list, removed from that list.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
				int nonblocking)
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on the done
	 * list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that the done
	 * list is not empty, so no need for another list_empty(done_list)
	 * check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return 0;
}
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250int vb2_wait_for_all_buffers(struct vb2_queue *q)
1251{
1252 if (!q->streaming) {
1253 dprintk(1, "Streaming off, will not wait for buffers\n");
1254 return -EINVAL;
1255 }
1256
1257 wait_event(q->done_wq, !atomic_read(&q->queued_count));
1258 return 0;
1259}
1260EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
/**
 * vb2_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to the vidioc_dqbuf
 *		handler in the driver
 * @nonblocking: if true, this call will not sleep waiting for a buffer if
 *		 no buffers ready for dequeuing are present
 *
 * Should be called from the vidioc_dqbuf ioctl handler of a driver.
 * Waits for a completed buffer (unless @nonblocking), runs the driver's
 * buf_finish callback and fills @b with the buffer information for
 * userspace. The buffer returns to the DEQUEUED (userspace-owned) state.
 */
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (q->fileio) {
		dprintk(1, "dqbuf: file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "dqbuf: invalid buffer type\n");
		return -EINVAL;
	}

	ret = __vb2_get_done_vb(q, &vb, nonblocking);
	if (ret < 0) {
		dprintk(1, "dqbuf: error getting next done buffer\n");
		return ret;
	}

	/* Give the driver a chance to finalize the buffer before return */
	ret = call_qop(q, buf_finish, vb);
	if (ret) {
		dprintk(1, "dqbuf: buffer finish failed\n");
		return ret;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "dqbuf: Returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "dqbuf: Returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "dqbuf: Invalid buffer state\n");
		return -EINVAL;
	}

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	vb->state = VB2_BUF_STATE_DEQUEUED;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);
1334
1335
1336
1337
1338
1339
1340
/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued and done buffers from the lists and returns all
 * buffers to the DEQUEUED (userspace-owned) state.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell the driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->streaming)
		call_qop(q, stop_streaming, q);
	q->streaming = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->queued_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 */
	for (i = 0; i < q->num_buffers; ++i)
		q->bufs[i]->state = VB2_BUF_STATE_DEQUEUED;
}
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
/**
 * vb2_streamon - start streaming
 * @q:		videobuf2 queue
 * @type:	type argument passed from userspace to vidioc_streamon handler
 *
 * Should be called from the vidioc_streamon handler of a driver.
 * Verifies the request, passes any buffers queued before streamon to the
 * driver and calls the driver's start_streaming callback.
 */
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->fileio) {
		dprintk(1, "streamon: file io in progress\n");
		return -EBUSY;
	}

	if (type != q->type) {
		dprintk(1, "streamon: invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(1, "streamon: already streaming\n");
		return -EBUSY;
	}

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to the driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/*
	 * Let the driver notice that streaming state has been enabled.
	 */
	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
	if (ret) {
		dprintk(1, "streamon: driver refused to start streaming\n");
		/* Take already-enqueued buffers back from the driver */
		__vb2_queue_cancel(q);
		return ret;
	}

	q->streaming = 1;

	dprintk(3, "Streamon successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_streamon);
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
1446{
1447 if (q->fileio) {
1448 dprintk(1, "streamoff: file io in progress\n");
1449 return -EBUSY;
1450 }
1451
1452 if (type != q->type) {
1453 dprintk(1, "streamoff: invalid stream type\n");
1454 return -EINVAL;
1455 }
1456
1457 if (!q->streaming) {
1458 dprintk(1, "streamoff: not streaming\n");
1459 return -EINVAL;
1460 }
1461
1462
1463
1464
1465
1466 __vb2_queue_cancel(q);
1467
1468 dprintk(3, "Streamoff successful\n");
1469 return 0;
1470}
1471EXPORT_SYMBOL_GPL(vb2_streamoff);
1472
1473
1474
1475
1476static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
1477 unsigned int *_buffer, unsigned int *_plane)
1478{
1479 struct vb2_buffer *vb;
1480 unsigned int buffer, plane;
1481
1482
1483
1484
1485
1486
1487 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
1488 vb = q->bufs[buffer];
1489
1490 for (plane = 0; plane < vb->num_planes; ++plane) {
1491 if (vb->v4l2_planes[plane].m.mem_offset == off) {
1492 *_buffer = buffer;
1493 *_plane = plane;
1494 return 0;
1495 }
1496 }
1497 }
1498
1499 return -EINVAL;
1500}
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
/**
 * vb2_mmap() - map video buffers into application address space
 * @q:		videobuf2 queue
 * @vma:	vma passed to the mmap file operation handler in the driver
 *
 * Should be called from the mmap file operation handler of a driver.
 * Maps one plane of one of the available video buffers to userspace;
 * the plane is selected by vma->vm_pgoff, which must match a mem_offset
 * cookie returned by querybuf.
 */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_plane *vb_plane;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/* Output buffers are filled by userspace: must be writable */
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		/* Capture buffers are read by userspace: must be readable */
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(1, "Invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];
	vb_plane = &vb->planes[plane];

	/* Delegate the actual mapping to the allocator */
	ret = q->mem_ops->mmap(vb_plane->mem_priv, vma);
	if (ret)
		return ret;

	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
1571
1572#ifndef CONFIG_MMU
/*
 * vb2_get_unmapped_area() - get_unmapped_area handler for no-MMU systems.
 *
 * Returns the kernel virtual address of the plane whose mem_offset cookie
 * matches @pgoff, or a negative errno cast to unsigned long on failure
 * (standard get_unmapped_area return convention).
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	return (unsigned long)vb2_plane_vaddr(vb, plane);
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
1601#endif
1602
1603static int __vb2_init_fileio(struct vb2_queue *q, int read);
1604static int __vb2_cleanup_fileio(struct vb2_queue *q);
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
/**
 * vb2_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * Should be called from the poll file operation handler of a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace
 * will be informed that the file descriptor of a video device is
 * available for reading. For OUTPUT queues, if a buffer is ready to be
 * dequeued, the file descriptor will be reported as available for
 * writing. If the queue has not been started yet, emulated file-io mode
 * is initialized on the first poll.
 */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	unsigned long flags;
	unsigned int ret;
	struct vb2_buffer *vb = NULL;

	/*
	 * Start file I/O emulator only if streaming API has not been used
	 * yet (no buffers allocated via reqbufs/create_bufs).
	 */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ)) {
			ret = __vb2_init_fileio(q, 1);
			if (ret)
				return POLLERR;
		}
		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE)) {
			ret = __vb2_init_fileio(q, 0);
			if (ret)
				return POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if no buffers have already been
	 * queued.
	 */
	if (list_empty(&q->queued_list))
		return POLLERR;

	poll_wait(file, &q->done_wq, wait);

	/*
	 * Take a look at the first done buffer, if any (under done_lock,
	 * since vb2_buffer_done may run from interrupt context).
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (V4L2_TYPE_IS_OUTPUT(q->type)) ? POLLOUT | POLLWRNORM :
			POLLIN | POLLRDNORM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_poll);
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685int vb2_queue_init(struct vb2_queue *q)
1686{
1687 BUG_ON(!q);
1688 BUG_ON(!q->ops);
1689 BUG_ON(!q->mem_ops);
1690 BUG_ON(!q->type);
1691 BUG_ON(!q->io_modes);
1692
1693 BUG_ON(!q->ops->queue_setup);
1694 BUG_ON(!q->ops->buf_queue);
1695
1696 INIT_LIST_HEAD(&q->queued_list);
1697 INIT_LIST_HEAD(&q->done_list);
1698 spin_lock_init(&q->done_lock);
1699 init_waitqueue_head(&q->done_wq);
1700
1701 if (q->buf_struct_size == 0)
1702 q->buf_struct_size = sizeof(struct vb2_buffer);
1703
1704 return 0;
1705}
1706EXPORT_SYMBOL_GPL(vb2_queue_init);
1707
1708
1709
1710
1711
1712
1713
1714
1715
/**
 * vb2_queue_release() - stop streaming, release the queue and all its buffers
 * @q:		videobuf2 queue
 *
 * Teardown order matters: the file-I/O emulator (which owns requested
 * buffers and may be streaming) is shut down first, then any in-flight
 * streaming is cancelled, and finally all buffers are freed.
 */
void vb2_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	__vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
1723
1724
1725
1726
1727
1728
1729
1730
/**
 * struct vb2_fileio_buf - per-buffer context of the file I/O emulator
 * @vaddr:	kernel virtual address of the buffer's plane 0
 * @size:	usable size in bytes (plane size, or payload after dequeue)
 * @pos:	current read/write offset within the buffer
 * @queued:	set while the buffer is queued to the driver
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};
1737
1738
1739
1740
1741
1742
1743
1744
1745
/**
 * struct vb2_fileio_data - queue context of the file I/O emulator
 * @req:	scratch request used for vb2_reqbufs() calls (count = 0 frees)
 * @b:		scratch v4l2_buffer used for vb2_qbuf()/vb2_dqbuf() calls
 * @bufs:	per-buffer emulator state, indexed like q->bufs
 * @index:	index of the buffer currently being read from / written to
 * @q_count:	total number of buffers queued by the emulator
 * @dq_count:	total number of buffers dequeued by the emulator
 * @flags:	copy of the queue's io_flags (VB2_FILEIO_* behavior bits)
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;
	struct v4l2_buffer b;
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
	unsigned int index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned int flags;
};
1755
1756
1757
1758
1759
1760
1761static int __vb2_init_fileio(struct vb2_queue *q, int read)
1762{
1763 struct vb2_fileio_data *fileio;
1764 int i, ret;
1765 unsigned int count = 0;
1766
1767
1768
1769
1770 if ((read && !(q->io_modes & VB2_READ)) ||
1771 (!read && !(q->io_modes & VB2_WRITE)))
1772 BUG();
1773
1774
1775
1776
1777 if (!q->mem_ops->vaddr)
1778 return -EBUSY;
1779
1780
1781
1782
1783 if (q->streaming || q->num_buffers > 0)
1784 return -EBUSY;
1785
1786
1787
1788
1789 count = 1;
1790
1791 dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
1792 (read) ? "read" : "write", count, q->io_flags);
1793
1794 fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
1795 if (fileio == NULL)
1796 return -ENOMEM;
1797
1798 fileio->flags = q->io_flags;
1799
1800
1801
1802
1803
1804 fileio->req.count = count;
1805 fileio->req.memory = V4L2_MEMORY_MMAP;
1806 fileio->req.type = q->type;
1807 ret = vb2_reqbufs(q, &fileio->req);
1808 if (ret)
1809 goto err_kfree;
1810
1811
1812
1813
1814
1815 if (q->bufs[0]->num_planes != 1) {
1816 fileio->req.count = 0;
1817 ret = -EBUSY;
1818 goto err_reqbufs;
1819 }
1820
1821
1822
1823
1824 for (i = 0; i < q->num_buffers; i++) {
1825 fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
1826 if (fileio->bufs[i].vaddr == NULL)
1827 goto err_reqbufs;
1828 fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
1829 }
1830
1831
1832
1833
1834 if (read) {
1835
1836
1837
1838 for (i = 0; i < q->num_buffers; i++) {
1839 struct v4l2_buffer *b = &fileio->b;
1840 memset(b, 0, sizeof(*b));
1841 b->type = q->type;
1842 b->memory = q->memory;
1843 b->index = i;
1844 ret = vb2_qbuf(q, b);
1845 if (ret)
1846 goto err_reqbufs;
1847 fileio->bufs[i].queued = 1;
1848 }
1849
1850
1851
1852
1853 ret = vb2_streamon(q, q->type);
1854 if (ret)
1855 goto err_reqbufs;
1856 }
1857
1858 q->fileio = fileio;
1859
1860 return ret;
1861
1862err_reqbufs:
1863 vb2_reqbufs(q, &fileio->req);
1864
1865err_kfree:
1866 kfree(fileio);
1867 return ret;
1868}
1869
1870
1871
1872
1873
1874static int __vb2_cleanup_fileio(struct vb2_queue *q)
1875{
1876 struct vb2_fileio_data *fileio = q->fileio;
1877
1878 if (fileio) {
1879
1880
1881
1882
1883 q->fileio = NULL;
1884
1885 vb2_streamoff(q, q->type);
1886 fileio->req.count = 0;
1887 vb2_reqbufs(q, &fileio->req);
1888 kfree(fileio);
1889 dprintk(3, "file io emulator closed\n");
1890 }
1891 return 0;
1892}
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
/**
 * __vb2_perform_fileio() - perform a single file I/O (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to the userspace buffer
 * @count:	number of bytes the user wants to read or write
 * @ppos:	file position pointer, advanced by the number of bytes copied
 * @nonblock:	non-zero for a non-blocking operation
 * @read:	access mode selector (1 means read, 0 means write)
 *
 * Returns the number of bytes copied, or a negative errno (returned
 * through size_t, as the read/write file operations expect).
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	int ret, index;

	dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize the emulator on the first call.
	 */
	if (!q->fileio) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Temporarily clear q->fileio so the vb2 ioctl helpers called below
	 * (vb2_dqbuf/vb2_qbuf/vb2_streamon) don't reject the queue as busy
	 * with file I/O. The pointer is restored before returning.
	 */
	q->fileio = NULL;

	index = fileio->index;
	buf = &fileio->bufs[index];

	/*
	 * Dequeue the current buffer first if it is still with the driver.
	 */
	if (buf->queued) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get the buffer back.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		fileio->b.index = index;
		ret = vb2_dqbuf(q, &fileio->b, nonblock);
		dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
		if (ret)
			goto end;
		fileio->dq_count += 1;

		/*
		 * Use the number of bytes actually filled by the driver
		 * as the readable size of this buffer.
		 */
		vb = q->bufs[index];
		buf->size = vb2_get_plane_payload(vb, 0);
		buf->queued = 0;
	}

	/*
	 * Limit count to the bytes remaining in the current buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data between the buffer and userspace.
	 */
	dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "file io: error copying data\n");
		ret = -EFAULT;
		goto end;
	}

	/*
	 * Update the buffer offset and the file position.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue the next buffer if the current one is exhausted, or
	 * immediately in write mode with VB2_FILEIO_WRITE_IMMEDIATELY.
	 */
	if (buf->pos == buf->size ||
	    (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
		/*
		 * In READ_ONCE mode, stop after the first dequeued buffer:
		 * restore the context and tear the emulator down.
		 */
		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
		    fileio->dq_count == 1) {
			dprintk(3, "file io: read limit reached\n");
			/*
			 * Restore fileio pointer and release the context.
			 */
			q->fileio = fileio;
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Give the buffer (with its payload, in write mode) back
		 * to the driver via vb2_qbuf.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		fileio->b.index = index;
		fileio->b.bytesused = buf->pos;
		ret = vb2_qbuf(q, &fileio->b);
		dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
		if (ret)
			goto end;

		/*
		 * Buffer has been queued; reset its emulator state.
		 * NOTE(review): size is taken from q->bufs[0], not
		 * q->bufs[index] — presumably all buffers share one plane
		 * length; verify against queue_setup.
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = q->bufs[0]->v4l2_planes[0].length;
		fileio->q_count += 1;

		/*
		 * Advance to the next buffer (round-robin).
		 */
		fileio->index = (index + 1) % q->num_buffers;

		/*
		 * In write mode, start streaming once the first buffer
		 * has been queued.
		 */
		if (!read && !q->streaming) {
			ret = vb2_streamon(q, q->type);
			if (ret)
				goto end;
		}
	}

	/*
	 * On success, return the number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
end:
	/*
	 * Restore the emulator context cleared at the top.
	 */
	q->fileio = fileio;
	return ret;
}
2059
/**
 * vb2_read() - implements the read() file operation via the vb2 emulator
 * @q:		videobuf2 queue
 * @data:	destination userspace buffer
 * @count:	number of bytes to read
 * @ppos:	file position pointer
 * @nonblocking: non-zero for a non-blocking read
 */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);
2066
/**
 * vb2_write() - implements the write() file operation via the vb2 emulator
 * @q:		videobuf2 queue
 * @data:	source userspace buffer
 * @count:	number of bytes to write
 * @ppos:	file position pointer
 * @nonblocking: non-zero for a non-blocking write
 */
size_t vb2_write(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
2073
2074MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
2075MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
2076MODULE_LICENSE("GPL");
2077