1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/err.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/mm.h>
21#include <linux/poll.h>
22#include <linux/slab.h>
23#include <linux/sched.h>
24#include <linux/freezer.h>
25#include <linux/kthread.h>
26
27#include <media/v4l2-dev.h>
28#include <media/v4l2-fh.h>
29#include <media/v4l2-event.h>
30#include <media/v4l2-common.h>
31#include <media/videobuf2-core.h>
32
/* Module-wide debug level, adjustable at runtime via module parameter. */
static int debug;
module_param(debug, int, 0644);

/* Emit a pr_debug message when the module 'debug' level is >= level. */
#define dprintk(level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_debug("vb2: %s: " fmt, __func__, ## arg);	      \
	} while (0)
41
#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

/* Counters disabled: plain pass-through wrappers around the op tables. */

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
174
175
/* Flags that are set by the vb2 core and must not come from userspace. */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver. */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)

static void __vb2_queue_cancel(struct vb2_queue *q);
185
186
187
188
/*
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 *
 * Allocates page-aligned memory for every plane of @vb via the queue's
 * mem_ops->alloc.  On failure, all planes allocated so far are released.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;

	/*
	 * Allocate memory for all planes in this buffer.
	 * NOTE: mmapped areas should be page aligned.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);

		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
				      size, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}
222
223
224
225
226static void __vb2_buf_mem_free(struct vb2_buffer *vb)
227{
228 unsigned int plane;
229
230 for (plane = 0; plane < vb->num_planes; ++plane) {
231 call_void_memop(vb, put, vb->planes[plane].mem_priv);
232 vb->planes[plane].mem_priv = NULL;
233 dprintk(3, "freed plane %d of buffer %d\n", plane,
234 vb->v4l2_buf.index);
235 }
236}
237
238
239
240
241
242static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
243{
244 unsigned int plane;
245
246 for (plane = 0; plane < vb->num_planes; ++plane) {
247 if (vb->planes[plane].mem_priv)
248 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
249 vb->planes[plane].mem_priv = NULL;
250 }
251}
252
253
254
255
256
257static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
258{
259 if (!p->mem_priv)
260 return;
261
262 if (p->dbuf_mapped)
263 call_void_memop(vb, unmap_dmabuf, p->mem_priv);
264
265 call_void_memop(vb, detach_dmabuf, p->mem_priv);
266 dma_buf_put(p->dbuf);
267 memset(p, 0, sizeof(*p));
268}
269
270
271
272
273
274static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
275{
276 unsigned int plane;
277
278 for (plane = 0; plane < vb->num_planes; ++plane)
279 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
280}
281
282
283
284
285
286static void __setup_lengths(struct vb2_queue *q, unsigned int n)
287{
288 unsigned int buffer, plane;
289 struct vb2_buffer *vb;
290
291 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
292 vb = q->bufs[buffer];
293 if (!vb)
294 continue;
295
296 for (plane = 0; plane < vb->num_planes; ++plane)
297 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
298 }
299}
300
301
302
303
304
/*
 * __setup_offsets() - setup unique, page-aligned mem_offset values for every
 * plane of the n most recently allocated MMAP buffers.  Userspace passes
 * these offsets back through mmap() to identify the plane to map; offsets
 * continue after the last plane of any previously allocated buffers.
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		/* Continue numbering after the last existing plane */
		struct v4l2_plane *p;
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			/* Keep each plane's offset page aligned */
			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}
336
337
338
339
340
341
342
343
/*
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP
 * memory type) video memory for all buffers/planes on the queue and
 * initialize the queue.
 *
 * Returns the number of buffers successfully allocated (may be fewer than
 * num_buffers if an allocation fails part-way).
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "memory alloc for buffer struct failed\n");
			break;
		}

		/* Length stores number of planes for multiplanar buffers */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	/* 'buffer' now holds the count actually allocated */
	__setup_lengths(q, buffer);
	if (memory == V4L2_MEMORY_MMAP)
		__setup_offsets(q, buffer);

	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}
406
407
408
409
410static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
411{
412 unsigned int buffer;
413 struct vb2_buffer *vb;
414
415 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
416 ++buffer) {
417 vb = q->bufs[buffer];
418 if (!vb)
419 continue;
420
421
422 if (q->memory == V4L2_MEMORY_MMAP)
423 __vb2_buf_mem_free(vb);
424 else if (q->memory == V4L2_MEMORY_DMABUF)
425 __vb2_buf_dmabuf_put(vb);
426 else
427 __vb2_buf_userptr_put(vb);
428 }
429}
430
431
432
433
434
435
/*
 * __vb2_queue_free() - free @buffers buffers from the end of the queue along
 * with all their allocated memory and the vb2_buffer structs themselves.
 *
 * Returns 0 on success, -EAGAIN if any buffer is still being prepared.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: while a buffer is being prepared the queue lock can
	 * be dropped for a short while, which would allow a racing reqbufs
	 * to reach this function.  Removing buffers from underneath a
	 * prepare-in-progress is not safe, so if any buffer is in the
	 * PREPARING state, just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue.  If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2:     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2:     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("vb2:   counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2:     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2:     buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2:     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2:     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2:     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2:     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers) {
		/* Queue is empty again: reset memory model and queued list */
		q->memory = 0;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}
546
547
548
549
550
551static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
552{
553 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
554 return 0;
555
556
557 if (NULL == b->m.planes) {
558 dprintk(1, "multi-planar buffer passed but "
559 "planes array not provided\n");
560 return -EINVAL;
561 }
562
563 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
564 dprintk(1, "incorrect planes array length, "
565 "expected %d, got %d\n", vb->num_planes, b->length);
566 return -EINVAL;
567 }
568
569 return 0;
570}
571
572
573
574
575
/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 * Only OUTPUT buffers are checked, since for capture these fields are filled
 * in by the kernel.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (!V4L2_TYPE_IS_OUTPUT(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			/*
			 * For USERPTR/DMABUF the length comes from userspace;
			 * for MMAP it is what vb2 allocated.
			 */
			length = (b->memory == V4L2_MEMORY_USERPTR ||
				  b->memory == V4L2_MEMORY_DMABUF)
			       ? b->m.planes[plane].length
			       : vb->v4l2_planes[plane].length;
			/* bytesused == 0 means "the whole plane is used" */
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == V4L2_MEMORY_USERPTR)
		       ? b->length : vb->v4l2_planes[0].length;
		/* bytesused == 0 means "the whole plane is used" */
		bytesused = b->bytesused ? b->bytesused : length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}
612
613
614
615
616
617static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
618{
619 unsigned int plane;
620 for (plane = 0; plane < vb->num_planes; ++plane) {
621 void *mem_priv = vb->planes[plane].mem_priv;
622
623
624
625
626
627
628 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
629 return true;
630 }
631 return false;
632}
633
634
635
636
637
638static bool __buffers_in_use(struct vb2_queue *q)
639{
640 unsigned int buffer;
641 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
642 if (__buffer_in_use(q, q->bufs[buffer]))
643 return true;
644 }
645 return false;
646}
647
648
649
650
651
/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace.
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Copy back data such as timestamp, flags, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it.  The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * Single-planar buffers do not use planes array, so fill in
		 * relevant v4l2_buffer struct fields instead.  vb2 internally
		 * uses the planes array even for single-plane buffers.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
		else if (q->memory == V4L2_MEMORY_DMABUF)
			b->m.fd = vb->v4l2_planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags; they are owned by vb2 and
	 * derived from the queue/buffer state below.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
}
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
737{
738 struct vb2_buffer *vb;
739 int ret;
740
741 if (b->type != q->type) {
742 dprintk(1, "wrong buffer type\n");
743 return -EINVAL;
744 }
745
746 if (b->index >= q->num_buffers) {
747 dprintk(1, "buffer index out of range\n");
748 return -EINVAL;
749 }
750 vb = q->bufs[b->index];
751 ret = __verify_planes_array(vb, b);
752 if (!ret)
753 __fill_v4l2_buffer(vb, b);
754 return ret;
755}
756EXPORT_SYMBOL(vb2_querybuf);
757
758
759
760
761
762static int __verify_userptr_ops(struct vb2_queue *q)
763{
764 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
765 !q->mem_ops->put_userptr)
766 return -EINVAL;
767
768 return 0;
769}
770
771
772
773
774
775static int __verify_mmap_ops(struct vb2_queue *q)
776{
777 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
778 !q->mem_ops->put || !q->mem_ops->mmap)
779 return -EINVAL;
780
781 return 0;
782}
783
784
785
786
787
788static int __verify_dmabuf_ops(struct vb2_queue *q)
789{
790 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
791 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
792 !q->mem_ops->unmap_dmabuf)
793 return -EINVAL;
794
795 return 0;
796}
797
798
799
800
801
802static int __verify_memory_type(struct vb2_queue *q,
803 enum v4l2_memory memory, enum v4l2_buf_type type)
804{
805 if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
806 memory != V4L2_MEMORY_DMABUF) {
807 dprintk(1, "unsupported memory type\n");
808 return -EINVAL;
809 }
810
811 if (type != q->type) {
812 dprintk(1, "requested type is incorrect\n");
813 return -EINVAL;
814 }
815
816
817
818
819
820 if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
821 dprintk(1, "MMAP for current setup unsupported\n");
822 return -EINVAL;
823 }
824
825 if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
826 dprintk(1, "USERPTR for current setup unsupported\n");
827 return -EINVAL;
828 }
829
830 if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
831 dprintk(1, "DMABUF for current setup unsupported\n");
832 return -EINVAL;
833 }
834
835
836
837
838
839
840 if (vb2_fileio_is_active(q)) {
841 dprintk(1, "file io in progress\n");
842 return -EBUSY;
843 }
844 return 0;
845}
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
/*
 * __reqbufs() - Initiate streaming
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler
 *		in driver
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization.
 *
 * If req->count is 0, all the memory will be freed instead.
 * If the queue has been allocated previously and the queue is not busy,
 * memory will be reallocated.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_reqbufs handler in driver.
 */
static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	int ret;

	if (q->streaming) {
		dprintk(1, "streaming active\n");
		return -EBUSY;
	}

	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			dprintk(1, "memory in use, cannot free\n");
			return -EBUSY;
		}

		/*
		 * Call queue_cancel to clean up any buffers in the PREPARED or
		 * QUEUED state which is possible if buffers were prepared or
		 * queued without ever calling STREAMON.
		 */
		__vb2_queue_cancel(q);
		ret = __vb2_queue_free(q, q->num_buffers);
		if (ret)
			return ret;

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (req->count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = req->memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
		       q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * There is no point in continuing if we can't allocate the minimum
	 * number of buffers needed by this vb2_queue.
	 */
	if (allocated_buffers < q->min_buffers_needed)
		ret = -ENOMEM;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (!ret && allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error.
		 */
	}

	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	req->count = allocated_buffers;
	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);

	return 0;
}
978
979
980
981
982
983
984
985int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
986{
987 int ret = __verify_memory_type(q, req->memory, req->type);
988
989 return ret ? ret : __reqbufs(q, req);
990}
991EXPORT_SYMBOL_GPL(vb2_reqbufs);
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
/*
 * __create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity,
 * 2) calls the .queue_setup() queue operation,
 * 3) performs any necessary memory allocations.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	int ret;

	if (q->num_buffers == VIDEO_MAX_FRAME) {
		dprintk(1, "maximum number of buffers already allocated\n");
		return -ENOBUFS;
	}

	if (!q->num_buffers) {
		/* First allocation on this queue: initialize its state */
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = create->memory;
		q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
	}

	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable.
	 */
	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
		       &num_planes, q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
				num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * Re-negotiate with the smaller count; q->num_buffers still
		 * contains the total number of previously set up buffers.
		 */
		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error.
		 */
	}

	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return -ENOMEM;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	create->count = allocated_buffers;

	return 0;
}
1085
1086
1087
1088
1089
1090
1091
1092
1093int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
1094{
1095 int ret = __verify_memory_type(q, create->memory, create->format.type);
1096
1097 create->index = q->num_buffers;
1098 if (create->count == 0)
1099 return ret != -EBUSY ? ret : 0;
1100 return ret ? ret : __create_bufs(q, create);
1101}
1102EXPORT_SYMBOL_GPL(vb2_create_bufs);
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
1113{
1114 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
1115 return NULL;
1116
1117 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
1118
1119}
1120EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
1134{
1135 if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
1136 return NULL;
1137
1138 return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
1139}
1140EXPORT_SYMBOL_GPL(vb2_plane_cookie);
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
/**
 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
 * @vb:		vb2_buffer returned from the driver
 * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully
 *		or VB2_BUF_STATE_ERROR if the operation finished with an error.
 *		If start_streaming fails then it should return buffers with
 *		state VB2_BUF_STATE_QUEUED to put them back into the queue.
 *
 * This function should be called by the driver after a hardware operation on
 * a buffer is finished and the buffer may be returned to userspace.  The
 * driver cannot use this buffer anymore until it is queued back to it by
 * videobuf by means of the buf_queue callback.  Only buffers previously
 * queued to the driver by buf_queue can be passed to this function.
 *
 * May be called from interrupt context (done_lock is taken irqsave).
 */
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned long flags;
	unsigned int plane;

	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
		return;

	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
		    state != VB2_BUF_STATE_ERROR &&
		    state != VB2_BUF_STATE_QUEUED))
		state = VB2_BUF_STATE_ERROR;

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Although this is not a callback, it still does have to balance
	 * with the buf_queue op.  So update this counter manually.
	 */
	vb->cnt_buf_done++;
#endif
	dprintk(4, "done processing on buffer %d, state: %d\n",
			vb->v4l2_buf.index, state);

	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_void_memop(vb, finish, vb->planes[plane].mem_priv);

	/* Add the buffer to the done buffers list */
	spin_lock_irqsave(&q->done_lock, flags);
	vb->state = state;
	if (state != VB2_BUF_STATE_QUEUED)
		list_add_tail(&vb->done_entry, &q->done_list);
	atomic_dec(&q->owned_by_drv_count);
	spin_unlock_irqrestore(&q->done_lock, flags);

	/* QUEUED buffers go back to the queue; nobody is waiting on them */
	if (state == VB2_BUF_STATE_QUEUED)
		return;

	/* Inform any processes that may be waiting for buffers */
	wake_up(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_buffer_done);
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
/**
 * vb2_discard_done() - discard all buffers marked as DONE
 * @q:		videobuf2 queue
 *
 * This function is intended to be used with suspend/resume operations.  It
 * marks all 'done' buffers as erroneous, as they would be too old to be
 * requested after resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers
 * and/or delayed works before calling this function to make sure no buffer
 * will be touched by the driver and/or hardware.
 */
void vb2_discard_done(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	unsigned long flags;

	spin_lock_irqsave(&q->done_lock, flags);
	list_for_each_entry(vb, &q->done_list, done_entry)
		vb->state = VB2_BUF_STATE_ERROR;
	spin_unlock_irqrestore(&q->done_lock, flags);
}
EXPORT_SYMBOL_GPL(vb2_discard_done);
1228
1229
1230
1231
1232
1233
/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace.  The caller has already verified that struct
 * v4l2_buffer has a valid number of planes.
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * with a smaller number of planes.  bytesused == 0
			 * from userspace means "whole plane used", so
			 * substitute the plane length in that case; data
			 * offset is passed through unchanged.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct v4l2_plane *pdst = &v4l2_planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				pdst->bytesused = psrc->bytesused ?
					psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-plane buffers do not use planes array, so fill in
		 * relevant v4l2_buffer struct fields instead.  In videobuf
		 * we use our internal V4l2_planes struct for single-planar
		 * buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 */
		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
		}

		if (V4L2_TYPE_IS_OUTPUT(b->type))
			v4l2_planes[0].bytesused = b->bytesused ?
				b->bytesused : v4l2_planes[0].length;
		else
			v4l2_planes[0].bytesused = 0;

	}

	/* Zero flags that the vb2 core handles */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_internal_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vb->v4l2_buf.field = b->field;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
	}
}
1332
1333
1334
1335
1336static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1337{
1338 __fill_vb2_buffer(vb, b, vb->v4l2_planes);
1339 return call_vb_qop(vb, buf_prepare, vb);
1340}
1341
1342
1343
1344
/*
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						q->plane_sizes[plane], plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			if (!reacquired) {
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}
1444
1445
1446
1447
/*
 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
 */
static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	bool reacquired = vb->planes[0].mem_priv == NULL;

	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Takes a reference on the dma_buf; dropped on all paths */
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "invalid dmabuf length for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->v4l2_planes[plane].length == planes[plane].length) {
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(1, "buffer for plane %d changed\n", plane);

		if (!reacquired) {
			reacquired = true;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
			dbuf, planes[plane].length, write);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with  dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "buffer preparation failed\n");
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
1562
1563
1564
1565
1566static void __enqueue_in_driver(struct vb2_buffer *vb)
1567{
1568 struct vb2_queue *q = vb->vb2_queue;
1569 unsigned int plane;
1570
1571 vb->state = VB2_BUF_STATE_ACTIVE;
1572 atomic_inc(&q->owned_by_drv_count);
1573
1574
1575 for (plane = 0; plane < vb->num_planes; ++plane)
1576 call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
1577
1578 call_void_vb_qop(vb, buf_queue, vb);
1579}
1580
1581static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1582{
1583 struct vb2_queue *q = vb->vb2_queue;
1584 struct rw_semaphore *mmap_sem;
1585 int ret;
1586
1587 ret = __verify_length(vb, b);
1588 if (ret < 0) {
1589 dprintk(1, "plane parameters verification failed: %d\n", ret);
1590 return ret;
1591 }
1592 if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) {
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602 dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
1603 return -EINVAL;
1604 }
1605
1606 if (q->error) {
1607 dprintk(1, "fatal error occurred on queue\n");
1608 return -EIO;
1609 }
1610
1611 vb->state = VB2_BUF_STATE_PREPARING;
1612 vb->v4l2_buf.timestamp.tv_sec = 0;
1613 vb->v4l2_buf.timestamp.tv_usec = 0;
1614 vb->v4l2_buf.sequence = 0;
1615
1616 switch (q->memory) {
1617 case V4L2_MEMORY_MMAP:
1618 ret = __qbuf_mmap(vb, b);
1619 break;
1620 case V4L2_MEMORY_USERPTR:
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633 mmap_sem = ¤t->mm->mmap_sem;
1634 call_void_qop(q, wait_prepare, q);
1635 down_read(mmap_sem);
1636 call_void_qop(q, wait_finish, q);
1637
1638 ret = __qbuf_userptr(vb, b);
1639
1640 up_read(mmap_sem);
1641 break;
1642 case V4L2_MEMORY_DMABUF:
1643 ret = __qbuf_dmabuf(vb, b);
1644 break;
1645 default:
1646 WARN(1, "Invalid queue type\n");
1647 ret = -EINVAL;
1648 }
1649
1650 if (ret)
1651 dprintk(1, "buffer preparation failed: %d\n", ret);
1652 vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
1653
1654 return ret;
1655}
1656
1657static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
1658 const char *opname)
1659{
1660 if (b->type != q->type) {
1661 dprintk(1, "%s: invalid buffer type\n", opname);
1662 return -EINVAL;
1663 }
1664
1665 if (b->index >= q->num_buffers) {
1666 dprintk(1, "%s: buffer index out of range\n", opname);
1667 return -EINVAL;
1668 }
1669
1670 if (q->bufs[b->index] == NULL) {
1671
1672 dprintk(1, "%s: buffer is NULL\n", opname);
1673 return -EINVAL;
1674 }
1675
1676 if (b->memory != q->memory) {
1677 dprintk(1, "%s: invalid memory type\n", opname);
1678 return -EINVAL;
1679 }
1680
1681 return __verify_planes_array(q->bufs[b->index], b);
1682}
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1700{
1701 struct vb2_buffer *vb;
1702 int ret;
1703
1704 if (vb2_fileio_is_active(q)) {
1705 dprintk(1, "file io in progress\n");
1706 return -EBUSY;
1707 }
1708
1709 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
1710 if (ret)
1711 return ret;
1712
1713 vb = q->bufs[b->index];
1714 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1715 dprintk(1, "invalid buffer state %d\n",
1716 vb->state);
1717 return -EINVAL;
1718 }
1719
1720 ret = __buf_prepare(vb, b);
1721 if (!ret) {
1722
1723 __fill_v4l2_buffer(vb, b);
1724
1725 dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
1726 }
1727 return ret;
1728}
1729EXPORT_SYMBOL_GPL(vb2_prepare_buf);
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
/*
 * vb2_start_streaming() - pass all queued buffers to the driver and ask it
 * to start streaming.
 *
 * If the driver's start_streaming op fails, it is expected to have returned
 * every buffer it owned via vb2_buffer_done(..., VB2_BUF_STATE_QUEUED);
 * this function WARNs and reclaims the buffers if the driver did not.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * Any buffers queued before this point are now handed to the
	 * driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Set the flag before the call so buf_queue callbacks see it. */
	q->start_streaming_called = 1;
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	if (!ret)
		return 0;

	q->start_streaming_called = 0;

	dprintk(1, "driver refused to start streaming\n");
	/*
	 * If this warning fires, the driver did not clean up after a failed
	 * start_streaming(): it still owns buffers it should have returned
	 * to vb2 in QUEUED state.
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim any buffer the driver failed to give
		 * back, so the queue is consistent again.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* The count must be zero after reclaiming. */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	/*
	 * A non-empty done_list here means the driver completed buffers with
	 * DONE/ERROR instead of returning them QUEUED — also a driver bug.
	 */
	WARN_ON(!list_empty(&q->done_list));
	return ret;
}
1793
/*
 * vb2_internal_qbuf() - core of VIDIOC_QBUF.
 *
 * Validates and (if needed) prepares the buffer, appends it to the queued
 * list, hands it to the driver when streaming has started, and kicks off
 * start_streaming once enough buffers are queued.
 */
static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
	struct vb2_buffer *vb;

	if (ret)
		return ret;

	vb = q->bufs[b->index];

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		/* Not yet prepared (no prior PREPARE_BUF): do it now. */
		ret = __buf_prepare(vb, b);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "invalid buffer state %d\n", vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list; the buffer stays on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	q->waiting_for_buffers = false;
	vb->state = VB2_BUF_STATE_QUEUED;
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * For output buffers copy the userspace timestamp when the
		 * queue uses TIMESTAMP_COPY, and the timecode when flagged.
		 */
		if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		    V4L2_BUF_FLAG_TIMESTAMP_COPY)
			vb->v4l2_buf.timestamp = b->timestamp;
		vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vb->v4l2_buf.timecode = b->timecode;
	}

	/*
	 * If the driver is already streaming, give it the buffer now;
	 * otherwise it will be enqueued on the next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Reflect the final buffer state back to userspace. */
	__fill_v4l2_buffer(vb, b);

	/*
	 * If streamon was already called but start_streaming was deferred
	 * because too few buffers were queued, and we have now reached
	 * min_buffers_needed, start the driver.
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
	return 0;
}
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1886{
1887 if (vb2_fileio_is_active(q)) {
1888 dprintk(1, "file io in progress\n");
1889 return -EBUSY;
1890 }
1891
1892 return vb2_internal_qbuf(q, b);
1893}
1894EXPORT_SYMBOL_GPL(vb2_qbuf);
1895
1896
1897
1898
1899
1900
1901
/*
 * __vb2_wait_for_done_vb() - wait until a buffer is available on done_list.
 *
 * Returns 0 when a buffer is ready, -EINVAL when streaming stops, -EIO on a
 * queue error, -EAGAIN for a nonblocking caller with nothing to dequeue, or
 * -ERESTARTSYS if the sleep was interrupted.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * Buffers are only removed from done_list (and handed to userspace)
	 * while holding both the driver's lock and done_lock, so as long as
	 * the driver's lock is held a non-empty list stays non-empty.
	 */
	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (q->error) {
			dprintk(1, "Queue in error state, will not wait for buffers\n");
			return -EIO;
		}

		if (!list_empty(&q->done_list)) {
			/* Found a buffer we were waiting for. */
			break;
		}

		if (nonblocking) {
			dprintk(1, "nonblocking and no buffers to dequeue, "
					"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are going to sleep. Release the driver's lock via
		 * wait_prepare so other processes can queue buffers (and
		 * thereby wake us) while we wait.
		 */
		call_void_qop(q, wait_prepare, q);

		/* Sleep until a buffer completes, streaming stops or an
		 * error is flagged. */
		dprintk(3, "will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming ||
				q->error);

		/*
		 * Reacquire the driver's lock before re-evaluating the loop
		 * conditions.
		 */
		call_void_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
1966
1967
1968
1969
1970
1971
/*
 * __vb2_get_done_vb() - take the first completed buffer off done_list.
 *
 * Sleeps when necessary (unless @nonblocking). On success *@vb points to
 * the removed buffer; the planes array in @b is verified before removal so
 * a userspace error leaves the buffer on the list.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
				struct v4l2_buffer *b, int nonblocking)
{
	unsigned long flags;
	int ret;

	/* Wait for at least one buffer to become available. */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty() check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if the v4l2_buffer can be
	 * filled in correctly (i.e. the planes array checks out).
	 */
	ret = __verify_planes_array(*vb, b);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012int vb2_wait_for_all_buffers(struct vb2_queue *q)
2013{
2014 if (!q->streaming) {
2015 dprintk(1, "streaming off, will not wait for buffers\n");
2016 return -EINVAL;
2017 }
2018
2019 if (q->start_streaming_called)
2020 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
2021 return 0;
2022}
2023EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
2024
2025
2026
2027
2028static void __vb2_dqbuf(struct vb2_buffer *vb)
2029{
2030 struct vb2_queue *q = vb->vb2_queue;
2031 unsigned int i;
2032
2033
2034 if (vb->state == VB2_BUF_STATE_DEQUEUED)
2035 return;
2036
2037 vb->state = VB2_BUF_STATE_DEQUEUED;
2038
2039
2040 if (q->memory == V4L2_MEMORY_DMABUF)
2041 for (i = 0; i < vb->num_planes; ++i) {
2042 if (!vb->planes[i].dbuf_mapped)
2043 continue;
2044 call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
2045 vb->planes[i].dbuf_mapped = 0;
2046 }
2047}
2048
/*
 * vb2_internal_dqbuf() - core of VIDIOC_DQBUF.
 *
 * Waits for (or immediately takes, if @nonblocking) a completed buffer,
 * runs the driver's buf_finish hook, fills @b for userspace, and removes
 * the buffer from the queued list.
 */
static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "invalid buffer type\n");
		return -EINVAL;
	}
	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
	if (ret < 0)
		return ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "invalid buffer state\n");
		return -EINVAL;
	}

	/* Give the driver a chance to post-process the buffer. */
	call_void_vb_qop(vb, buf_finish, vb);

	/* Fill buffer information for userspace. */
	__fill_v4l2_buffer(vb, b);

	/* Remove from videobuf queue. */
	list_del(&vb->queued_entry);
	q->queued_count--;

	/* Go back to dequeued state (unmaps DMABUF planes if needed). */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	return 0;
}
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2112{
2113 if (vb2_fileio_is_active(q)) {
2114 dprintk(1, "file io in progress\n");
2115 return -EBUSY;
2116 }
2117 return vb2_internal_dqbuf(q, b, nonblocking);
2118}
2119EXPORT_SYMBOL_GPL(vb2_dqbuf);
2120
2121
2122
2123
2124
2125
2126
/*
 * __vb2_queue_cancel() - cancel and stop (pause) streaming.
 *
 * Removes all queued and completed buffers from the lists and returns every
 * buffer to the dequeued state. Also clears the error flag.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell the driver to stop streaming first, so it releases the
	 * buffers it owns. Only if start_streaming was actually called.
	 */
	if (q->start_streaming_called)
		call_void_qop(q, stop_streaming, q);

	/*
	 * If this warning fires, the driver's stop_streaming() failed to
	 * return all buffers it owned to vb2 (via vb2_buffer_done()).
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
		/* Must be zero now. */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;
	q->error = 0;

	/*
	 * Remove all buffers from the queued list by reinitializing it.
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * Likewise drop everything from the done list, reset the
	 * driver-owned count and wake up any process waiting on done_wq.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Finally reinitialize all buffers to the dequeued state. A buffer
	 * that is still queued (never dequeued by userspace) first gets a
	 * buf_finish call, in PREPARED state, so the driver sees a balanced
	 * buf_prepare/buf_finish sequence for every buffer.
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
			call_void_vb_qop(vb, buf_finish, vb);
		}
		__vb2_dqbuf(vb);
	}
}
2188
/*
 * vb2_internal_streamon() - core of VIDIOC_STREAMON.
 *
 * Validates the request, marks the queue streaming and — if enough buffers
 * are already queued — starts the driver immediately; otherwise the start
 * is deferred until vb2_internal_qbuf() reaches min_buffers_needed.
 */
static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	int ret;

	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		/* STREAMON on an already-streaming queue is a no-op. */
		dprintk(3, "already streaming\n");
		return 0;
	}

	if (!q->num_buffers) {
		dprintk(1, "no buffers have been allocated\n");
		return -EINVAL;
	}

	if (q->num_buffers < q->min_buffers_needed) {
		dprintk(1, "need at least %u allocated buffers\n",
				q->min_buffers_needed);
		return -EINVAL;
	}

	/*
	 * Tell the driver to start streaming, provided sufficient buffers
	 * are queued; otherwise starting is deferred to qbuf time.
	 */
	if (q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret) {
			/* Undo the buffer queueing done before the attempt. */
			__vb2_queue_cancel(q);
			return ret;
		}
	}

	q->streaming = 1;

	dprintk(3, "successful\n");
	return 0;
}
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242
2243
2244
/*
 * vb2_queue_error() - signal a fatal error on the queue.
 * @q:		videobuf2 queue
 *
 * Sets the queue's error flag and wakes everyone sleeping on done_wq.
 * While the flag is set, poll returns POLLERR, buffer preparation fails
 * with -EIO and waiting for buffers fails with -EIO. The flag is cleared
 * again by __vb2_queue_cancel() (i.e. on streamoff or queue release).
 */
void vb2_queue_error(struct vb2_queue *q)
{
	/* Set the flag before waking waiters so they observe it. */
	q->error = 1;

	wake_up_all(&q->done_wq);
}
EXPORT_SYMBOL_GPL(vb2_queue_error);
2252
2253
2254
2255
2256
2257
2258
2259
2260
2261
2262
2263
2264
2265
2266int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
2267{
2268 if (vb2_fileio_is_active(q)) {
2269 dprintk(1, "file io in progress\n");
2270 return -EBUSY;
2271 }
2272 return vb2_internal_streamon(q, type);
2273}
2274EXPORT_SYMBOL_GPL(vb2_streamon);
2275
/*
 * vb2_internal_streamoff() - core of VIDIOC_STREAMOFF.
 *
 * Cancels streaming and returns all buffers to the dequeued state. Per the
 * V4L2 spec, STREAMOFF is valid even if streaming is already off; in that
 * case it still resets the queue state.
 */
static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (type != q->type) {
		dprintk(1, "invalid stream type\n");
		return -EINVAL;
	}

	/*
	 * Cancel the stream unconditionally (no q->streaming check):
	 * __vb2_queue_cancel() also resets queue state for a queue that was
	 * never started, and clears any pending error.
	 */
	__vb2_queue_cancel(q);
	/*
	 * For capture queues, poll should signal POLLERR until buffers are
	 * queued again; mark that we are waiting for buffers.
	 */
	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);

	dprintk(3, "successful\n");
	return 0;
}
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310
2311
2312
2313
2314int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2315{
2316 if (vb2_fileio_is_active(q)) {
2317 dprintk(1, "file io in progress\n");
2318 return -EBUSY;
2319 }
2320 return vb2_internal_streamoff(q, type);
2321}
2322EXPORT_SYMBOL_GPL(vb2_streamoff);
2323
2324
2325
2326
2327static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2328 unsigned int *_buffer, unsigned int *_plane)
2329{
2330 struct vb2_buffer *vb;
2331 unsigned int buffer, plane;
2332
2333
2334
2335
2336
2337
2338 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2339 vb = q->bufs[buffer];
2340
2341 for (plane = 0; plane < vb->num_planes; ++plane) {
2342 if (vb->v4l2_planes[plane].m.mem_offset == off) {
2343 *_buffer = buffer;
2344 *_plane = plane;
2345 return 0;
2346 }
2347 }
2348 }
2349
2350 return -EINVAL;
2351}
2352
2353
2354
2355
2356
2357
2358
2359
2360
2361
/*
 * vb2_expbuf() - implement VIDIOC_EXPBUF: export an MMAP plane as a dma-buf
 * file descriptor.
 *
 * Validates the request, obtains a dma_buf from the allocator's get_dmabuf
 * memop and installs it as an fd honouring the O_CLOEXEC/access-mode flags
 * in @eb->flags. On success @eb->fd holds the new descriptor.
 */
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(1, "queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(1, "queue does support only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (eb->type != q->type) {
		dprintk(1, "invalid buffer type\n");
		return -EINVAL;
	}

	if (eb->index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[eb->index];

	if (eb->plane >= vb->num_planes) {
		dprintk(1, "buffer plane out of range\n");
		return -EINVAL;
	}

	if (vb2_fileio_is_active(q)) {
		dprintk(1, "expbuf: file io in progress\n");
		return -EBUSY;
	}

	vb_plane = &vb->planes[eb->plane];

	/* Ask the allocator for a dma_buf handle to this plane's memory. */
	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(1, "failed to export buffer %d, plane %d\n",
			eb->index, eb->plane);
		return -EINVAL;
	}

	/* Install an fd; the access mode was already applied above. */
	ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
			eb->index, eb->plane, ret);
		/* Drop the reference taken by get_dmabuf. */
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
		eb->index, eb->plane, ret);
	eb->fd = ret;

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_expbuf);
2430
2431
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
/*
 * vb2_mmap() - map an MMAP buffer plane into a userspace address space.
 *
 * The plane is identified by the vma's pgoff, which must equal the
 * mem_offset cookie previously reported for that plane. The mapping must be
 * MAP_SHARED and readable (capture) or writable (output), and may not
 * exceed the page-aligned plane length.
 */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer = 0, plane = 0;
	int ret;
	unsigned long length;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(1, "invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(1, "invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}
	if (vb2_fileio_is_active(q)) {
		dprintk(1, "mmap: file io in progress\n");
		return -EBUSY;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page_aligned buffers. The buffer length was
	 * page-aligned at allocation time, so we only need to check that
	 * the requested mapping does not exceed it.
	 */
	length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(1,
			"MMAP invalid, as it would overflow buffer length\n");
		return -EINVAL;
	}

	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
	if (ret)
		return ret;

	dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
2514EXPORT_SYMBOL_GPL(vb2_mmap);
2515
2516#ifndef CONFIG_MMU
/*
 * vb2_get_unmapped_area() - !CONFIG_MMU helper: resolve an mmap offset
 * cookie to the plane's kernel virtual address.
 *
 * On no-MMU systems userspace maps buffers at their kernel address, so the
 * plane vaddr is returned directly. Returns a negative errno cast to
 * unsigned long when the offset does not match any plane (standard
 * get_unmapped_area error convention).
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	return (unsigned long)vb2_plane_vaddr(vb, plane);
}
2544EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2545#endif
2546
2547static int __vb2_init_fileio(struct vb2_queue *q, int read);
2548static int __vb2_cleanup_fileio(struct vb2_queue *q);
2549
2550
2551
2552
2553
2554
2555
2556
2557
2558
2559
2560
2561
2562
2563
2564
2565
2566
2567
2568
/*
 * vb2_poll() - implement poll() for a videobuf2 queue.
 *
 * Reports POLLPRI for pending V4L2 events, POLLIN/POLLRDNORM when a capture
 * buffer is ready to dequeue, POLLOUT/POLLWRNORM when an output buffer can
 * be queued, and POLLERR on error states. May transparently start the
 * read()/write() emulator for queues that support it.
 */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned int res = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		/* Handle V4L2 events (POLLPRI) independently of buffers. */
		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	/* If the caller did not ask for buffer events, stop here. */
	if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
		return res;
	if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
		return res;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return res | POLLERR;
		}
		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return res | POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return res | POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if streaming is off or if an error
	 * has been flagged on the queue.
	 */
	if (!vb2_is_streaming(q) || q->error)
		return res | POLLERR;
	/*
	 * For capture queues which turned off streaming and have no buffers
	 * queued yet, signal POLLERR until buffers arrive again.
	 */
	if (q->waiting_for_buffers)
		return res | POLLERR;

	/*
	 * For output streams a buffer is writable as long as not all buffers
	 * are queued to the driver.
	 */
	if (V4L2_TYPE_IS_OUTPUT(q->type) && q->queued_count < q->num_buffers)
		return res | POLLOUT | POLLWRNORM;

	if (list_empty(&q->done_list))
		poll_wait(file, &q->done_wq, wait);

	/*
	 * Take the first completed buffer (if any) under done_lock.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
				res | POLLOUT | POLLWRNORM :
				res | POLLIN | POLLRDNORM;
	}
	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
2653
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
2664
/*
 * vb2_queue_init() - initialize a videobuf2 queue.
 *
 * The vb2_queue structure must be allocated and zeroed by the driver, with
 * the required fields (ops, mem_ops, type, io_modes, ...) filled in before
 * this call. Returns 0 or -EINVAL on an invalid configuration.
 */
int vb2_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check: each WARN_ON catches a driver configuration bug.
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(!q->ops)		  ||
	    WARN_ON(!q->mem_ops)	  ||
	    WARN_ON(!q->type)		  ||
	    WARN_ON(!q->io_modes)	  ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue)   ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	init_waitqueue_head(&q->done_wq);

	/* Drivers may embed vb2_buffer in a larger per-buffer struct. */
	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
2696EXPORT_SYMBOL_GPL(vb2_queue_init);
2697
2698
2699
2700
2701
2702
2703
2704
2705
/*
 * vb2_queue_release() - stop streaming, release and deallocate all buffers.
 *
 * Tears down the read()/write() emulator first (it holds queue resources),
 * then cancels streaming and frees every allocated buffer.
 */
void vb2_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	__vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
2712EXPORT_SYMBOL_GPL(vb2_queue_release);
2713
2714
2715
2716
2717
2718
2719
2720
/*
 * struct vb2_fileio_buf - per-buffer context of the file I/O emulator.
 * @vaddr:	kernel virtual address of the buffer's single plane
 * @size:	size in bytes available for reading/writing
 * @pos:	current read/write position within the buffer
 * @queued:	1 if the buffer is currently queued to the driver
 */
struct vb2_fileio_buf {
	void *vaddr;
	unsigned int size;
	unsigned int pos;
	unsigned int queued:1;
};
2727
2728
2729
2730
2731
2732
2733
2734
2735
2736
2737
2738
2739
2740
2741
2742
2743
2744
2745
2746
2747
2748
2749
2750
2751
/*
 * struct vb2_fileio_data - queue context of the read()/write() emulator.
 * @req:		REQBUFS request used to allocate/free emulator buffers
 * @p:			scratch plane used for multiplanar qbuf/dqbuf calls
 * @b:			scratch v4l2_buffer used for qbuf/dqbuf calls
 * @bufs:		per-buffer emulator state
 * @cur_index:		index of the buffer currently being read/written;
 *			>= q->num_buffers means a buffer must be dequeued first
 * @initial_index:	counts buffers used for the first time; once it reaches
 *			q->num_buffers every new access dequeues a buffer
 * @q_count:		number of buffers queued so far
 * @dq_count:		number of buffers dequeued so far
 * @flags:		copy of q->io_flags (e.g. READ_ONCE, WRITE_IMMEDIATELY)
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;
	struct v4l2_plane p;
	struct v4l2_buffer b;
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;
	unsigned int dq_count;
	unsigned int flags;
};
2763
2764
2765
2766
2767
2768
/*
 * __vb2_init_fileio() - initialize the file I/O emulator on a queue.
 * @q:		videobuf2 queue
 * @read:	nonzero to emulate read() (capture), zero for write() (output)
 *
 * Allocates emulator state, requests MMAP buffers, maps their kernel
 * addresses and starts streaming. For the read case all buffers are
 * pre-queued so the driver can start filling them immediately.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * The queue must advertise the matching io mode.
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * The emulator needs CPU access: the allocator must provide vaddr.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * The streaming API must not already be in use.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Request one buffer; __reqbufs may round this up per the driver.
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	/*
	 * Request buffers and use MMAP type to force driver to allocate
	 * them; q->fileio must be set before __reqbufs so the emulator
	 * counts as the queue owner.
	 */
	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	q->fileio = fileio;
	ret = __reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	/*
	 * The emulator only handles single-plane buffers; bail out if the
	 * driver allocated anything else.
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Cache each buffer's kernel address and size for fast access.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode: pre-queue all buffers so the driver can fill them.
	 */
	if (read) {
		bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);

		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;

			memset(b, 0, sizeof(*b));
			b->type = q->type;
			if (is_multiplanar) {
				memset(&fileio->p, 0, sizeof(fileio->p));
				b->m.planes = &fileio->p;
				b->length = 1;
			}
			b->memory = q->memory;
			b->index = i;
			ret = vb2_internal_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers are queued: the first read() will have to
		 * dequeue one (cur_index >= num_buffers signals this).
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_internal_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	return ret;

err_reqbufs:
	/* Free the buffers we requested via a zero-count REQBUFS. */
	fileio->req.count = 0;
	__reqbufs(q, &fileio->req);

err_kfree:
	q->fileio = NULL;
	kfree(fileio);
	return ret;
}
2893
2894
2895
2896
2897
2898static int __vb2_cleanup_fileio(struct vb2_queue *q)
2899{
2900 struct vb2_fileio_data *fileio = q->fileio;
2901
2902 if (fileio) {
2903 vb2_internal_streamoff(q, q->type);
2904 q->fileio = NULL;
2905 fileio->req.count = 0;
2906 vb2_reqbufs(q, &fileio->req);
2907 kfree(fileio);
2908 dprintk(3, "file io emulator closed\n");
2909 }
2910 return 0;
2911}
2912
2913
2914
2915
2916
2917
2918
2919
2920
2921
/*
 * __vb2_perform_fileio() - common implementation of read() and write().
 * @q:		videobuf2 queue
 * @data:	userspace buffer to copy to/from
 * @count:	requested number of bytes
 * @ppos:	file position (advanced by the number of bytes transferred)
 * @nonblock:	nonzero for O_NONBLOCK semantics
 * @read:	nonzero for read(), zero for write()
 *
 * Starts the emulator on first use, dequeues a buffer when the current one
 * is exhausted, copies data between the buffer and userspace, and re-queues
 * the buffer once it is fully consumed/filled. Returns the number of bytes
 * transferred or a negative errno.
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	/*
	 * When using write() to write data to an output video node the vb2
	 * core should copy timestamps if the queue uses TIMESTAMP_COPY.
	 */
	bool set_timestamp = !read &&
		(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_COPY;
	int ret, index;

	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize the emulator on the first read()/write() call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue a (new) buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		/*
		 * No buffer to work on: dequeue one from the driver.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		if (is_multiplanar) {
			memset(&fileio->p, 0, sizeof(fileio->p));
			fileio->b.m.planes = &fileio->p;
			fileio->b.length = 1;
		}
		ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
		dprintk(5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index = fileio->b.index;
		buf = &fileio->bufs[index];

		/*
		 * Reset position: for reads the usable size is the payload
		 * the driver produced, for writes the full plane size.
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
	} else {
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count to what remains in the current buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to/from userspace.
	 */
	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update positions inside the buffer and the file.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue the buffer again when it is fully consumed (or immediately
	 * for writes with the WRITE_IMMEDIATELY flag set).
	 */
	if (buf->pos == buf->size ||
	    (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
		/*
		 * READ_ONCE: shut down the emulator after a single dequeue.
		 */
		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
		    fileio->dq_count == 1) {
			dprintk(3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Queue it back to the driver.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		fileio->b.index = index;
		fileio->b.bytesused = buf->pos;
		if (is_multiplanar) {
			memset(&fileio->p, 0, sizeof(fileio->p));
			fileio->p.bytesused = buf->pos;
			fileio->b.m.planes = &fileio->p;
			fileio->b.length = 1;
		}
		if (set_timestamp)
			v4l2_get_timestamp(&fileio->b.timestamp);
		ret = vb2_internal_qbuf(q, &fileio->b);
		dprintk(5, "vb2_dbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Reset the emulator's state for this buffer.
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * Track how many buffers have been used for the first time.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next read()/write() call will use initial_index: while
		 * warming up that is the next never-used buffer; once all
		 * buffers have been used it stays >= num_buffers, forcing a
		 * dequeue on the next call.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return the number of bytes transferred on success.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}
3085
/*
 * vb2_read() - implement read() via the file I/O emulator (read = 1).
 */
size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
}
EXPORT_SYMBOL_GPL(vb2_read);
3092
/*
 * vb2_write() - implement write() via the file I/O emulator (read = 0).
 * The const is cast away; the emulator only reads from @data in this mode.
 */
size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
		loff_t *ppos, int nonblocking)
{
	return __vb2_perform_fileio(q, (char __user *) data, count,
							ppos, nonblocking, 0);
}
EXPORT_SYMBOL_GPL(vb2_write);
3100
/*
 * struct vb2_threadio_data - state of the kernel-thread I/O helper.
 * @thread:	the kthread processing buffers
 * @fnc:	callback invoked for every dequeued buffer
 * @priv:	opaque pointer passed to @fnc
 * @stop:	set to request the thread to exit its loop
 */
struct vb2_threadio_data {
	struct task_struct *thread;
	vb2_thread_fnc fnc;
	void *priv;
	bool stop;
};
3107
/*
 * vb2_thread() - kthread body: dequeue buffers, run the user callback,
 * requeue.
 *
 * For output queues the first q->num_buffers iterations use the buffers
 * directly (prequeue) instead of dequeuing. The loop exits when stopped,
 * on a dqbuf/qbuf error, or when the callback returns nonzero.
 */
static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	struct vb2_fileio_data *fileio = q->fileio;
	bool set_timestamp = false;
	int prequeue = 0;
	int index = 0;
	int ret = 0;

	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		prequeue = q->num_buffers;
		/* Copy timestamps on qbuf if the queue uses TIMESTAMP_COPY. */
		set_timestamp =
			(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
			V4L2_BUF_FLAG_TIMESTAMP_COPY;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Obtain the next buffer: directly while prequeueing,
		 * otherwise by dequeuing (blocking) from the driver.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		if (prequeue) {
			fileio->b.index = index++;
			prequeue--;
		} else {
			/* dqbuf needs the driver's lock; take it around the call. */
			call_void_qop(q, wait_finish, q);
			ret = vb2_internal_dqbuf(q, &fileio->b, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
		}
		if (threadio->stop)
			break;
		if (ret)
			break;
		try_to_freeze();

		vb = q->bufs[fileio->b.index];
		/* Skip the callback for buffers completed with an error. */
		if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
			ret = threadio->fnc(vb, threadio->priv);
		if (ret)
			break;
		/* Requeue the buffer, again under the driver's lock. */
		call_void_qop(q, wait_finish, q);
		if (set_timestamp)
			v4l2_get_timestamp(&fileio->b.timestamp);
		ret = vb2_internal_qbuf(q, &fileio->b);
		call_void_qop(q, wait_prepare, q);
		if (ret)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
3172
3173
3174
3175
3176
3177
3178int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
3179 const char *thread_name)
3180{
3181 struct vb2_threadio_data *threadio;
3182 int ret = 0;
3183
3184 if (q->threadio)
3185 return -EBUSY;
3186 if (vb2_is_busy(q))
3187 return -EBUSY;
3188 if (WARN_ON(q->fileio))
3189 return -EBUSY;
3190
3191 threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
3192 if (threadio == NULL)
3193 return -ENOMEM;
3194 threadio->fnc = fnc;
3195 threadio->priv = priv;
3196
3197 ret = __vb2_init_fileio(q, !V4L2_TYPE_IS_OUTPUT(q->type));
3198 dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
3199 if (ret)
3200 goto nomem;
3201 q->threadio = threadio;
3202 threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
3203 if (IS_ERR(threadio->thread)) {
3204 ret = PTR_ERR(threadio->thread);
3205 threadio->thread = NULL;
3206 goto nothread;
3207 }
3208 return 0;
3209
3210nothread:
3211 __vb2_cleanup_fileio(q);
3212nomem:
3213 kfree(threadio);
3214 return ret;
3215}
3216EXPORT_SYMBOL_GPL(vb2_thread_start);
3217
3218int vb2_thread_stop(struct vb2_queue *q)
3219{
3220 struct vb2_threadio_data *threadio = q->threadio;
3221 struct vb2_fileio_data *fileio = q->fileio;
3222 int err;
3223
3224 if (threadio == NULL)
3225 return 0;
3226 call_void_qop(q, wait_finish, q);
3227 threadio->stop = true;
3228 vb2_internal_streamoff(q, q->type);
3229 call_void_qop(q, wait_prepare, q);
3230 q->fileio = NULL;
3231 fileio->req.count = 0;
3232 vb2_reqbufs(q, &fileio->req);
3233 kfree(fileio);
3234 err = kthread_stop(threadio->thread);
3235 threadio->thread = NULL;
3236 kfree(threadio);
3237 q->fileio = NULL;
3238 q->threadio = NULL;
3239 return err;
3240}
3241EXPORT_SYMBOL_GPL(vb2_thread_stop);
3242
3243
3244
3245
3246
3247
3248
3249
3250
3251
3252static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
3253{
3254 return vdev->queue->owner && vdev->queue->owner != file->private_data;
3255}
3256
3257
3258
3259int vb2_ioctl_reqbufs(struct file *file, void *priv,
3260 struct v4l2_requestbuffers *p)
3261{
3262 struct video_device *vdev = video_devdata(file);
3263 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
3264
3265 if (res)
3266 return res;
3267 if (vb2_queue_is_busy(vdev, file))
3268 return -EBUSY;
3269 res = __reqbufs(vdev->queue, p);
3270
3271
3272 if (res == 0)
3273 vdev->queue->owner = p->count ? file->private_data : NULL;
3274 return res;
3275}
3276EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
3277
int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);

	/* Report the index where new buffers would start. */
	p->index = vdev->queue->num_buffers;
	/*
	 * count == 0 is a probe: only validate memory and type.  In that
	 * case an -EBUSY from __verify_memory_type (queue already
	 * streaming/owned) is not an error for the probe, so map it to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = __create_bufs(vdev->queue, p);
	/* Successfully creating buffers makes this filehandle the owner. */
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
3298EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
3299
3300int vb2_ioctl_prepare_buf(struct file *file, void *priv,
3301 struct v4l2_buffer *p)
3302{
3303 struct video_device *vdev = video_devdata(file);
3304
3305 if (vb2_queue_is_busy(vdev, file))
3306 return -EBUSY;
3307 return vb2_prepare_buf(vdev->queue, p);
3308}
3309EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
3310
int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No ownership check: querying buffer state is read-only and is
	 * allowed for filehandles that do not own the queue. */
	return vb2_querybuf(vdev->queue, p);
}
3318EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
3319
3320int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3321{
3322 struct video_device *vdev = video_devdata(file);
3323
3324 if (vb2_queue_is_busy(vdev, file))
3325 return -EBUSY;
3326 return vb2_qbuf(vdev->queue, p);
3327}
3328EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
3329
3330int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3331{
3332 struct video_device *vdev = video_devdata(file);
3333
3334 if (vb2_queue_is_busy(vdev, file))
3335 return -EBUSY;
3336 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
3337}
3338EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
3339
3340int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
3341{
3342 struct video_device *vdev = video_devdata(file);
3343
3344 if (vb2_queue_is_busy(vdev, file))
3345 return -EBUSY;
3346 return vb2_streamon(vdev->queue, i);
3347}
3348EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
3349
3350int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
3351{
3352 struct video_device *vdev = video_devdata(file);
3353
3354 if (vb2_queue_is_busy(vdev, file))
3355 return -EBUSY;
3356 return vb2_streamoff(vdev->queue, i);
3357}
3358EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
3359
3360int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
3361{
3362 struct video_device *vdev = video_devdata(file);
3363
3364 if (vb2_queue_is_busy(vdev, file))
3365 return -EBUSY;
3366 return vb2_expbuf(vdev->queue, p);
3367}
3368EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
3369
3370
3371
3372int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3373{
3374 struct video_device *vdev = video_devdata(file);
3375 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3376 int err;
3377
3378 if (lock && mutex_lock_interruptible(lock))
3379 return -ERESTARTSYS;
3380 err = vb2_mmap(vdev->queue, vma);
3381 if (lock)
3382 mutex_unlock(lock);
3383 return err;
3384}
3385EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3386
3387int _vb2_fop_release(struct file *file, struct mutex *lock)
3388{
3389 struct video_device *vdev = video_devdata(file);
3390
3391 if (file->private_data == vdev->queue->owner) {
3392 if (lock)
3393 mutex_lock(lock);
3394 vb2_queue_release(vdev->queue);
3395 vdev->queue->owner = NULL;
3396 if (lock)
3397 mutex_unlock(lock);
3398 }
3399 return v4l2_fh_release(file);
3400}
3401EXPORT_SYMBOL_GPL(_vb2_fop_release);
3402
3403int vb2_fop_release(struct file *file)
3404{
3405 struct video_device *vdev = video_devdata(file);
3406 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3407
3408 return _vb2_fop_release(file, lock);
3409}
3410EXPORT_SYMBOL_GPL(vb2_fop_release);
3411
3412ssize_t vb2_fop_write(struct file *file, const char __user *buf,
3413 size_t count, loff_t *ppos)
3414{
3415 struct video_device *vdev = video_devdata(file);
3416 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3417 int err = -EBUSY;
3418
3419 if (lock && mutex_lock_interruptible(lock))
3420 return -ERESTARTSYS;
3421 if (vb2_queue_is_busy(vdev, file))
3422 goto exit;
3423 err = vb2_write(vdev->queue, buf, count, ppos,
3424 file->f_flags & O_NONBLOCK);
3425 if (vdev->queue->fileio)
3426 vdev->queue->owner = file->private_data;
3427exit:
3428 if (lock)
3429 mutex_unlock(lock);
3430 return err;
3431}
3432EXPORT_SYMBOL_GPL(vb2_fop_write);
3433
3434ssize_t vb2_fop_read(struct file *file, char __user *buf,
3435 size_t count, loff_t *ppos)
3436{
3437 struct video_device *vdev = video_devdata(file);
3438 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3439 int err = -EBUSY;
3440
3441 if (lock && mutex_lock_interruptible(lock))
3442 return -ERESTARTSYS;
3443 if (vb2_queue_is_busy(vdev, file))
3444 goto exit;
3445 err = vb2_read(vdev->queue, buf, count, ppos,
3446 file->f_flags & O_NONBLOCK);
3447 if (vdev->queue->fileio)
3448 vdev->queue->owner = file->private_data;
3449exit:
3450 if (lock)
3451 mutex_unlock(lock);
3452 return err;
3453}
3454EXPORT_SYMBOL_GPL(vb2_fop_read);
3455
/*
 * poll file operation.
 *
 * If no buffers are queued yet and read()/write() emulation is possible
 * for the requested events, vb2_poll() may implicitly start the fileio
 * emulation, which mutates queue state — in that case the queue lock
 * must be taken around the call and the caller becomes the queue owner.
 */
unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	unsigned long req_events = poll_requested_events(wait);
	unsigned res;
	void *fileio;
	bool must_lock = false;

	/*
	 * Locking is only needed if vb2_poll() might start fileio:
	 * no buffers allocated, fileio not yet active, and the queue
	 * supports read (capture) or write (output) for the events
	 * the caller polls for.
	 */
	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM)))
			must_lock = true;
		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM)))
			must_lock = true;
	}

	/* A driver without a lock in this situation is a driver bug. */
	WARN_ON(must_lock && !lock);

	if (must_lock && lock && mutex_lock_interruptible(lock))
		return POLLERR;

	/* Snapshot q->fileio to detect whether vb2_poll() started fileio. */
	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started inside vb2_poll(), we become the owner. */
	if (must_lock && !fileio && q->fileio)
		q->owner = file->private_data;
	if (must_lock && lock)
		mutex_unlock(lock);
	return res;
}
3495EXPORT_SYMBOL_GPL(vb2_fop_poll);
3496
3497#ifndef CONFIG_MMU
3498unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3499 unsigned long len, unsigned long pgoff, unsigned long flags)
3500{
3501 struct video_device *vdev = video_devdata(file);
3502 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3503 int ret;
3504
3505 if (lock && mutex_lock_interruptible(lock))
3506 return -ERESTARTSYS;
3507 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3508 if (lock)
3509 mutex_unlock(lock);
3510 return ret;
3511}
3512EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3513#endif
3514
3515
3516
/* Default wait_prepare queue op: drop the queue lock before sleeping so
 * other queue operations can proceed while waiting for a buffer. */
void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
3521EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3522
/* Default wait_finish queue op: re-take the queue lock after sleeping. */
void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
3527EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
3528
3529MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
3530MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
3531MODULE_LICENSE("GPL");
3532