/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)						\
	do {								\
		if (debug)						\
			printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
	} while (0)

/* Instance is already queued on the job_queue */
#define TRANS_QUEUED		(1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING		(1 << 1)

/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE	(1 << 30)

/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:		currently running instance
 * @job_queue:		instances queued to run
 * @job_spinlock:	protects job_queue
 * @m2m_ops:		driver callbacks
 */
struct v4l2_m2m_dev {
	struct v4l2_m2m_ctx	*curr_ctx;

	struct list_head	job_queue;
	spinlock_t		job_spinlock;

	const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
						enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &m2m_ctx->out_q_ctx;
	else
		return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
				  enum v4l2_buf_type type)
{
	struct v4l2_m2m_queue_ctx *q_ctx;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	if (!q_ctx)
		return NULL;

	return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}

	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
	struct v4l2_m2m_buffer *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	if (list_empty(&q_ctx->rdy_queue)) {
		spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
		return NULL;
	}
	b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
	list_del(&b->list);
	q_ctx->num_rdy--;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;
	void *ret = NULL;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_dev->curr_ctx)
		ret = m2m_dev->curr_ctx->priv;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (NULL != m2m_dev->curr_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Another instance is running, won't run now\n");
		return;
	}

	if (list_empty(&m2m_dev->job_queue)) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("No job pending\n");
		return;
	}

	m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
					     struct v4l2_m2m_ctx, queue);
	m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:	m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such case the driver
 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 * return 1 if the instance is ready.
 * An example of the above could be an instance that requires more than one
 * src/dst buffer per transaction.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags_job, flags_out, flags_cap;

	m2m_dev = m2m_ctx->m2m_dev;
	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

	if (!m2m_ctx->out_q_ctx.q.streaming
	    || !m2m_ctx->cap_q_ctx.q.streaming) {
		dprintk("Streaming needs to be on for both queues\n");
		return;
	}

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	if (m2m_ctx->job_flags & TRANS_QUEUED) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("On job queue already\n");
		return;
	}

	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No input buffers available\n");
		return;
	}
	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
					flags_cap);
		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
					flags_out);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("No output buffers available\n");
		return;
	}
	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

	if (m2m_dev->m2m_ops->job_ready
	    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
		dprintk("Driver not ready\n");
		return;
	}

	list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
	m2m_ctx->job_flags |= TRANS_QUEUED;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("Called by an instance not currently running\n");
		return;
	}

	list_del(&m2m_dev->curr_ctx->queue);
	m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
	wake_up(&m2m_dev->curr_ctx->finished);
	m2m_dev->curr_ctx = NULL;

	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

	/* This instance might have more buffers ready, but since we do not
	 * allow more than one job on the job_queue per instance, each has
	 * to be scheduled separately after the previous one finishes. */
	v4l2_m2m_try_schedule(m2m_ctx);
	v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
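
/*
 * Illustrative sketch (not part of this file): a driver's interrupt handler
 * would typically mark both transaction buffers as done and then call
 * v4l2_m2m_job_finish() to let the next queued job run. All "mydrv_*" names
 * below are hypothetical:
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_dev *dev = data;
 *		struct mydrv_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */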

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		     struct v4l2_requestbuffers *reqbufs)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
	return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret = 0;
	unsigned int i;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_querybuf(vq, buf);

	/* Adjust MMAP memory offsets for the CAPTURE queue */
	if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
			for (i = 0; i < buf->length; ++i)
				buf->m.planes[i].m.mem_offset
					+= DST_QUEUE_OFF_BASE;
		} else {
			buf->m.offset += DST_QUEUE_OFF_BASE;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	ret = vb2_qbuf(vq, buf);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
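
/*
 * Illustrative sketch: drivers are expected to wire these helpers into their
 * v4l2_ioctl_ops as thin wrappers, e.g. (hypothetical "mydrv" names):
 *
 *	static int mydrv_qbuf(struct file *file, void *priv,
 *			      struct v4l2_buffer *buf)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *
 *		return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
 *	}
 *
 * and analogously for reqbufs/querybuf/dqbuf/streamon/streamoff.
 */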

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		   struct v4l2_buffer *buf)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
	return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			 struct v4l2_create_buffers *create)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
	return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		    struct v4l2_exportbuffer *eb)
{
	struct vb2_queue *vq;

	vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
	return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		      enum v4l2_buf_type type)
{
	struct vb2_queue *vq;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, type);
	ret = vb2_streamon(vq, type);
	if (!ret)
		v4l2_m2m_try_schedule(m2m_ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		       enum v4l2_buf_type type)
{
	struct v4l2_m2m_dev *m2m_dev;
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags_job, flags;
	int ret;

	q_ctx = get_queue_ctx(m2m_ctx, type);
	ret = vb2_streamoff(&q_ctx->q, type);
	if (ret)
		return ret;

	m2m_dev = m2m_ctx->m2m_dev;
	spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
	/* We should not be scheduled anymore, since we're dropping a queue. */
	INIT_LIST_HEAD(&m2m_ctx->queue);
	m2m_ctx->job_flags = 0;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	/* Drop the ready queue, since streamoff returns the device to the same
	 * state as after calling reqbufs. */
	INIT_LIST_HEAD(&q_ctx->rdy_queue);
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

	if (m2m_dev->curr_ctx == m2m_ctx) {
		m2m_dev->curr_ctx = NULL;
		wake_up(&m2m_ctx->finished);
	}
	spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

	return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
	    && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc |= POLLERR;
		goto end;
	}

	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	if (list_empty(&src_q->done_list))
		poll_wait(file, &src_q->done_wq, wait);
	if (list_empty(&dst_q->done_list))
		poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
		  struct vm_area_struct *vma)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_queue *vq;

	if (offset < DST_QUEUE_OFF_BASE) {
		vq = v4l2_m2m_get_src_vq(m2m_ctx);
	} else {
		vq = v4l2_m2m_get_dst_vq(m2m_ctx);
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
	}

	return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
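
/*
 * Illustrative sketch: a driver's file operations can delegate mmap() (and
 * similarly poll()) straight to these helpers. The "mydrv_ctx" structure and
 * its m2m_ctx member are hypothetical:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *	}
 */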

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
	struct v4l2_m2m_dev *m2m_dev;

	if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
			WARN_ON(!m2m_ops->job_abort))
		return ERR_PTR(-EINVAL);

	m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
	if (!m2m_dev)
		return ERR_PTR(-ENOMEM);

	m2m_dev->curr_ctx = NULL;
	m2m_dev->m2m_ops = m2m_ops;
	INIT_LIST_HEAD(&m2m_dev->job_queue);
	spin_lock_init(&m2m_dev->job_spinlock);

	return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
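
/*
 * Illustrative sketch: a driver typically calls v4l2_m2m_init() from probe(),
 * passing its scheduling callbacks. All "mydrv_*" names are hypothetical:
 *
 *	static const struct v4l2_m2m_ops mydrv_m2m_ops = {
 *		.device_run	= mydrv_device_run,
 *		.job_ready	= mydrv_job_ready,
 *		.job_abort	= mydrv_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */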

/**
 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
	kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 * @m2m_dev:	a previously initialized m2m_dev struct
 * @drv_priv:	driver's instance private data
 * @queue_init:	a callback for queue type-specific initialization function to
 *	be used for initializing the vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
		void *drv_priv,
		int (*queue_init)(void *priv, struct vb2_queue *src_vq,
				  struct vb2_queue *dst_vq))
{
	struct v4l2_m2m_ctx *m2m_ctx;
	struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
	int ret;

	m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
	if (!m2m_ctx)
		return ERR_PTR(-ENOMEM);

	m2m_ctx->priv = drv_priv;
	m2m_ctx->m2m_dev = m2m_dev;
	init_waitqueue_head(&m2m_ctx->finished);

	out_q_ctx = &m2m_ctx->out_q_ctx;
	cap_q_ctx = &m2m_ctx->cap_q_ctx;

	INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
	INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
	spin_lock_init(&out_q_ctx->rdy_spinlock);
	spin_lock_init(&cap_q_ctx->rdy_spinlock);

	INIT_LIST_HEAD(&m2m_ctx->queue);

	ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);

	if (ret)
		goto err;

	return m2m_ctx;
err:
	kfree(m2m_ctx);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
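
/*
 * Illustrative sketch: usually called from the driver's open() function, with
 * a queue_init callback that fills in both vb2_queue structures and calls
 * vb2_queue_init() on each. The "mydrv_*" names and error label are
 * hypothetical:
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &mydrv_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx)) {
 *		ret = PTR_ERR(ctx->m2m_ctx);
 *		goto err_free;
 *	}
 */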

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
	struct v4l2_m2m_dev *m2m_dev;
	unsigned long flags;

	m2m_dev = m2m_ctx->m2m_dev;

	spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
	if (m2m_ctx->job_flags & TRANS_RUNNING) {
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
		dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
		wait_event(m2m_ctx->finished,
			   !(m2m_ctx->job_flags & TRANS_RUNNING));
	} else if (m2m_ctx->job_flags & TRANS_QUEUED) {
		list_del(&m2m_ctx->queue);
		m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
		dprintk("m2m_ctx: %p had been on queue and was removed\n",
			m2m_ctx);
	} else {
		/* Do nothing, was not on queue/running */
		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
	}

	vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
	vb2_queue_release(&m2m_ctx->out_q_ctx.q);

	kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the vb2 buf_queue() callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
	struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
	struct v4l2_m2m_queue_ctx *q_ctx;
	unsigned long flags;

	q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
	if (!q_ctx)
		return;

	spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
	list_add_tail(&b->list, &q_ctx->rdy_queue);
	q_ctx->num_rdy++;
	spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
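
/*
 * Illustrative sketch: drivers call v4l2_m2m_buf_queue() from their vb2
 * buf_queue callback. The "mydrv_ctx" type is hypothetical:
 *
 *	static void mydrv_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */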