linux/drivers/media/v4l2-core/v4l2-mem2mem.c
   1/*
   2 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
   3 *
   4 * Helper functions for devices that use videobuf buffers for both their
   5 * source and destination.
   6 *
   7 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
   8 * Pawel Osciak, <pawel@osciak.com>
   9 * Marek Szyprowski, <m.szyprowski@samsung.com>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by the
  13 * Free Software Foundation; either version 2 of the License, or (at your
  14 * option) any later version.
  15 */
  16#include <linux/module.h>
  17#include <linux/sched.h>
  18#include <linux/slab.h>
  19
  20#include <media/media-device.h>
  21#include <media/videobuf2-v4l2.h>
  22#include <media/v4l2-mem2mem.h>
  23#include <media/v4l2-dev.h>
  24#include <media/v4l2-device.h>
  25#include <media/v4l2-fh.h>
  26#include <media/v4l2-event.h>
  27
  28MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
  29MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
  30MODULE_LICENSE("GPL");
  31
  32static bool debug;
  33module_param(debug, bool, 0644);
  34
  35#define dprintk(fmt, arg...)                                            \
  36        do {                                                            \
  37                if (debug)                                              \
  38                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
  39        } while (0)
  40
  41
  42/* Instance is already queued on the job_queue */
  43#define TRANS_QUEUED            (1 << 0)
  44/* Instance is currently running in hardware */
  45#define TRANS_RUNNING           (1 << 1)
  46/* Instance is currently aborting */
  47#define TRANS_ABORT             (1 << 2)
  48
  49
  50/* Offset base for buffers on the destination queue - used to distinguish
  51 * between source and destination buffers when mmapping - they receive the same
  52 * offsets but for different queues */
  53#define DST_QUEUE_OFF_BASE      (1 << 30)
  54
  55enum v4l2_m2m_entity_type {
  56        MEM2MEM_ENT_TYPE_SOURCE,
  57        MEM2MEM_ENT_TYPE_SINK,
  58        MEM2MEM_ENT_TYPE_PROC
  59};
  60
  61static const char * const m2m_entity_name[] = {
  62        "source",
  63        "sink",
  64        "proc"
  65};
  66
  67/**
  68 * struct v4l2_m2m_dev - per-device context
  69 * @source:             &struct media_entity pointer with the source entity.
  70 *                      Used only when the M2M device is registered via
  71 *                      v4l2_m2m_register_media_controller().
  72 * @source_pad:         &struct media_pad with the source pad.
  73 *                      Used only when the M2M device is registered via
  74 *                      v4l2_m2m_register_media_controller().
  75 * @sink:               &struct media_entity with the sink entity.
  76 *                      Used only when the M2M device is registered via
  77 *                      v4l2_m2m_register_media_controller().
  78 * @sink_pad:           &struct media_pad with the sink pad.
  79 *                      Used only when the M2M device is registered via
  80 *                      v4l2_m2m_register_media_controller().
  81 * @proc:               &struct media_entity with the M2M device itself.
  82 * @proc_pads:          &struct media_pad array with the @proc pads.
  83 *                      Used only when the M2M device is registered via
  84 *                      v4l2_m2m_register_media_controller().
  85 * @intf_devnode:       &struct media_intf_devnode pointer with the interface
  86 *                      that controls the M2M device.
  87 * @curr_ctx:           currently running instance
  88 * @job_queue:          instances queued to run
  89 * @job_spinlock:       protects job_queue
  90 * @m2m_ops:            driver callbacks
  91 */
  92struct v4l2_m2m_dev {
  93        struct v4l2_m2m_ctx     *curr_ctx;
  94#ifdef CONFIG_MEDIA_CONTROLLER
  95        struct media_entity     *source;
  96        struct media_pad        source_pad;
  97        struct media_entity     sink;
  98        struct media_pad        sink_pad;
  99        struct media_entity     proc;
 100        struct media_pad        proc_pads[2];
 101        struct media_intf_devnode *intf_devnode;
 102#endif
 103
 104        struct list_head        job_queue;
 105        spinlock_t              job_spinlock;
 106
 107        const struct v4l2_m2m_ops *m2m_ops;
 108};
 109
 110static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
 111                                                enum v4l2_buf_type type)
 112{
 113        if (V4L2_TYPE_IS_OUTPUT(type))
 114                return &m2m_ctx->out_q_ctx;
 115        else
 116                return &m2m_ctx->cap_q_ctx;
 117}
 118
 119struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
 120                                       enum v4l2_buf_type type)
 121{
 122        struct v4l2_m2m_queue_ctx *q_ctx;
 123
 124        q_ctx = get_queue_ctx(m2m_ctx, type);
 125        if (!q_ctx)
 126                return NULL;
 127
 128        return &q_ctx->q;
 129}
 130EXPORT_SYMBOL(v4l2_m2m_get_vq);
 131
 132void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
 133{
 134        struct v4l2_m2m_buffer *b;
 135        unsigned long flags;
 136
 137        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 138
 139        if (list_empty(&q_ctx->rdy_queue)) {
 140                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 141                return NULL;
 142        }
 143
 144        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
 145        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 146        return &b->vb;
 147}
 148EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);
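
/*
 * Illustrative sketch (not part of this file): drivers normally reach this
 * helper through the v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() wrappers
 * declared in <media/v4l2-mem2mem.h>, typically from their device_run()
 * callback. The my_ctx, my_device_run and my_hw_start names below are
 * hypothetical; unless a queue was marked as buffered, both buffers are
 * available here because a job is only scheduled once each ready queue is
 * non-empty.
 *
 *      static void my_device_run(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
 *
 *              my_hw_start(ctx, &src->vb2_buf, &dst->vb2_buf);
 *      }
 */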
 149
 150void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx)
 151{
 152        struct v4l2_m2m_buffer *b;
 153        unsigned long flags;
 154
 155        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 156
 157        if (list_empty(&q_ctx->rdy_queue)) {
 158                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 159                return NULL;
 160        }
 161
 162        b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
 163        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 164        return &b->vb;
 165}
 166EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf);
 167
 168void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
 169{
 170        struct v4l2_m2m_buffer *b;
 171        unsigned long flags;
 172
 173        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 174        if (list_empty(&q_ctx->rdy_queue)) {
 175                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 176                return NULL;
 177        }
 178        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
 179        list_del(&b->list);
 180        q_ctx->num_rdy--;
 181        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 182
 183        return &b->vb;
 184}
 185EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
 186
 187void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
 188                                struct vb2_v4l2_buffer *vbuf)
 189{
 190        struct v4l2_m2m_buffer *b;
 191        unsigned long flags;
 192
 193        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 194        b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
 195        list_del(&b->list);
 196        q_ctx->num_rdy--;
 197        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 198}
 199EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf);
 200
 201struct vb2_v4l2_buffer *
 202v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx)
 204{
 205        struct v4l2_m2m_buffer *b, *tmp;
 206        struct vb2_v4l2_buffer *ret = NULL;
 207        unsigned long flags;
 208
 209        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 210        list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) {
 211                if (b->vb.vb2_buf.index == idx) {
 212                        list_del(&b->list);
 213                        q_ctx->num_rdy--;
 214                        ret = &b->vb;
 215                        break;
 216                }
 217        }
 218        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 219
 220        return ret;
 221}
 222EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx);
 223
 224/*
 225 * Scheduling handlers
 226 */
 227
 228void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
 229{
 230        unsigned long flags;
 231        void *ret = NULL;
 232
 233        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
 234        if (m2m_dev->curr_ctx)
 235                ret = m2m_dev->curr_ctx->priv;
 236        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 237
 238        return ret;
 239}
 240EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
 241
 242/**
 243 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 244 * @m2m_dev: per-device context
 245 *
 246 * Get next transaction (if present) from the waiting jobs list and run it.
 247 */
 248static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
 249{
 250        unsigned long flags;
 251
 252        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
 253        if (m2m_dev->curr_ctx) {
 254                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 255                dprintk("Another instance is running, won't run now\n");
 256                return;
 257        }
 258
 259        if (list_empty(&m2m_dev->job_queue)) {
 260                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 261                dprintk("No job pending\n");
 262                return;
 263        }
 264
 265        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
 266                                   struct v4l2_m2m_ctx, queue);
 267        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
 268        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 269
 270        dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx);
 271        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
 272}
 273
 274/*
 275 * __v4l2_m2m_try_queue() - queue a job
 276 * @m2m_dev: m2m device
 277 * @m2m_ctx: m2m context
 278 *
 279 * Check if this context is ready to queue a job.
 280 *
 281 * This function can run in interrupt context.
 282 */
 283static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
 284                                 struct v4l2_m2m_ctx *m2m_ctx)
 285{
 286        unsigned long flags_job, flags_out, flags_cap;
 287
 288        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
 289
 290        if (!m2m_ctx->out_q_ctx.q.streaming
 291            || !m2m_ctx->cap_q_ctx.q.streaming) {
 292                dprintk("Streaming needs to be on for both queues\n");
 293                return;
 294        }
 295
 296        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
 297
 298        /* If the context is aborted then don't schedule it */
 299        if (m2m_ctx->job_flags & TRANS_ABORT) {
 300                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 301                dprintk("Aborted context\n");
 302                return;
 303        }
 304
 305        if (m2m_ctx->job_flags & TRANS_QUEUED) {
 306                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 307                dprintk("On job queue already\n");
 308                return;
 309        }
 310
 311        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 312        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
 313            && !m2m_ctx->out_q_ctx.buffered) {
 314                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
 315                                        flags_out);
 316                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 317                dprintk("No input buffers available\n");
 318                return;
 319        }
 320        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
 321        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
 322            && !m2m_ctx->cap_q_ctx.buffered) {
 323                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
 324                                        flags_cap);
 325                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
 326                                        flags_out);
 327                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 328                dprintk("No output buffers available\n");
 329                return;
 330        }
 331        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
 332        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 333
 334        if (m2m_dev->m2m_ops->job_ready
 335                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
 336                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 337                dprintk("Driver not ready\n");
 338                return;
 339        }
 340
 341        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
 342        m2m_ctx->job_flags |= TRANS_QUEUED;
 343
 344        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 345}
 346
 347/**
 348 * v4l2_m2m_try_schedule() - schedule and possibly run a job for any context
 349 * @m2m_ctx: m2m context
 350 *
 351 * Check if this context is ready to queue a job. If suitable,
 352 * run the next queued job on the mem2mem device.
 353 *
 354 * This function shouldn't run in interrupt context.
 355 *
 356 * Note that v4l2_m2m_try_schedule() can schedule one job for this context,
 357 * and then run another job for another context.
 358 */
 359void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 360{
 361        struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev;
 362
 363        __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
 364        v4l2_m2m_try_run(m2m_dev);
 365}
 366EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
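
/*
 * Illustrative sketch (not part of this file): a driver that needs more than
 * one buffer per job can gate scheduling through the job_ready callback
 * checked in __v4l2_m2m_try_queue() above. The my_ctx name is hypothetical;
 * v4l2_m2m_num_src_bufs_ready() and v4l2_m2m_num_dst_bufs_ready() are
 * helpers from <media/v4l2-mem2mem.h>.
 *
 *      static int my_job_ready(void *priv)
 *      {
 *              struct my_ctx *ctx = priv;
 *
 *              return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2 &&
 *                     v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 1;
 *      }
 */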
 367
 368/**
 369 * v4l2_m2m_cancel_job() - cancel pending jobs for the context
 370 * @m2m_ctx: m2m context with jobs to be canceled
 371 *
 372 * In case of streamoff or release called on any context,
 373 * 1] If the context is currently running, then abort job will be called
 374 * 2] If the context is queued, then the context will be removed from
 375 *    the job_queue
 376 */
 377static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
 378{
 379        struct v4l2_m2m_dev *m2m_dev;
 380        unsigned long flags;
 381
 382        m2m_dev = m2m_ctx->m2m_dev;
 383        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
 384
 385        m2m_ctx->job_flags |= TRANS_ABORT;
 386        if (m2m_ctx->job_flags & TRANS_RUNNING) {
 387                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 388                if (m2m_dev->m2m_ops->job_abort)
 389                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
 390                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
 391                wait_event(m2m_ctx->finished,
 392                                !(m2m_ctx->job_flags & TRANS_RUNNING));
 393        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
 394                list_del(&m2m_ctx->queue);
 395                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
 396                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 397                dprintk("m2m_ctx: %p had been on queue and was removed\n",
 398                        m2m_ctx);
 399        } else {
 400                /* Do nothing, was not on queue/running */
 401                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 402        }
 403}
 404
 405void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 406                         struct v4l2_m2m_ctx *m2m_ctx)
 407{
 408        unsigned long flags;
 409
 410        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
 411        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
 412                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 413                dprintk("Called by an instance not currently running\n");
 414                return;
 415        }
 416
 417        list_del(&m2m_dev->curr_ctx->queue);
 418        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
 419        wake_up(&m2m_dev->curr_ctx->finished);
 420        m2m_dev->curr_ctx = NULL;
 421
 422        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
 423
 424        /* This instance might have more buffers ready, but since we do not
 425         * allow more than one job on the job_queue per instance, each has
 426         * to be scheduled separately after the previous one finishes. */
 427        v4l2_m2m_try_schedule(m2m_ctx);
 428}
 429EXPORT_SYMBOL(v4l2_m2m_job_finish);
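
/*
 * Illustrative sketch (not part of this file): a typical completion path,
 * e.g. a threaded interrupt handler or other non-atomic completion context,
 * looks up the running context with v4l2_m2m_get_curr_priv(), returns the
 * processed buffers to vb2 and then calls v4l2_m2m_job_finish() so that the
 * next job can be scheduled. The my_dev and my_ctx names are hypothetical.
 *
 *      static irqreturn_t my_irq_thread(int irq, void *data)
 *      {
 *              struct my_dev *dev = data;
 *              struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *              struct vb2_v4l2_buffer *src, *dst;
 *
 *              if (!ctx)
 *                      return IRQ_HANDLED;
 *
 *              src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
 *              dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
 *              vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
 *              vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);
 *              v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
 *
 *              return IRQ_HANDLED;
 *      }
 */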
 430
 431int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 432                     struct v4l2_requestbuffers *reqbufs)
 433{
 434        struct vb2_queue *vq;
 435        int ret;
 436
 437        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
 438        ret = vb2_reqbufs(vq, reqbufs);
 439        /* If count == 0, then the owner has released all buffers and is
 440           no longer the owner of the queue. Otherwise we have an owner. */
 441        if (ret == 0)
 442                vq->owner = reqbufs->count ? file->private_data : NULL;
 443
 444        return ret;
 445}
 446EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);
 447
 448int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 449                      struct v4l2_buffer *buf)
 450{
 451        struct vb2_queue *vq;
 452        int ret = 0;
 453        unsigned int i;
 454
 455        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
 456        ret = vb2_querybuf(vq, buf);
 457
 458        /* Adjust MMAP memory offsets for the CAPTURE queue */
 459        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
 460                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
 461                        for (i = 0; i < buf->length; ++i)
 462                                buf->m.planes[i].m.mem_offset
 463                                        += DST_QUEUE_OFF_BASE;
 464                } else {
 465                        buf->m.offset += DST_QUEUE_OFF_BASE;
 466                }
 467        }
 468
 469        return ret;
 470}
 471EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);
 472
 473int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 474                  struct v4l2_buffer *buf)
 475{
 476        struct vb2_queue *vq;
 477        int ret;
 478
 479        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
 480        ret = vb2_qbuf(vq, buf);
 481        if (!ret)
 482                v4l2_m2m_try_schedule(m2m_ctx);
 483
 484        return ret;
 485}
 486EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);
 487
 488int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 489                   struct v4l2_buffer *buf)
 490{
 491        struct vb2_queue *vq;
 492
 493        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
 494        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
 495}
 496EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 497
 498int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 499                         struct v4l2_buffer *buf)
 500{
 501        struct vb2_queue *vq;
 502        int ret;
 503
 504        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
 505        ret = vb2_prepare_buf(vq, buf);
 506        if (!ret)
 507                v4l2_m2m_try_schedule(m2m_ctx);
 508
 509        return ret;
 510}
 511EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf);
 512
 513int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 514                         struct v4l2_create_buffers *create)
 515{
 516        struct vb2_queue *vq;
 517
 518        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
 519        return vb2_create_bufs(vq, create);
 520}
 521EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
 522
 523int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 524                  struct v4l2_exportbuffer *eb)
 525{
 526        struct vb2_queue *vq;
 527
 528        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
 529        return vb2_expbuf(vq, eb);
 530}
 531EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);
 532
 533int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 534                      enum v4l2_buf_type type)
 535{
 536        struct vb2_queue *vq;
 537        int ret;
 538
 539        vq = v4l2_m2m_get_vq(m2m_ctx, type);
 540        ret = vb2_streamon(vq, type);
 541        if (!ret)
 542                v4l2_m2m_try_schedule(m2m_ctx);
 543
 544        return ret;
 545}
 546EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);
 547
 548int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 549                       enum v4l2_buf_type type)
 550{
 551        struct v4l2_m2m_dev *m2m_dev;
 552        struct v4l2_m2m_queue_ctx *q_ctx;
 553        unsigned long flags_job, flags;
 554        int ret;
 555
 556        /* wait until the current context is dequeued from job_queue */
 557        v4l2_m2m_cancel_job(m2m_ctx);
 558
 559        q_ctx = get_queue_ctx(m2m_ctx, type);
 560        ret = vb2_streamoff(&q_ctx->q, type);
 561        if (ret)
 562                return ret;
 563
 564        m2m_dev = m2m_ctx->m2m_dev;
 565        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
 566        /* We should not be scheduled anymore, since we're dropping a queue. */
 567        if (m2m_ctx->job_flags & TRANS_QUEUED)
 568                list_del(&m2m_ctx->queue);
 569        m2m_ctx->job_flags = 0;
 570
 571        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 572        /* Drop queue, since streamoff returns the device to the same state
 573         * as after calling reqbufs. */
 574        INIT_LIST_HEAD(&q_ctx->rdy_queue);
 575        q_ctx->num_rdy = 0;
 576        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 577
 578        if (m2m_dev->curr_ctx == m2m_ctx) {
 579                m2m_dev->curr_ctx = NULL;
 580                wake_up(&m2m_ctx->finished);
 581        }
 582        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 583
 584        return 0;
 585}
 586EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
 587
 588__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 589                           struct poll_table_struct *wait)
 590{
 591        struct video_device *vfd = video_devdata(file);
 592        __poll_t req_events = poll_requested_events(wait);
 593        struct vb2_queue *src_q, *dst_q;
 594        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
 595        __poll_t rc = 0;
 596        unsigned long flags;
 597
 598        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
 599                struct v4l2_fh *fh = file->private_data;
 600
 601                if (v4l2_event_pending(fh))
 602                        rc = EPOLLPRI;
 603                else if (req_events & EPOLLPRI)
 604                        poll_wait(file, &fh->wait, wait);
 605                if (!(req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)))
 606                        return rc;
 607        }
 608
 609        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
 610        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);
 611
 612        /*
 613         * There has to be at least one buffer queued on each queued_list,
 614         * which means the buffer is either in the driver already or waiting
 615         * for the driver to claim it and start processing.
 616         */
 617        if ((!src_q->streaming || list_empty(&src_q->queued_list))
 618                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
 619                rc |= EPOLLERR;
 620                goto end;
 621        }
 622
 623        spin_lock_irqsave(&src_q->done_lock, flags);
 624        if (list_empty(&src_q->done_list))
 625                poll_wait(file, &src_q->done_wq, wait);
 626        spin_unlock_irqrestore(&src_q->done_lock, flags);
 627
 628        spin_lock_irqsave(&dst_q->done_lock, flags);
 629        if (list_empty(&dst_q->done_list)) {
 630                /*
 631                 * If the last buffer was dequeued from the capture queue,
 632                 * return immediately. DQBUF will return -EPIPE.
 633                 */
 634                if (dst_q->last_buffer_dequeued) {
 635                        spin_unlock_irqrestore(&dst_q->done_lock, flags);
 636                        return rc | EPOLLIN | EPOLLRDNORM;
 637                }
 638
 639                poll_wait(file, &dst_q->done_wq, wait);
 640        }
 641        spin_unlock_irqrestore(&dst_q->done_lock, flags);
 642
 643        spin_lock_irqsave(&src_q->done_lock, flags);
 644        if (!list_empty(&src_q->done_list))
 645                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
 646                                                done_entry);
 647        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
 648                        || src_vb->state == VB2_BUF_STATE_ERROR))
 649                rc |= EPOLLOUT | EPOLLWRNORM;
 650        spin_unlock_irqrestore(&src_q->done_lock, flags);
 651
 652        spin_lock_irqsave(&dst_q->done_lock, flags);
 653        if (!list_empty(&dst_q->done_list))
 654                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
 655                                                done_entry);
 656        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
 657                        || dst_vb->state == VB2_BUF_STATE_ERROR))
 658                rc |= EPOLLIN | EPOLLRDNORM;
 659        spin_unlock_irqrestore(&dst_q->done_lock, flags);
 660
 661end:
 662        return rc;
 663}
 664EXPORT_SYMBOL_GPL(v4l2_m2m_poll);
 665
 666int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 667                         struct vm_area_struct *vma)
 668{
 669        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
 670        struct vb2_queue *vq;
 671
 672        if (offset < DST_QUEUE_OFF_BASE) {
 673                vq = v4l2_m2m_get_src_vq(m2m_ctx);
 674        } else {
 675                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
 676                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
 677        }
 678
 679        return vb2_mmap(vq, vma);
 680}
 681EXPORT_SYMBOL(v4l2_m2m_mmap);
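
/*
 * Illustrative sketch (not part of this file): from userspace the offset
 * juggling is invisible. v4l2_m2m_querybuf() biases CAPTURE offsets by
 * DST_QUEUE_OFF_BASE and v4l2_m2m_mmap() applies the same bias here to pick
 * the destination queue, so an application simply mmap()s whatever offset
 * VIDIOC_QUERYBUF returned:
 *
 *      struct v4l2_buffer buf = {
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP,
 *              .index = 0,
 *      };
 *      void *mem;
 *
 *      ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *      mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, buf.m.offset);
 */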
 682
 683#if defined(CONFIG_MEDIA_CONTROLLER)
 684void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
 685{
 686        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
 687        media_devnode_remove(m2m_dev->intf_devnode);
 688
 689        media_entity_remove_links(m2m_dev->source);
 690        media_entity_remove_links(&m2m_dev->sink);
 691        media_entity_remove_links(&m2m_dev->proc);
 692        media_device_unregister_entity(m2m_dev->source);
 693        media_device_unregister_entity(&m2m_dev->sink);
 694        media_device_unregister_entity(&m2m_dev->proc);
 695        kfree(m2m_dev->source->name);
 696        kfree(m2m_dev->sink.name);
 697        kfree(m2m_dev->proc.name);
 698}
 699EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller);
 700
 701static int v4l2_m2m_register_entity(struct media_device *mdev,
 702        struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type,
 703        struct video_device *vdev, int function)
 704{
 705        struct media_entity *entity;
 706        struct media_pad *pads;
 707        char *name;
 708        unsigned int len;
 709        int num_pads;
 710        int ret;
 711
 712        switch (type) {
 713        case MEM2MEM_ENT_TYPE_SOURCE:
 714                entity = m2m_dev->source;
 715                pads = &m2m_dev->source_pad;
 716                pads[0].flags = MEDIA_PAD_FL_SOURCE;
 717                num_pads = 1;
 718                break;
 719        case MEM2MEM_ENT_TYPE_SINK:
 720                entity = &m2m_dev->sink;
 721                pads = &m2m_dev->sink_pad;
 722                pads[0].flags = MEDIA_PAD_FL_SINK;
 723                num_pads = 1;
 724                break;
 725        case MEM2MEM_ENT_TYPE_PROC:
 726                entity = &m2m_dev->proc;
 727                pads = m2m_dev->proc_pads;
 728                pads[0].flags = MEDIA_PAD_FL_SINK;
 729                pads[1].flags = MEDIA_PAD_FL_SOURCE;
 730                num_pads = 2;
 731                break;
 732        default:
 733                return -EINVAL;
 734        }
 735
 736        entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
 737        if (type != MEM2MEM_ENT_TYPE_PROC) {
 738                entity->info.dev.major = VIDEO_MAJOR;
 739                entity->info.dev.minor = vdev->minor;
 740        }
 741        len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]);
 742        name = kmalloc(len, GFP_KERNEL);
 743        if (!name)
 744                return -ENOMEM;
 745        snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]);
 746        entity->name = name;
 747        entity->function = function;
 748
 749        ret = media_entity_pads_init(entity, num_pads, pads);
 750        if (ret)
 751                return ret;
 752        ret = media_device_register_entity(mdev, entity);
 753        if (ret)
 754                return ret;
 755
 756        return 0;
 757}
 758
 759int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
 760                struct video_device *vdev, int function)
 761{
 762        struct media_device *mdev = vdev->v4l2_dev->mdev;
 763        struct media_link *link;
 764        int ret;
 765
 766        if (!mdev)
 767                return 0;
 768
 769        /* A memory-to-memory device consists of two
 770         * DMA engine entities and one video processing entity.
 771         * The DMA engine entities are linked to a V4L interface.
 772         */
 773
 774        /* Create the three entities with their pads */
 775        m2m_dev->source = &vdev->entity;
 776        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
 777                        MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L);
 778        if (ret)
 779                return ret;
 780        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
 781                        MEM2MEM_ENT_TYPE_PROC, vdev, function);
 782        if (ret)
 783                goto err_rel_entity0;
 784        ret = v4l2_m2m_register_entity(mdev, m2m_dev,
 785                        MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L);
 786        if (ret)
 787                goto err_rel_entity1;
 788
 789        /* Connect the three entities */
 790        ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
 791                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
 792        if (ret)
 793                goto err_rel_entity2;
 794
 795        ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
 796                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
 797        if (ret)
 798                goto err_rm_links0;
 799
 800        /* Create video interface */
 801        m2m_dev->intf_devnode = media_devnode_create(mdev,
 802                        MEDIA_INTF_T_V4L_VIDEO, 0,
 803                        VIDEO_MAJOR, vdev->minor);
 804        if (!m2m_dev->intf_devnode) {
 805                ret = -ENOMEM;
 806                goto err_rm_links1;
 807        }
 808
 809        /* Connect the two DMA engines to the interface */
 810        link = media_create_intf_link(m2m_dev->source,
 811                        &m2m_dev->intf_devnode->intf,
 812                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
 813        if (!link) {
 814                ret = -ENOMEM;
 815                goto err_rm_devnode;
 816        }
 817
 818        link = media_create_intf_link(&m2m_dev->sink,
 819                        &m2m_dev->intf_devnode->intf,
 820                        MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
 821        if (!link) {
 822                ret = -ENOMEM;
 823                goto err_rm_intf_link;
 824        }
 825        return 0;
 826
 827err_rm_intf_link:
 828        media_remove_intf_links(&m2m_dev->intf_devnode->intf);
 829err_rm_devnode:
 830        media_devnode_remove(m2m_dev->intf_devnode);
 831err_rm_links1:
 832        media_entity_remove_links(&m2m_dev->sink);
 833err_rm_links0:
 834        media_entity_remove_links(&m2m_dev->proc);
 835        media_entity_remove_links(m2m_dev->source);
 836err_rel_entity2:
 837        media_device_unregister_entity(&m2m_dev->proc);
 838        kfree(m2m_dev->proc.name);
 839err_rel_entity1:
 840        media_device_unregister_entity(&m2m_dev->sink);
 841        kfree(m2m_dev->sink.name);
 842err_rel_entity0:
 843        media_device_unregister_entity(m2m_dev->source);
 844        kfree(m2m_dev->source->name);
 845        return ret;
 847}
 848EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller);
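
/*
 * Illustrative sketch (not part of this file): drivers call this from probe
 * after video_register_device(), passing a MEDIA_ENT_F_PROC_VIDEO_* function
 * for the processing entity. The dev->vfd and dev->m2m_dev names are
 * hypothetical, as is the choice of MEDIA_ENT_F_PROC_VIDEO_SCALER.
 *
 *      ret = v4l2_m2m_register_media_controller(dev->m2m_dev, &dev->vfd,
 *                                               MEDIA_ENT_F_PROC_VIDEO_SCALER);
 *      if (ret)
 *              goto err_unregister_video;
 */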
 849#endif
 850
 851struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
 852{
 853        struct v4l2_m2m_dev *m2m_dev;
 854
 855        if (!m2m_ops || WARN_ON(!m2m_ops->device_run))
 856                return ERR_PTR(-EINVAL);
 857
 858        m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
 859        if (!m2m_dev)
 860                return ERR_PTR(-ENOMEM);
 861
 862        m2m_dev->curr_ctx = NULL;
 863        m2m_dev->m2m_ops = m2m_ops;
 864        INIT_LIST_HEAD(&m2m_dev->job_queue);
 865        spin_lock_init(&m2m_dev->job_spinlock);
 866
 867        return m2m_dev;
 868}
 869EXPORT_SYMBOL_GPL(v4l2_m2m_init);
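
/*
 * Illustrative sketch (not part of this file): a driver typically sets up its
 * scheduling callbacks once at probe time. The my_* names are hypothetical;
 * only device_run is mandatory, as the WARN_ON() above enforces.
 *
 *      static const struct v4l2_m2m_ops my_m2m_ops = {
 *              .device_run     = my_device_run,
 *              .job_ready      = my_job_ready,
 *              .job_abort      = my_job_abort,
 *      };
 *
 *      dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *      if (IS_ERR(dev->m2m_dev))
 *              return PTR_ERR(dev->m2m_dev);
 */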
 870
 871void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
 872{
 873        kfree(m2m_dev);
 874}
 875EXPORT_SYMBOL_GPL(v4l2_m2m_release);
 876
 877struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 878                void *drv_priv,
 879                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
 880{
 881        struct v4l2_m2m_ctx *m2m_ctx;
 882        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
 883        int ret;
 884
 885        m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
 886        if (!m2m_ctx)
 887                return ERR_PTR(-ENOMEM);
 888
 889        m2m_ctx->priv = drv_priv;
 890        m2m_ctx->m2m_dev = m2m_dev;
 891        init_waitqueue_head(&m2m_ctx->finished);
 892
 893        out_q_ctx = &m2m_ctx->out_q_ctx;
 894        cap_q_ctx = &m2m_ctx->cap_q_ctx;
 895
 896        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
 897        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
 898        spin_lock_init(&out_q_ctx->rdy_spinlock);
 899        spin_lock_init(&cap_q_ctx->rdy_spinlock);
 900
 901        INIT_LIST_HEAD(&m2m_ctx->queue);
 902
 903        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
 904
 905        if (ret)
 906                goto err;
 907        /*
 908         * If both queues use the same mutex, assign it as the common buffer
 909         * queues lock of the m2m context. This lock is used in the
 910         * v4l2_m2m_ioctl_* helpers.
 911         */
 912        if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
 913                m2m_ctx->q_lock = out_q_ctx->q.lock;
 914
 915        return m2m_ctx;
 916err:
 917        kfree(m2m_ctx);
 918        return ERR_PTR(ret);
 919}
 920EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
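
/*
 * Illustrative sketch (not part of this file): the per-file-handle context is
 * usually created in the driver's open() and torn down with
 * v4l2_m2m_ctx_release() in release(). The my_* names are hypothetical;
 * my_queue_init() stands for the driver's queue_init callback, which fills in
 * both struct vb2_queue and calls vb2_queue_init() on each of them.
 *
 *      static int my_open(struct file *file)
 *      {
 *              struct my_dev *dev = video_drvdata(file);
 *              struct my_ctx *ctx;
 *
 *              ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *              if (!ctx)
 *                      return -ENOMEM;
 *
 *              v4l2_fh_init(&ctx->fh, video_devdata(file));
 *              ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *                                                  my_queue_init);
 *              if (IS_ERR(ctx->fh.m2m_ctx)) {
 *                      int ret = PTR_ERR(ctx->fh.m2m_ctx);
 *
 *                      v4l2_fh_exit(&ctx->fh);
 *                      kfree(ctx);
 *                      return ret;
 *              }
 *              v4l2_fh_add(&ctx->fh);
 *              file->private_data = &ctx->fh;
 *              return 0;
 *      }
 */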
 921
 922void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
 923{
 924        /* wait until the current context is dequeued from job_queue */
 925        v4l2_m2m_cancel_job(m2m_ctx);
 926
 927        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
 928        vb2_queue_release(&m2m_ctx->out_q_ctx.q);
 929
 930        kfree(m2m_ctx);
 931}
 932EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
 933
 934void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 935                struct vb2_v4l2_buffer *vbuf)
 936{
 937        struct v4l2_m2m_buffer *b = container_of(vbuf,
 938                                struct v4l2_m2m_buffer, vb);
 939        struct v4l2_m2m_queue_ctx *q_ctx;
 940        unsigned long flags;
 941
 942        q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type);
 943        if (!q_ctx)
 944                return;
 945
 946        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
 947        list_add_tail(&b->list, &q_ctx->rdy_queue);
 948        q_ctx->num_rdy++;
 949        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
 950}
 951EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
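
/*
 * Illustrative sketch (not part of this file): this helper is normally called
 * from the driver's vb2 .buf_queue operation, which hands every queued buffer
 * over to the m2m ready queues. The my_ctx name is hypothetical and the
 * example assumes the queue's drv_priv was set to the driver context when the
 * queues were initialized.
 *
 *      static void my_buf_queue(struct vb2_buffer *vb)
 *      {
 *              struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *              struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *              v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
 *      }
 */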
 952
 953/* Videobuf2 ioctl helpers */
 954
 955int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
 956                                struct v4l2_requestbuffers *rb)
 957{
 958        struct v4l2_fh *fh = file->private_data;
 959
 960        return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb);
 961}
 962EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs);
 963
 964int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv,
 965                                struct v4l2_create_buffers *create)
 966{
 967        struct v4l2_fh *fh = file->private_data;
 968
 969        return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create);
 970}
 971EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs);
 972
 973int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv,
 974                                struct v4l2_buffer *buf)
 975{
 976        struct v4l2_fh *fh = file->private_data;
 977
 978        return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf);
 979}
 980EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf);
 981
 982int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv,
 983                                struct v4l2_buffer *buf)
 984{
 985        struct v4l2_fh *fh = file->private_data;
 986
 987        return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
 988}
 989EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf);
 990
 991int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv,
 992                                struct v4l2_buffer *buf)
 993{
 994        struct v4l2_fh *fh = file->private_data;
 995
 996        return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf);
 997}
 998EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf);
 999
1000int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv,
1001                               struct v4l2_buffer *buf)
1002{
1003        struct v4l2_fh *fh = file->private_data;
1004
1005        return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf);
1006}
1007EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf);
1008
1009int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv,
1010                                struct v4l2_exportbuffer *eb)
1011{
1012        struct v4l2_fh *fh = file->private_data;
1013
1014        return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb);
1015}
1016EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf);
1017
1018int v4l2_m2m_ioctl_streamon(struct file *file, void *priv,
1019                                enum v4l2_buf_type type)
1020{
1021        struct v4l2_fh *fh = file->private_data;
1022
1023        return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
1024}
1025EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon);
1026
1027int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv,
1028                                enum v4l2_buf_type type)
1029{
1030        struct v4l2_fh *fh = file->private_data;
1031
1032        return v4l2_m2m_streamoff(file, fh->m2m_ctx, type);
1033}
1034EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff);
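
/*
 * Illustrative sketch (not part of this file): drivers whose file handles
 * embed a struct v4l2_fh with a valid m2m_ctx can wire these helpers straight
 * into their v4l2_ioctl_ops. The my_ioctl_ops and my_querycap names are
 * hypothetical; format handling stays driver specific.
 *
 *      static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *              .vidioc_querycap        = my_querycap,
 *              .vidioc_reqbufs         = v4l2_m2m_ioctl_reqbufs,
 *              .vidioc_create_bufs     = v4l2_m2m_ioctl_create_bufs,
 *              .vidioc_querybuf        = v4l2_m2m_ioctl_querybuf,
 *              .vidioc_qbuf            = v4l2_m2m_ioctl_qbuf,
 *              .vidioc_dqbuf           = v4l2_m2m_ioctl_dqbuf,
 *              .vidioc_prepare_buf     = v4l2_m2m_ioctl_prepare_buf,
 *              .vidioc_expbuf          = v4l2_m2m_ioctl_expbuf,
 *              .vidioc_streamon        = v4l2_m2m_ioctl_streamon,
 *              .vidioc_streamoff       = v4l2_m2m_ioctl_streamoff,
 *      };
 */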
1035
1036/*
1037 * v4l2_file_operations helpers. It is assumed here that the same lock is
1038 * used for both the output and the capture buffer queues.
1039 */
1040
1041int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma)
1042{
1043        struct v4l2_fh *fh = file->private_data;
1044
1045        return v4l2_m2m_mmap(file, fh->m2m_ctx, vma);
1046}
1047EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap);
1048
1049__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait)
1050{
1051        struct v4l2_fh *fh = file->private_data;
1052        struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx;
1053        __poll_t ret;
1054
1055        if (m2m_ctx->q_lock)
1056                mutex_lock(m2m_ctx->q_lock);
1057
1058        ret = v4l2_m2m_poll(file, m2m_ctx, wait);
1059
1060        if (m2m_ctx->q_lock)
1061                mutex_unlock(m2m_ctx->q_lock);
1062
1063        return ret;
1064}
1065EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll);
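
/*
 * Illustrative sketch (not part of this file): together with the ioctl
 * helpers above, these two let a driver's v4l2_file_operations stay minimal.
 * The my_fops, my_open and my_release names are hypothetical.
 *
 *      static const struct v4l2_file_operations my_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = my_open,
 *              .release        = my_release,
 *              .poll           = v4l2_m2m_fop_poll,
 *              .unlocked_ioctl = video_ioctl2,
 *              .mmap           = v4l2_m2m_fop_mmap,
 *      };
 */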
1066
1067