linux/drivers/media/v4l2-core/v4l2-mem2mem.c
/*
 * Memory-to-memory device framework for Video for Linux 2 and videobuf.
 *
 * Helper functions for devices that use videobuf buffers for both their
 * source and destination.
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>

MODULE_DESCRIPTION("Mem to mem device framework for videobuf");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");

static bool debug;
module_param(debug, bool, 0644);

#define dprintk(fmt, arg...)                                            \
        do {                                                            \
                if (debug)                                              \
                        printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\
        } while (0)


/* Instance is already queued on the job_queue */
#define TRANS_QUEUED            (1 << 0)
/* Instance is currently running in hardware */
#define TRANS_RUNNING           (1 << 1)


/* Offset base for buffers on the destination queue - used to distinguish
 * between source and destination buffers when mmapping - they receive the same
 * offsets but for different queues */
#define DST_QUEUE_OFF_BASE      (1 << 30)


/**
 * struct v4l2_m2m_dev - per-device context
 * @curr_ctx:           currently running instance
 * @job_queue:          instances queued to run
 * @job_spinlock:       protects job_queue
 * @m2m_ops:            driver callbacks
 */
struct v4l2_m2m_dev {
        struct v4l2_m2m_ctx     *curr_ctx;

        struct list_head        job_queue;
        spinlock_t              job_spinlock;

        const struct v4l2_m2m_ops *m2m_ops;
};

static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,
                                                enum v4l2_buf_type type)
{
        if (V4L2_TYPE_IS_OUTPUT(type))
                return &m2m_ctx->out_q_ctx;
        else
                return &m2m_ctx->cap_q_ctx;
}

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_queue_ctx *q_ctx;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        if (!q_ctx)
                return NULL;

        return &q_ctx->q;
}
EXPORT_SYMBOL(v4l2_m2m_get_vq);

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);

        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }

        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf);

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx)
{
        struct v4l2_m2m_buffer *b = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        if (list_empty(&q_ctx->rdy_queue)) {
                spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
                return NULL;
        }
        b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list);
        list_del(&b->list);
        q_ctx->num_rdy--;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        return &b->vb;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove);
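
/*
 * Typical usage (a sketch, not part of this file): a driver's device_run()
 * callback peeks at the next ready source and destination buffers via the
 * v4l2_m2m_next_src_buf()/v4l2_m2m_next_dst_buf() wrappers around
 * v4l2_m2m_next_buf(); the buffers are only taken off the ready lists
 * (v4l2_m2m_src_buf_remove()/v4l2_m2m_dst_buf_remove()) once the hardware
 * is done with them. The names my_ctx, my_hw_start() and the m2m_ctx member
 * are hypothetical driver-side constructs.
 *
 *	static void my_device_run(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *		struct vb2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		my_hw_start(ctx, src, dst);
 *	}
 */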

/*
 * Scheduling handlers
 */

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;
        void *ret = NULL;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx)
                ret = m2m_dev->curr_ctx->priv;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        return ret;
}
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);

/**
 * v4l2_m2m_try_run() - select next job to perform and run it if possible
 *
 * Get next transaction (if present) from the waiting jobs list and run it.
 */
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_dev->curr_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Another instance is running, won't run now\n");
                return;
        }

        if (list_empty(&m2m_dev->job_queue)) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("No job pending\n");
                return;
        }

        m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                   struct v4l2_m2m_ctx, queue);
        m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
}

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so.
 * @m2m_ctx:    m2m context assigned to the instance to be checked
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * There may also be additional, custom requirements. In such a case, the
 * driver should supply a custom callback (job_ready in v4l2_m2m_ops) that
 * returns 1 if the instance is ready.
 * An example would be an instance that requires more than one src/dst buffer
 * per transaction.
 */
static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags_job, flags_out, flags_cap;

        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);

        if (!m2m_ctx->out_q_ctx.q.streaming
            || !m2m_ctx->cap_q_ctx.q.streaming) {
                dprintk("Streaming needs to be on for both queues\n");
                return;
        }

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        if (m2m_ctx->job_flags & TRANS_QUEUED) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("On job queue already\n");
                return;
        }

        spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
        spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
                spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
                                        flags_cap);
                spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
                                        flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
        spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);

        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("Driver not ready\n");
                return;
        }

        list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
        m2m_ctx->job_flags |= TRANS_QUEUED;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        v4l2_m2m_try_run(m2m_dev);
}

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the device_run() callback has been
 * called on the driver. To prevent recursion, it should not be called directly
 * from the device_run() callback itself, though.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx)
{
        unsigned long flags;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("Called by an instance not currently running\n");
                return;
        }

        list_del(&m2m_dev->curr_ctx->queue);
        m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
        wake_up(&m2m_dev->curr_ctx->finished);
        m2m_dev->curr_ctx = NULL;

        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

        /* This instance might have more buffers ready, but since we do not
         * allow more than one job on the job_queue per instance, each has
         * to be scheduled separately after the previous one finishes. */
        v4l2_m2m_try_schedule(m2m_ctx);
        v4l2_m2m_try_run(m2m_dev);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
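
/*
 * Typical usage (a sketch, not part of this file): from the interrupt
 * handler that signals completion of a transaction, the driver removes the
 * finished buffers from the ready lists, hands them back to videobuf2 with
 * vb2_buffer_done() and then yields the device with v4l2_m2m_job_finish().
 * The names my_dev, my_ctx and my_irq are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		struct vb2_buffer *src, *dst;
 *
 *		if (!ctx)
 *			return IRQ_HANDLED;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
 *		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *
 *		return IRQ_HANDLED;
 *	}
 */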

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type);
        return vb2_reqbufs(vq, reqbufs);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret = 0;
        unsigned int i;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_querybuf(vq, buf);

        /* Adjust MMAP memory offsets for the CAPTURE queue */
        if (buf->memory == V4L2_MEMORY_MMAP && !V4L2_TYPE_IS_OUTPUT(vq->type)) {
                if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) {
                        for (i = 0; i < buf->length; ++i)
                                buf->m.planes[i].m.mem_offset
                                        += DST_QUEUE_OFF_BASE;
                } else {
                        buf->m.offset += DST_QUEUE_OFF_BASE;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        ret = vb2_qbuf(vq, buf);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, buf->type);
        return vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
        return vb2_create_bufs(vq, create);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_exportbuffer *eb)
{
        struct vb2_queue *vq;

        vq = v4l2_m2m_get_vq(m2m_ctx, eb->type);
        return vb2_expbuf(vq, eb);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type)
{
        struct vb2_queue *vq;
        int ret;

        vq = v4l2_m2m_get_vq(m2m_ctx, type);
        ret = vb2_streamon(vq, type);
        if (!ret)
                v4l2_m2m_try_schedule(m2m_ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamon);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type)
{
        struct v4l2_m2m_dev *m2m_dev;
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags_job, flags;
        int ret;

        q_ctx = get_queue_ctx(m2m_ctx, type);
        ret = vb2_streamoff(&q_ctx->q, type);
        if (ret)
                return ret;

        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* We should not be scheduled anymore, since we're dropping a queue. */
        INIT_LIST_HEAD(&m2m_ctx->queue);
        m2m_ctx->job_flags = 0;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        /* Drop the ready queue, since streamoff returns the device to the same
         * state as after calling reqbufs. */
        INIT_LIST_HEAD(&q_ctx->rdy_queue);
        q_ctx->num_rdy = 0;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);

        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

        return 0;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff);
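
/*
 * The ioctl multiplexers above are meant to be called from the driver's
 * v4l2_ioctl_ops handlers; a minimal sketch (my_ctx and the use of the fh
 * pointer as the driver context are assumptions):
 *
 *	static int vidioc_qbuf(struct file *file, void *priv,
 *			       struct v4l2_buffer *buf)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
 *	}
 *
 * with analogous one-line wrappers for reqbufs, querybuf, dqbuf,
 * create_bufs, expbuf, streamon and streamoff.
 */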

/**
 * v4l2_m2m_poll() - poll replacement, for source and destination buffers
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed (POLLOUT | POLLWRNORM),
 * while a ready destination buffer will be signalled as readable
 * (POLLIN | POLLRDNORM).
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait)
{
        struct video_device *vfd = video_devdata(file);
        unsigned long req_events = poll_requested_events(wait);
        struct vb2_queue *src_q, *dst_q;
        struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
        unsigned int rc = 0;
        unsigned long flags;

        if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
                struct v4l2_fh *fh = file->private_data;

                if (v4l2_event_pending(fh))
                        rc = POLLPRI;
                else if (req_events & POLLPRI)
                        poll_wait(file, &fh->wait, wait);
                if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
                        return rc;
        }

        src_q = v4l2_m2m_get_src_vq(m2m_ctx);
        dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

        /*
         * There has to be at least one buffer queued on each queued_list, which
         * means either in driver already or waiting for driver to claim it
         * and start processing.
         */
        if ((!src_q->streaming || list_empty(&src_q->queued_list))
                && (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
                rc |= POLLERR;
                goto end;
        }

        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

        if (list_empty(&src_q->done_list))
                poll_wait(file, &src_q->done_wq, wait);
        if (list_empty(&dst_q->done_list))
                poll_wait(file, &dst_q->done_wq, wait);

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

        spin_lock_irqsave(&src_q->done_lock, flags);
        if (!list_empty(&src_q->done_list))
                src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
                        || src_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLOUT | POLLWRNORM;
        spin_unlock_irqrestore(&src_q->done_lock, flags);

        spin_lock_irqsave(&dst_q->done_lock, flags);
        if (!list_empty(&dst_q->done_list))
                dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
                                                done_entry);
        if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
                        || dst_vb->state == VB2_BUF_STATE_ERROR))
                rc |= POLLIN | POLLRDNORM;
        spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
        return rc;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_poll);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * Call from driver's mmap() function. Will handle mmap() for both queues
 * seamlessly for videobuf2, which will receive normal per-queue offsets and
 * proper vb2 queue pointers. The differentiation is made outside videobuf2
 * by adding a predefined offset (DST_QUEUE_OFF_BASE) to buffers from the
 * destination queue and subtracting it again before passing the offset back
 * to videobuf2. Only drivers (and thus applications) see the modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct vm_area_struct *vma)
{
        unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
        struct vb2_queue *vq;

        if (offset < DST_QUEUE_OFF_BASE) {
                vq = v4l2_m2m_get_src_vq(m2m_ctx);
        } else {
                vq = v4l2_m2m_get_dst_vq(m2m_ctx);
                vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
        }

        return vb2_mmap(vq, vma);
}
EXPORT_SYMBOL(v4l2_m2m_mmap);
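
/*
 * v4l2_m2m_poll() and v4l2_m2m_mmap() are likewise meant to back the
 * driver's v4l2_file_operations; a rough sketch (my_ctx and the
 * file->private_data layout are assumptions):
 *
 *	static unsigned int my_poll(struct file *file,
 *				    struct poll_table_struct *wait)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
 *	}
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
 *	}
 */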

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * Usually called from driver's probe() function.
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
{
        struct v4l2_m2m_dev *m2m_dev;

        if (!m2m_ops || WARN_ON(!m2m_ops->device_run) ||
                        WARN_ON(!m2m_ops->job_abort))
                return ERR_PTR(-EINVAL);

        m2m_dev = kzalloc(sizeof(*m2m_dev), GFP_KERNEL);
        if (!m2m_dev)
                return ERR_PTR(-ENOMEM);

        m2m_dev->curr_ctx = NULL;
        m2m_dev->m2m_ops = m2m_ops;
        INIT_LIST_HEAD(&m2m_dev->job_queue);
        spin_lock_init(&m2m_dev->job_spinlock);

        return m2m_dev;
}
EXPORT_SYMBOL_GPL(v4l2_m2m_init);
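
/*
 * A rough sketch of probe-time setup (not part of this file; my_device_run,
 * my_job_ready and my_job_abort are hypothetical driver callbacks):
 *
 *	static const struct v4l2_m2m_ops my_m2m_ops = {
 *		.device_run	= my_device_run,
 *		.job_ready	= my_job_ready,
 *		.job_abort	= my_job_abort,
 *	};
 *
 *	dev->m2m_dev = v4l2_m2m_init(&my_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev))
 *		return PTR_ERR(dev->m2m_dev);
 */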

/**
 * v4l2_m2m_release() - clean up and free an m2m_dev structure
 *
 * Usually called from driver's remove() function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
{
        kfree(m2m_dev);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_release);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize an m2m context
 * @m2m_dev:    a previously initialized m2m_dev struct
 * @drv_priv:   driver's instance private data
 * @queue_init: a callback for queue type-specific initialization to be used
 *              for initializing the vb2_queues
 *
 * Usually called from driver's open() function.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq))
{
        struct v4l2_m2m_ctx *m2m_ctx;
        struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx;
        int ret;

        m2m_ctx = kzalloc(sizeof(*m2m_ctx), GFP_KERNEL);
        if (!m2m_ctx)
                return ERR_PTR(-ENOMEM);

        m2m_ctx->priv = drv_priv;
        m2m_ctx->m2m_dev = m2m_dev;
        init_waitqueue_head(&m2m_ctx->finished);

        out_q_ctx = &m2m_ctx->out_q_ctx;
        cap_q_ctx = &m2m_ctx->cap_q_ctx;

        INIT_LIST_HEAD(&out_q_ctx->rdy_queue);
        INIT_LIST_HEAD(&cap_q_ctx->rdy_queue);
        spin_lock_init(&out_q_ctx->rdy_spinlock);
        spin_lock_init(&cap_q_ctx->rdy_spinlock);

        INIT_LIST_HEAD(&m2m_ctx->queue);

        ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
        if (ret)
                goto err;

        return m2m_ctx;
err:
        kfree(m2m_ctx);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
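
/*
 * A rough sketch of per-open setup (not part of this file): the driver
 * supplies a queue_init callback that fills in and registers both
 * vb2_queues, then calls v4l2_m2m_ctx_init() from its open() handler.
 * my_queue_init, my_ctx and the elided queue fields are assumptions.
 *
 *	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
 *				 struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		...
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
 *	if (IS_ERR(ctx->m2m_ctx))
 *		return PTR_ERR(ctx->m2m_ctx);
 */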

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * Usually called from driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
        struct v4l2_m2m_dev *m2m_dev;
        unsigned long flags;

        m2m_dev = m2m_ctx->m2m_dev;

        spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
        if (m2m_ctx->job_flags & TRANS_RUNNING) {
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
                wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
        } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                list_del(&m2m_ctx->queue);
                m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                dprintk("m2m_ctx: %p had been on queue and was removed\n",
                        m2m_ctx);
        } else {
                /* Do nothing, was not on queue/running */
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
        }

        vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
        vb2_queue_release(&m2m_ctx->out_q_ctx.q);

        kfree(m2m_ctx);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release);
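
/*
 * A rough sketch (not part of this file) of how this pairs with the
 * driver's release() file operation; my_release and my_ctx are assumptions:
 *
 *	static int my_release(struct file *file)
 *	{
 *		struct my_ctx *ctx = file->private_data;
 *
 *		v4l2_m2m_ctx_release(ctx->m2m_ctx);
 *		kfree(ctx);
 *		return 0;
 *	}
 */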

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 *
 * Call from the vb2_ops buf_queue() callback.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb)
{
        struct v4l2_m2m_buffer *b = container_of(vb, struct v4l2_m2m_buffer, vb);
        struct v4l2_m2m_queue_ctx *q_ctx;
        unsigned long flags;

        q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type);
        if (!q_ctx)
                return;

        spin_lock_irqsave(&q_ctx->rdy_spinlock, flags);
        list_add_tail(&b->list, &q_ctx->rdy_queue);
        q_ctx->num_rdy++;
        spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags);
}
EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
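
/*
 * A rough sketch (not part of this file) of the vb2_ops hook that feeds the
 * ready lists; my_buf_queue and the use of the queue's drv_priv as the
 * driver context are assumptions:
 *
 *	static void my_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
 *	}
 */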