   1/*
   2 * Memory-to-memory device framework for Video for Linux 2.
   3 *
   4 * Helper functions for devices that use memory buffers for both source
   5 * and destination.
   6 *
   7 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
   8 * Pawel Osciak, <pawel@osciak.com>
   9 * Marek Szyprowski, <m.szyprowski@samsung.com>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by the
  13 * Free Software Foundation; either version 2 of the
  14 * License, or (at your option) any later version
  15 */
  16
  17#ifndef _MEDIA_V4L2_MEM2MEM_H
  18#define _MEDIA_V4L2_MEM2MEM_H
  19
  20#include <media/videobuf2-v4l2.h>
  21
  22/**
  23 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
  24 * @device_run: required. Begin the actual job (transaction) inside this
  25 *              callback.
  26 *              The job does NOT have to end before this callback returns
  27 *              (and it will be the usual case). When the job finishes,
  28 *              v4l2_m2m_job_finish() has to be called.
  29 * @job_ready:  optional. Should return 0 if the driver does not have a job
  30 *              fully prepared to run yet (i.e. it will not be able to finish a
  31 *              transaction without sleeping). If not provided, it will be
  32 *              assumed that one source and one destination buffer are all
  33 *              that is required for the driver to perform one full transaction.
  34 *              This method may not sleep.
  35 * @job_abort:  required. Informs the driver that it has to abort the currently
  36 *              running transaction as soon as possible (i.e. as soon as it can
  37 *              stop the device safely; e.g. in the next interrupt handler),
  38 *              even if the transaction would not have been finished by then.
  39 *              After the driver performs the necessary steps, it has to call
  40 *              v4l2_m2m_job_finish() (as if the transaction ended normally).
  41 *              This function does not have to (and will usually not) wait
  42 *              until the device enters a state when it can be stopped.
  43 * @lock:       optional. Define a driver's own lock callback, instead of using
  44 *              &v4l2_m2m_ctx->q_lock.
  45 * @unlock:     optional. Define a driver's own unlock callback, instead of
  46 *              using &v4l2_m2m_ctx->q_lock.
  47 */
struct v4l2_m2m_ops {
	/* required: begin the job; completion is reported via v4l2_m2m_job_finish() */
	void (*device_run)(void *priv);
	/* optional: return 0 if a full transaction cannot be run yet; must not sleep */
	int (*job_ready)(void *priv);
	/* required: abort the running transaction as soon as it is safe to do so */
	void (*job_abort)(void *priv);
	/* optional: driver-provided lock/unlock, used instead of &v4l2_m2m_ctx->q_lock */
	void (*lock)(void *priv);
	void (*unlock)(void *priv);
};
  55
  56struct v4l2_m2m_dev;
  57
  58/**
  59 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
  60 *      processed
  61 *
  62 * @q:          pointer to struct &vb2_queue
  63 * @rdy_queue:  List of V4L2 mem-to-mem queues
  64 * @rdy_spinlock: spin lock to protect the struct usage
  65 * @num_rdy:    number of buffers ready to be processed
  66 * @buffered:   is the queue buffered?
  67 *
  68 * Queue for buffers ready to be processed as soon as this
  69 * instance receives access to the device.
  70 */
  71
struct v4l2_m2m_queue_ctx {
	struct vb2_queue	q;

	/* buffers ready to be processed; protected by rdy_spinlock */
	struct list_head	rdy_queue;
	spinlock_t		rdy_spinlock;
	/* number of buffers ready to be processed */
	u8			num_rdy;
	/* if true, the instance may be scheduled without buffers on this queue */
	bool			buffered;
};
  80
  81/**
  82 * struct v4l2_m2m_ctx - Memory to memory context structure
  83 *
  84 * @q_lock: struct &mutex lock
  85 * @m2m_dev: opaque pointer to the internal data to handle M2M context
  86 * @cap_q_ctx: Capture (output to memory) queue context
  87 * @out_q_ctx: Output (input from memory) queue context
  88 * @queue: List of memory to memory contexts
  89 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
  90 *              %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
  91 * @finished: Wait queue used to signalize when a job queue finished.
  92 * @priv: Instance private data
  93 *
  94 * The memory to memory context is specific to a file handle, NOT to e.g.
  95 * a device.
  96 */
struct v4l2_m2m_ctx {
	/* optional cap/out vb2 queues lock */
	struct mutex			*q_lock;

	/* internal use only */
	struct v4l2_m2m_dev		*m2m_dev;

	/* capture (output to memory) queue context */
	struct v4l2_m2m_queue_ctx	cap_q_ctx;

	/* output (input from memory) queue context */
	struct v4l2_m2m_queue_ctx	out_q_ctx;

	/* For device job queue */
	struct list_head		queue;
	/* TRANS_QUEUED/TRANS_RUNNING/TRANS_ABORT, internal to v4l2-mem2mem.c */
	unsigned long			job_flags;
	/* woken when a queued job finishes */
	wait_queue_head_t		finished;

	/* instance private data */
	void				*priv;
};
 115
/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: entry in one of the &v4l2_m2m_queue_ctx rdy_queue lists
 */
struct v4l2_m2m_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};
 126
 127/**
 128 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 129 * running instance or NULL if no instance is running
 130 *
 131 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 132 */
 133void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);
 134
 135/**
 136 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 137 *
 138 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 139 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 140 */
 141struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
 142                                       enum v4l2_buf_type type);
 143
 144/**
 145 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 146 * the pending job queue and add it if so.
 147 *
 148 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 149 *
 150 * There are three basic requirements an instance has to meet to be able to run:
 151 * 1) at least one source buffer has to be queued,
 152 * 2) at least one destination buffer has to be queued,
 153 * 3) streaming has to be on.
 154 *
 155 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 156 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 157 * on that queue.
 158 *
 159 * There may also be additional, custom requirements. In such case the driver
 160 * should supply a custom callback (job_ready in v4l2_m2m_ops) that should
 161 * return 1 if the instance is ready.
 162 * An example of the above could be an instance that requires more than one
 163 * src/dst buffer per transaction.
 164 */
 165void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
 166
 167/**
 168 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 169 * and have it clean up
 170 *
 171 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 172 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 173 *
 174 * Called by a driver to yield back the device after it has finished with it.
 175 * Should be called as soon as possible after reaching a state which allows
 176 * other instances to take control of the device.
 177 *
 178 * This function has to be called only after &v4l2_m2m_ops->device_run
 179 * callback has been called on the driver. To prevent recursion, it should
 180 * not be called directly from the &v4l2_m2m_ops->device_run callback though.
 181 */
 182void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
 183                         struct v4l2_m2m_ctx *m2m_ctx);
 184
/**
 * v4l2_m2m_buf_done() - mark a buffer as done
 *
 * @buf: buffer to complete
 * @state: final state to report for the buffer
 *
 * Thin wrapper around vb2_buffer_done() for a &vb2_v4l2_buffer.
 */
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
	vb2_buffer_done(&buf->vb2_buf, state);
}
 190
 191/**
 192 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 193 *
 194 * @file: pointer to struct &file
 195 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 196 * @reqbufs: pointer to struct &v4l2_requestbuffers
 197 */
 198int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 199                     struct v4l2_requestbuffers *reqbufs);
 200
 201/**
 202 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 203 *
 204 * @file: pointer to struct &file
 205 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 206 * @buf: pointer to struct &v4l2_buffer
 207 *
 208 * See v4l2_m2m_mmap() documentation for details.
 209 */
 210int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 211                      struct v4l2_buffer *buf);
 212
 213/**
 214 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 215 * the type
 216 *
 217 * @file: pointer to struct &file
 218 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 219 * @buf: pointer to struct &v4l2_buffer
 220 */
 221int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 222                  struct v4l2_buffer *buf);
 223
 224/**
 225 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 226 * the type
 227 *
 228 * @file: pointer to struct &file
 229 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 230 * @buf: pointer to struct &v4l2_buffer
 231 */
 232int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 233                   struct v4l2_buffer *buf);
 234
 235/**
 236 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 237 * the type
 238 *
 239 * @file: pointer to struct &file
 240 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 241 * @buf: pointer to struct &v4l2_buffer
 242 */
 243int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 244                         struct v4l2_buffer *buf);
 245
 246/**
 247 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 248 * on the type
 249 *
 250 * @file: pointer to struct &file
 251 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 252 * @create: pointer to struct &v4l2_create_buffers
 253 */
 254int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 255                         struct v4l2_create_buffers *create);
 256
 257/**
 258 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 259 * the type
 260 *
 261 * @file: pointer to struct &file
 262 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 263 * @eb: pointer to struct &v4l2_exportbuffer
 264 */
 265int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 266                   struct v4l2_exportbuffer *eb);
 267
 268/**
 269 * v4l2_m2m_streamon() - turn on streaming for a video queue
 270 *
 271 * @file: pointer to struct &file
 272 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 273 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 274 */
 275int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 276                      enum v4l2_buf_type type);
 277
 278/**
 279 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 280 *
 281 * @file: pointer to struct &file
 282 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 283 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 284 */
 285int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 286                       enum v4l2_buf_type type);
 287
 288/**
 289 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 290 *
 291 * @file: pointer to struct &file
 292 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 293 * @wait: pointer to struct &poll_table_struct
 294 *
 295 * Call from the driver's poll() function. Will poll both queues. If a buffer
 296 * is available to dequeue (with dqbuf) from the source queue, this will
 297 * indicate that a non-blocking write can be performed, while read will be
 298 * returned in case of the destination queue.
 299 */
 300__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 301                           struct poll_table_struct *wait);
 302
 303/**
 304 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 305 *
 306 * @file: pointer to struct &file
 307 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 308 * @vma: pointer to struct &vm_area_struct
 309 *
 310 * Call from driver's mmap() function. Will handle mmap() for both queues
 311 * seamlessly for videobuffer, which will receive normal per-queue offsets and
 312 * proper videobuf queue pointers. The differentiation is made outside videobuf
 313 * by adding a predefined offset to buffers from one of the queues and
 314 * subtracting it before passing it back to videobuf. Only drivers (and
 315 * thus applications) receive modified offsets.
 316 */
 317int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 318                  struct vm_area_struct *vma);
 319
 320/**
 321 * v4l2_m2m_init() - initialize per-driver m2m data
 322 *
 323 * @m2m_ops: pointer to struct v4l2_m2m_ops
 324 *
 325 * Usually called from driver's ``probe()`` function.
 326 *
 327 * Return: returns an opaque pointer to the internal data to handle M2M context
 328 */
 329struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
 330
 331/**
 332 * v4l2_m2m_release() - cleans up and frees a m2m_dev structure
 333 *
 334 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 335 *
 336 * Usually called from driver's ``remove()`` function.
 337 */
 338void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);
 339
 340/**
 341 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 342 *
 343 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 344 * @drv_priv: driver's instance private data
 345 * @queue_init: a callback for queue type-specific initialization function
 346 *      to be used for initializing videobuf_queues
 347 *
 348 * Usually called from driver's ``open()`` function.
 349 */
 350struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
 351                void *drv_priv,
 352                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
 353
/**
 * v4l2_m2m_set_src_buffered() - mark the source (output) queue as buffered
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buffered: new buffered state for the queue
 *
 * A buffered queue allows job scheduling without v4l2 buffers queued on it
 * (see v4l2_m2m_try_schedule()).
 */
static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->out_q_ctx.buffered = buffered;
}
 359
/**
 * v4l2_m2m_set_dst_buffered() - mark the destination (capture) queue as
 * buffered
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buffered: new buffered state for the queue
 *
 * A buffered queue allows job scheduling without v4l2 buffers queued on it
 * (see v4l2_m2m_try_schedule()).
 */
static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
					     bool buffered)
{
	m2m_ctx->cap_q_ctx.buffered = buffered;
}
 365
 366/**
 367 * v4l2_m2m_ctx_release() - release m2m context
 368 *
 369 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 370 *
 371 * Usually called from driver's release() function.
 372 */
 373void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
 374
 375/**
 376 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list.
 377 *
 378 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 379 * @vbuf: pointer to struct &vb2_v4l2_buffer
 380 *
 * Call from the &vb2_ops->buf_queue videobuf2 queue operations callback.
 382 */
 383void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 384                        struct vb2_v4l2_buffer *vbuf);
 385
 386/**
 387 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 388 * use
 389 *
 390 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 391 */
 392static inline
 393unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
 394{
 395        return m2m_ctx->out_q_ctx.num_rdy;
 396}
 397
 398/**
 399 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 400 * ready for use
 401 *
 402 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 403 */
 404static inline
 405unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
 406{
 407        return m2m_ctx->cap_q_ctx.num_rdy;
 408}
 409
 410/**
 411 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 412 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 414 */
 415void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);
 416
 417/**
 418 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 419 * buffers
 420 *
 421 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 422 */
 423static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
 424{
 425        return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
 426}
 427
 428/**
 429 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 430 * ready buffers
 431 *
 432 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 433 */
 434static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
 435{
 436        return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
 437}
 438
/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 *
 * NOTE(review): rdy_queue is protected by &v4l2_m2m_queue_ctx rdy_spinlock;
 * callers presumably must hold it while iterating — confirm in v4l2-mem2mem.c.
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)
 448
/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 *
 * NOTE(review): rdy_queue is protected by &v4l2_m2m_queue_ctx rdy_spinlock;
 * callers presumably must hold it while iterating — confirm in v4l2-mem2mem.c.
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)	\
	list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)
 457
/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage (same type as @b), which makes it safe to
 *     remove @b from the list inside the loop body
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)
 468
/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage (same type as @b), which makes it safe to
 *     remove @b from the list inside the loop body
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)	\
	list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
 479
 480/**
 481 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 482 *
 483 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 484 */
 485static inline
 486struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
 487{
 488        return &m2m_ctx->out_q_ctx.q;
 489}
 490
 491/**
 492 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 493 *
 494 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 495 */
 496static inline
 497struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
 498{
 499        return &m2m_ctx->cap_q_ctx.q;
 500}
 501
 502/**
 503 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 504 * return it
 505 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 507 */
 508void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);
 509
 510/**
 511 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 512 * buffers and return it
 513 *
 514 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 515 */
 516static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
 517{
 518        return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
 519}
 520
 521/**
 522 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 523 * ready buffers and return it
 524 *
 525 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 526 */
 527static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
 528{
 529        return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
 530}
 531
 532/**
 533 * v4l2_m2m_buf_remove_by_buf() - take off exact buffer from the list of ready
 534 * buffers
 535 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 537 * @vbuf: the buffer to be removed
 538 */
 539void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
 540                                struct vb2_v4l2_buffer *vbuf);
 541
 542/**
 543 * v4l2_m2m_src_buf_remove_by_buf() - take off exact source buffer from the list
 544 * of ready buffers
 545 *
 546 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 547 * @vbuf: the buffer to be removed
 548 */
 549static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
 550                                                  struct vb2_v4l2_buffer *vbuf)
 551{
 552        v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
 553}
 554
 555/**
 556 * v4l2_m2m_dst_buf_remove_by_buf() - take off exact destination buffer from the
 557 * list of ready buffers
 558 *
 559 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 560 * @vbuf: the buffer to be removed
 561 */
 562static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
 563                                                  struct vb2_v4l2_buffer *vbuf)
 564{
 565        v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
 566}
 567
 568struct vb2_v4l2_buffer *
 569v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);
 570
/**
 * v4l2_m2m_src_buf_remove_by_idx() - take off the source buffer with the given
 * index from the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @idx: index of the buffer to remove
 *
 * NOTE(review): presumably returns NULL when no ready buffer has index @idx —
 * confirm against v4l2_m2m_buf_remove_by_idx() in v4l2-mem2mem.c.
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}
 576
/**
 * v4l2_m2m_dst_buf_remove_by_idx() - take off the destination buffer with the
 * given index from the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @idx: index of the buffer to remove
 *
 * NOTE(review): presumably returns NULL when no ready buffer has index @idx —
 * confirm against v4l2_m2m_buf_remove_by_idx() in v4l2-mem2mem.c.
 */
static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
	return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}
 582
 583/* v4l2 ioctl helpers */
 584
 585int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
 586                                struct v4l2_requestbuffers *rb);
 587int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
 588                                struct v4l2_create_buffers *create);
 589int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
 590                                struct v4l2_buffer *buf);
 591int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
 592                                struct v4l2_exportbuffer *eb);
 593int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
 594                                struct v4l2_buffer *buf);
 595int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
 596                                struct v4l2_buffer *buf);
 597int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
 598                               struct v4l2_buffer *buf);
 599int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
 600                                enum v4l2_buf_type type);
 601int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
 602                                enum v4l2_buf_type type);
 603int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
 604__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
 605
 606#endif /* _MEDIA_V4L2_MEM2MEM_H */
 607
 608