linux/include/media/v4l2-mem2mem.h
/*
 * Memory-to-memory device framework for Video for Linux 2.
 *
 * Helper functions for devices that use memory buffers for both source
 * and destination.
 *
 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 */

#ifndef _MEDIA_V4L2_MEM2MEM_H
#define _MEDIA_V4L2_MEM2MEM_H

#include <media/videobuf2-v4l2.h>

/**
 * struct v4l2_m2m_ops - mem-to-mem device driver callbacks
 * @device_run: required. Begin the actual job (transaction) inside this
 *              callback.
 *              The job does NOT have to end before this callback returns
 *              (and it will be the usual case). When the job finishes,
 *              v4l2_m2m_job_finish() has to be called.
 * @job_ready:  optional. Should return 0 if the driver does not have a job
 *              fully prepared to run yet (i.e. it will not be able to finish a
 *              transaction without sleeping). If not provided, it will be
 *              assumed that one source and one destination buffer are all
 *              that is required for the driver to perform one full transaction.
 *              This method may not sleep.
 * @job_abort:  optional. Informs the driver that it has to abort the currently
 *              running transaction as soon as possible (i.e. as soon as it can
 *              stop the device safely; e.g. in the next interrupt handler),
 *              even if the transaction would not have been finished by then.
 *              After the driver performs the necessary steps, it has to call
 *              v4l2_m2m_job_finish() (as if the transaction ended normally).
 *              This function does not have to (and will usually not) wait
 *              until the device enters a state when it can be stopped.
 */
struct v4l2_m2m_ops {
        void (*device_run)(void *priv);
        int (*job_ready)(void *priv);
        void (*job_abort)(void *priv);
};
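
/*
 * Editorial sketch (not part of the original API documentation): one way a
 * driver might wire up these callbacks. The mydrv_* names, mydrv_hw_start()
 * and the mydrv_ctx layout are hypothetical; only the v4l2_m2m_* and vb2
 * symbols are real. device_run() only kicks the hardware and returns;
 * completion is reported later (see the v4l2_m2m_job_finish() sketch
 * further down).
 *
 *	struct mydrv_ctx {
 *		struct v4l2_m2m_dev	*m2m_dev;
 *		struct v4l2_m2m_ctx	*m2m_ctx;
 *		bool			aborting;	// hypothetical flag
 *	};
 *
 *	static void mydrv_device_run(void *priv)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *
 *		// Program the (hypothetical) hardware and return; the job
 *		// keeps running until the completion interrupt fires.
 *		mydrv_hw_start(ctx);
 *	}
 *
 *	static void mydrv_job_abort(void *priv)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *
 *		// Only flag the abort; the interrupt handler still calls
 *		// v4l2_m2m_job_finish() once the hardware has stopped.
 *		ctx->aborting = true;
 *	}
 *
 *	static const struct v4l2_m2m_ops mydrv_m2m_ops = {
 *		.device_run	= mydrv_device_run,
 *		.job_abort	= mydrv_job_abort,
 *	};
 */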

struct video_device;
struct v4l2_m2m_dev;

/**
 * struct v4l2_m2m_queue_ctx - represents a queue for buffers ready to be
 *      processed
 *
 * @q:          pointer to struct &vb2_queue
 * @rdy_queue:  list of buffers ready to be processed
 * @rdy_spinlock: spin lock to protect the struct usage
 * @num_rdy:    number of buffers ready to be processed
 * @buffered:   is the queue buffered?
 *
 * Queue for buffers ready to be processed as soon as this
 * instance receives access to the device.
 */

struct v4l2_m2m_queue_ctx {
        struct vb2_queue        q;

        struct list_head        rdy_queue;
        spinlock_t              rdy_spinlock;
        u8                      num_rdy;
        bool                    buffered;
};

/**
 * struct v4l2_m2m_ctx - Memory to memory context structure
 *
 * @q_lock: optional lock (struct &mutex) for the capture and output vb2 queues
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @cap_q_ctx: Capture (output to memory) queue context
 * @out_q_ctx: Output (input from memory) queue context
 * @queue: List of memory to memory contexts
 * @job_flags: Job queue flags, used internally by v4l2-mem2mem.c:
 *              %TRANS_QUEUED, %TRANS_RUNNING and %TRANS_ABORT.
 * @finished: Wait queue used to signal when a job queue finished.
 * @priv: Instance private data
 *
 * The memory to memory context is specific to a file handle, NOT to e.g.
 * a device.
 */
struct v4l2_m2m_ctx {
        /* optional cap/out vb2 queues lock */
        struct mutex                    *q_lock;

        /* internal use only */
        struct v4l2_m2m_dev             *m2m_dev;

        struct v4l2_m2m_queue_ctx       cap_q_ctx;

        struct v4l2_m2m_queue_ctx       out_q_ctx;

        /* For device job queue */
        struct list_head                queue;
        unsigned long                   job_flags;
        wait_queue_head_t               finished;

        void                            *priv;
};

/**
 * struct v4l2_m2m_buffer - Memory to memory buffer
 *
 * @vb: embedded struct &vb2_v4l2_buffer
 * @list: list of m2m buffers
 */
struct v4l2_m2m_buffer {
        struct vb2_v4l2_buffer  vb;
        struct list_head        list;
};
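
/*
 * Editorial sketch (not part of the original API documentation): drivers that
 * need per-buffer state commonly embed struct v4l2_m2m_buffer as the first
 * member of their own buffer structure, size the vb2 queue's buf_struct_size
 * to match, and recover their structure with container_of(). The mydrv_*
 * names are hypothetical.
 *
 *	struct mydrv_buffer {
 *		struct v4l2_m2m_buffer	m2m_buf;	// must be first
 *		u32			flags;		// driver-private state
 *	};
 *
 *	static inline struct mydrv_buffer *
 *	to_mydrv_buffer(struct vb2_v4l2_buffer *vbuf)
 *	{
 *		struct v4l2_m2m_buffer *b =
 *			container_of(vbuf, struct v4l2_m2m_buffer, vb);
 *
 *		return container_of(b, struct mydrv_buffer, m2m_buf);
 *	}
 */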

/**
 * v4l2_m2m_get_curr_priv() - return driver private data for the currently
 * running instance or NULL if no instance is running
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 */
void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_get_vq() - return vb2_queue for the given type
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx,
                                       enum v4l2_buf_type type);

/**
 * v4l2_m2m_try_schedule() - check whether an instance is ready to be added to
 * the pending job queue and add it if so
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * There are three basic requirements an instance has to meet to be able to run:
 * 1) at least one source buffer has to be queued,
 * 2) at least one destination buffer has to be queued,
 * 3) streaming has to be on.
 *
 * If a queue is buffered (for example a decoder hardware ringbuffer that has
 * to be drained before doing streamoff), allow scheduling without v4l2 buffers
 * on that queue.
 *
 * There may also be additional, custom requirements. In such a case the driver
 * should supply a custom callback (&v4l2_m2m_ops->job_ready) that returns 1
 * when the instance is ready. One example is an instance that requires more
 * than one source/destination buffer per transaction, as in the sketch below.
 */
void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
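
/*
 * Editorial sketch (not part of the original API documentation): a custom
 * &v4l2_m2m_ops->job_ready callback for a hypothetical device that consumes
 * two source buffers per transaction, as described above. The mydrv_* names
 * are hypothetical; v4l2_m2m_num_src_bufs_ready() and
 * v4l2_m2m_num_dst_bufs_ready() are declared later in this header.
 *
 *	static int mydrv_job_ready(void *priv)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *
 *		return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= 2 &&
 *		       v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) >= 1;
 *	}
 */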

/**
 * v4l2_m2m_job_finish() - inform the framework that a job has been finished
 * and have it clean up
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Called by a driver to yield back the device after it has finished with it.
 * Should be called as soon as possible after reaching a state which allows
 * other instances to take control of the device.
 *
 * This function has to be called only after the &v4l2_m2m_ops->device_run
 * callback has been called on the driver. To prevent recursion, it should not
 * be called directly from the &v4l2_m2m_ops->device_run callback itself.
 */
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                         struct v4l2_m2m_ctx *m2m_ctx);
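
/*
 * Editorial sketch (not part of the original API documentation): a typical
 * completion interrupt handler that retires one source and one destination
 * buffer and then yields the device. The mydrv_* names and the mydrv_dev
 * layout are hypothetical; v4l2_m2m_src_buf_remove(),
 * v4l2_m2m_dst_buf_remove() and v4l2_m2m_buf_done() are declared further
 * down in this header.
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct mydrv_dev *dev = data;	// hypothetical per-device struct
 *		struct mydrv_ctx *ctx;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
 *		if (!ctx)
 *			return IRQ_HANDLED;
 *
 *		src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *		dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *
 *		dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
 *		v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
 *		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
 *
 *		v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);
 *		return IRQ_HANDLED;
 *	}
 */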

static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
        vb2_buffer_done(&buf->vb2_buf, state);
}

/**
 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @reqbufs: pointer to struct &v4l2_requestbuffers
 */
int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                     struct v4l2_requestbuffers *reqbufs);

/**
 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 *
 * See v4l2_m2m_mmap() documentation for details.
 */
int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      struct v4l2_buffer *buf);

/**
 * v4l2_m2m_qbuf() - enqueue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf);

/**
 * v4l2_m2m_dqbuf() - dequeue a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf);

/**
 * v4l2_m2m_prepare_buf() - prepare a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @buf: pointer to struct &v4l2_buffer
 */
int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_buffer *buf);

/**
 * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
 * on the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @create: pointer to struct &v4l2_create_buffers
 */
int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                         struct v4l2_create_buffers *create);

/**
 * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
 * the type
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @eb: pointer to struct &v4l2_exportbuffer
 */
int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_exportbuffer *eb);

/**
 * v4l2_m2m_streamon() - turn on streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                      enum v4l2_buf_type type);

/**
 * v4l2_m2m_streamoff() - turn off streaming for a video queue
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @type: type of the V4L2 buffer, as defined by enum &v4l2_buf_type
 */
int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                       enum v4l2_buf_type type);

/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @wait: pointer to struct &poll_table_struct
 *
 * Call from the driver's poll() function. Polls both queues: if a buffer is
 * available to dequeue (with dqbuf) from the source queue, this indicates
 * that a non-blocking write can be performed; a buffer available on the
 * destination queue indicates that a non-blocking read can be performed.
 */
__poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                           struct poll_table_struct *wait);

/**
 * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer
 *
 * @file: pointer to struct &file
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vma: pointer to struct &vm_area_struct
 *
 * Call from the driver's mmap() function. Handles mmap() for both queues
 * seamlessly for videobuf, which receives normal per-queue offsets and
 * proper videobuf queue pointers. The differentiation is made outside videobuf
 * by adding a predefined offset to buffers from one of the queues and
 * subtracting it before passing it back to videobuf. Only drivers (and
 * thus applications) receive modified offsets.
 */
int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct vm_area_struct *vma);

/**
 * v4l2_m2m_init() - initialize per-driver m2m data
 *
 * @m2m_ops: pointer to struct v4l2_m2m_ops
 *
 * Usually called from the driver's ``probe()`` function.
 *
 * Return: an opaque pointer to the internal data to handle M2M context
 */
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
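
/*
 * Editorial sketch (not part of the original API documentation): an excerpt
 * from a hypothetical probe() showing the expected error handling; on
 * failure v4l2_m2m_init() returns an ERR_PTR() value, not NULL. The dev,
 * ret, err_m2m and mydrv_m2m_ops names are hypothetical (the latter refers
 * to the ops table sketched near the top of this header).
 *
 *	dev->m2m_dev = v4l2_m2m_init(&mydrv_m2m_ops);
 *	if (IS_ERR(dev->m2m_dev)) {
 *		ret = PTR_ERR(dev->m2m_dev);
 *		goto err_m2m;
 *	}
 */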

#if defined(CONFIG_MEDIA_CONTROLLER)
void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                        struct video_device *vdev, int function);
#else
static inline void
v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
{
}

static inline int
v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
                struct video_device *vdev, int function)
{
        return 0;
}
#endif

/**
 * v4l2_m2m_release() - clean up and free a m2m_dev structure
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 *
 * Usually called from the driver's ``remove()`` function.
 */
void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev);

/**
 * v4l2_m2m_ctx_init() - allocate and initialize a m2m context
 *
 * @m2m_dev: opaque pointer to the internal data to handle M2M context
 * @drv_priv: driver's instance private data
 * @queue_init: a callback for queue type-specific initialization function
 *      to be used for initializing the vb2 queues
 *
 * Usually called from the driver's ``open()`` function, as in the sketch below.
 */
struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                void *drv_priv,
                int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
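
/*
 * Editorial sketch (not part of the original API documentation): a typical
 * queue_init callback configuring the source (OUTPUT) and destination
 * (CAPTURE) vb2 queues, followed by the open()-time call. The mydrv_* names,
 * mydrv_qops and the choice of vb2_vmalloc_memops are hypothetical; the vb2
 * fields and helpers are real.
 *
 *	static int mydrv_queue_init(void *priv, struct vb2_queue *src_vq,
 *				    struct vb2_queue *dst_vq)
 *	{
 *		int ret;
 *
 *		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		src_vq->drv_priv = priv;
 *		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		src_vq->ops = &mydrv_qops;
 *		src_vq->mem_ops = &vb2_vmalloc_memops;
 *		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *
 *		ret = vb2_queue_init(src_vq);
 *		if (ret)
 *			return ret;
 *
 *		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
 *		dst_vq->drv_priv = priv;
 *		dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
 *		dst_vq->ops = &mydrv_qops;
 *		dst_vq->mem_ops = &vb2_vmalloc_memops;
 *		dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
 *
 *		return vb2_queue_init(dst_vq);
 *	}
 *
 *	// In open():
 *	//	ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
 *	//					 mydrv_queue_init);
 *	// v4l2_m2m_ctx_init() returns an ERR_PTR() value on failure.
 */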

static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
                                             bool buffered)
{
        m2m_ctx->out_q_ctx.buffered = buffered;
}

static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
                                             bool buffered)
{
        m2m_ctx->cap_q_ctx.buffered = buffered;
}

/**
 * v4l2_m2m_ctx_release() - release m2m context
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 *
 * Usually called from the driver's release() function.
 */
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);

/**
 * v4l2_m2m_buf_queue() - add a buffer to the proper ready buffers list
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: pointer to struct &vb2_v4l2_buffer
 *
 * Call from the &vb2_ops->buf_queue callback, as in the sketch below.
 */
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
                        struct vb2_v4l2_buffer *vbuf);
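
/*
 * Editorial sketch (not part of the original API documentation): the usual
 * &vb2_ops->buf_queue implementation for a mem2mem driver simply hands every
 * buffer over to the framework. The mydrv_* names are hypothetical;
 * to_vb2_v4l2_buffer() and vb2_get_drv_priv() come from the vb2 headers.
 *
 *	static void mydrv_buf_queue(struct vb2_buffer *vb)
 *	{
 *		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 *		struct mydrv_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
 *
 *		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
 *	}
 */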

/**
 * v4l2_m2m_num_src_bufs_ready() - return the number of source buffers ready for
 * use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
        return m2m_ctx->out_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_num_dst_bufs_ready() - return the number of destination buffers
 * ready for use
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
{
        return m2m_ctx->cap_q_ctx.num_rdy;
}

/**
 * v4l2_m2m_next_buf() - return next buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
void *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_next_src_buf() - return next source buffer from the list of ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
        return v4l2_m2m_next_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_next_dst_buf() - return next destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
        return v4l2_m2m_next_buf(&m2m_ctx->cap_q_ctx);
}
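
/*
 * Editorial sketch (not part of the original API documentation): a
 * memory-copying device_run() that uses the helpers above to peek at the
 * next ready buffers. The buffers are not removed from the ready lists here;
 * that normally happens on completion (see the interrupt handler sketch
 * earlier). The mydrv_* names and the finish_work member are hypothetical.
 *
 *	static void mydrv_device_run_copy(void *priv)
 *	{
 *		struct mydrv_ctx *ctx = priv;
 *		struct vb2_v4l2_buffer *src, *dst;
 *
 *		src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
 *		dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
 *
 *		memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0),
 *		       vb2_plane_vaddr(&src->vb2_buf, 0),
 *		       vb2_get_plane_payload(&src->vb2_buf, 0));
 *		vb2_set_plane_payload(&dst->vb2_buf, 0,
 *				      vb2_get_plane_payload(&src->vb2_buf, 0));
 *
 *		// The "job" is done immediately here, but completion must not
 *		// be signalled from device_run() itself (see the
 *		// v4l2_m2m_job_finish() documentation), so defer it:
 *		schedule_work(&ctx->finish_work);	// hypothetical member
 *	}
 */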

/**
 * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_last_src_buf() - return last source buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
        return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
 * ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
{
        return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
}

/**
 * v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
 * buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_dst_buf(m2m_ctx, b)   \
        list_for_each_entry(b, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf() - iterate over a list of source ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 */
#define v4l2_m2m_for_each_src_buf(m2m_ctx, b)   \
        list_for_each_entry(b, &m2m_ctx->out_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_dst_buf_safe() - iterate over a list of destination ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, b, n)   \
        list_for_each_entry_safe(b, n, &m2m_ctx->cap_q_ctx.rdy_queue, list)

/**
 * v4l2_m2m_for_each_src_buf_safe() - iterate over a list of source ready
 * buffers safely
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @b: current buffer of type struct v4l2_m2m_buffer
 * @n: used as temporary storage
 */
#define v4l2_m2m_for_each_src_buf_safe(m2m_ctx, b, n)   \
        list_for_each_entry_safe(b, n, &m2m_ctx->out_q_ctx.rdy_queue, list)
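
/*
 * Editorial sketch (not part of the original API documentation): the
 * iterators above walk the ready lists directly, so the corresponding
 * rdy_spinlock should normally be held around them. A hypothetical helper
 * that looks for a ready destination buffer with a given timestamp might
 * look like this.
 *
 *	static struct vb2_v4l2_buffer *
 *	mydrv_find_dst_buf(struct v4l2_m2m_ctx *m2m_ctx, u64 timestamp)
 *	{
 *		struct v4l2_m2m_buffer *b;
 *		struct vb2_v4l2_buffer *found = NULL;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 *		v4l2_m2m_for_each_dst_buf(m2m_ctx, b) {
 *			if (b->vb.vb2_buf.timestamp == timestamp) {
 *				found = &b->vb;
 *				break;
 *			}
 *		}
 *		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
 *
 *		return found;
 *	}
 */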

/**
 * v4l2_m2m_get_src_vq() - return vb2_queue for source buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_src_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
        return &m2m_ctx->out_q_ctx.q;
}

/**
 * v4l2_m2m_get_dst_vq() - return vb2_queue for destination buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline
struct vb2_queue *v4l2_m2m_get_dst_vq(struct v4l2_m2m_ctx *m2m_ctx)
{
        return &m2m_ctx->cap_q_ctx.q;
}

/**
 * v4l2_m2m_buf_remove() - take off a buffer from the list of ready buffers and
 * return it
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 */
void *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx);

/**
 * v4l2_m2m_src_buf_remove() - take off a source buffer from the list of ready
 * buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_src_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
        return v4l2_m2m_buf_remove(&m2m_ctx->out_q_ctx);
}

/**
 * v4l2_m2m_dst_buf_remove() - take off a destination buffer from the list of
 * ready buffers and return it
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 */
static inline void *v4l2_m2m_dst_buf_remove(struct v4l2_m2m_ctx *m2m_ctx)
{
        return v4l2_m2m_buf_remove(&m2m_ctx->cap_q_ctx);
}
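
/*
 * Editorial sketch (not part of the original API documentation): a typical
 * &vb2_ops->stop_streaming implementation returns every still-ready buffer
 * to vb2 in the error state, using the removal helpers above. The mydrv_*
 * names are hypothetical.
 *
 *	static void mydrv_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct mydrv_ctx *ctx = vb2_get_drv_priv(q);
 *		struct vb2_v4l2_buffer *vbuf;
 *
 *		for (;;) {
 *			if (V4L2_TYPE_IS_OUTPUT(q->type))
 *				vbuf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
 *			else
 *				vbuf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
 *			if (!vbuf)
 *				break;
 *			v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
 *		}
 *	}
 */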

/**
 * v4l2_m2m_buf_remove_by_buf() - take a specific buffer off the list of ready
 * buffers
 *
 * @q_ctx: pointer to struct &v4l2_m2m_queue_ctx
 * @vbuf: the buffer to be removed
 */
void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx,
                                struct vb2_v4l2_buffer *vbuf);

/**
 * v4l2_m2m_src_buf_remove_by_buf() - take a specific source buffer off the
 * list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_src_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
                                                  struct vb2_v4l2_buffer *vbuf)
{
        v4l2_m2m_buf_remove_by_buf(&m2m_ctx->out_q_ctx, vbuf);
}

/**
 * v4l2_m2m_dst_buf_remove_by_buf() - take a specific destination buffer off
 * the list of ready buffers
 *
 * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
 * @vbuf: the buffer to be removed
 */
static inline void v4l2_m2m_dst_buf_remove_by_buf(struct v4l2_m2m_ctx *m2m_ctx,
                                                  struct vb2_v4l2_buffer *vbuf)
{
        v4l2_m2m_buf_remove_by_buf(&m2m_ctx->cap_q_ctx, vbuf);
}

struct vb2_v4l2_buffer *
v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx);

static inline struct vb2_v4l2_buffer *
v4l2_m2m_src_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
        return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->out_q_ctx, idx);
}

static inline struct vb2_v4l2_buffer *
v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
{
        return v4l2_m2m_buf_remove_by_idx(&m2m_ctx->cap_q_ctx, idx);
}

/* v4l2 ioctl helpers */

int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv,
                                struct v4l2_requestbuffers *rb);
int v4l2_m2m_ioctl_create_bufs(struct file *file, void *fh,
                                struct v4l2_create_buffers *create);
int v4l2_m2m_ioctl_querybuf(struct file *file, void *fh,
                                struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_expbuf(struct file *file, void *fh,
                                struct v4l2_exportbuffer *eb);
int v4l2_m2m_ioctl_qbuf(struct file *file, void *fh,
                                struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_dqbuf(struct file *file, void *fh,
                                struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *fh,
                               struct v4l2_buffer *buf);
int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
                                enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
                                enum v4l2_buf_type type);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
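
/*
 * Editorial sketch (not part of the original API documentation): these
 * helpers assume the driver uses struct v4l2_fh, that file->private_data
 * points to it, and that its m2m_ctx field has been set at open() time.
 * With that in place they can be plugged straight into the ioctl and file
 * operation tables. The mydrv_* names are hypothetical.
 *
 *	static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
 *		// ... format/control ioctls omitted ...
 *		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
 *		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
 *		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
 *		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
 *		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
 *		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
 *		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
 *		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
 *		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.unlocked_ioctl	= video_ioctl2,
 *		.mmap		= v4l2_m2m_fop_mmap,
 *		.poll		= v4l2_m2m_fop_poll,
 *	};
 */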

#endif /* _MEDIA_V4L2_MEM2MEM_H */