linux/drivers/media/video/uvc/uvc_queue.c
/*
 *      uvc_queue.c  --  USB Video Class driver - Buffers management
 *
 *      Copyright (C) 2005-2009
 *          Laurent Pinchart (laurent.pinchart@skynet.be)
 *
 *      This program is free software; you can redistribute it and/or modify
 *      it under the terms of the GNU General Public License as published by
 *      the Free Software Foundation; either version 2 of the License, or
 *      (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <asm/atomic.h>

#include "uvcvideo.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video buffers queue is initialized by uvc_queue_init(). The function
 * performs basic initialization of the uvc_video_queue struct and never
 * fails.
 *
 * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
 * uvc_free_buffers respectively. The former acquires the video queue lock,
 * while the latter must be called with the lock held (so that allocation can
 * free previously allocated buffers). Trying to free buffers that are mapped
 * to user space will return -EBUSY.
 *
 * Video buffers are managed using two queues. However, unlike most USB video
 * drivers that use an in queue and an out queue, we use a main queue to hold
 * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
 * hold empty buffers. This design (copied from video-buf) minimizes locking
 * in interrupt, as only one queue is shared between interrupt and user
 * contexts.
 *
 * Use cases
 * ---------
 *
 * Unless stated otherwise, all operations that modify the irq buffers queue
 * are protected by the irq spinlock.
 *
 * 1. The user queues the buffers, starts streaming and dequeues a buffer.
 *
 *    The buffers are added to the main and irq queues. Both operations are
 *    protected by the queue lock, and the latter is protected by the irq
 *    spinlock as well.
 *
 *    The completion handler fetches a buffer from the irq queue and fills it
 *    with video data. If no buffer is available (irq queue empty), the handler
 *    returns immediately.
 *
 *    When the buffer is full, the completion handler removes it from the irq
 *    queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
 *    At that point, any process waiting on the buffer will be woken up. If a
 *    process tries to dequeue a buffer after it has been marked ready, the
 *    dequeuing will succeed immediately.
 *
 * 2. Buffers are queued, user is waiting on a buffer and the device gets
 *    disconnected.
 *
 *    When the device is disconnected, the kernel calls the completion handler
 *    with an appropriate status code. The handler marks all buffers in the
 *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
 *    that any process waiting on a buffer gets woken up.
 *
 *    Waking up only the first buffer on the irq list is not enough, as the
 *    process waiting on the buffer might restart the dequeue operation
 *    immediately.
 *
 */

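/*
 * Illustrative sketch only (not part of the original file): a plausible call
 * sequence from the V4L2 ioctl handlers in uvc_v4l2.c. Names such as 'queue',
 * 'buf', 'nbufs', 'bufsize' and 'nonblocking' are placeholders, not
 * identifiers from this driver.
 *
 *      uvc_queue_init(queue, V4L2_BUF_TYPE_VIDEO_CAPTURE);
 *      uvc_alloc_buffers(queue, nbufs, bufsize);        (VIDIOC_REQBUFS)
 *      uvc_queue_buffer(queue, buf);                    (VIDIOC_QBUF)
 *      uvc_queue_enable(queue, 1);                      (VIDIOC_STREAMON)
 *      uvc_dequeue_buffer(queue, buf, nonblocking);     (VIDIOC_DQBUF)
 *      uvc_queue_enable(queue, 0);                      (VIDIOC_STREAMOFF)
 */
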
void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
{
        mutex_init(&queue->mutex);
        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->mainqueue);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->type = type;
}

/*
 * Allocate the video buffers.
 *
 * Pages are reserved to make sure they will not be swapped, as they will be
 * filled in the URB completion handler.
 *
 * Buffers will be individually mapped, so they must all be page aligned.
 */
int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
                unsigned int buflength)
{
        unsigned int bufsize = PAGE_ALIGN(buflength);
        unsigned int i;
        void *mem = NULL;
        int ret;

        if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
                nbuffers = UVC_MAX_VIDEO_BUFFERS;

        mutex_lock(&queue->mutex);

        if ((ret = uvc_free_buffers(queue)) < 0)
                goto done;

        /* Bail out if no buffers should be allocated. */
        if (nbuffers == 0)
                goto done;

        /* Decrement the number of buffers until allocation succeeds. */
        for (; nbuffers > 0; --nbuffers) {
                mem = vmalloc_32(nbuffers * bufsize);
                if (mem != NULL)
                        break;
        }

        if (mem == NULL) {
                ret = -ENOMEM;
                goto done;
        }

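        /* The whole vmalloc()ed area is carved into nbuffers chunks of
         * bufsize bytes each. Buffer i therefore starts at offset i * bufsize;
         * that offset is stored in buf.m.offset below so that userspace can
         * pass it back to mmap() to map the corresponding buffer.
         */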
        for (i = 0; i < nbuffers; ++i) {
                memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
                queue->buffer[i].buf.index = i;
                queue->buffer[i].buf.m.offset = i * bufsize;
                queue->buffer[i].buf.length = buflength;
                queue->buffer[i].buf.type = queue->type;
                queue->buffer[i].buf.sequence = 0;
                queue->buffer[i].buf.field = V4L2_FIELD_NONE;
                queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
                queue->buffer[i].buf.flags = 0;
                init_waitqueue_head(&queue->buffer[i].wait);
        }

        queue->mem = mem;
        queue->count = nbuffers;
        queue->buf_size = bufsize;
        ret = nbuffers;

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

/*
 * Free the video buffers.
 *
 * This function must be called with the queue lock held.
 */
int uvc_free_buffers(struct uvc_video_queue *queue)
{
        unsigned int i;

        for (i = 0; i < queue->count; ++i) {
                if (queue->buffer[i].vma_use_count != 0)
                        return -EBUSY;
        }

        if (queue->count) {
                vfree(queue->mem);
                queue->count = 0;
        }

        return 0;
}

/*
 * Check if buffers have been allocated.
 */
int uvc_queue_allocated(struct uvc_video_queue *queue)
{
        int allocated;

        mutex_lock(&queue->mutex);
        allocated = queue->count != 0;
        mutex_unlock(&queue->mutex);

        return allocated;
}

static void __uvc_query_buffer(struct uvc_buffer *buf,
                struct v4l2_buffer *v4l2_buf)
{
        memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);

        if (buf->vma_use_count)
                v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;

        switch (buf->state) {
        case UVC_BUF_STATE_ERROR:
        case UVC_BUF_STATE_DONE:
                v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
                break;
        case UVC_BUF_STATE_QUEUED:
        case UVC_BUF_STATE_ACTIVE:
                v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
                break;
        case UVC_BUF_STATE_IDLE:
        default:
                break;
        }
}

int uvc_query_buffer(struct uvc_video_queue *queue,
                struct v4l2_buffer *v4l2_buf)
{
        int ret = 0;

        mutex_lock(&queue->mutex);
        if (v4l2_buf->index >= queue->count) {
                ret = -EINVAL;
                goto done;
        }

        __uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}
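
/*
 * Illustrative sketch only (not part of the original file): after
 * VIDIOC_REQBUFS, userspace typically queries each buffer and maps it using
 * the offset reported above. 'fd', 'i' and 'mem' are placeholder names.
 *
 *      struct v4l2_buffer buf = { .index = i,
 *              .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP };
 *      ioctl(fd, VIDIOC_QUERYBUF, &buf);
 *      mem = mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                 fd, buf.m.offset);
 */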

/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
        struct v4l2_buffer *v4l2_buf)
{
        struct uvc_buffer *buf;
        unsigned long flags;
        int ret = 0;

        uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

        if (v4l2_buf->type != queue->type ||
            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
                        "and/or memory (%u).\n", v4l2_buf->type,
                        v4l2_buf->memory);
                return -EINVAL;
        }

        mutex_lock(&queue->mutex);
        if (v4l2_buf->index >= queue->count) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
                ret = -EINVAL;
                goto done;
        }

        buf = &queue->buffer[v4l2_buf->index];
        if (buf->state != UVC_BUF_STATE_IDLE) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
                        "(%u).\n", buf->state);
                ret = -EINVAL;
                goto done;
        }

        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            v4l2_buf->bytesused > buf->buf.length) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                ret = -EINVAL;
                goto done;
        }

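        /* The irqlock serializes this section against the URB completion
         * handler and against uvc_queue_cancel(): checking the
         * UVC_QUEUE_DISCONNECTED flag and adding the buffer to the irq queue
         * must be atomic with respect to a concurrent disconnect, otherwise a
         * buffer could be queued after the queue has been cancelled and never
         * be woken up.
         */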
        spin_lock_irqsave(&queue->irqlock, flags);
        if (queue->flags & UVC_QUEUE_DISCONNECTED) {
                spin_unlock_irqrestore(&queue->irqlock, flags);
                ret = -ENODEV;
                goto done;
        }
        buf->state = UVC_BUF_STATE_QUEUED;
        if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->buf.bytesused = 0;
        else
                buf->buf.bytesused = v4l2_buf->bytesused;

        list_add_tail(&buf->stream, &queue->mainqueue);
        list_add_tail(&buf->queue, &queue->irqqueue);
        spin_unlock_irqrestore(&queue->irqlock, flags);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

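/*
 * Wait for a buffer to leave the QUEUED/ACTIVE states. In non-blocking mode
 * the function returns -EAGAIN immediately if the buffer is not ready yet;
 * otherwise it sleeps interruptibly, so a pending signal makes
 * wait_event_interruptible() return -ERESTARTSYS, which is propagated to the
 * caller.
 */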
static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
        if (nonblocking) {
                return (buf->state != UVC_BUF_STATE_QUEUED &&
                        buf->state != UVC_BUF_STATE_ACTIVE)
                        ? 0 : -EAGAIN;
        }

        return wait_event_interruptible(buf->wait,
                buf->state != UVC_BUF_STATE_QUEUED &&
                buf->state != UVC_BUF_STATE_ACTIVE);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvc_dequeue_buffer(struct uvc_video_queue *queue,
                struct v4l2_buffer *v4l2_buf, int nonblocking)
{
        struct uvc_buffer *buf;
        int ret = 0;

        if (v4l2_buf->type != queue->type ||
            v4l2_buf->memory != V4L2_MEMORY_MMAP) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
                        "and/or memory (%u).\n", v4l2_buf->type,
                        v4l2_buf->memory);
                return -EINVAL;
        }

        mutex_lock(&queue->mutex);
        if (list_empty(&queue->mainqueue)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
                ret = -EINVAL;
                goto done;
        }

        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
        if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
                goto done;

        uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
                buf->buf.index, buf->state, buf->buf.bytesused);

        switch (buf->state) {
        case UVC_BUF_STATE_ERROR:
                uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
                        "(transmission error).\n");
                ret = -EIO;
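                /* Fall through: an erroneous buffer is still handed back to
                 * userspace and returned to the IDLE state, only with -EIO
                 * reported to the caller.
                 */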
        case UVC_BUF_STATE_DONE:
                buf->state = UVC_BUF_STATE_IDLE;
                break;

        case UVC_BUF_STATE_IDLE:
        case UVC_BUF_STATE_QUEUED:
        case UVC_BUF_STATE_ACTIVE:
        default:
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
                        "(driver bug?).\n", buf->state);
                ret = -EINVAL;
                goto done;
        }

        list_del(&buf->stream);
        __uvc_query_buffer(buf, v4l2_buf);

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
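/*
 * Illustrative note (not part of the original file): polling is armed on the
 * first buffer of the main queue only, so POLLIN | POLLRDNORM is reported as
 * soon as the oldest queued buffer becomes DONE or ERROR. A userspace caller
 * would typically poll() the device node before issuing VIDIOC_DQBUF; 'fd',
 * 'timeout_ms' and 'buf' below are placeholder names.
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *              ioctl(fd, VIDIOC_DQBUF, &buf);
 */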
unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
                poll_table *wait)
{
        struct uvc_buffer *buf;
        unsigned int mask = 0;

        mutex_lock(&queue->mutex);
        if (list_empty(&queue->mainqueue)) {
                mask |= POLLERR;
                goto done;
        }
        buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);

        poll_wait(file, &buf->wait, wait);
        if (buf->state == UVC_BUF_STATE_DONE ||
            buf->state == UVC_BUF_STATE_ERROR)
                mask |= POLLIN | POLLRDNORM;

done:
        mutex_unlock(&queue->mutex);
        return mask;
}

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue initializes parameters (such as sequence number,
 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvc_queue_cancel() instead.
 */
int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
{
        unsigned int i;
        int ret = 0;

        mutex_lock(&queue->mutex);
        if (enable) {
                if (uvc_queue_streaming(queue)) {
                        ret = -EBUSY;
                        goto done;
                }
                queue->sequence = 0;
                queue->flags |= UVC_QUEUE_STREAMING;
                queue->buf_used = 0;
        } else {
                uvc_queue_cancel(queue, 0);
                INIT_LIST_HEAD(&queue->mainqueue);

                for (i = 0; i < queue->count; ++i)
                        queue->buffer[i].state = UVC_BUF_STATE_IDLE;

                queue->flags &= ~UVC_QUEUE_STREAMING;
        }

done:
        mutex_unlock(&queue->mutex);
        return ret;
}

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        struct uvc_buffer *buf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        while (!list_empty(&queue->irqqueue)) {
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
                list_del(&buf->queue);
                buf->state = UVC_BUF_STATE_ERROR;
                wake_up(&buf->wait);
        }
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvc_queue_buffer and the disconnection event that
         * could result in an interruptible wait in uvc_dequeue_buffer. Do not
         * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
         * state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}

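/*
 * Illustrative note (not part of the original file): uvc_queue_next_buffer()
 * is meant to be called from the URB completion code in uvc_video.c once a
 * frame has been fully decoded into 'buf'. It hands the completed buffer back
 * to any waiter and returns the next buffer to fill (or the same buffer again
 * when incomplete frames are dropped, or NULL if the irq queue is empty),
 * along the lines of:
 *
 *      if (buf->state == UVC_BUF_STATE_DONE ||
 *          buf->state == UVC_BUF_STATE_ERROR)
 *              buf = uvc_queue_next_buffer(queue, buf);
 */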
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
                struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;
        unsigned long flags;

        if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
            buf->buf.length != buf->buf.bytesused) {
                buf->state = UVC_BUF_STATE_QUEUED;
                buf->buf.bytesused = 0;
                return buf;
        }

        spin_lock_irqsave(&queue->irqlock, flags);
        list_del(&buf->queue);
        if (!list_empty(&queue->irqqueue))
                nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                           queue);
        else
                nextbuf = NULL;
        spin_unlock_irqrestore(&queue->irqlock, flags);

        buf->buf.sequence = queue->sequence++;
        do_gettimeofday(&buf->buf.timestamp);

        wake_up(&buf->wait);
        return nextbuf;
}