linux/drivers/media/platform/omap3isp/ispqueue.c
   1/*
   2 * ispqueue.c
   3 *
   4 * TI OMAP3 ISP - Video buffers queue handling
   5 *
   6 * Copyright (C) 2010 Nokia Corporation
   7 *
   8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
   9 *           Sakari Ailus <sakari.ailus@iki.fi>
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License version 2 as
  13 * published by the Free Software Foundation.
  14 *
  15 * This program is distributed in the hope that it will be useful, but
  16 * WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
  23 * 02110-1301 USA
  24 */
  25
  26#include <asm/cacheflush.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/mm.h>
  29#include <linux/pagemap.h>
  30#include <linux/poll.h>
  31#include <linux/scatterlist.h>
  32#include <linux/sched.h>
  33#include <linux/slab.h>
  34#include <linux/vmalloc.h>
  35
  36#include "ispqueue.h"
  37
  38/* -----------------------------------------------------------------------------
  39 * Video buffers management
  40 */
  41
  42/*
  43 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
  44 *
   45 * The typical operation required here is cache invalidation across
   46 * the (userspace) buffer address range, and this _must_ be done at
   47 * QBUF time (and *only* at QBUF).
  48 *
   49 * We try to use the optimal cache invalidation function:
  50 * - dmac_map_area:
   51 *    - used when the number of pages is _low_.
   52 *    - it becomes quite slow as the number of pages increases.
  53 *       - for 648x492 viewfinder (150 pages) it takes 1.3 ms.
  54 *       - for 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
  55 *
  56 * - flush_cache_all:
   57 *    - used when the number of pages is _high_.
   58 *    - time taken in the range of 500-900 us.
   59 *    - has a higher penalty, as the whole dcache and icache are invalidated.
  60 */
  61/*
  62 * FIXME: dmac_inv_range crashes randomly on the user space buffer
  63 *        address. Fall back to flush_cache_all for now.
  64 */
  65#define ISP_CACHE_FLUSH_PAGES_MAX       0
  66
  67static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
  68{
  69        if (buf->skip_cache)
  70                return;
  71
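        /* Note: with ISP_CACHE_FLUSH_PAGES_MAX defined as 0 (see the FIXME
         * above), the dmac_map_area() path below is effectively disabled and
         * flush_cache_all() is always used.
         */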
  72        if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
  73            buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
  74                flush_cache_all();
  75        else {
  76                dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
  77                              DMA_FROM_DEVICE);
  78                outer_inv_range(buf->vbuf.m.userptr,
  79                                buf->vbuf.m.userptr + buf->vbuf.length);
  80        }
  81}
  82
  83/*
  84 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
  85 *
   86 * Lock the VMAs underlying the given buffer into memory. This prevents the
  87 * userspace buffer mapping from being swapped out, making VIPT cache handling
  88 * easier.
  89 *
   90 * Note that the pages will not be freed as the buffers have been locked into
   91 * memory by a call to get_user_pages(), but the userspace mapping could
  92 * still disappear if the VMAs are not locked. This is caused by the memory
  93 * management code trying to be as lock-less as possible, which results in the
  94 * userspace mapping manager not finding out that the pages are locked under
  95 * some conditions.
  96 */
  97static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
  98{
  99        struct vm_area_struct *vma;
 100        unsigned long start;
 101        unsigned long end;
 102        int ret = 0;
 103
 104        if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
 105                return 0;
 106
  107        /* We can be called from workqueue context to unlock the VMAs when the
  108         * current task dies. In that case there's no current memory management
 109         * context so unlocking can't be performed, but the VMAs have been or
 110         * are getting destroyed anyway so it doesn't really matter.
 111         */
 112        if (!current || !current->mm)
 113                return lock ? -EINVAL : 0;
 114
 115        start = buf->vbuf.m.userptr;
 116        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
 117
 118        down_write(&current->mm->mmap_sem);
 119        spin_lock(&current->mm->page_table_lock);
 120
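        /* Walk all VMAs covering the buffer address range and set or clear
         * VM_LOCKED on each of them.
         */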
 121        do {
 122                vma = find_vma(current->mm, start);
 123                if (vma == NULL) {
 124                        ret = -EFAULT;
 125                        goto out;
 126                }
 127
 128                if (lock)
 129                        vma->vm_flags |= VM_LOCKED;
 130                else
 131                        vma->vm_flags &= ~VM_LOCKED;
 132
 133                start = vma->vm_end + 1;
 134        } while (vma->vm_end < end);
 135
 136        if (lock)
 137                buf->vm_flags |= VM_LOCKED;
 138        else
 139                buf->vm_flags &= ~VM_LOCKED;
 140
 141out:
 142        spin_unlock(&current->mm->page_table_lock);
 143        up_write(&current->mm->mmap_sem);
 144        return ret;
 145}
 146
 147/*
 148 * isp_video_buffer_sglist_kernel - Build a scatter list for a vmalloc'ed buffer
 149 *
 150 * Iterate over the vmalloc'ed area and create a scatter list entry for every
 151 * page.
 152 */
 153static int isp_video_buffer_sglist_kernel(struct isp_video_buffer *buf)
 154{
 155        struct scatterlist *sglist;
 156        unsigned int npages;
 157        unsigned int i;
 158        void *addr;
 159
 160        addr = buf->vaddr;
 161        npages = PAGE_ALIGN(buf->vbuf.length) >> PAGE_SHIFT;
 162
 163        sglist = vmalloc(npages * sizeof(*sglist));
 164        if (sglist == NULL)
 165                return -ENOMEM;
 166
 167        sg_init_table(sglist, npages);
 168
 169        for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
 170                struct page *page = vmalloc_to_page(addr);
 171
 172                if (page == NULL || PageHighMem(page)) {
 173                        vfree(sglist);
 174                        return -EINVAL;
 175                }
 176
 177                sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
 178        }
 179
 180        buf->sglen = npages;
 181        buf->sglist = sglist;
 182
 183        return 0;
 184}
 185
 186/*
 187 * isp_video_buffer_sglist_user - Build a scatter list for a userspace buffer
 188 *
 189 * Walk the buffer pages list and create a 1:1 mapping to a scatter list.
 190 */
 191static int isp_video_buffer_sglist_user(struct isp_video_buffer *buf)
 192{
 193        struct scatterlist *sglist;
 194        unsigned int offset = buf->offset;
 195        unsigned int i;
 196
 197        sglist = vmalloc(buf->npages * sizeof(*sglist));
 198        if (sglist == NULL)
 199                return -ENOMEM;
 200
 201        sg_init_table(sglist, buf->npages);
 202
 203        for (i = 0; i < buf->npages; ++i) {
 204                if (PageHighMem(buf->pages[i])) {
 205                        vfree(sglist);
 206                        return -EINVAL;
 207                }
 208
 209                sg_set_page(&sglist[i], buf->pages[i], PAGE_SIZE - offset,
 210                            offset);
 211                offset = 0;
 212        }
 213
 214        buf->sglen = buf->npages;
 215        buf->sglist = sglist;
 216
 217        return 0;
 218}
 219
 220/*
 221 * isp_video_buffer_sglist_pfnmap - Build a scatter list for a VM_PFNMAP buffer
 222 *
 223 * Create a scatter list of physically contiguous pages starting at the buffer
 224 * memory physical address.
 225 */
 226static int isp_video_buffer_sglist_pfnmap(struct isp_video_buffer *buf)
 227{
 228        struct scatterlist *sglist;
 229        unsigned int offset = buf->offset;
 230        unsigned long pfn = buf->paddr >> PAGE_SHIFT;
 231        unsigned int i;
 232
 233        sglist = vmalloc(buf->npages * sizeof(*sglist));
 234        if (sglist == NULL)
 235                return -ENOMEM;
 236
 237        sg_init_table(sglist, buf->npages);
 238
 239        for (i = 0; i < buf->npages; ++i, ++pfn) {
 240                sg_set_page(&sglist[i], pfn_to_page(pfn), PAGE_SIZE - offset,
 241                            offset);
 242                /* PFNMAP buffers will not get DMA-mapped, set the DMA address
 243                 * manually.
 244                 */
 245                sg_dma_address(&sglist[i]) = (pfn << PAGE_SHIFT) + offset;
 246                offset = 0;
 247        }
 248
 249        buf->sglen = buf->npages;
 250        buf->sglist = sglist;
 251
 252        return 0;
 253}
 254
 255/*
 256 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 257 *
  258 * Release pages locked by a call to isp_video_buffer_prepare_user() and free
  259 * the pages table.
 260 */
 261static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
 262{
 263        enum dma_data_direction direction;
 264        unsigned int i;
 265
 266        if (buf->queue->ops->buffer_cleanup)
 267                buf->queue->ops->buffer_cleanup(buf);
 268
 269        if (!(buf->vm_flags & VM_PFNMAP)) {
 270                direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
 271                          ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 272                dma_unmap_sg(buf->queue->dev, buf->sglist, buf->sglen,
 273                             direction);
 274        }
 275
 276        vfree(buf->sglist);
 277        buf->sglist = NULL;
 278        buf->sglen = 0;
 279
 280        if (buf->pages != NULL) {
 281                isp_video_buffer_lock_vma(buf, 0);
 282
 283                for (i = 0; i < buf->npages; ++i)
 284                        page_cache_release(buf->pages[i]);
 285
 286                vfree(buf->pages);
 287                buf->pages = NULL;
 288        }
 289
 290        buf->npages = 0;
 291        buf->skip_cache = false;
 292}
 293
 294/*
 295 * isp_video_buffer_prepare_user - Pin userspace VMA pages to memory.
 296 *
 297 * This function creates a list of pages for a userspace VMA. The number of
 298 * pages is first computed based on the buffer size, and pages are then
 299 * retrieved by a call to get_user_pages.
 300 *
 301 * Pages are pinned to memory by get_user_pages, making them available for DMA
  302 * transfers. However, due to memory management optimization, it seems that
  303 * get_user_pages() doesn't guarantee that the pinned pages will not be written
 304 * to swap and removed from the userspace mapping(s). When this happens, a page
 305 * fault can be generated when accessing those unmapped pages.
 306 *
 307 * If the fault is triggered by a page table walk caused by VIPT cache
 308 * management operations, the page fault handler might oops if the MM semaphore
 309 * is held, as it can't handle kernel page faults in that case. To fix that, a
 310 * fixup entry needs to be added to the cache management code, or the userspace
 311 * VMA must be locked to avoid removing pages from the userspace mapping in the
 312 * first place.
 313 *
 314 * If the number of pages retrieved is smaller than the number required by the
 315 * buffer size, the function returns -EFAULT.
 316 */
 317static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
 318{
 319        unsigned long data;
 320        unsigned int first;
 321        unsigned int last;
 322        int ret;
 323
 324        data = buf->vbuf.m.userptr;
 325        first = (data & PAGE_MASK) >> PAGE_SHIFT;
 326        last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
 327
 328        buf->offset = data & ~PAGE_MASK;
 329        buf->npages = last - first + 1;
 330        buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
 331        if (buf->pages == NULL)
 332                return -ENOMEM;
 333
 334        down_read(&current->mm->mmap_sem);
 335        ret = get_user_pages(current, current->mm, data & PAGE_MASK,
 336                             buf->npages,
 337                             buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
 338                             buf->pages, NULL);
 339        up_read(&current->mm->mmap_sem);
 340
 341        if (ret != buf->npages) {
 342                buf->npages = ret < 0 ? 0 : ret;
 343                isp_video_buffer_cleanup(buf);
 344                return -EFAULT;
 345        }
 346
 347        ret = isp_video_buffer_lock_vma(buf, 1);
 348        if (ret < 0)
 349                isp_video_buffer_cleanup(buf);
 350
 351        return ret;
 352}
 353
 354/*
 355 * isp_video_buffer_prepare_pfnmap - Validate a VM_PFNMAP userspace buffer
 356 *
 357 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 358 * memory and if they span a single VMA.
 359 *
 360 * Return 0 if the buffer is valid, or -EFAULT otherwise.
 361 */
 362static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
 363{
 364        struct vm_area_struct *vma;
 365        unsigned long prev_pfn;
 366        unsigned long this_pfn;
 367        unsigned long start;
 368        unsigned long end;
 369        dma_addr_t pa = 0;
 370        int ret = -EFAULT;
 371
 372        start = buf->vbuf.m.userptr;
 373        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
 374
 375        buf->offset = start & ~PAGE_MASK;
 376        buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
 377        buf->pages = NULL;
 378
 379        down_read(&current->mm->mmap_sem);
 380        vma = find_vma(current->mm, start);
 381        if (vma == NULL || vma->vm_end < end)
 382                goto done;
 383
 384        for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
 385                ret = follow_pfn(vma, start, &this_pfn);
 386                if (ret)
 387                        goto done;
 388
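                /* Record the base address on the first page and check that
                 * all following pages are physically contiguous.
                 */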
 389                if (prev_pfn == 0)
 390                        pa = this_pfn << PAGE_SHIFT;
 391                else if (this_pfn != prev_pfn + 1) {
 392                        ret = -EFAULT;
 393                        goto done;
 394                }
 395
 396                prev_pfn = this_pfn;
 397        }
 398
 399        buf->paddr = pa + buf->offset;
 400        ret = 0;
 401
 402done:
 403        up_read(&current->mm->mmap_sem);
 404        return ret;
 405}
 406
 407/*
 408 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 409 *
 410 * This function locates the VMAs for the buffer's userspace address and checks
  411 * that their flags match. The only flag we need to care about at the moment
 412 * is VM_PFNMAP.
 413 *
 414 * The buffer vm_flags field is set to the first VMA flags.
 415 *
 416 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 417 * have incompatible flags.
 418 */
 419static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
 420{
 421        struct vm_area_struct *vma;
 422        pgprot_t uninitialized_var(vm_page_prot);
 423        unsigned long start;
 424        unsigned long end;
 425        int ret = -EFAULT;
 426
 427        start = buf->vbuf.m.userptr;
 428        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
 429
 430        down_read(&current->mm->mmap_sem);
 431
 432        do {
 433                vma = find_vma(current->mm, start);
 434                if (vma == NULL)
 435                        goto done;
 436
 437                if (start == buf->vbuf.m.userptr) {
 438                        buf->vm_flags = vma->vm_flags;
 439                        vm_page_prot = vma->vm_page_prot;
 440                }
 441
 442                if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
 443                        goto done;
 444
 445                if (vm_page_prot != vma->vm_page_prot)
 446                        goto done;
 447
 448                start = vma->vm_end + 1;
 449        } while (vma->vm_end < end);
 450
  451        /* Skip cache management to improve performance for non-cached or
 452         * write-combining buffers.
 453         */
 454        if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
 455            vm_page_prot == pgprot_writecombine(vm_page_prot))
 456                buf->skip_cache = true;
 457
 458        ret = 0;
 459
 460done:
 461        up_read(&current->mm->mmap_sem);
 462        return ret;
 463}
 464
 465/*
 466 * isp_video_buffer_prepare - Make a buffer ready for operation
 467 *
 468 * Preparing a buffer involves:
 469 *
 470 * - validating VMAs (userspace buffers only)
 471 * - locking pages and VMAs into memory (userspace buffers only)
 472 * - building page and scatter-gather lists
 473 * - mapping buffers for DMA operation
 474 * - performing driver-specific preparation
 475 *
 476 * The function must be called in userspace context with a valid mm context
 477 * (this excludes cleanup paths such as sys_close when the userspace process
 478 * segfaults).
 479 */
 480static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
 481{
 482        enum dma_data_direction direction;
 483        int ret;
 484
 485        switch (buf->vbuf.memory) {
 486        case V4L2_MEMORY_MMAP:
 487                ret = isp_video_buffer_sglist_kernel(buf);
 488                break;
 489
 490        case V4L2_MEMORY_USERPTR:
 491                ret = isp_video_buffer_prepare_vm_flags(buf);
 492                if (ret < 0)
 493                        return ret;
 494
 495                if (buf->vm_flags & VM_PFNMAP) {
 496                        ret = isp_video_buffer_prepare_pfnmap(buf);
 497                        if (ret < 0)
 498                                return ret;
 499
 500                        ret = isp_video_buffer_sglist_pfnmap(buf);
 501                } else {
 502                        ret = isp_video_buffer_prepare_user(buf);
 503                        if (ret < 0)
 504                                return ret;
 505
 506                        ret = isp_video_buffer_sglist_user(buf);
 507                }
 508                break;
 509
 510        default:
 511                return -EINVAL;
 512        }
 513
 514        if (ret < 0)
 515                goto done;
 516
 517        if (!(buf->vm_flags & VM_PFNMAP)) {
 518                direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
 519                          ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 520                ret = dma_map_sg(buf->queue->dev, buf->sglist, buf->sglen,
 521                                 direction);
 522                if (ret != buf->sglen) {
 523                        ret = -EFAULT;
 524                        goto done;
 525                }
 526        }
 527
 528        if (buf->queue->ops->buffer_prepare)
 529                ret = buf->queue->ops->buffer_prepare(buf);
 530
 531done:
 532        if (ret < 0) {
 533                isp_video_buffer_cleanup(buf);
 534                return ret;
 535        }
 536
 537        return ret;
 538}
 539
 540/*
  541 * isp_video_buffer_query - Query the status of a given buffer
 542 *
 543 * Locking: must be called with the queue lock held.
 544 */
 545static void isp_video_buffer_query(struct isp_video_buffer *buf,
 546                                   struct v4l2_buffer *vbuf)
 547{
 548        memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));
 549
 550        if (buf->vma_use_count)
 551                vbuf->flags |= V4L2_BUF_FLAG_MAPPED;
 552
 553        switch (buf->state) {
 554        case ISP_BUF_STATE_ERROR:
 555                vbuf->flags |= V4L2_BUF_FLAG_ERROR;
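                /* Fall through */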
 556        case ISP_BUF_STATE_DONE:
 557                vbuf->flags |= V4L2_BUF_FLAG_DONE;
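                /* Fall through */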
 558        case ISP_BUF_STATE_QUEUED:
 559        case ISP_BUF_STATE_ACTIVE:
 560                vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
 561                break;
 562        case ISP_BUF_STATE_IDLE:
 563        default:
 564                break;
 565        }
 566}
 567
 568/*
 569 * isp_video_buffer_wait - Wait for a buffer to be ready
 570 *
 571 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 572 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 573 *
 574 * In blocking mode, wait (interruptibly but with no timeout) on the buffer wait
 575 * queue using the same condition.
 576 */
 577static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
 578{
 579        if (nonblocking) {
 580                return (buf->state != ISP_BUF_STATE_QUEUED &&
 581                        buf->state != ISP_BUF_STATE_ACTIVE)
 582                        ? 0 : -EAGAIN;
 583        }
 584
 585        return wait_event_interruptible(buf->wait,
 586                buf->state != ISP_BUF_STATE_QUEUED &&
 587                buf->state != ISP_BUF_STATE_ACTIVE);
 588}
 589
 590/* -----------------------------------------------------------------------------
 591 * Queue management
 592 */
 593
 594/*
 595 * isp_video_queue_free - Free video buffers memory
 596 *
 597 * Buffers can only be freed if the queue isn't streaming and if no buffer is
  598 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 599 *
 600 * This function must be called with the queue lock held.
 601 */
 602static int isp_video_queue_free(struct isp_video_queue *queue)
 603{
 604        unsigned int i;
 605
 606        if (queue->streaming)
 607                return -EBUSY;
 608
 609        for (i = 0; i < queue->count; ++i) {
 610                if (queue->buffers[i]->vma_use_count != 0)
 611                        return -EBUSY;
 612        }
 613
 614        for (i = 0; i < queue->count; ++i) {
 615                struct isp_video_buffer *buf = queue->buffers[i];
 616
 617                isp_video_buffer_cleanup(buf);
 618
 619                vfree(buf->vaddr);
 620                buf->vaddr = NULL;
 621
 622                kfree(buf);
 623                queue->buffers[i] = NULL;
 624        }
 625
 626        INIT_LIST_HEAD(&queue->queue);
 627        queue->count = 0;
 628        return 0;
 629}
 630
 631/*
 632 * isp_video_queue_alloc - Allocate video buffers memory
 633 *
 634 * This function must be called with the queue lock held.
 635 */
 636static int isp_video_queue_alloc(struct isp_video_queue *queue,
 637                                 unsigned int nbuffers,
 638                                 unsigned int size, enum v4l2_memory memory)
 639{
 640        struct isp_video_buffer *buf;
 641        unsigned int i;
 642        void *mem;
 643        int ret;
 644
 645        /* Start by freeing the buffers. */
 646        ret = isp_video_queue_free(queue);
 647        if (ret < 0)
 648                return ret;
 649
 650        /* Bail out if no buffers should be allocated. */
 651        if (nbuffers == 0)
 652                return 0;
 653
 654        /* Initialize the allocated buffers. */
 655        for (i = 0; i < nbuffers; ++i) {
 656                buf = kzalloc(queue->bufsize, GFP_KERNEL);
 657                if (buf == NULL)
 658                        break;
 659
 660                if (memory == V4L2_MEMORY_MMAP) {
 661                        /* Allocate video buffers memory for mmap mode. Align
 662                         * the size to the page size.
 663                         */
 664                        mem = vmalloc_32_user(PAGE_ALIGN(size));
 665                        if (mem == NULL) {
 666                                kfree(buf);
 667                                break;
 668                        }
 669
 670                        buf->vbuf.m.offset = i * PAGE_ALIGN(size);
 671                        buf->vaddr = mem;
 672                }
 673
 674                buf->vbuf.index = i;
 675                buf->vbuf.length = size;
 676                buf->vbuf.type = queue->type;
 677                buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 678                buf->vbuf.field = V4L2_FIELD_NONE;
 679                buf->vbuf.memory = memory;
 680
 681                buf->queue = queue;
 682                init_waitqueue_head(&buf->wait);
 683
 684                queue->buffers[i] = buf;
 685        }
 686
 687        if (i == 0)
 688                return -ENOMEM;
 689
 690        queue->count = i;
 691        return nbuffers;
 692}
 693
 694/**
 695 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 696 * @queue: Video buffers queue
 697 *
 698 * Free all allocated resources and clean up the video buffers queue. The queue
 699 * must not be busy (no ongoing video stream) and buffers must have been
 700 * unmapped.
 701 *
 702 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 703 * unmapped.
 704 */
 705int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
 706{
 707        return isp_video_queue_free(queue);
 708}
 709
 710/**
 711 * omap3isp_video_queue_init - Initialize the video buffers queue
 712 * @queue: Video buffers queue
 713 * @type: V4L2 buffer type (capture or output)
 714 * @ops: Driver-specific queue operations
 715 * @dev: Device used for DMA operations
 716 * @bufsize: Size of the driver-specific buffer structure
 717 *
 718 * Initialize the video buffers queue with the supplied parameters.
 719 *
 720 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
 721 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
 722 *
 723 * Buffer objects will be allocated using the given buffer size to allow room
 724 * for driver-specific fields. Driver-specific buffer structures must start
 725 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
 726 * structure must pass the size of the isp_video_buffer structure in the bufsize
 727 * parameter.
 728 *
 729 * Return 0 on success.
 730 */
 731int omap3isp_video_queue_init(struct isp_video_queue *queue,
 732                              enum v4l2_buf_type type,
 733                              const struct isp_video_queue_operations *ops,
 734                              struct device *dev, unsigned int bufsize)
 735{
 736        INIT_LIST_HEAD(&queue->queue);
 737        mutex_init(&queue->lock);
 738        spin_lock_init(&queue->irqlock);
 739
 740        queue->type = type;
 741        queue->ops = ops;
 742        queue->dev = dev;
 743        queue->bufsize = bufsize;
 744
 745        return 0;
 746}
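
/*
 * Illustrative usage sketch (not part of the driver): a video node built on
 * top of this queue would typically define an isp_video_queue_operations
 * instance and initialize its queue at open() time roughly as follows. The
 * isp_foo_* names, the handle and isp pointers, and struct isp_foo_buffer
 * (which must start with a struct isp_video_buffer field) are assumptions
 * made up for this example only.
 *
 *      static const struct isp_video_queue_operations isp_foo_queue_ops = {
 *              .queue_prepare = isp_foo_queue_prepare,
 *              .buffer_prepare = isp_foo_buffer_prepare,
 *              .buffer_queue = isp_foo_buffer_queue,
 *              .buffer_cleanup = isp_foo_buffer_cleanup,
 *      };
 *
 *      omap3isp_video_queue_init(&handle->queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *                                &isp_foo_queue_ops, isp->dev,
 *                                sizeof(struct isp_foo_buffer));
 */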
 747
 748/* -----------------------------------------------------------------------------
 749 * V4L2 operations
 750 */
 751
 752/**
 753 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
 754 *
 755 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
  756 * allocates video buffer objects and, for MMAP buffers, buffer memory.
 757 *
 758 * If the number of buffers is 0, all buffers are freed and the function returns
 759 * without performing any allocation.
 760 *
 761 * If the number of buffers is not 0, currently allocated buffers (if any) are
  762 * freed and the requested number of buffers is allocated. Depending on
  763 * driver-specific requirements and on memory availability, a number of buffers
  764 * smaller or larger than requested may be allocated. This isn't considered
  765 * an error.
 766 *
 767 * Return 0 on success or one of the following error codes:
 768 *
  769 * -EINVAL if the buffer type or index is invalid
 770 * -EBUSY if the queue is busy (streaming or buffers mapped)
 771 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 772 */
 773int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
 774                                 struct v4l2_requestbuffers *rb)
 775{
 776        unsigned int nbuffers = rb->count;
 777        unsigned int size;
 778        int ret;
 779
 780        if (rb->type != queue->type)
 781                return -EINVAL;
 782
 783        queue->ops->queue_prepare(queue, &nbuffers, &size);
 784        if (size == 0)
 785                return -EINVAL;
 786
 787        nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);
 788
 789        mutex_lock(&queue->lock);
 790
 791        ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
 792        if (ret < 0)
 793                goto done;
 794
 795        rb->count = ret;
 796        ret = 0;
 797
 798done:
 799        mutex_unlock(&queue->lock);
 800        return ret;
 801}
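
/*
 * Illustrative sketch (not part of the driver): a hypothetical VIDIOC_REQBUFS
 * ioctl handler could simply delegate to the function above. The
 * isp_foo_video_fh structure and the to_isp_foo_video_fh() helper are
 * assumptions made up for this example only.
 *
 *      static int isp_foo_reqbufs(struct file *file, void *fh,
 *                                 struct v4l2_requestbuffers *rb)
 *      {
 *              struct isp_foo_video_fh *vfh = to_isp_foo_video_fh(fh);
 *
 *              return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
 *      }
 */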
 802
 803/**
 804 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 805 *
 806 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 807 * returns the status of a given video buffer.
 808 *
  809 * Return 0 on success or -EINVAL if the buffer type or index is invalid.
 810 */
 811int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
 812                                  struct v4l2_buffer *vbuf)
 813{
 814        struct isp_video_buffer *buf;
 815        int ret = 0;
 816
 817        if (vbuf->type != queue->type)
 818                return -EINVAL;
 819
 820        mutex_lock(&queue->lock);
 821
 822        if (vbuf->index >= queue->count) {
 823                ret = -EINVAL;
 824                goto done;
 825        }
 826
 827        buf = queue->buffers[vbuf->index];
 828        isp_video_buffer_query(buf, vbuf);
 829
 830done:
 831        mutex_unlock(&queue->lock);
 832        return ret;
 833}
 834
 835/**
 836 * omap3isp_video_queue_qbuf - Queue a buffer
 837 *
 838 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 839 *
  840 * The v4l2_buffer structure passed from userspace is first sanity-checked. If
  841 * valid, the buffer is then processed and added to the main queue and, if the
 842 * queue is streaming, to the IRQ queue.
 843 *
 844 * Before being enqueued, USERPTR buffers are checked for address changes. If
 845 * the buffer has a different userspace address, the old memory area is unlocked
 846 * and the new memory area is locked.
 847 */
 848int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
 849                              struct v4l2_buffer *vbuf)
 850{
 851        struct isp_video_buffer *buf;
 852        unsigned long flags;
 853        int ret = -EINVAL;
 854
 855        if (vbuf->type != queue->type)
 856                goto done;
 857
 858        mutex_lock(&queue->lock);
 859
 860        if (vbuf->index >= queue->count)
 861                goto done;
 862
 863        buf = queue->buffers[vbuf->index];
 864
 865        if (vbuf->memory != buf->vbuf.memory)
 866                goto done;
 867
 868        if (buf->state != ISP_BUF_STATE_IDLE)
 869                goto done;
 870
 871        if (vbuf->memory == V4L2_MEMORY_USERPTR &&
 872            vbuf->length < buf->vbuf.length)
 873                goto done;
 874
 875        if (vbuf->memory == V4L2_MEMORY_USERPTR &&
 876            vbuf->m.userptr != buf->vbuf.m.userptr) {
 877                isp_video_buffer_cleanup(buf);
 878                buf->vbuf.m.userptr = vbuf->m.userptr;
 879                buf->prepared = 0;
 880        }
 881
 882        if (!buf->prepared) {
 883                ret = isp_video_buffer_prepare(buf);
 884                if (ret < 0)
 885                        goto done;
 886                buf->prepared = 1;
 887        }
 888
 889        isp_video_buffer_cache_sync(buf);
 890
 891        buf->state = ISP_BUF_STATE_QUEUED;
 892        list_add_tail(&buf->stream, &queue->queue);
 893
 894        if (queue->streaming) {
 895                spin_lock_irqsave(&queue->irqlock, flags);
 896                queue->ops->buffer_queue(buf);
 897                spin_unlock_irqrestore(&queue->irqlock, flags);
 898        }
 899
 900        ret = 0;
 901
 902done:
 903        mutex_unlock(&queue->lock);
 904        return ret;
 905}
 906
 907/**
 908 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 909 *
 910 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 911 *
 912 * Wait until a buffer is ready to be dequeued, remove it from the queue and
 913 * copy its information to the v4l2_buffer structure.
 914 *
 915 * If the nonblocking argument is not zero and no buffer is ready, return
 916 * -EAGAIN immediately instead of waiting.
 917 *
 918 * If no buffer has been enqueued, or if the requested buffer type doesn't match
 919 * the queue type, return -EINVAL.
 920 */
 921int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
 922                               struct v4l2_buffer *vbuf, int nonblocking)
 923{
 924        struct isp_video_buffer *buf;
 925        int ret;
 926
 927        if (vbuf->type != queue->type)
 928                return -EINVAL;
 929
 930        mutex_lock(&queue->lock);
 931
 932        if (list_empty(&queue->queue)) {
 933                ret = -EINVAL;
 934                goto done;
 935        }
 936
 937        buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
 938        ret = isp_video_buffer_wait(buf, nonblocking);
 939        if (ret < 0)
 940                goto done;
 941
 942        list_del(&buf->stream);
 943
 944        isp_video_buffer_query(buf, vbuf);
 945        buf->state = ISP_BUF_STATE_IDLE;
 946        vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;
 947
 948done:
 949        mutex_unlock(&queue->lock);
 950        return ret;
 951}
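
/*
 * Illustrative sketch (not part of the driver): hypothetical VIDIOC_QBUF and
 * VIDIOC_DQBUF ioctl handlers could wrap the two functions above, passing the
 * file's O_NONBLOCK flag as the nonblocking argument. The isp_foo_video_fh
 * structure and the to_isp_foo_video_fh() helper are assumptions made up for
 * this example only.
 *
 *      static int isp_foo_qbuf(struct file *file, void *fh,
 *                              struct v4l2_buffer *b)
 *      {
 *              struct isp_foo_video_fh *vfh = to_isp_foo_video_fh(fh);
 *
 *              return omap3isp_video_queue_qbuf(&vfh->queue, b);
 *      }
 *
 *      static int isp_foo_dqbuf(struct file *file, void *fh,
 *                               struct v4l2_buffer *b)
 *      {
 *              struct isp_foo_video_fh *vfh = to_isp_foo_video_fh(fh);
 *
 *              return omap3isp_video_queue_dqbuf(&vfh->queue, b,
 *                                                file->f_flags & O_NONBLOCK);
 *      }
 */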
 952
 953/**
 954 * omap3isp_video_queue_streamon - Start streaming
 955 *
 956 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 957 * starts streaming on the queue and calls the buffer_queue operation for all
 958 * queued buffers.
 959 *
 960 * Return 0 on success.
 961 */
 962int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
 963{
 964        struct isp_video_buffer *buf;
 965        unsigned long flags;
 966
 967        mutex_lock(&queue->lock);
 968
 969        if (queue->streaming)
 970                goto done;
 971
 972        queue->streaming = 1;
 973
 974        spin_lock_irqsave(&queue->irqlock, flags);
 975        list_for_each_entry(buf, &queue->queue, stream)
 976                queue->ops->buffer_queue(buf);
 977        spin_unlock_irqrestore(&queue->irqlock, flags);
 978
 979done:
 980        mutex_unlock(&queue->lock);
 981        return 0;
 982}
 983
 984/**
 985 * omap3isp_video_queue_streamoff - Stop streaming
 986 *
 987 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 988 * stops streaming on the queue and wakes up all the buffers.
 989 *
 990 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
  991 * delayed work before calling this function to make sure no buffer will be
 992 * touched by the driver and/or hardware.
 993 */
 994void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
 995{
 996        struct isp_video_buffer *buf;
 997        unsigned long flags;
 998        unsigned int i;
 999
1000        mutex_lock(&queue->lock);
1001
1002        if (!queue->streaming)
1003                goto done;
1004
1005        queue->streaming = 0;
1006
1007        spin_lock_irqsave(&queue->irqlock, flags);
1008        for (i = 0; i < queue->count; ++i) {
1009                buf = queue->buffers[i];
1010
1011                if (buf->state == ISP_BUF_STATE_ACTIVE)
1012                        wake_up(&buf->wait);
1013
1014                buf->state = ISP_BUF_STATE_IDLE;
1015        }
1016        spin_unlock_irqrestore(&queue->irqlock, flags);
1017
1018        INIT_LIST_HEAD(&queue->queue);
1019
1020done:
1021        mutex_unlock(&queue->lock);
1022}
1023
1024/**
1025 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
1026 *
1027 * This function is intended to be used with suspend/resume operations. It
1028 * discards all 'done' buffers as they would be too old to be requested after
1029 * resume.
1030 *
1031 * Drivers must stop the hardware and synchronize with interrupt handlers and/or
 1032 * delayed work before calling this function to make sure no buffer will be
1033 * touched by the driver and/or hardware.
1034 */
1035void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
1036{
1037        struct isp_video_buffer *buf;
1038        unsigned int i;
1039
1040        mutex_lock(&queue->lock);
1041
1042        if (!queue->streaming)
1043                goto done;
1044
1045        for (i = 0; i < queue->count; ++i) {
1046                buf = queue->buffers[i];
1047
1048                if (buf->state == ISP_BUF_STATE_DONE)
1049                        buf->state = ISP_BUF_STATE_ERROR;
1050        }
1051
1052done:
1053        mutex_unlock(&queue->lock);
1054}
1055
1056static void isp_video_queue_vm_open(struct vm_area_struct *vma)
1057{
1058        struct isp_video_buffer *buf = vma->vm_private_data;
1059
1060        buf->vma_use_count++;
1061}
1062
1063static void isp_video_queue_vm_close(struct vm_area_struct *vma)
1064{
1065        struct isp_video_buffer *buf = vma->vm_private_data;
1066
1067        buf->vma_use_count--;
1068}
1069
1070static const struct vm_operations_struct isp_video_queue_vm_ops = {
1071        .open = isp_video_queue_vm_open,
1072        .close = isp_video_queue_vm_close,
1073};
1074
1075/**
1076 * omap3isp_video_queue_mmap - Map buffers to userspace
1077 *
1078 * This function is intended to be used as an mmap() file operation handler. It
1079 * maps a buffer to userspace based on the VMA offset.
1080 *
1081 * Only buffers of memory type MMAP are supported.
1082 */
1083int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
1084                         struct vm_area_struct *vma)
1085{
1086        struct isp_video_buffer *uninitialized_var(buf);
1087        unsigned long size;
1088        unsigned int i;
1089        int ret = 0;
1090
1091        mutex_lock(&queue->lock);
1092
1093        for (i = 0; i < queue->count; ++i) {
1094                buf = queue->buffers[i];
1095                if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
1096                        break;
1097        }
1098
1099        if (i == queue->count) {
1100                ret = -EINVAL;
1101                goto done;
1102        }
1103
1104        size = vma->vm_end - vma->vm_start;
1105
1106        if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
1107            size != PAGE_ALIGN(buf->vbuf.length)) {
1108                ret = -EINVAL;
1109                goto done;
1110        }
1111
1112        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
1113        if (ret < 0)
1114                goto done;
1115
1116        vma->vm_ops = &isp_video_queue_vm_ops;
1117        vma->vm_private_data = buf;
1118        isp_video_queue_vm_open(vma);
1119
1120done:
1121        mutex_unlock(&queue->lock);
1122        return ret;
1123}
1124
1125/**
1126 * omap3isp_video_queue_poll - Poll video queue state
1127 *
1128 * This function is intended to be used as a poll() file operation handler. It
1129 * polls the state of the video buffer at the front of the queue and returns an
1130 * events mask.
1131 *
1132 * If no buffer is present at the front of the queue, POLLERR is returned.
1133 */
1134unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
1135                                       struct file *file, poll_table *wait)
1136{
1137        struct isp_video_buffer *buf;
1138        unsigned int mask = 0;
1139
1140        mutex_lock(&queue->lock);
1141        if (list_empty(&queue->queue)) {
1142                mask |= POLLERR;
1143                goto done;
1144        }
1145        buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
1146
1147        poll_wait(file, &buf->wait, wait);
1148        if (buf->state == ISP_BUF_STATE_DONE ||
1149            buf->state == ISP_BUF_STATE_ERROR) {
1150                if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1151                        mask |= POLLIN | POLLRDNORM;
1152                else
1153                        mask |= POLLOUT | POLLWRNORM;
1154        }
1155
1156done:
1157        mutex_unlock(&queue->lock);
1158        return mask;
1159}
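
/*
 * Illustrative sketch (not part of the driver): the mmap() and poll() file
 * operations of a hypothetical video node could delegate to
 * omap3isp_video_queue_mmap() and omap3isp_video_queue_poll() above. The
 * isp_foo_video_fh structure and the to_isp_foo_video_fh() helper are
 * assumptions made up for this example only.
 *
 *      static int isp_foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct isp_foo_video_fh *vfh =
 *                      to_isp_foo_video_fh(file->private_data);
 *
 *              return omap3isp_video_queue_mmap(&vfh->queue, vma);
 *      }
 *
 *      static unsigned int isp_foo_poll(struct file *file, poll_table *wait)
 *      {
 *              struct isp_foo_video_fh *vfh =
 *                      to_isp_foo_video_fh(file->private_data);
 *
 *              return omap3isp_video_queue_poll(&vfh->queue, file, wait);
 *      }
 */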
1160