linux/drivers/media/video/omap3isp/ispvideo.c
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *           Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/omap-pm.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

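/*
 * Media bus code to pixel format mapping table. Each entry lists the media
 * bus code variants handled by the ISP for a given format (including the
 * 8-bit "flavor" used by the lane shifter, or 0 when the format can't be
 * shifted), the matching V4L2 pixel format and the number of bits per pixel.
 */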
static struct isp_format_info formats[] = {
        { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
          V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
          V4L2_PIX_FMT_GREY, 8, },
        { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
          V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
          V4L2_PIX_FMT_Y10, 10, },
        { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
          V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
          V4L2_PIX_FMT_Y12, 12, },
        { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
          V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
          V4L2_PIX_FMT_SBGGR8, 8, },
        { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
          V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
          V4L2_PIX_FMT_SGBRG8, 8, },
        { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
          V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
          V4L2_PIX_FMT_SGRBG8, 8, },
        { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
          V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
          V4L2_PIX_FMT_SRGGB8, 8, },
        { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
          V4L2_MBUS_FMT_SGRBG10_1X10, 0,
          V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
        { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
          V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
          V4L2_PIX_FMT_SBGGR10, 10, },
        { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
          V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
          V4L2_PIX_FMT_SGBRG10, 10, },
        { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
          V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
          V4L2_PIX_FMT_SGRBG10, 10, },
        { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
          V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
          V4L2_PIX_FMT_SRGGB10, 10, },
        { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
          V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
          V4L2_PIX_FMT_SBGGR12, 12, },
        { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
          V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
          V4L2_PIX_FMT_SGBRG12, 12, },
        { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
          V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
          V4L2_PIX_FMT_SGRBG12, 12, },
        { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
          V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
          V4L2_PIX_FMT_SRGGB12, 12, },
        { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
          V4L2_MBUS_FMT_UYVY8_1X16, 0,
          V4L2_PIX_FMT_UYVY, 16, },
        { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
          V4L2_MBUS_FMT_YUYV8_1X16, 0,
          V4L2_PIX_FMT_YUYV, 16, },
};

const struct isp_format_info *
omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(formats); ++i) {
                if (formats[i].code == code)
                        return &formats[i];
        }

        return NULL;
}

/*
 * Decide whether the desired output pixel code can be obtained with the lane
 * shifter by shifting the input pixel code.
 * @in: input pixelcode to shifter
 * @out: output pixelcode from shifter
 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
 *
 * Return true if the combination is possible, false otherwise.
 */
static bool isp_video_is_shiftable(enum v4l2_mbus_pixelcode in,
                enum v4l2_mbus_pixelcode out,
                unsigned int additional_shift)
{
        const struct isp_format_info *in_info, *out_info;

        if (in == out)
                return true;

        in_info = omap3isp_video_format_info(in);
        out_info = omap3isp_video_format_info(out);

        if ((in_info->flavor == 0) || (out_info->flavor == 0))
                return false;

        if (in_info->flavor != out_info->flavor)
                return false;

        return in_info->bpp - out_info->bpp + additional_shift <= 6;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
                                          const struct v4l2_mbus_framefmt *mbus,
                                          struct v4l2_pix_format *pix)
{
        unsigned int bpl = pix->bytesperline;
        unsigned int min_bpl;
        unsigned int i;

        memset(pix, 0, sizeof(*pix));
        pix->width = mbus->width;
        pix->height = mbus->height;

        for (i = 0; i < ARRAY_SIZE(formats); ++i) {
                if (formats[i].code == mbus->code)
                        break;
        }

        if (WARN_ON(i == ARRAY_SIZE(formats)))
                return 0;

        min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

        /* Clamp the requested bytes per line value. If the maximum bytes per
         * line value is zero, the module doesn't support user configurable line
         * sizes. Override the requested value with the minimum in that case.
         */
        if (video->bpl_max)
                bpl = clamp(bpl, min_bpl, video->bpl_max);
        else
                bpl = min_bpl;

        if (!video->bpl_zero_padding || bpl != min_bpl)
                bpl = ALIGN(bpl, video->bpl_alignment);

        pix->pixelformat = formats[i].pixelformat;
        pix->bytesperline = bpl;
        pix->sizeimage = pix->bytesperline * pix->height;
        pix->colorspace = mbus->colorspace;
        pix->field = mbus->field;

        return bpl - min_bpl;
}

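/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the width, height, media bus code,
 * colorspace and field information from the input pix format. The media bus
 * code is looked up in the formats table; if the pixel format isn't supported
 * the code is left at zero.
 */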
static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
                                  struct v4l2_mbus_framefmt *mbus)
{
        unsigned int i;

        memset(mbus, 0, sizeof(*mbus));
        mbus->width = pix->width;
        mbus->height = pix->height;

        for (i = 0; i < ARRAY_SIZE(formats); ++i) {
                if (formats[i].pixelformat == pix->pixelformat)
                        break;
        }

        if (WARN_ON(i == ARRAY_SIZE(formats)))
                return;

        mbus->code = formats[i].code;
        mbus->colorspace = pix->colorspace;
        mbus->field = pix->field;
}

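/*
 * isp_video_remote_subdev - Return the subdev connected to a video node
 * @video: ISP video instance
 * @pad: if not NULL, filled with the index of the remote subdev pad
 *
 * Return the V4L2 subdevice at the other end of the link connected to the
 * video node pad, or NULL if the pad isn't linked to a subdevice.
 */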
static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
        struct media_pad *remote;

        remote = media_entity_remote_source(&video->pad);

        if (remote == NULL ||
            media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
                return NULL;

        if (pad)
                *pad = remote->index;

        return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static struct isp_video *
isp_video_far_end(struct isp_video *video)
{
        struct media_entity_graph graph;
        struct media_entity *entity = &video->video.entity;
        struct media_device *mdev = entity->parent;
        struct isp_video *far_end = NULL;

        mutex_lock(&mdev->graph_mutex);
        media_entity_graph_walk_start(&graph, entity);

        while ((entity = media_entity_graph_walk_next(&graph))) {
                if (entity == &video->video.entity)
                        continue;

                if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
                        continue;

                far_end = to_isp_video(media_entity_to_video_device(entity));
                if (far_end->type != video->type)
                        break;

                far_end = NULL;
        }

        mutex_unlock(&mdev->graph_mutex);
        return far_end;
}

/*
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends.
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
        struct isp_device *isp = pipe->output->isp;
        struct v4l2_subdev_format fmt_source;
        struct v4l2_subdev_format fmt_sink;
        struct media_pad *pad;
        struct v4l2_subdev *subdev;
        int ret;

        pipe->max_rate = pipe->l3_ick;

        subdev = isp_video_remote_subdev(pipe->output, NULL);
        if (subdev == NULL)
                return -EPIPE;

        while (1) {
                unsigned int shifter_link;
                /* Retrieve the sink format */
                pad = &subdev->entity.pads[0];
                if (!(pad->flags & MEDIA_PAD_FL_SINK))
                        break;

                fmt_sink.pad = pad->index;
                fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
                ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
                if (ret < 0 && ret != -ENOIOCTLCMD)
                        return -EPIPE;

                /* Update the maximum frame rate */
                if (subdev == &isp->isp_res.subdev)
                        omap3isp_resizer_max_rate(&isp->isp_res,
                                                  &pipe->max_rate);

                /* Check ccdc maximum data rate when data comes from sensor
                 * TODO: Include ccdc rate in pipe->max_rate and compare the
                 *       total pipe rate with the input data rate from sensor.
                 */
                if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
                        unsigned int rate = UINT_MAX;

                        omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
                        if (isp->isp_ccdc.vpcfg.pixelclk > rate)
                                return -ENOSPC;
                }

                /* If sink pad is on CCDC, the link has the lane shifter
                 * in the middle of it. */
                shifter_link = subdev == &isp->isp_ccdc.subdev;

                /* Retrieve the source format */
                pad = media_entity_remote_source(pad);
                if (pad == NULL ||
                    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
                        break;

                subdev = media_entity_to_v4l2_subdev(pad->entity);

                fmt_source.pad = pad->index;
                fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
                ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
                if (ret < 0 && ret != -ENOIOCTLCMD)
                        return -EPIPE;

                /* Check if the two ends match */
                if (fmt_source.format.width != fmt_sink.format.width ||
                    fmt_source.format.height != fmt_sink.format.height)
                        return -EPIPE;

                if (shifter_link) {
                        unsigned int parallel_shift = 0;
                        if (isp->isp_ccdc.input == CCDC_INPUT_PARALLEL) {
                                struct isp_parallel_platform_data *pdata =
                                        &((struct isp_v4l2_subdevs_group *)
                                              subdev->host_priv)->bus.parallel;
                                parallel_shift = pdata->data_lane_shift * 2;
                        }
                        if (!isp_video_is_shiftable(fmt_source.format.code,
                                                fmt_sink.format.code,
                                                parallel_shift))
                                return -EPIPE;
                } else if (fmt_source.format.code != fmt_sink.format.code)
                        return -EPIPE;
        }

        return 0;
}

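/*
 * __isp_video_get_format - Retrieve the current format from the remote subdev
 * @video: ISP video instance
 * @format: V4L2 format structure to be filled
 *
 * Query the active format on the pad of the subdevice connected to the video
 * node and convert it to a V4L2 pixel format. Return the number of padding
 * bytes at end of line on success, or a negative error code otherwise.
 */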
static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        u32 pad;
        int ret;

        subdev = isp_video_remote_subdev(video, &pad);
        if (subdev == NULL)
                return -EINVAL;

        mutex_lock(&video->mutex);

        fmt.pad = pad;
        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret == -ENOIOCTLCMD)
                ret = -EINVAL;

        mutex_unlock(&video->mutex);

        if (ret)
                return ret;

        format->type = video->type;
        return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

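/*
 * isp_video_check_format - Check the file handle format against the subdev
 * @video: ISP video instance
 * @vfh: ISP video file handle holding the userspace format
 *
 * Compare the format stored in the file handle with the current format at the
 * output of the connected subdevice. Return the number of padding bytes at
 * end of line if the formats match, or a negative error code otherwise.
 */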
static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
        struct v4l2_format format;
        int ret;

        memcpy(&format, &vfh->format, sizeof(format));
        ret = __isp_video_get_format(video, &format);
        if (ret < 0)
                return ret;

        if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
            vfh->format.fmt.pix.height != format.fmt.pix.height ||
            vfh->format.fmt.pix.width != format.fmt.pix.width ||
            vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
            vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
                return -EINVAL;

        return ret;
}

/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG      (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @sglist: Pointer to the source scatter gather list.
 * @sglen: Number of elements of the scatter gather list.
 *
 * Return the device address mapped by the ISP MMU, or -ENOMEM if we ran out
 * of memory.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
        struct sg_table *sgt;
        u32 da;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (sgt == NULL)
                return -ENOMEM;

        sgt->sgl = (struct scatterlist *)sglist;
        sgt->nents = sglen;
        sgt->orig_nents = sglen;

        da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
        if (IS_ERR_VALUE(da))
                kfree(sgt);

        return da;
}

/*
 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @da: Device address generated from an ispmmu_vmap call.
 */
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
        struct sg_table *sgt;

        sgt = iommu_vunmap(isp->iommu, (u32)da);
        kfree(sgt);
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

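/*
 * isp_video_queue_prepare - Compute buffer size and count for a queue
 * @queue: ISP video buffer queue
 * @nbuffers: requested number of buffers, clamped on return
 * @size: filled with the buffer size in bytes
 *
 * Set the buffer size to the current image size and limit the number of
 * buffers so that the total allocation doesn't exceed the memory available
 * for capture.
 */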
static void isp_video_queue_prepare(struct isp_video_queue *queue,
                                    unsigned int *nbuffers, unsigned int *size)
{
        struct isp_video_fh *vfh =
                container_of(queue, struct isp_video_fh, queue);
        struct isp_video *video = vfh->video;

        *size = vfh->format.fmt.pix.sizeimage;
        if (*size == 0)
                return;

        *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
}

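/*
 * isp_video_buffer_cleanup - Release the ISP MMU mapping of a buffer
 * @buf: Video buffer
 *
 * Unmap the buffer from the ISP MMU if it has been mapped by
 * isp_video_buffer_prepare() and clear the stored device address.
 */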
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
        struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
        struct isp_buffer *buffer = to_isp_buffer(buf);
        struct isp_video *video = vfh->video;

        if (buffer->isp_addr) {
                ispmmu_vunmap(video->isp, buffer->isp_addr);
                buffer->isp_addr = 0;
        }
}

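/*
 * isp_video_buffer_prepare - Map a buffer through the ISP MMU
 * @buf: Video buffer
 *
 * Map the buffer scatter gather list through the ISP MMU and store the
 * resulting device address. Addresses that aren't aligned to a 32 byte
 * boundary are rejected. Return 0 on success or a negative error code
 * otherwise.
 */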
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
        struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
        struct isp_buffer *buffer = to_isp_buffer(buf);
        struct isp_video *video = vfh->video;
        unsigned long addr;

        addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
        if (IS_ERR_VALUE(addr))
                return -EIO;

        if (!IS_ALIGNED(addr, 32)) {
                dev_dbg(video->isp->dev, "Buffer address must be "
                        "aligned to a 32 byte boundary.\n");
                ispmmu_vunmap(video->isp, buffer->isp_addr);
                return -EINVAL;
        }

        buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
        buffer->isp_addr = addr;
        return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output and the pipeline isn't already
 * busy. If the pipeline is busy, it will be restarted in the output module
 * interrupt handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
        struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
        struct isp_buffer *buffer = to_isp_buffer(buf);
        struct isp_video *video = vfh->video;
        struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
        enum isp_pipeline_state state;
        unsigned long flags;
        unsigned int empty;
        unsigned int start;

        empty = list_empty(&video->dmaqueue);
        list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

        if (empty) {
                if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                        state = ISP_PIPELINE_QUEUE_OUTPUT;
                else
                        state = ISP_PIPELINE_QUEUE_INPUT;

                spin_lock_irqsave(&pipe->lock, flags);
                pipe->state |= state;
                video->ops->queue(video, buffer);
                video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

                start = isp_pipeline_ready(pipe);
                if (start)
                        pipe->state |= ISP_PIPELINE_STREAM;
                spin_unlock_irqrestore(&pipe->lock, flags);

                if (start)
                        omap3isp_pipeline_set_stream(pipe,
                                                ISP_PIPELINE_STREAM_SINGLESHOT);
        }
}

static const struct isp_video_queue_operations isp_video_queue_ops = {
        .queue_prepare = &isp_video_queue_prepare,
        .buffer_prepare = &isp_video_buffer_prepare,
        .buffer_queue = &isp_video_buffer_queue,
        .buffer_cleanup = &isp_video_buffer_cleanup,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 * @error: Whether an error occurred during capture
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * The buffer state is set to ISP_BUF_STATE_DONE if no error occurred (@error
 * is 0) or ISP_BUF_STATE_ERROR otherwise (@error is non-zero).
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
                                              unsigned int error)
{
        struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
        struct isp_video_queue *queue = video->queue;
        enum isp_pipeline_state state;
        struct isp_video_buffer *buf;
        unsigned long flags;
        struct timespec ts;

        spin_lock_irqsave(&queue->irqlock, flags);
        if (WARN_ON(list_empty(&video->dmaqueue))) {
                spin_unlock_irqrestore(&queue->irqlock, flags);
                return NULL;
        }

        buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
                               irqlist);
        list_del(&buf->irqlist);
        spin_unlock_irqrestore(&queue->irqlock, flags);

        ktime_get_ts(&ts);
        buf->vbuf.timestamp.tv_sec = ts.tv_sec;
        buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

        /* Do frame number propagation only if this is the output video node.
         * Frame number either comes from the CSI receivers or it gets
         * incremented here if H3A is not active.
         * Note: There is no guarantee that the output buffer will finish
         * first, so the input number might lag behind by 1 in some cases.
         */
        if (video == pipe->output && !pipe->do_propagation)
                buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
        else
                buf->vbuf.sequence = atomic_read(&pipe->frame_number);

        buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;

        wake_up(&buf->wait);

        if (list_empty(&video->dmaqueue)) {
                if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                        state = ISP_PIPELINE_QUEUE_OUTPUT
                              | ISP_PIPELINE_STREAM;
                else
                        state = ISP_PIPELINE_QUEUE_INPUT
                              | ISP_PIPELINE_STREAM;

                spin_lock_irqsave(&pipe->lock, flags);
                pipe->state &= ~state;
                if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
                        video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
                spin_unlock_irqrestore(&pipe->lock, flags);
                return NULL;
        }

        if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
                spin_lock_irqsave(&pipe->lock, flags);
                pipe->state &= ~ISP_PIPELINE_STREAM;
                spin_unlock_irqrestore(&pipe->lock, flags);
        }

        buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
                               irqlist);
        buf->state = ISP_BUF_STATE_ACTIVE;
        return to_isp_buffer(buf);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if the
 * pipeline is in continuous mode, and requests the ISP modules to requeue the
 * ACTIVE buffer if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
        struct isp_buffer *buf = NULL;

        if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                omap3isp_video_queue_discard_done(video->queue);

        if (!list_empty(&video->dmaqueue)) {
                buf = list_first_entry(&video->dmaqueue,
                                       struct isp_buffer, buffer.irqlist);
                video->ops->queue(video, buf);
                video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
        } else {
                if (continuous)
                        video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
        }
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
        struct isp_video *video = video_drvdata(file);

        strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
        strlcpy(cap->card, video->video.name, sizeof(cap->card));
        strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));

        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
        else
                cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

        return 0;
}

static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);
        struct isp_video *video = video_drvdata(file);

        if (format->type != video->type)
                return -EINVAL;

        mutex_lock(&video->mutex);
        *format = vfh->format;
        mutex_unlock(&video->mutex);

        return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);
        struct isp_video *video = video_drvdata(file);
        struct v4l2_mbus_framefmt fmt;

        if (format->type != video->type)
                return -EINVAL;

        mutex_lock(&video->mutex);

        /* Fill the bytesperline and sizeimage fields by converting to media bus
         * format and back to pixel format.
         */
        isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
        isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

        vfh->format = *format;

        mutex_unlock(&video->mutex);
        return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct isp_video *video = video_drvdata(file);
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        u32 pad;
        int ret;

        if (format->type != video->type)
                return -EINVAL;

        subdev = isp_video_remote_subdev(video, &pad);
        if (subdev == NULL)
                return -EINVAL;

        isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

        fmt.pad = pad;
        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
        return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
        struct isp_video *video = video_drvdata(file);
        struct v4l2_subdev *subdev;
        int ret;

        subdev = isp_video_remote_subdev(video, NULL);
        if (subdev == NULL)
                return -EINVAL;

        mutex_lock(&video->mutex);
        ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
        mutex_unlock(&video->mutex);

        return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
        struct isp_video *video = video_drvdata(file);
        struct v4l2_subdev_format format;
        struct v4l2_subdev *subdev;
        u32 pad;
        int ret;

        subdev = isp_video_remote_subdev(video, &pad);
        if (subdev == NULL)
                return -EINVAL;

        /* Try the get crop operation first and fall back to get format if it
         * isn't implemented.
         */
        ret = v4l2_subdev_call(subdev, video, g_crop, crop);
        if (ret != -ENOIOCTLCMD)
                return ret;

        format.pad = pad;
        format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        crop->c.left = 0;
        crop->c.top = 0;
        crop->c.width = format.format.width;
        crop->c.height = format.format.height;

        return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
        struct isp_video *video = video_drvdata(file);
        struct v4l2_subdev *subdev;
        int ret;

        subdev = isp_video_remote_subdev(video, NULL);
        if (subdev == NULL)
                return -EINVAL;

        mutex_lock(&video->mutex);
        ret = v4l2_subdev_call(subdev, video, s_crop, crop);
        mutex_unlock(&video->mutex);

        return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);
        struct isp_video *video = video_drvdata(file);

        if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
            video->type != a->type)
                return -EINVAL;

        memset(a, 0, sizeof(*a));
        a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
        a->parm.output.timeperframe = vfh->timeperframe;

        return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);
        struct isp_video *video = video_drvdata(file);

        if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
            video->type != a->type)
                return -EINVAL;

        if (a->parm.output.timeperframe.denominator == 0)
                a->parm.output.timeperframe.denominator = 1;

        vfh->timeperframe = a->parm.output.timeperframe;

        return 0;
}

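/*
 * The buffer management ioctls (REQBUFS, QUERYBUF, QBUF and DQBUF) are
 * delegated to the generic ISP video queue implementation.
 */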
static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);

        return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);

        return omap3isp_video_queue_querybuf(&vfh->queue, b);
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);

        return omap3isp_video_queue_qbuf(&vfh->queue, b);
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);

        return omap3isp_video_queue_dqbuf(&vfh->queue, b,
                                          file->f_flags & O_NONBLOCK);
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * with the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffer queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);
        struct isp_video *video = video_drvdata(file);
        enum isp_pipeline_state state;
        struct isp_pipeline *pipe;
        struct isp_video *far_end;
        unsigned long flags;
        int ret;

        if (type != video->type)
                return -EINVAL;

        mutex_lock(&video->stream_lock);

        if (video->streaming) {
                mutex_unlock(&video->stream_lock);
                return -EBUSY;
        }

        /* Start streaming on the pipeline. No link touching an entity in the
         * pipeline can be activated or deactivated once streaming is started.
         */
        pipe = video->video.entity.pipe
             ? to_isp_pipeline(&video->video.entity) : &video->pipe;
        media_entity_pipeline_start(&video->video.entity, &pipe->pipe);

        /* Verify that the currently configured format matches the output of
         * the connected subdev.
         */
        ret = isp_video_check_format(video, vfh);
        if (ret < 0)
                goto error;

        video->bpl_padding = ret;
        video->bpl_value = vfh->format.fmt.pix.bytesperline;

        /* Find the ISP video node connected at the far end of the pipeline and
         * update the pipeline.
         */
        far_end = isp_video_far_end(video);

        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
                pipe->input = far_end;
                pipe->output = video;
        } else {
                if (far_end == NULL) {
                        ret = -EPIPE;
                        goto error;
                }

                state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
                pipe->input = video;
                pipe->output = far_end;
        }

        if (video->isp->pdata->set_constraints)
                video->isp->pdata->set_constraints(video->isp, true);
        pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);

        /* Validate the pipeline and update its state. */
        ret = isp_video_validate_pipeline(pipe);
        if (ret < 0)
                goto error;

        spin_lock_irqsave(&pipe->lock, flags);
        pipe->state &= ~ISP_PIPELINE_STREAM;
        pipe->state |= state;
        spin_unlock_irqrestore(&pipe->lock, flags);

        /* Set the maximum time per frame as the value requested by userspace.
         * This is a soft limit that can be overridden if the hardware doesn't
         * support the requested limit.
         */
        if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
                pipe->max_timeperframe = vfh->timeperframe;

        video->queue = &vfh->queue;
        INIT_LIST_HEAD(&video->dmaqueue);
        atomic_set(&pipe->frame_number, -1);

        ret = omap3isp_video_queue_streamon(&vfh->queue);
        if (ret < 0)
                goto error;

        /* In sensor-to-memory mode, the stream can be started synchronously
         * with the stream on command. In memory-to-memory mode, it will be
         * started when buffers are queued on both the input and output.
         */
        if (pipe->input == NULL) {
                ret = omap3isp_pipeline_set_stream(pipe,
                                              ISP_PIPELINE_STREAM_CONTINUOUS);
                if (ret < 0)
                        goto error;
                spin_lock_irqsave(&video->queue->irqlock, flags);
                if (list_empty(&video->dmaqueue))
                        video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
                spin_unlock_irqrestore(&video->queue->irqlock, flags);
        }

error:
        if (ret < 0) {
                omap3isp_video_queue_streamoff(&vfh->queue);
                if (video->isp->pdata->set_constraints)
                        video->isp->pdata->set_constraints(video->isp, false);
                media_entity_pipeline_stop(&video->video.entity);
                video->queue = NULL;
        }

        if (!ret)
                video->streaming = 1;

        mutex_unlock(&video->stream_lock);
        return ret;
}

static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
        struct isp_video_fh *vfh = to_isp_video_fh(fh);
        struct isp_video *video = video_drvdata(file);
        struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
        enum isp_pipeline_state state;
        unsigned int streaming;
        unsigned long flags;

        if (type != video->type)
                return -EINVAL;

        mutex_lock(&video->stream_lock);

        /* Nothing to do if the queue isn't streaming. */
        mutex_lock(&vfh->queue.lock);
        streaming = vfh->queue.streaming;
        mutex_unlock(&vfh->queue.lock);

        if (!streaming)
                goto done;

        /* Update the pipeline state. */
        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                state = ISP_PIPELINE_STREAM_OUTPUT
                      | ISP_PIPELINE_QUEUE_OUTPUT;
        else
                state = ISP_PIPELINE_STREAM_INPUT
                      | ISP_PIPELINE_QUEUE_INPUT;

        spin_lock_irqsave(&pipe->lock, flags);
        pipe->state &= ~state;
        spin_unlock_irqrestore(&pipe->lock, flags);

        /* Stop the stream. */
        omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
        omap3isp_video_queue_streamoff(&vfh->queue);
        video->queue = NULL;
        video->streaming = 0;

        if (video->isp->pdata->set_constraints)
                video->isp->pdata->set_constraints(video->isp, false);
        media_entity_pipeline_stop(&video->video.entity);

done:
        mutex_unlock(&video->stream_lock);
        return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
        if (input->index > 0)
                return -EINVAL;

        strlcpy(input->name, "camera", sizeof(input->name));
        input->type = V4L2_INPUT_TYPE_CAMERA;

        return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
        *input = 0;

        return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
        return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
        .vidioc_querycap                = isp_video_querycap,
        .vidioc_g_fmt_vid_cap           = isp_video_get_format,
        .vidioc_s_fmt_vid_cap           = isp_video_set_format,
        .vidioc_try_fmt_vid_cap         = isp_video_try_format,
        .vidioc_g_fmt_vid_out           = isp_video_get_format,
        .vidioc_s_fmt_vid_out           = isp_video_set_format,
        .vidioc_try_fmt_vid_out         = isp_video_try_format,
        .vidioc_cropcap                 = isp_video_cropcap,
        .vidioc_g_crop                  = isp_video_get_crop,
        .vidioc_s_crop                  = isp_video_set_crop,
        .vidioc_g_parm                  = isp_video_get_param,
        .vidioc_s_parm                  = isp_video_set_param,
        .vidioc_reqbufs                 = isp_video_reqbufs,
        .vidioc_querybuf                = isp_video_querybuf,
        .vidioc_qbuf                    = isp_video_qbuf,
        .vidioc_dqbuf                   = isp_video_dqbuf,
        .vidioc_streamon                = isp_video_streamon,
        .vidioc_streamoff               = isp_video_streamoff,
        .vidioc_enum_input              = isp_video_enum_input,
        .vidioc_g_input                 = isp_video_g_input,
        .vidioc_s_input                 = isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

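/*
 * isp_video_open - Open an ISP video device node
 * @file: V4L2 file structure
 *
 * Allocate and initialize a file handle, take a reference to the ISP device,
 * mark the pipeline entities as in use for power management and initialize
 * the buffer queue. Return 0 on success or a negative error code otherwise.
 */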
static int isp_video_open(struct file *file)
{
        struct isp_video *video = video_drvdata(file);
        struct isp_video_fh *handle;
        int ret = 0;

        handle = kzalloc(sizeof(*handle), GFP_KERNEL);
        if (handle == NULL)
                return -ENOMEM;

        v4l2_fh_init(&handle->vfh, &video->video);
        v4l2_fh_add(&handle->vfh);

        /* If this is the first user, initialise the pipeline. */
        if (omap3isp_get(video->isp) == NULL) {
                ret = -EBUSY;
                goto done;
        }

        ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
        if (ret < 0) {
                omap3isp_put(video->isp);
                goto done;
        }

        omap3isp_video_queue_init(&handle->queue, video->type,
                                  &isp_video_queue_ops, video->isp->dev,
                                  sizeof(struct isp_buffer));

        memset(&handle->format, 0, sizeof(handle->format));
        handle->format.type = video->type;
        handle->timeperframe.denominator = 1;

        handle->video = video;
        file->private_data = &handle->vfh;

done:
        if (ret < 0) {
                v4l2_fh_del(&handle->vfh);
                kfree(handle);
        }

        return ret;
}

static int isp_video_release(struct file *file)
{
        struct isp_video *video = video_drvdata(file);
        struct v4l2_fh *vfh = file->private_data;
        struct isp_video_fh *handle = to_isp_video_fh(vfh);

        /* Disable streaming and free the buffers queue resources. */
        isp_video_streamoff(file, vfh, video->type);

        mutex_lock(&handle->queue.lock);
        omap3isp_video_queue_cleanup(&handle->queue);
        mutex_unlock(&handle->queue.lock);

        omap3isp_pipeline_pm_use(&video->video.entity, 0);

        /* Release the file handle. */
        v4l2_fh_del(vfh);
        kfree(handle);
        file->private_data = NULL;

        omap3isp_put(video->isp);

        return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
        struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
        struct isp_video_queue *queue = &vfh->queue;

        return omap3isp_video_queue_poll(queue, file, wait);
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

        return omap3isp_video_queue_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open = isp_video_open,
        .release = isp_video_release,
        .poll = isp_video_poll,
        .mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

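/*
 * omap3isp_video_init - Initialize an ISP video node
 * @video: ISP video instance
 * @name: Name used to build the video device name
 *
 * Initialize the media entity, locks and video device structure embedded in
 * the ISP video instance. The video type must be set by the caller before
 * calling this function. Return 0 on success or a negative error code
 * otherwise.
 */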
int omap3isp_video_init(struct isp_video *video, const char *name)
{
        const char *direction;
        int ret;

        switch (video->type) {
        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
                direction = "output";
                video->pad.flags = MEDIA_PAD_FL_SINK;
                break;
        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
                direction = "input";
                video->pad.flags = MEDIA_PAD_FL_SOURCE;
                break;

        default:
                return -EINVAL;
        }

        ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
        if (ret < 0)
                return ret;

        mutex_init(&video->mutex);
        atomic_set(&video->active, 0);

        spin_lock_init(&video->pipe.lock);
        mutex_init(&video->stream_lock);

        /* Initialize the video device. */
        if (video->ops == NULL)
                video->ops = &isp_video_dummy_ops;

        video->video.fops = &isp_video_fops;
        snprintf(video->video.name, sizeof(video->video.name),
                 "OMAP3 ISP %s %s", name, direction);
        video->video.vfl_type = VFL_TYPE_GRABBER;
        video->video.release = video_device_release_empty;
        video->video.ioctl_ops = &isp_video_ioctl_ops;
        video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

        video_set_drvdata(&video->video, video);

        return 0;
}

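/*
 * omap3isp_video_register - Register an ISP video node with the V4L2 core
 * @video: ISP video instance
 * @vdev: V4L2 device the video node belongs to
 *
 * Register the video device as a V4L2 grabber node. Return 0 on success or a
 * negative error code otherwise.
 */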
int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
        int ret;

        video->video.v4l2_dev = vdev;

        ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
        if (ret < 0)
                printk(KERN_ERR "%s: could not register video device (%d)\n",
                        __func__, ret);

        return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
        if (video_is_registered(&video->video)) {
                media_entity_cleanup(&video->video.entity);
                video_unregister_device(&video->video);
        }
}
