linux/drivers/media/platform/ti-vpe/vpe.c
/*
 * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 * David Griego, <dagriego@biglakesoftware.com>
 * Dale Farnsworth, <dale@farnsworth.org>
 * Archit Taneja, <archit@ti.com>
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 *
 * Based on the virtual v4l2-mem2mem example device
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/log2.h>
#include <linux/sizes.h>

#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vpdma.h"
#include "vpe_regs.h"
#include "sc.h"
#include "csc.h"

#define VPE_MODULE_NAME "vpe"

/* minimum and maximum frame sizes */
#define MIN_W           32
#define MIN_H           32
#define MAX_W           1920
#define MAX_H           1080

/* required alignments */
#define S_ALIGN         0       /* multiple of 1 */
#define H_ALIGN         1       /* multiple of 2 */

/* flags that indicate a format can be used for capture/output */
#define VPE_FMT_TYPE_CAPTURE    (1 << 0)
#define VPE_FMT_TYPE_OUTPUT     (1 << 1)

/* used as plane indices */
#define VPE_MAX_PLANES  2
#define VPE_LUMA        0
#define VPE_CHROMA      1

/* per m2m context info */
#define VPE_MAX_SRC_BUFS        3       /* need 3 src fields to de-interlace */

#define VPE_DEF_BUFS_PER_JOB    1       /* default one buffer per batch job */

/*
 * each VPE context can need up to 3 config descriptors, 7 input descriptors,
 * 3 output descriptors, and 10 control descriptors; that is, 10 data
 * descriptors (7 in + 3 out) and 13 config/control descriptors (3 + 10)
 */
#define VPE_DESC_LIST_SIZE      (10 * VPDMA_DTD_DESC_SIZE +     \
                                        13 * VPDMA_CFD_CTD_DESC_SIZE)

#define vpe_dbg(vpedev, fmt, arg...)    \
                dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg)
#define vpe_err(vpedev, fmt, arg...)    \
                dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg)

struct vpe_us_coeffs {
        unsigned short  anchor_fid0_c0;
        unsigned short  anchor_fid0_c1;
        unsigned short  anchor_fid0_c2;
        unsigned short  anchor_fid0_c3;
        unsigned short  interp_fid0_c0;
        unsigned short  interp_fid0_c1;
        unsigned short  interp_fid0_c2;
        unsigned short  interp_fid0_c3;
        unsigned short  anchor_fid1_c0;
        unsigned short  anchor_fid1_c1;
        unsigned short  anchor_fid1_c2;
        unsigned short  anchor_fid1_c3;
        unsigned short  interp_fid1_c0;
        unsigned short  interp_fid1_c1;
        unsigned short  interp_fid1_c2;
        unsigned short  interp_fid1_c3;
};

/*
 * Default upsampler coefficients
 */
static const struct vpe_us_coeffs us_coeffs[] = {
        {
                /* Coefficients for progressive input */
                0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
                0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8,
        },
        {
                /* Coefficients for Top Field Interlaced input */
                0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3,
                /* Coefficients for Bottom Field Interlaced input */
                0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9,
        },
};

/*
 * The following registers configure some of the parameters of the motion and
 * edge detection blocks inside the DEI. They generally remain the same, but
 * could later be exposed to userspace if someone needs to tweak them.
 */
struct vpe_dei_regs {
        unsigned long mdt_spacial_freq_thr_reg;         /* VPE_DEI_REG2 */
        unsigned long edi_config_reg;                   /* VPE_DEI_REG3 */
        unsigned long edi_lut_reg0;                     /* VPE_DEI_REG4 */
        unsigned long edi_lut_reg1;                     /* VPE_DEI_REG5 */
        unsigned long edi_lut_reg2;                     /* VPE_DEI_REG6 */
        unsigned long edi_lut_reg3;                     /* VPE_DEI_REG7 */
};

/*
 * default expert DEI register values, unlikely to be modified.
 */
static const struct vpe_dei_regs dei_regs = {
        .mdt_spacial_freq_thr_reg = 0x020C0804u,
        .edi_config_reg = 0x0118100Fu,
        .edi_lut_reg0 = 0x08040200u,
        .edi_lut_reg1 = 0x1010100Cu,
        .edi_lut_reg2 = 0x10101010u,
        .edi_lut_reg3 = 0x10101010u,
};

/*
 * The port_data structure contains per-port data.
 */
struct vpe_port_data {
        enum vpdma_channel channel;     /* VPDMA channel */
        u8      vb_index;               /* input frame f, f-1, f-2 index */
        u8      vb_part;                /* plane index for co-planar formats */
};

/*
 * Define indices into the port_data tables
 */
#define VPE_PORT_LUMA1_IN       0
#define VPE_PORT_CHROMA1_IN     1
#define VPE_PORT_LUMA2_IN       2
#define VPE_PORT_CHROMA2_IN     3
#define VPE_PORT_LUMA3_IN       4
#define VPE_PORT_CHROMA3_IN     5
#define VPE_PORT_MV_IN          6
#define VPE_PORT_MV_OUT         7
#define VPE_PORT_LUMA_OUT       8
#define VPE_PORT_CHROMA_OUT     9
#define VPE_PORT_RGB_OUT        10

static const struct vpe_port_data port_data[11] = {
        [VPE_PORT_LUMA1_IN] = {
                .channel        = VPE_CHAN_LUMA1_IN,
                .vb_index       = 0,
                .vb_part        = VPE_LUMA,
        },
        [VPE_PORT_CHROMA1_IN] = {
                .channel        = VPE_CHAN_CHROMA1_IN,
                .vb_index       = 0,
                .vb_part        = VPE_CHROMA,
        },
        [VPE_PORT_LUMA2_IN] = {
                .channel        = VPE_CHAN_LUMA2_IN,
                .vb_index       = 1,
                .vb_part        = VPE_LUMA,
        },
        [VPE_PORT_CHROMA2_IN] = {
                .channel        = VPE_CHAN_CHROMA2_IN,
                .vb_index       = 1,
                .vb_part        = VPE_CHROMA,
        },
        [VPE_PORT_LUMA3_IN] = {
                .channel        = VPE_CHAN_LUMA3_IN,
                .vb_index       = 2,
                .vb_part        = VPE_LUMA,
        },
        [VPE_PORT_CHROMA3_IN] = {
                .channel        = VPE_CHAN_CHROMA3_IN,
                .vb_index       = 2,
                .vb_part        = VPE_CHROMA,
        },
        [VPE_PORT_MV_IN] = {
                .channel        = VPE_CHAN_MV_IN,
        },
        [VPE_PORT_MV_OUT] = {
                .channel        = VPE_CHAN_MV_OUT,
        },
        [VPE_PORT_LUMA_OUT] = {
                .channel        = VPE_CHAN_LUMA_OUT,
                .vb_part        = VPE_LUMA,
        },
        [VPE_PORT_CHROMA_OUT] = {
                .channel        = VPE_CHAN_CHROMA_OUT,
                .vb_part        = VPE_CHROMA,
        },
        [VPE_PORT_RGB_OUT] = {
                .channel        = VPE_CHAN_RGB_OUT,
                .vb_part        = VPE_LUMA,
        },
};

/* driver info for each of the supported video formats */
struct vpe_fmt {
        char    *name;                  /* human-readable name */
        u32     fourcc;                 /* standard format identifier */
        u8      types;                  /* CAPTURE and/or OUTPUT */
        u8      coplanar;               /* set for unpacked Luma and Chroma */
        /* vpdma format info for each plane */
        struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES];
};

static struct vpe_fmt vpe_formats[] = {
        {
                .name           = "YUV 422 co-planar",
                .fourcc         = V4L2_PIX_FMT_NV16,
                .types          = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
                .coplanar       = 1,
                .vpdma_fmt      = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444],
                                    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444],
                                  },
        },
        {
                .name           = "YUV 420 co-planar",
                .fourcc         = V4L2_PIX_FMT_NV12,
                .types          = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
                .coplanar       = 1,
                .vpdma_fmt      = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420],
                                    &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420],
                                  },
        },
        {
                .name           = "YUYV 422 packed",
                .fourcc         = V4L2_PIX_FMT_YUYV,
                .types          = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
                .coplanar       = 0,
                .vpdma_fmt      = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YC422],
                                  },
        },
        {
                .name           = "UYVY 422 packed",
                .fourcc         = V4L2_PIX_FMT_UYVY,
                .types          = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT,
                .coplanar       = 0,
                .vpdma_fmt      = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CY422],
                                  },
        },
        {
                .name           = "RGB888 packed",
                .fourcc         = V4L2_PIX_FMT_RGB24,
                .types          = VPE_FMT_TYPE_CAPTURE,
                .coplanar       = 0,
                .vpdma_fmt      = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24],
                                  },
        },
        {
                .name           = "ARGB32",
                .fourcc         = V4L2_PIX_FMT_RGB32,
                .types          = VPE_FMT_TYPE_CAPTURE,
                .coplanar       = 0,
                .vpdma_fmt      = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32],
                                  },
        },
        {
                .name           = "BGR888 packed",
                .fourcc         = V4L2_PIX_FMT_BGR24,
                .types          = VPE_FMT_TYPE_CAPTURE,
                .coplanar       = 0,
                .vpdma_fmt      = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24],
                                  },
        },
        {
                .name           = "ABGR32",
                .fourcc         = V4L2_PIX_FMT_BGR32,
                .types          = VPE_FMT_TYPE_CAPTURE,
                .coplanar       = 0,
                .vpdma_fmt      = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32],
                                  },
        },
};

/*
 * per-queue, driver-specific private data.
 * there is one source queue and one destination queue for each m2m context.
 */
struct vpe_q_data {
        unsigned int            width;                          /* frame width */
        unsigned int            height;                         /* frame height */
        unsigned int            bytesperline[VPE_MAX_PLANES];   /* bytes per line in memory */
        enum v4l2_colorspace    colorspace;
        enum v4l2_field         field;                          /* supported field value */
        unsigned int            flags;
        unsigned int            sizeimage[VPE_MAX_PLANES];      /* image size in memory */
        struct v4l2_rect        c_rect;                         /* crop/compose rectangle */
        struct vpe_fmt          *fmt;                           /* format info */
};

/* vpe_q_data flag bits */
#define Q_DATA_FRAME_1D         (1 << 0)
#define Q_DATA_MODE_TILED       (1 << 1)
#define Q_DATA_INTERLACED       (1 << 2)

enum {
        Q_DATA_SRC = 0,
        Q_DATA_DST = 1,
};

/* find our format description corresponding to the passed v4l2_format */
static struct vpe_fmt *find_format(struct v4l2_format *f)
{
        struct vpe_fmt *fmt;
        unsigned int k;

        for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) {
                fmt = &vpe_formats[k];
                if (fmt->fourcc == f->fmt.pix.pixelformat)
                        return fmt;
        }

        return NULL;
}

/*
 * there is one vpe_dev structure in the driver, it is shared by
 * all instances.
 */
struct vpe_dev {
        struct v4l2_device      v4l2_dev;
        struct video_device     vfd;
        struct v4l2_m2m_dev     *m2m_dev;

        atomic_t                num_instances;  /* count of driver instances */
        dma_addr_t              loaded_mmrs;    /* shadow mmrs in device */
        struct mutex            dev_mutex;
        spinlock_t              lock;

        int                     irq;
        void __iomem            *base;
        struct resource         *res;

        struct vpdma_data       *vpdma;         /* vpdma data handle */
        struct sc_data          *sc;            /* scaler data handle */
        struct csc_data         *csc;           /* csc data handle */
};

/*
 * There is one vpe_ctx structure for each m2m context.
 */
struct vpe_ctx {
        struct v4l2_fh          fh;
        struct vpe_dev          *dev;
        struct v4l2_ctrl_handler hdl;

        unsigned int            field;                  /* current field */
        unsigned int            sequence;               /* current frame/field seq */
        unsigned int            aborting;               /* abort after next irq */

        unsigned int            bufs_per_job;           /* input buffers per batch */
        unsigned int            bufs_completed;         /* bufs done in this batch */

        struct vpe_q_data       q_data[2];              /* src & dst queue data */
        struct vb2_v4l2_buffer  *src_vbs[VPE_MAX_SRC_BUFS];
        struct vb2_v4l2_buffer  *dst_vb;

        dma_addr_t              mv_buf_dma[2];          /* dma addrs of motion vector in/out bufs */
        void                    *mv_buf[2];             /* virtual addrs of motion vector bufs */
        size_t                  mv_buf_size;            /* current motion vector buffer size */
        struct vpdma_buf        mmr_adb;                /* shadow reg addr/data block */
        struct vpdma_buf        sc_coeff_h;             /* h coeff buffer */
        struct vpdma_buf        sc_coeff_v;             /* v coeff buffer */
        struct vpdma_desc_list  desc_list;              /* DMA descriptor list */

        bool                    deinterlacing;          /* using de-interlacer */
        bool                    load_mmrs;              /* have new shadow reg values */

        unsigned int            src_mv_buf_selector;
};

/*
 * M2M devices get 2 queues.
 * Return the queue given the type.
 */
static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx,
                                     enum v4l2_buf_type type)
{
        switch (type) {
        case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
                return &ctx->q_data[Q_DATA_SRC];
        case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
                return &ctx->q_data[Q_DATA_DST];
        default:
                BUG();
        }
        return NULL;
}
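
/*
 * For example, get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) returns
 * &ctx->q_data[Q_DATA_SRC]: OUTPUT buffers feed the device, so they are the
 * m2m source; CAPTURE buffers receive the result and are the destination.
 */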

static u32 read_reg(struct vpe_dev *dev, int offset)
{
        return ioread32(dev->base + offset);
}

static void write_reg(struct vpe_dev *dev, int offset, u32 value)
{
        iowrite32(value, dev->base + offset);
}

/* register field read/write helpers */
static int get_field(u32 value, u32 mask, int shift)
{
        return (value & (mask << shift)) >> shift;
}

static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift)
{
        return get_field(read_reg(dev, offset), mask, shift);
}

static void write_field(u32 *valp, u32 field, u32 mask, int shift)
{
        u32 val = *valp;

        val &= ~(mask << shift);
        val |= (field & mask) << shift;
        *valp = val;
}
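
/*
 * Illustrative use of the field helpers above (a sketch, not part of the
 * original driver): pack the 2-bit value 0x2 into bits [5:4] of a register
 * word, then read it back:
 *
 *      u32 val = 0;
 *
 *      write_field(&val, 0x2, 0x3, 4);         // val is now 0x20
 *      get_field(val, 0x3, 4);                 // returns 0x2
 */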

static void write_field_reg(struct vpe_dev *dev, int offset, u32 field,
                u32 mask, int shift)
{
        u32 val = read_reg(dev, offset);

        write_field(&val, field, mask, shift);

        write_reg(dev, offset, val);
}

/*
 * DMA address/data block for the shadow registers
 */
struct vpe_mmr_adb {
        struct vpdma_adb_hdr    out_fmt_hdr;
        u32                     out_fmt_reg[1];
        u32                     out_fmt_pad[3];
        struct vpdma_adb_hdr    us1_hdr;
        u32                     us1_regs[8];
        struct vpdma_adb_hdr    us2_hdr;
        u32                     us2_regs[8];
        struct vpdma_adb_hdr    us3_hdr;
        u32                     us3_regs[8];
        struct vpdma_adb_hdr    dei_hdr;
        u32                     dei_regs[8];
        struct vpdma_adb_hdr    sc_hdr0;
        u32                     sc_regs0[7];
        u32                     sc_pad0[1];
        struct vpdma_adb_hdr    sc_hdr8;
        u32                     sc_regs8[6];
        u32                     sc_pad8[2];
        struct vpdma_adb_hdr    sc_hdr17;
        u32                     sc_regs17[9];
        u32                     sc_pad17[3];
        struct vpdma_adb_hdr    csc_hdr;
        u32                     csc_regs[6];
        u32                     csc_pad[2];
};

#define GET_OFFSET_TOP(ctx, obj, reg)   \
        ((obj)->res->start - ctx->dev->res->start + reg)

#define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a)   \
        VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
/*
 * Set the headers for all of the address/data block structures.
 */
static void init_adb_hdrs(struct vpe_ctx *ctx)
{
        VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT);
        VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0);
        VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0);
        VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0);
        VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE);
        VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0,
                GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0));
        VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8,
                GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8));
        VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17,
                GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17));
        VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs,
                GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00));
}

/*
 * Allocate or re-allocate the motion vector DMA buffers
 * There are two buffers, one for input and one for output.
 * However, the roles are reversed after each field is processed.
 * In other words, after each field is processed, the previous
 * output (dst) MV buffer becomes the new input (src) MV buffer.
 */
static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
{
        struct device *dev = ctx->dev->v4l2_dev.dev;

        if (ctx->mv_buf_size == size)
                return 0;

        if (ctx->mv_buf[0])
                dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
                        ctx->mv_buf_dma[0]);

        if (ctx->mv_buf[1])
                dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
                        ctx->mv_buf_dma[1]);

        if (size == 0)
                return 0;

        ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
                                GFP_KERNEL);
        if (!ctx->mv_buf[0]) {
                vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
                return -ENOMEM;
        }

        ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
                                GFP_KERNEL);
        if (!ctx->mv_buf[1]) {
                vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
                dma_free_coherent(dev, size, ctx->mv_buf[0],
                        ctx->mv_buf_dma[0]);

                return -ENOMEM;
        }

        ctx->mv_buf_size = size;
        ctx->src_mv_buf_selector = 0;

        return 0;
}

static void free_mv_buffers(struct vpe_ctx *ctx)
{
        realloc_mv_buffers(ctx, 0);
}

/*
 * While de-interlacing, we keep the two most recent input buffers
 * around.  This function frees those two buffers when we have
 * finished processing the current stream.
 */
static void free_vbs(struct vpe_ctx *ctx)
{
        struct vpe_dev *dev = ctx->dev;
        unsigned long flags;

        if (ctx->src_vbs[2] == NULL)
                return;

        spin_lock_irqsave(&dev->lock, flags);
        if (ctx->src_vbs[2]) {
                v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
                v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
        }
        spin_unlock_irqrestore(&dev->lock, flags);
}

/*
 * Enable or disable the VPE clocks
 */
static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
{
        u32 val = 0;

        if (on)
                val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
        write_reg(dev, VPE_CLK_ENABLE, val);
}

static void vpe_top_reset(struct vpe_dev *dev)
{
        write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
                VPE_DATA_PATH_CLK_RESET_SHIFT);

        usleep_range(100, 150);

        write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
                VPE_DATA_PATH_CLK_RESET_SHIFT);
}

static void vpe_top_vpdma_reset(struct vpe_dev *dev)
{
        write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
                VPE_VPDMA_CLK_RESET_SHIFT);

        usleep_range(100, 150);

        write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
                VPE_VPDMA_CLK_RESET_SHIFT);
}

/*
 * Load the correct set of upsampler coefficients into the shadow MMRs
 */
static void set_us_coefficients(struct vpe_ctx *ctx)
{
        struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
        struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
        u32 *us1_reg = &mmr_adb->us1_regs[0];
        u32 *us2_reg = &mmr_adb->us2_regs[0];
        u32 *us3_reg = &mmr_adb->us3_regs[0];
        const unsigned short *cp, *end_cp;

        cp = &us_coeffs[0].anchor_fid0_c0;

        if (s_q_data->flags & Q_DATA_INTERLACED)        /* interlaced */
                cp += sizeof(us_coeffs[0]) / sizeof(*cp);

        end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);

        while (cp < end_cp) {
                write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
                write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
                *us2_reg++ = *us1_reg;
                *us3_reg++ = *us1_reg++;
        }
        ctx->load_mmrs = true;
}

/*
 * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs.
 */
static void set_cfg_and_line_modes(struct vpe_ctx *ctx)
{
        struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt;
        struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
        u32 *us1_reg0 = &mmr_adb->us1_regs[0];
        u32 *us2_reg0 = &mmr_adb->us2_regs[0];
        u32 *us3_reg0 = &mmr_adb->us3_regs[0];
        int line_mode = 1;
        int cfg_mode = 1;

        /*
         * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing.
         * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing.
         */

        if (fmt->fourcc == V4L2_PIX_FMT_NV12) {
                cfg_mode = 0;
                line_mode = 0;          /* double lines to line buffer */
        }

        write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
        write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);
        write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT);

        /* line mode is set via direct register writes for now */
        vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN);
        vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN);
        vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN);

        /* frame start for input luma */
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_LUMA1_IN);
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_LUMA2_IN);
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_LUMA3_IN);

        /* frame start for input chroma */
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_CHROMA1_IN);
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_CHROMA2_IN);
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_CHROMA3_IN);

        /* frame start for MV in client */
        vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE,
                VPE_CHAN_MV_IN);

        ctx->load_mmrs = true;
}

/*
 * Set the shadow registers that are modified when the source
 * format changes.
 */
static void set_src_registers(struct vpe_ctx *ctx)
{
        set_us_coefficients(ctx);
}

/*
 * Set the shadow registers that are modified when the destination
 * format changes.
 */
static void set_dst_registers(struct vpe_ctx *ctx)
{
        struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
        enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace;
        struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt;
        u32 val = 0;

        if (clrspc == V4L2_COLORSPACE_SRGB)
                val |= VPE_RGB_OUT_SELECT;
        else if (fmt->fourcc == V4L2_PIX_FMT_NV16)
                val |= VPE_COLOR_SEPARATE_422;

        /*
         * the source of CHR_DS and CSC is always the scaler, irrespective of
         * whether it's used or not
         */
        val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER;

        if (fmt->fourcc != V4L2_PIX_FMT_NV12)
                val |= VPE_DS_BYPASS;

        mmr_adb->out_fmt_reg[0] = val;

        ctx->load_mmrs = true;
}

/*
 * Set the de-interlacer shadow register values
 */
static void set_dei_regs(struct vpe_ctx *ctx)
{
        struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
        struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
        unsigned int src_h = s_q_data->c_rect.height;
        unsigned int src_w = s_q_data->c_rect.width;
        u32 *dei_mmr0 = &mmr_adb->dei_regs[0];
        bool deinterlace = true;
        u32 val = 0;

        /*
         * according to TRM, we should set DEI in progressive bypass mode when
         * the input content is progressive, however, DEI is bypassed correctly
         * for both progressive and interlace content in interlace bypass mode.
         * It has been recommended not to use progressive bypass mode.
         */
        if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) ||
                        !(s_q_data->flags & Q_DATA_INTERLACED)) {
                deinterlace = false;
                val = VPE_DEI_INTERLACE_BYPASS;
        }

        src_h = deinterlace ? src_h * 2 : src_h;

        val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
                (src_w << VPE_DEI_WIDTH_SHIFT) |
                VPE_DEI_FIELD_FLUSH;

        *dei_mmr0 = val;

        ctx->load_mmrs = true;
}

static void set_dei_shadow_registers(struct vpe_ctx *ctx)
{
        struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
        u32 *dei_mmr = &mmr_adb->dei_regs[0];
        const struct vpe_dei_regs *cur = &dei_regs;

        dei_mmr[2]  = cur->mdt_spacial_freq_thr_reg;
        dei_mmr[3]  = cur->edi_config_reg;
        dei_mmr[4]  = cur->edi_lut_reg0;
        dei_mmr[5]  = cur->edi_lut_reg1;
        dei_mmr[6]  = cur->edi_lut_reg2;
        dei_mmr[7]  = cur->edi_lut_reg3;

        ctx->load_mmrs = true;
}

/*
 * Set the shadow registers whose values are modified when either the
 * source or destination format is changed.
 */
static int set_srcdst_params(struct vpe_ctx *ctx)
{
        struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
        struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
        struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
        unsigned int src_w = s_q_data->c_rect.width;
        unsigned int src_h = s_q_data->c_rect.height;
        unsigned int dst_w = d_q_data->c_rect.width;
        unsigned int dst_h = d_q_data->c_rect.height;
        size_t mv_buf_size;
        int ret;

        ctx->sequence = 0;
        ctx->field = V4L2_FIELD_TOP;

        if ((s_q_data->flags & Q_DATA_INTERLACED) &&
                        !(d_q_data->flags & Q_DATA_INTERLACED)) {
                int bytes_per_line;
                const struct vpdma_data_format *mv =
                        &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];

                /*
                 * we make sure that the source image has a 16 byte aligned
                 * stride; we need to do the same for the motion vector buffer
                 * by aligning its stride to the next 16 byte boundary. this
                 * extra space will not be used by the de-interlacer, but it
                 * ensures that vpdma operates correctly
                 */
                bytes_per_line = ALIGN((s_q_data->width * mv->depth) >> 3,
                                        VPDMA_STRIDE_ALIGN);
                mv_buf_size = bytes_per_line * s_q_data->height;
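                /*
                 * Worked example (illustrative; assumes the VPDMA MV format
                 * packs 4 bits per pixel): for a 1920 pixel wide source,
                 * bytes_per_line = ALIGN((1920 * 4) >> 3, 16) = 960, i.e.
                 * 960 bytes of motion vector data per line.
                 */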

                ctx->deinterlacing = true;
                src_h <<= 1;
        } else {
                ctx->deinterlacing = false;
                mv_buf_size = 0;
        }

        free_vbs(ctx);

        ret = realloc_mv_buffers(ctx, mv_buf_size);
        if (ret)
                return ret;

        set_cfg_and_line_modes(ctx);
        set_dei_regs(ctx);

        csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
                s_q_data->colorspace, d_q_data->colorspace);

        sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
        sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);

        sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
                &mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
                src_w, src_h, dst_w, dst_h);

        return 0;
}

/*
 * Return the vpe_ctx structure for a given struct file
 */
static struct vpe_ctx *file2ctx(struct file *file)
{
        return container_of(file->private_data, struct vpe_ctx, fh);
}

/*
 * mem2mem callbacks
 */

/**
 * job_ready() - check whether an instance is ready to be scheduled to run
 */
static int job_ready(void *priv)
{
        struct vpe_ctx *ctx = priv;
        int needed = ctx->bufs_per_job;

        if (ctx->deinterlacing && ctx->src_vbs[2] == NULL)
                needed += 2;    /* need additional two most recent fields */

        if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed)
                return 0;

        if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed)
                return 0;

        return 1;
}
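
/*
 * E.g. with the default bufs_per_job of 1, a de-interlacing context needs 3
 * source buffers queued before its first job can run, since the DEI consumes
 * fields f, f-1 and f-2.
 */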

static void job_abort(void *priv)
{
        struct vpe_ctx *ctx = priv;

        /* Will cancel the transaction in the next interrupt handler */
        ctx->aborting = 1;
}

/*
 * Lock access to the device
 */
static void vpe_lock(void *priv)
{
        struct vpe_ctx *ctx = priv;
        struct vpe_dev *dev = ctx->dev;

        mutex_lock(&dev->dev_mutex);
}

static void vpe_unlock(void *priv)
{
        struct vpe_ctx *ctx = priv;
        struct vpe_dev *dev = ctx->dev;

        mutex_unlock(&dev->dev_mutex);
}

static void vpe_dump_regs(struct vpe_dev *dev)
{
#define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r))

        vpe_dbg(dev, "VPE Registers:\n");

        DUMPREG(PID);
        DUMPREG(SYSCONFIG);
        DUMPREG(INT0_STATUS0_RAW);
        DUMPREG(INT0_STATUS0);
        DUMPREG(INT0_ENABLE0);
        DUMPREG(INT0_STATUS1_RAW);
        DUMPREG(INT0_STATUS1);
        DUMPREG(INT0_ENABLE1);
        DUMPREG(CLK_ENABLE);
        DUMPREG(CLK_RESET);
        DUMPREG(CLK_FORMAT_SELECT);
        DUMPREG(CLK_RANGE_MAP);
        DUMPREG(US1_R0);
        DUMPREG(US1_R1);
        DUMPREG(US1_R2);
        DUMPREG(US1_R3);
        DUMPREG(US1_R4);
        DUMPREG(US1_R5);
        DUMPREG(US1_R6);
        DUMPREG(US1_R7);
        DUMPREG(US2_R0);
        DUMPREG(US2_R1);
        DUMPREG(US2_R2);
        DUMPREG(US2_R3);
        DUMPREG(US2_R4);
        DUMPREG(US2_R5);
        DUMPREG(US2_R6);
        DUMPREG(US2_R7);
        DUMPREG(US3_R0);
        DUMPREG(US3_R1);
        DUMPREG(US3_R2);
        DUMPREG(US3_R3);
        DUMPREG(US3_R4);
        DUMPREG(US3_R5);
        DUMPREG(US3_R6);
        DUMPREG(US3_R7);
        DUMPREG(DEI_FRAME_SIZE);
        DUMPREG(MDT_BYPASS);
        DUMPREG(MDT_SF_THRESHOLD);
        DUMPREG(EDI_CONFIG);
        DUMPREG(DEI_EDI_LUT_R0);
        DUMPREG(DEI_EDI_LUT_R1);
        DUMPREG(DEI_EDI_LUT_R2);
        DUMPREG(DEI_EDI_LUT_R3);
        DUMPREG(DEI_FMD_WINDOW_R0);
        DUMPREG(DEI_FMD_WINDOW_R1);
        DUMPREG(DEI_FMD_CONTROL_R0);
        DUMPREG(DEI_FMD_CONTROL_R1);
        DUMPREG(DEI_FMD_STATUS_R0);
        DUMPREG(DEI_FMD_STATUS_R1);
        DUMPREG(DEI_FMD_STATUS_R2);
#undef DUMPREG

        sc_dump_regs(dev->sc);
        csc_dump_regs(dev->csc);
}

static void add_out_dtd(struct vpe_ctx *ctx, int port)
{
        struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST];
        const struct vpe_port_data *p_data = &port_data[port];
        struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf;
        struct vpe_fmt *fmt = q_data->fmt;
        const struct vpdma_data_format *vpdma_fmt;
        int mv_buf_selector = !ctx->src_mv_buf_selector;
        dma_addr_t dma_addr;
        u32 flags = 0;

        if (port == VPE_PORT_MV_OUT) {
                vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
                dma_addr = ctx->mv_buf_dma[mv_buf_selector];
        } else {
                /* to incorporate interleaved formats */
                int plane = fmt->coplanar ? p_data->vb_part : 0;

                vpdma_fmt = fmt->vpdma_fmt[plane];
                dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
                if (!dma_addr) {
                        vpe_err(ctx->dev,
                                "acquiring output buffer(%d) dma_addr failed\n",
                                port);
                        return;
                }
        }

        if (q_data->flags & Q_DATA_FRAME_1D)
                flags |= VPDMA_DATA_FRAME_1D;
        if (q_data->flags & Q_DATA_MODE_TILED)
                flags |= VPDMA_DATA_MODE_TILED;

        vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
                vpdma_fmt, dma_addr, p_data->channel, flags);
}

static void add_in_dtd(struct vpe_ctx *ctx, int port)
{
        struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC];
        const struct vpe_port_data *p_data = &port_data[port];
        struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf;
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vpe_fmt *fmt = q_data->fmt;
        const struct vpdma_data_format *vpdma_fmt;
        int mv_buf_selector = ctx->src_mv_buf_selector;
        int field = vbuf->field == V4L2_FIELD_BOTTOM;
        int frame_width, frame_height;
        dma_addr_t dma_addr;
        u32 flags = 0;

        if (port == VPE_PORT_MV_IN) {
                vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV];
                dma_addr = ctx->mv_buf_dma[mv_buf_selector];
        } else {
                /* to incorporate interleaved formats */
                int plane = fmt->coplanar ? p_data->vb_part : 0;

                vpdma_fmt = fmt->vpdma_fmt[plane];

                dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane);
                if (!dma_addr) {
                        vpe_err(ctx->dev,
                                "acquiring input buffer(%d) dma_addr failed\n",
                                port);
                        return;
                }
        }

        if (q_data->flags & Q_DATA_FRAME_1D)
                flags |= VPDMA_DATA_FRAME_1D;
        if (q_data->flags & Q_DATA_MODE_TILED)
                flags |= VPDMA_DATA_MODE_TILED;

        frame_width = q_data->c_rect.width;
        frame_height = q_data->c_rect.height;

        if (p_data->vb_part && fmt->fourcc == V4L2_PIX_FMT_NV12)
                frame_height /= 2;

        vpdma_add_in_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect,
                vpdma_fmt, dma_addr, p_data->channel, field, flags, frame_width,
                frame_height, 0, 0);
}

/*
 * Enable the expected IRQ sources
 */
static void enable_irqs(struct vpe_ctx *ctx)
{
        write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
        write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
                                VPE_DS1_UV_ERROR_INT);

        vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true);
}

static void disable_irqs(struct vpe_ctx *ctx)
{
        write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
        write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

        vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false);
}

/* device_run() - prepares and starts the device
 *
 * This function is only called when both the source and destination
 * buffers are in place.
 */
static void device_run(void *priv)
{
        struct vpe_ctx *ctx = priv;
        struct sc_data *sc = ctx->dev->sc;
        struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];

        if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) {
                ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                WARN_ON(ctx->src_vbs[2] == NULL);
                ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                WARN_ON(ctx->src_vbs[1] == NULL);
        }

        ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
        WARN_ON(ctx->src_vbs[0] == NULL);
        ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
        WARN_ON(ctx->dst_vb == NULL);

        /* config descriptors */
        if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) {
                vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb);
                vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb);
                ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr;
                ctx->load_mmrs = false;
        }

        if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr ||
                        sc->load_coeff_h) {
                vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h);
                vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
                        &ctx->sc_coeff_h, 0);

                sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr;
                sc->load_coeff_h = false;
        }

        if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr ||
                        sc->load_coeff_v) {
                vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v);
                vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT,
                        &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4);

                sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr;
                sc->load_coeff_v = false;
        }

        /* output data descriptors */
        if (ctx->deinterlacing)
                add_out_dtd(ctx, VPE_PORT_MV_OUT);

        if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
                add_out_dtd(ctx, VPE_PORT_RGB_OUT);
        } else {
                add_out_dtd(ctx, VPE_PORT_LUMA_OUT);
                if (d_q_data->fmt->coplanar)
                        add_out_dtd(ctx, VPE_PORT_CHROMA_OUT);
        }

        /* input data descriptors */
        if (ctx->deinterlacing) {
                add_in_dtd(ctx, VPE_PORT_LUMA3_IN);
                add_in_dtd(ctx, VPE_PORT_CHROMA3_IN);

                add_in_dtd(ctx, VPE_PORT_LUMA2_IN);
                add_in_dtd(ctx, VPE_PORT_CHROMA2_IN);
        }

        add_in_dtd(ctx, VPE_PORT_LUMA1_IN);
        add_in_dtd(ctx, VPE_PORT_CHROMA1_IN);

        if (ctx->deinterlacing)
                add_in_dtd(ctx, VPE_PORT_MV_IN);

        /* sync on channel control descriptors for input ports */
        vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN);
        vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN);

        if (ctx->deinterlacing) {
                vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                        VPE_CHAN_LUMA2_IN);
                vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                        VPE_CHAN_CHROMA2_IN);

                vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                        VPE_CHAN_LUMA3_IN);
                vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                        VPE_CHAN_CHROMA3_IN);

                vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN);
        }

        /* sync on channel control descriptors for output ports */
        if (d_q_data->colorspace == V4L2_COLORSPACE_SRGB) {
                vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                        VPE_CHAN_RGB_OUT);
        } else {
                vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                        VPE_CHAN_LUMA_OUT);
                if (d_q_data->fmt->coplanar)
                        vpdma_add_sync_on_channel_ctd(&ctx->desc_list,
                                VPE_CHAN_CHROMA_OUT);
        }

        if (ctx->deinterlacing)
                vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT);

        enable_irqs(ctx);

        vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf);
        vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list);
}

static void dei_error(struct vpe_ctx *ctx)
{
        dev_warn(ctx->dev->v4l2_dev.dev,
                "received DEI error interrupt\n");
}

static void ds1_uv_error(struct vpe_ctx *ctx)
{
        dev_warn(ctx->dev->v4l2_dev.dev,
                "received downsampler error interrupt\n");
}

static irqreturn_t vpe_irq(int irq_vpe, void *data)
{
        struct vpe_dev *dev = (struct vpe_dev *)data;
        struct vpe_ctx *ctx;
        struct vpe_q_data *d_q_data;
        struct vb2_v4l2_buffer *s_vb, *d_vb;
        unsigned long flags;
        u32 irqst0, irqst1;

        irqst0 = read_reg(dev, VPE_INT0_STATUS0);
        if (irqst0) {
                write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0);
                vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0);
        }

        irqst1 = read_reg(dev, VPE_INT0_STATUS1);
        if (irqst1) {
                write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1);
                vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1);
        }

        ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
        if (!ctx) {
                vpe_err(dev, "instance released before end of transaction\n");
                goto handled;
        }

        if (irqst1) {
                if (irqst1 & VPE_DEI_ERROR_INT) {
                        irqst1 &= ~VPE_DEI_ERROR_INT;
                        dei_error(ctx);
                }
                if (irqst1 & VPE_DS1_UV_ERROR_INT) {
                        irqst1 &= ~VPE_DS1_UV_ERROR_INT;
                        ds1_uv_error(ctx);
                }
        }

        if (irqst0) {
                if (irqst0 & VPE_INT0_LIST0_COMPLETE)
                        vpdma_clear_list_stat(ctx->dev->vpdma);

                irqst0 &= ~(VPE_INT0_LIST0_COMPLETE);
        }

        if (irqst0 | irqst1) {
                dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: "
                        "INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n",
                        irqst0, irqst1);
        }

        disable_irqs(ctx);

        vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf);
        vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb);
        vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h);
        vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v);

        vpdma_reset_desc_list(&ctx->desc_list);

        /* the previous dst mv buffer becomes the next src mv buffer */
        ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector;

        if (ctx->aborting)
                goto finished;

        s_vb = ctx->src_vbs[0];
        d_vb = ctx->dst_vb;

        d_vb->flags = s_vb->flags;
        d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp;

        if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE)
                d_vb->timecode = s_vb->timecode;

        d_vb->sequence = ctx->sequence;

        d_q_data = &ctx->q_data[Q_DATA_DST];
        if (d_q_data->flags & Q_DATA_INTERLACED) {
                d_vb->field = ctx->field;
                if (ctx->field == V4L2_FIELD_BOTTOM) {
                        ctx->sequence++;
                        ctx->field = V4L2_FIELD_TOP;
                } else {
                        WARN_ON(ctx->field != V4L2_FIELD_TOP);
                        ctx->field = V4L2_FIELD_BOTTOM;
                }
        } else {
                d_vb->field = V4L2_FIELD_NONE;
                ctx->sequence++;
        }

        if (ctx->deinterlacing)
                s_vb = ctx->src_vbs[2];

        spin_lock_irqsave(&dev->lock, flags);
        v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);
        v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);
        spin_unlock_irqrestore(&dev->lock, flags);

        if (ctx->deinterlacing) {
                ctx->src_vbs[2] = ctx->src_vbs[1];
                ctx->src_vbs[1] = ctx->src_vbs[0];
        }

        ctx->bufs_completed++;
        if (ctx->bufs_completed < ctx->bufs_per_job) {
                device_run(ctx);
                goto handled;
        }

finished:
        vpe_dbg(ctx->dev, "finishing transaction\n");
        ctx->bufs_completed = 0;
        v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
handled:
        return IRQ_HANDLED;
}

/*
 * video ioctls
 */
static int vpe_querycap(struct file *file, void *priv,
                        struct v4l2_capability *cap)
{
        strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
        strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
                VPE_MODULE_NAME);
        cap->device_caps  = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
}

static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
        int i, index;
        struct vpe_fmt *fmt = NULL;

        index = 0;
        for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
                if (vpe_formats[i].types & type) {
                        if (index == f->index) {
                                fmt = &vpe_formats[i];
                                break;
                        }
                        index++;
                }
        }

        if (!fmt)
                return -EINVAL;

        strncpy(f->description, fmt->name, sizeof(f->description) - 1);
        f->pixelformat = fmt->fourcc;
        return 0;
}

static int vpe_enum_fmt(struct file *file, void *priv,
                                struct v4l2_fmtdesc *f)
{
        if (V4L2_TYPE_IS_OUTPUT(f->type))
                return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);

        return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
}

static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
        struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
        struct vpe_ctx *ctx = file2ctx(file);
        struct vb2_queue *vq;
        struct vpe_q_data *q_data;
        int i;

        vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
        if (!vq)
                return -EINVAL;

        q_data = get_q_data(ctx, f->type);

        pix->width = q_data->width;
        pix->height = q_data->height;
        pix->pixelformat = q_data->fmt->fourcc;
        pix->field = q_data->field;

        if (V4L2_TYPE_IS_OUTPUT(f->type)) {
                pix->colorspace = q_data->colorspace;
        } else {
                struct vpe_q_data *s_q_data;

                /* get colorspace from the source queue */
                s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

                pix->colorspace = s_q_data->colorspace;
        }

        pix->num_planes = q_data->fmt->coplanar ? 2 : 1;

        for (i = 0; i < pix->num_planes; i++) {
                pix->plane_fmt[i].bytesperline = q_data->bytesperline[i];
                pix->plane_fmt[i].sizeimage = q_data->sizeimage[i];
        }

        return 0;
}

static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
                       struct vpe_fmt *fmt, int type)
{
        struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
        struct v4l2_plane_pix_format *plane_fmt;
        unsigned int w_align;
        int i, depth, depth_bytes;

        if (!fmt || !(fmt->types & type)) {
                vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n",
                        pix->pixelformat);
                return -EINVAL;
        }

        if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE)
                pix->field = V4L2_FIELD_NONE;

        depth = fmt->vpdma_fmt[VPE_LUMA]->depth;

        /*
         * the line stride should be 16 byte aligned for VPDMA to work; based
         * on the bytes per pixel, figure out how much the width should be
         * aligned to make sure the line stride is 16 byte aligned
         */
        depth_bytes = depth >> 3;

        if (depth_bytes == 3)
                /*
                 * if bpp is 3 (as in some RGB formats), the pixel width
                 * doesn't really help in ensuring the line stride is 16 byte
                 * aligned
                 */
                w_align = 4;
        else
                /*
                 * for the remaining bpp values (4, 2 and 1), the pixel width
                 * alignment can ensure a line stride alignment of 16 bytes.
                 * For example, if bpp is 2, then the line stride is 16 byte
                 * aligned if the width is aligned to 8 pixels
                 */
                w_align = order_base_2(VPDMA_DESC_ALIGN / depth_bytes);
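                /*
                 * Worked example (assuming VPDMA_DESC_ALIGN is 16 bytes):
                 * for a 2 bytes-per-pixel format such as YUYV, w_align is
                 * order_base_2(16 / 2) = 3, so the width is rounded to a
                 * multiple of 8 pixels, giving a 16 byte aligned line stride.
                 */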
1467
1468        v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align,
1469                              &pix->height, MIN_H, MAX_H, H_ALIGN,
1470                              S_ALIGN);
1471
1472        pix->num_planes = fmt->coplanar ? 2 : 1;
1473        pix->pixelformat = fmt->fourcc;
1474
1475        if (!pix->colorspace) {
1476                if (fmt->fourcc == V4L2_PIX_FMT_RGB24 ||
1477                                fmt->fourcc == V4L2_PIX_FMT_BGR24 ||
1478                                fmt->fourcc == V4L2_PIX_FMT_RGB32 ||
1479                                fmt->fourcc == V4L2_PIX_FMT_BGR32) {
1480                        pix->colorspace = V4L2_COLORSPACE_SRGB;
1481                } else {
1482                        if (pix->width >= 1280) /* HD */
1483                                pix->colorspace = V4L2_COLORSPACE_REC709;
1484                        else                    /* SD */
1485                                pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
1486                }
1487        }
1488
1489        memset(pix->reserved, 0, sizeof(pix->reserved));
1490        for (i = 0; i < pix->num_planes; i++) {
1491                plane_fmt = &pix->plane_fmt[i];
1492                depth = fmt->vpdma_fmt[i]->depth;
1493
1494                if (i == VPE_LUMA)
1495                        plane_fmt->bytesperline = (pix->width * depth) >> 3;
1496                else
1497                        plane_fmt->bytesperline = pix->width;
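                    /*
                     * in the co-planar chroma plane the Cb/Cr samples are
                     * interleaved at full width (as in NV12), so the stride
                     * in bytes equals the pixel width; the plane's smaller
                     * depth then yields the reduced sizeimage computed below
                     */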
1498
1499                plane_fmt->sizeimage =
1500                                (pix->height * pix->width * depth) >> 3;
1501
1502                memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
1503        }
1504
1505        return 0;
1506}
1507
1508static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
1509{
1510        struct vpe_ctx *ctx = file2ctx(file);
1511        struct vpe_fmt *fmt = find_format(f);
1512
1513        if (V4L2_TYPE_IS_OUTPUT(f->type))
1514                return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT);
1515        else
1516                return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE);
1517}
1518
1519static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f)
1520{
1521        struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
1522        struct v4l2_plane_pix_format *plane_fmt;
1523        struct vpe_q_data *q_data;
1524        struct vb2_queue *vq;
1525        int i;
1526
1527        vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
1528        if (!vq)
1529                return -EINVAL;
1530
1531        if (vb2_is_busy(vq)) {
1532                vpe_err(ctx->dev, "queue busy\n");
1533                return -EBUSY;
1534        }
1535
1536        q_data = get_q_data(ctx, f->type);
1537        if (!q_data)
1538                return -EINVAL;
1539
1540        q_data->fmt             = find_format(f);
1541        q_data->width           = pix->width;
1542        q_data->height          = pix->height;
1543        q_data->colorspace      = pix->colorspace;
1544        q_data->field           = pix->field;
1545
1546        for (i = 0; i < pix->num_planes; i++) {
1547                plane_fmt = &pix->plane_fmt[i];
1548
1549                q_data->bytesperline[i] = plane_fmt->bytesperline;
1550                q_data->sizeimage[i]    = plane_fmt->sizeimage;
1551        }
1552
1553        q_data->c_rect.left     = 0;
1554        q_data->c_rect.top      = 0;
1555        q_data->c_rect.width    = q_data->width;
1556        q_data->c_rect.height   = q_data->height;
1557
1558        if (q_data->field == V4L2_FIELD_ALTERNATE)
1559                q_data->flags |= Q_DATA_INTERLACED;
1560        else
1561                q_data->flags &= ~Q_DATA_INTERLACED;
1562
1563        vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %4.4s bpl_y %d\n",
1564                f->type, q_data->width, q_data->height,
1565                (char *)&q_data->fmt->fourcc, q_data->bytesperline[VPE_LUMA]);
1566        if (q_data->fmt->coplanar)
1567                vpe_dbg(ctx->dev, "bpl_uv %d\n",
1568                        q_data->bytesperline[VPE_CHROMA]);
1569
1570        return 0;
1571}
1572
1573static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
1574{
1575        int ret;
1576        struct vpe_ctx *ctx = file2ctx(file);
1577
1578        ret = vpe_try_fmt(file, priv, f);
1579        if (ret)
1580                return ret;
1581
1582        ret = __vpe_s_fmt(ctx, f);
1583        if (ret)
1584                return ret;
1585
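            /*
             * a new format invalidates the per-direction register settings;
             * reload them and recompute the scaler and CSC parameters to
             * match
             */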
1586        if (V4L2_TYPE_IS_OUTPUT(f->type))
1587                set_src_registers(ctx);
1588        else
1589                set_dst_registers(ctx);
1590
1591        return set_srcdst_params(ctx);
1592}
1593
1594static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s)
1595{
1596        struct vpe_q_data *q_data;
1597
1598        if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1599            (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1600                return -EINVAL;
1601
1602        q_data = get_q_data(ctx, s->type);
1603        if (!q_data)
1604                return -EINVAL;
1605
1606        switch (s->target) {
1607        case V4L2_SEL_TGT_COMPOSE:
1608                /*
1609                 * COMPOSE target is only valid for capture buffer type, return
1610                 * error for output buffer type
1611                 */
1612                if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1613                        return -EINVAL;
1614                break;
1615        case V4L2_SEL_TGT_CROP:
1616                /*
1617                 * CROP target is only valid for output buffer type, return
1618                 * error for capture buffer type
1619                 */
1620                if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1621                        return -EINVAL;
1622                break;
1623        /*
1624         * bound and default crop/compose targets are invalid targets to
1625         * try/set
1626         */
1627        default:
1628                return -EINVAL;
1629        }
1630
1631        if (s->r.top < 0 || s->r.left < 0) {
1632                vpe_err(ctx->dev, "negative values for top and left\n");
1633                s->r.top = s->r.left = 0;
1634        }
1635
1636        v4l_bound_align_image(&s->r.width, MIN_W, q_data->width, 1,
1637                &s->r.height, MIN_H, q_data->height, H_ALIGN, S_ALIGN);
1638
1639        /* adjust left/top if cropping rectangle is out of bounds */
1640        if (s->r.left + s->r.width > q_data->width)
1641                s->r.left = q_data->width - s->r.width;
1642        if (s->r.top + s->r.height > q_data->height)
1643                s->r.top = q_data->height - s->r.height;
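            /*
             * e.g. on a 1920 pixel wide queue, a request of left == 100 with
             * width == 1900 is pulled back to left == 20 so the rectangle
             * still fits
             */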
1644
1645        return 0;
1646}
1647
1648static int vpe_g_selection(struct file *file, void *fh,
1649                struct v4l2_selection *s)
1650{
1651        struct vpe_ctx *ctx = file2ctx(file);
1652        struct vpe_q_data *q_data;
1653        bool use_c_rect = false;
1654
1655        if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
1656            (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
1657                return -EINVAL;
1658
1659        q_data = get_q_data(ctx, s->type);
1660        if (!q_data)
1661                return -EINVAL;
1662
1663        switch (s->target) {
1664        case V4L2_SEL_TGT_COMPOSE_DEFAULT:
1665        case V4L2_SEL_TGT_COMPOSE_BOUNDS:
1666                if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1667                        return -EINVAL;
1668                break;
1669        case V4L2_SEL_TGT_CROP_BOUNDS:
1670        case V4L2_SEL_TGT_CROP_DEFAULT:
1671                if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1672                        return -EINVAL;
1673                break;
1674        case V4L2_SEL_TGT_COMPOSE:
1675                if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1676                        return -EINVAL;
1677                use_c_rect = true;
1678                break;
1679        case V4L2_SEL_TGT_CROP:
1680                if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1681                        return -EINVAL;
1682                use_c_rect = true;
1683                break;
1684        default:
1685                return -EINVAL;
1686        }
1687
1688        if (use_c_rect) {
1689                /*
1690                 * for CROP/COMPOSE target type, return c_rect params from the
1691                 * respective buffer type
1692                 */
1693                s->r = q_data->c_rect;
1694        } else {
1695                /*
1696                 * for DEFAULT/BOUNDS target type, return width and height from
1697                 * S_FMT of the respective buffer type
1698                 */
1699                s->r.left = 0;
1700                s->r.top = 0;
1701                s->r.width = q_data->width;
1702                s->r.height = q_data->height;
1703        }
1704
1705        return 0;
1706}
1707
1709static int vpe_s_selection(struct file *file, void *fh,
1710                struct v4l2_selection *s)
1711{
1712        struct vpe_ctx *ctx = file2ctx(file);
1713        struct vpe_q_data *q_data;
1714        struct v4l2_selection sel = *s;
1715        int ret;
1716
1717        ret = __vpe_try_selection(ctx, &sel);
1718        if (ret)
1719                return ret;
1720
1721        q_data = get_q_data(ctx, sel.type);
1722        if (!q_data)
1723                return -EINVAL;
1724
1725        if ((q_data->c_rect.left == sel.r.left) &&
1726                        (q_data->c_rect.top == sel.r.top) &&
1727                        (q_data->c_rect.width == sel.r.width) &&
1728                        (q_data->c_rect.height == sel.r.height)) {
1729                vpe_dbg(ctx->dev,
1730                        "requested crop/compose values are already set\n");
1731                return 0;
1732        }
1733
1734        q_data->c_rect = sel.r;
1735
1736        return set_srcdst_params(ctx);
1737}
1738
1739/*
1740 * defines the number of buffers/frames a context can process with VPE before
1741 * switching to a different context; the default is one buffer per job
1742 */
1743#define V4L2_CID_VPE_BUFS_PER_JOB               (V4L2_CID_USER_TI_VPE_BASE + 0)
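    /*
     * a minimal userspace sketch for tuning this control (vpe_fd is assumed
     * to be the opened video device; error handling omitted):
     *
     *        struct v4l2_control ctrl = {
     *                .id     = V4L2_CID_VPE_BUFS_PER_JOB,
     *                .value  = 4,
     *        };
     *        ioctl(vpe_fd, VIDIOC_S_CTRL, &ctrl);
     */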
1744
1745static int vpe_s_ctrl(struct v4l2_ctrl *ctrl)
1746{
1747        struct vpe_ctx *ctx =
1748                container_of(ctrl->handler, struct vpe_ctx, hdl);
1749
1750        switch (ctrl->id) {
1751        case V4L2_CID_VPE_BUFS_PER_JOB:
1752                ctx->bufs_per_job = ctrl->val;
1753                break;
1754
1755        default:
1756                vpe_err(ctx->dev, "Invalid control\n");
1757                return -EINVAL;
1758        }
1759
1760        return 0;
1761}
1762
1763static const struct v4l2_ctrl_ops vpe_ctrl_ops = {
1764        .s_ctrl = vpe_s_ctrl,
1765};
1766
1767static const struct v4l2_ioctl_ops vpe_ioctl_ops = {
1768        .vidioc_querycap                = vpe_querycap,
1769
1770        .vidioc_enum_fmt_vid_cap_mplane = vpe_enum_fmt,
1771        .vidioc_g_fmt_vid_cap_mplane    = vpe_g_fmt,
1772        .vidioc_try_fmt_vid_cap_mplane  = vpe_try_fmt,
1773        .vidioc_s_fmt_vid_cap_mplane    = vpe_s_fmt,
1774
1775        .vidioc_enum_fmt_vid_out_mplane = vpe_enum_fmt,
1776        .vidioc_g_fmt_vid_out_mplane    = vpe_g_fmt,
1777        .vidioc_try_fmt_vid_out_mplane  = vpe_try_fmt,
1778        .vidioc_s_fmt_vid_out_mplane    = vpe_s_fmt,
1779
1780        .vidioc_g_selection             = vpe_g_selection,
1781        .vidioc_s_selection             = vpe_s_selection,
1782
1783        .vidioc_reqbufs                 = v4l2_m2m_ioctl_reqbufs,
1784        .vidioc_querybuf                = v4l2_m2m_ioctl_querybuf,
1785        .vidioc_qbuf                    = v4l2_m2m_ioctl_qbuf,
1786        .vidioc_dqbuf                   = v4l2_m2m_ioctl_dqbuf,
1787        .vidioc_streamon                = v4l2_m2m_ioctl_streamon,
1788        .vidioc_streamoff               = v4l2_m2m_ioctl_streamoff,
1789
1790        .vidioc_subscribe_event         = v4l2_ctrl_subscribe_event,
1791        .vidioc_unsubscribe_event       = v4l2_event_unsubscribe,
1792};
1793
1794/*
1795 * Queue operations
1796 */
1797static int vpe_queue_setup(struct vb2_queue *vq,
1798                           unsigned int *nbuffers, unsigned int *nplanes,
1799                           unsigned int sizes[], struct device *alloc_devs[])
1800{
1801        int i;
1802        struct vpe_ctx *ctx = vb2_get_drv_priv(vq);
1803        struct vpe_q_data *q_data;
1804
1805        q_data = get_q_data(ctx, vq->type);
1806
1807        *nplanes = q_data->fmt->coplanar ? 2 : 1;
1808
1809        for (i = 0; i < *nplanes; i++)
1810                sizes[i] = q_data->sizeimage[i];
1811
1812        vpe_dbg(ctx->dev, "get %d buffer(s) of size %d\n", *nbuffers,
1813                sizes[VPE_LUMA]);
1814        if (q_data->fmt->coplanar)
1815                vpe_dbg(ctx->dev, "chroma size %d\n", sizes[VPE_CHROMA]);
1816
1817        return 0;
1818}
1819
1820static int vpe_buf_prepare(struct vb2_buffer *vb)
1821{
1822        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1823        struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1824        struct vpe_q_data *q_data;
1825        int i, num_planes;
1826
1827        vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);
1828
1829        q_data = get_q_data(ctx, vb->vb2_queue->type);
1830        num_planes = q_data->fmt->coplanar ? 2 : 1;
1831
1832        if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
1833                if (!(q_data->flags & Q_DATA_INTERLACED)) {
1834                        vbuf->field = V4L2_FIELD_NONE;
1835                } else {
1836                        if (vbuf->field != V4L2_FIELD_TOP &&
1837                                        vbuf->field != V4L2_FIELD_BOTTOM)
1838                                return -EINVAL;
1839                }
1840        }
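            /*
             * in the V4L2_FIELD_ALTERNATE case each queued source buffer
             * carries exactly one field, so anything other than TOP or
             * BOTTOM is rejected above
             */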
1841
1842        for (i = 0; i < num_planes; i++) {
1843                if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
1844                        vpe_err(ctx->dev,
1845                                "data will not fit into plane (%lu < %lu)\n",
1846                                vb2_plane_size(vb, i),
1847                                (unsigned long) q_data->sizeimage[i]);
1848                        return -EINVAL;
1849                }
1850        }
1851
1852        for (i = 0; i < num_planes; i++)
1853                vb2_set_plane_payload(vb, i, q_data->sizeimage[i]);
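            /*
             * the payload is forced to the full sizeimage: the VPDMA
             * descriptors are built from the negotiated format, not from the
             * bytesused value supplied by userspace
             */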
1854
1855        return 0;
1856}
1857
1858static void vpe_buf_queue(struct vb2_buffer *vb)
1859{
1860        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
1861        struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
1862
1863        v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
1864}
1865
1866static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
1867{
1868        /* currently we do nothing here */
1869
1870        return 0;
1871}
1872
1873static void vpe_stop_streaming(struct vb2_queue *q)
1874{
1875        struct vpe_ctx *ctx = vb2_get_drv_priv(q);
1876
1877        vpe_dump_regs(ctx->dev);
1878        vpdma_dump_regs(ctx->dev->vpdma);
1879}
1880
1881static const struct vb2_ops vpe_qops = {
1882        .queue_setup     = vpe_queue_setup,
1883        .buf_prepare     = vpe_buf_prepare,
1884        .buf_queue       = vpe_buf_queue,
1885        .wait_prepare    = vb2_ops_wait_prepare,
1886        .wait_finish     = vb2_ops_wait_finish,
1887        .start_streaming = vpe_start_streaming,
1888        .stop_streaming  = vpe_stop_streaming,
1889};
1890
1891static int queue_init(void *priv, struct vb2_queue *src_vq,
1892                      struct vb2_queue *dst_vq)
1893{
1894        struct vpe_ctx *ctx = priv;
1895        struct vpe_dev *dev = ctx->dev;
1896        int ret;
1897
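            /*
             * src_vq is the OUTPUT (userspace to VPE) side and dst_vq the
             * CAPTURE (VPE to userspace) side; both use the dma-contig
             * allocator since VPDMA needs physically contiguous buffers
             */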
1898        memset(src_vq, 0, sizeof(*src_vq));
1899        src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1900        src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1901        src_vq->drv_priv = ctx;
1902        src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1903        src_vq->ops = &vpe_qops;
1904        src_vq->mem_ops = &vb2_dma_contig_memops;
1905        src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1906        src_vq->lock = &dev->dev_mutex;
1907        src_vq->dev = dev->v4l2_dev.dev;
1908
1909        ret = vb2_queue_init(src_vq);
1910        if (ret)
1911                return ret;
1912
1913        memset(dst_vq, 0, sizeof(*dst_vq));
1914        dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1915        dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
1916        dst_vq->drv_priv = ctx;
1917        dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
1918        dst_vq->ops = &vpe_qops;
1919        dst_vq->mem_ops = &vb2_dma_contig_memops;
1920        dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1921        dst_vq->lock = &dev->dev_mutex;
1922        dst_vq->dev = dev->v4l2_dev.dev;
1923
1924        return vb2_queue_init(dst_vq);
1925}
1926
1927static const struct v4l2_ctrl_config vpe_bufs_per_job = {
1928        .ops = &vpe_ctrl_ops,
1929        .id = V4L2_CID_VPE_BUFS_PER_JOB,
1930        .name = "Buffers Per Transaction",
1931        .type = V4L2_CTRL_TYPE_INTEGER,
1932        .def = VPE_DEF_BUFS_PER_JOB,
1933        .min = 1,
1934        .max = VIDEO_MAX_FRAME,
1935        .step = 1,
1936};
1937
1938/*
1939 * File operations
1940 */
1941static int vpe_open(struct file *file)
1942{
1943        struct vpe_dev *dev = video_drvdata(file);
1944        struct vpe_q_data *s_q_data;
1945        struct v4l2_ctrl_handler *hdl;
1946        struct vpe_ctx *ctx;
1947        int ret;
1948
1949        vpe_dbg(dev, "vpe_open\n");
1950
1951        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1952        if (!ctx)
1953                return -ENOMEM;
1954
1955        ctx->dev = dev;
1956
1957        if (mutex_lock_interruptible(&dev->dev_mutex)) {
1958                ret = -ERESTARTSYS;
1959                goto free_ctx;
1960        }
1961
1962        ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE,
1963                        VPDMA_LIST_TYPE_NORMAL);
1964        if (ret != 0)
1965                goto unlock;
1966
1967        ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb));
1968        if (ret != 0)
1969                goto free_desc_list;
1970
1971        ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE);
1972        if (ret != 0)
1973                goto free_mmr_adb;
1974
1975        ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE);
1976        if (ret != 0)
1977                goto free_sc_h;
1978
1979        init_adb_hdrs(ctx);
1980
1981        v4l2_fh_init(&ctx->fh, video_devdata(file));
1982        file->private_data = &ctx->fh;
1983
1984        hdl = &ctx->hdl;
1985        v4l2_ctrl_handler_init(hdl, 1);
1986        v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL);
1987        if (hdl->error) {
1988                ret = hdl->error;
1989                goto exit_fh;
1990        }
1991        ctx->fh.ctrl_handler = hdl;
1992        v4l2_ctrl_handler_setup(hdl);
1993
1994        s_q_data = &ctx->q_data[Q_DATA_SRC];
1995        s_q_data->fmt = &vpe_formats[2];
1996        s_q_data->width = 1920;
1997        s_q_data->height = 1080;
1998        s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
1999                        s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
2000        s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
2001                        s_q_data->height);
2002        s_q_data->colorspace = V4L2_COLORSPACE_REC709;
2003        s_q_data->field = V4L2_FIELD_NONE;
2004        s_q_data->c_rect.left = 0;
2005        s_q_data->c_rect.top = 0;
2006        s_q_data->c_rect.width = s_q_data->width;
2007        s_q_data->c_rect.height = s_q_data->height;
2008        s_q_data->flags = 0;
2009
2010        ctx->q_data[Q_DATA_DST] = *s_q_data;
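            /*
             * both queues start out with the same defaults: the third entry
             * of the driver's format table at progressive full HD 1920x1080
             */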
2011
2012        set_dei_shadow_registers(ctx);
2013        set_src_registers(ctx);
2014        set_dst_registers(ctx);
2015        ret = set_srcdst_params(ctx);
2016        if (ret)
2017                goto exit_fh;
2018
2019        ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
2020
2021        if (IS_ERR(ctx->fh.m2m_ctx)) {
2022                ret = PTR_ERR(ctx->fh.m2m_ctx);
2023                goto exit_fh;
2024        }
2025
2026        v4l2_fh_add(&ctx->fh);
2027
2028        /*
2029         * for now, just report the creation of the first instance; we can
2030         * later optimize the driver to enable or disable clocks when the
2031         * first instance is created or the last instance is released
2032         */
2033        if (atomic_inc_return(&dev->num_instances) == 1)
2034                vpe_dbg(dev, "first instance created\n");
2035
2036        ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB;
2037
2038        ctx->load_mmrs = true;
2039
2040        vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n",
2041                ctx, ctx->fh.m2m_ctx);
2042
2043        mutex_unlock(&dev->dev_mutex);
2044
2045        return 0;
2046exit_fh:
2047        v4l2_ctrl_handler_free(hdl);
2048        v4l2_fh_exit(&ctx->fh);
2049        vpdma_free_desc_buf(&ctx->sc_coeff_v);
2050free_sc_h:
2051        vpdma_free_desc_buf(&ctx->sc_coeff_h);
2052free_mmr_adb:
2053        vpdma_free_desc_buf(&ctx->mmr_adb);
2054free_desc_list:
2055        vpdma_free_desc_list(&ctx->desc_list);
2056unlock:
2057        mutex_unlock(&dev->dev_mutex);
2058free_ctx:
2059        kfree(ctx);
2060        return ret;
2061}
2062
2063static int vpe_release(struct file *file)
2064{
2065        struct vpe_dev *dev = video_drvdata(file);
2066        struct vpe_ctx *ctx = file2ctx(file);
2067
2068        vpe_dbg(dev, "releasing instance %p\n", ctx);
2069
2070        mutex_lock(&dev->dev_mutex);
2071        free_vbs(ctx);
2072        free_mv_buffers(ctx);
2073        vpdma_free_desc_list(&ctx->desc_list);
2074        vpdma_free_desc_buf(&ctx->mmr_adb);
            vpdma_free_desc_buf(&ctx->sc_coeff_h);
            vpdma_free_desc_buf(&ctx->sc_coeff_v);
2075
2076        v4l2_fh_del(&ctx->fh);
2077        v4l2_fh_exit(&ctx->fh);
2078        v4l2_ctrl_handler_free(&ctx->hdl);
2079        v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
2080
2081        kfree(ctx);
2082
2083        /*
2084         * for now, just report the release of the last instance; we can
2085         * later optimize the driver to enable or disable clocks when the
2086         * first instance is created or the last instance is released
2087         */
2088        if (atomic_dec_return(&dev->num_instances) == 0)
2089                vpe_dbg(dev, "last instance released\n");
2090
2091        mutex_unlock(&dev->dev_mutex);
2092
2093        return 0;
2094}
2095
2096static const struct v4l2_file_operations vpe_fops = {
2097        .owner          = THIS_MODULE,
2098        .open           = vpe_open,
2099        .release        = vpe_release,
2100        .poll           = v4l2_m2m_fop_poll,
2101        .unlocked_ioctl = video_ioctl2,
2102        .mmap           = v4l2_m2m_fop_mmap,
2103};
2104
2105static struct video_device vpe_videodev = {
2106        .name           = VPE_MODULE_NAME,
2107        .fops           = &vpe_fops,
2108        .ioctl_ops      = &vpe_ioctl_ops,
2109        .minor          = -1,
2110        .release        = video_device_release_empty,
2111        .vfl_dir        = VFL_DIR_M2M,
2112};
2113
2114static struct v4l2_m2m_ops m2m_ops = {
2115        .device_run     = device_run,
2116        .job_ready      = job_ready,
2117        .job_abort      = job_abort,
2118        .lock           = vpe_lock,
2119        .unlock         = vpe_unlock,
2120};
2121
2122static int vpe_runtime_get(struct platform_device *pdev)
2123{
2124        int r;
2125
2126        dev_dbg(&pdev->dev, "vpe_runtime_get\n");
2127
2128        r = pm_runtime_get_sync(&pdev->dev);
2129        WARN_ON(r < 0);
2130        return r < 0 ? r : 0;
2131}
2132
2133static void vpe_runtime_put(struct platform_device *pdev)
2134{
2136        int r;
2137
2138        dev_dbg(&pdev->dev, "vpe_runtime_put\n");
2139
2140        r = pm_runtime_put_sync(&pdev->dev);
2141        WARN_ON(r < 0 && r != -ENOSYS);
2142}
2143
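    /*
     * called back by the VPDMA layer once it has finished loading its
     * firmware (see the vpdma_create() call in vpe_probe() below); only then
     * is the video device registered and exposed to userspace
     */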
2144static void vpe_fw_cb(struct platform_device *pdev)
2145{
2146        struct vpe_dev *dev = platform_get_drvdata(pdev);
2147        struct video_device *vfd;
2148        int ret;
2149
2150        vfd = &dev->vfd;
2151        *vfd = vpe_videodev;
2152        vfd->lock = &dev->dev_mutex;
2153        vfd->v4l2_dev = &dev->v4l2_dev;
2154
2155        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
2156        if (ret) {
2157                vpe_err(dev, "Failed to register video device\n");
2158
2159                vpe_set_clock_enable(dev, 0);
2160                vpe_runtime_put(pdev);
2161                pm_runtime_disable(&pdev->dev);
2162                v4l2_m2m_release(dev->m2m_dev);
2163                v4l2_device_unregister(&dev->v4l2_dev);
2164
2165                return;
2166        }
2167
2168        video_set_drvdata(vfd, dev);
2169        snprintf(vfd->name, sizeof(vfd->name), "%s", vpe_videodev.name);
2170        dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n",
2171                vfd->num);
2172}
2173
2174static int vpe_probe(struct platform_device *pdev)
2175{
2176        struct vpe_dev *dev;
2177        int ret, irq, func;
2178
2179        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
2180        if (!dev)
2181                return -ENOMEM;
2182
2183        spin_lock_init(&dev->lock);
2184
2185        ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
2186        if (ret)
2187                return ret;
2188
2189        atomic_set(&dev->num_instances, 0);
2190        mutex_init(&dev->dev_mutex);
2191
2192        dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2193                        "vpe_top");
2194        /*
2195         * HACK: we get resource info from device tree in the form of a list of
2196         * VPE sub blocks, the driver currently uses only the base of vpe_top
2197         * for register access, the driver should be changed later to access
2198         * registers based on the sub block base addresses
2199         */
            if (!dev->res) {
                    ret = -ENODEV;
                    goto v4l2_dev_unreg;
            }

2200        dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K);
2201        if (!dev->base) {
2202                ret = -ENOMEM;
2203                goto v4l2_dev_unreg;
2204        }
2205
2206        irq = platform_get_irq(pdev, 0);
            if (irq < 0) {
                    ret = irq;
                    goto v4l2_dev_unreg;
            }

2207        ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME,
2208                        dev);
2209        if (ret)
2210                goto v4l2_dev_unreg;
2211
2212        platform_set_drvdata(pdev, dev);
2213
2214        dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
2215        if (IS_ERR(dev->m2m_dev)) {
2216                vpe_err(dev, "Failed to init mem2mem device\n");
2217                ret = PTR_ERR(dev->m2m_dev);
2218                goto v4l2_dev_unreg;
2219        }
2220
2221        pm_runtime_enable(&pdev->dev);
2222
2223        ret = vpe_runtime_get(pdev);
2224        if (ret)
2225                goto rel_m2m;
2226
2227        /* Perform clk enable followed by reset */
2228        vpe_set_clock_enable(dev, 1);
2229
2230        vpe_top_reset(dev);
2231
2232        func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK,
2233                VPE_PID_FUNC_SHIFT);
2234        vpe_dbg(dev, "VPE PID function %x\n", func);
2235
2236        vpe_top_vpdma_reset(dev);
2237
2238        dev->sc = sc_create(pdev);
2239        if (IS_ERR(dev->sc)) {
2240                ret = PTR_ERR(dev->sc);
2241                goto runtime_put;
2242        }
2243
2244        dev->csc = csc_create(pdev);
2245        if (IS_ERR(dev->csc)) {
2246                ret = PTR_ERR(dev->csc);
2247                goto runtime_put;
2248        }
2249
2250        dev->vpdma = vpdma_create(pdev, vpe_fw_cb);
2251        if (IS_ERR(dev->vpdma)) {
2252                ret = PTR_ERR(dev->vpdma);
2253                goto runtime_put;
2254        }
2255
2256        return 0;
2257
2258runtime_put:
2259        vpe_runtime_put(pdev);
2260rel_m2m:
2261        pm_runtime_disable(&pdev->dev);
2262        v4l2_m2m_release(dev->m2m_dev);
2263v4l2_dev_unreg:
2264        v4l2_device_unregister(&dev->v4l2_dev);
2265
2266        return ret;
2267}
2268
2269static int vpe_remove(struct platform_device *pdev)
2270{
2271        struct vpe_dev *dev = platform_get_drvdata(pdev);
2272
2273        v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME "\n");
2274
2275        v4l2_m2m_release(dev->m2m_dev);
2276        video_unregister_device(&dev->vfd);
2277        v4l2_device_unregister(&dev->v4l2_dev);
2278
2279        vpe_set_clock_enable(dev, 0);
2280        vpe_runtime_put(pdev);
2281        pm_runtime_disable(&pdev->dev);
2282
2283        return 0;
2284}
2285
2286#if defined(CONFIG_OF)
2287static const struct of_device_id vpe_of_match[] = {
2288        {
2289                .compatible = "ti,vpe",
2290        },
2291        {},
2292};
2293#endif
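    /*
     * a device tree node binding to this driver would look roughly like the
     * sketch below; the addresses and interrupt are placeholders, and the
     * real bindings list the VPE sub-block reg ranges by name, "vpe_top"
     * being the one vpe_probe() maps:
     *
     *        vpe {
     *                compatible = "ti,vpe";
     *                reg = <...>;
     *                reg-names = "vpe_top", ...;
     *                interrupts = <...>;
     *        };
     */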
2294
2295static struct platform_driver vpe_pdrv = {
2296        .probe          = vpe_probe,
2297        .remove         = vpe_remove,
2298        .driver         = {
2299                .name   = VPE_MODULE_NAME,
2300                .of_match_table = of_match_ptr(vpe_of_match),
2301        },
2302};
2303
2304module_platform_driver(vpe_pdrv);
2305
2306MODULE_DESCRIPTION("TI VPE driver");
2307MODULE_AUTHOR("Dale Farnsworth, <dale@farnsworth.org>");
2308MODULE_LICENSE("GPL");
2309