linux/drivers/media/platform/xilinx/xilinx-dma.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Xilinx Video DMA
   4 *
   5 * Copyright (C) 2013-2015 Ideas on Board
   6 * Copyright (C) 2013-2015 Xilinx, Inc.
   7 *
   8 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
   9 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  10 */
  11
  12#include <linux/dma/xilinx_dma.h>
  13#include <linux/dma/xilinx_frmbuf.h>
  14#include <linux/lcm.h>
  15#include <linux/list.h>
  16#include <linux/module.h>
  17#include <linux/of.h>
  18#include <linux/slab.h>
  19#include <linux/xilinx-v4l2-controls.h>
  20
  21#include <media/v4l2-dev.h>
  22#include <media/v4l2-fh.h>
  23#include <media/v4l2-ioctl.h>
  24#include <media/videobuf2-v4l2.h>
  25#include <media/videobuf2-dma-contig.h>
  26
  27#include "xilinx-dma.h"
  28#include "xilinx-vip.h"
  29#include "xilinx-vipp.h"
  30
  31#define XVIP_DMA_DEF_FORMAT             V4L2_PIX_FMT_YUYV
  32#define XVIP_DMA_DEF_WIDTH              1920
  33#define XVIP_DMA_DEF_HEIGHT             1080
  34#define XVIP_DMA_DEF_WIDTH_ALIGN        2
  35/* Minimum and maximum widths are expressed in bytes */
  36#define XVIP_DMA_MIN_WIDTH              1U
  37#define XVIP_DMA_MAX_WIDTH              65535U
  38#define XVIP_DMA_MIN_HEIGHT             1U
  39#define XVIP_DMA_MAX_HEIGHT             8191U
  40
  41/* -----------------------------------------------------------------------------
  42 * Helper functions
  43 */
  44
  45static struct v4l2_subdev *
  46xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
  47{
  48        struct media_pad *remote;
  49
  50        remote = media_entity_remote_pad(local);
  51        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
  52                return NULL;
  53
  54        if (pad)
  55                *pad = remote->index;
  56
  57        return media_entity_to_v4l2_subdev(remote->entity);
  58}
  59
  60static int xvip_dma_verify_format(struct xvip_dma *dma)
  61{
  62        struct v4l2_subdev_format fmt;
  63        struct v4l2_subdev *subdev;
  64        int ret;
  65
  66        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
  67        if (!subdev)
  68                return -EPIPE;
  69
  70        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  71        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  72        if (ret < 0)
  73                return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  74
  75        if (dma->fmtinfo->code != fmt.format.code)
  76                return -EINVAL;
  77
  78        /*
   79         * The crop rectangle defaults to the format resolution, and holds the
   80         * rectangle set by s_selection after it has been executed.
  81         */
  82        if (dma->r.width != fmt.format.width ||
  83            dma->r.height != fmt.format.height)
  84                return -EINVAL;
  85
  86        return 0;
  87}
  88
  89/* -----------------------------------------------------------------------------
  90 * Pipeline Stream Management
  91 */
  92
  93/**
  94 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
  95 * @pipe: The pipeline
  96 * @on: Turn the stream on when true or off when false
  97 *
   98 * The pipeline is shared between all DMA engines connected at its input and
   99 * output. While the stream state of DMA engines can be controlled
  100 * independently, pipelines have a shared stream state that enables or disables
  101 * all entities in the pipeline. For this reason the pipeline uses a streaming
  102 * counter that tracks the number of DMA engines that have requested the stream
  103 * to be enabled. Starting or stopping the stream walks the graph from each DMA
  104 * engine and enables or disables the entities in the path.
 105 *
 106 * When called with the @on argument set to true, this function will increment
 107 * the pipeline streaming count. If the streaming count reaches the number of
 108 * DMA engines in the pipeline it will enable all entities that belong to the
 109 * pipeline.
 110 *
 111 * Similarly, when called with the @on argument set to false, this function will
 112 * decrement the pipeline streaming count and disable all entities in the
 113 * pipeline when the streaming count reaches zero.
 114 *
 115 * Return: 0 if successful, or the return value of the failed video::s_stream
 116 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 117 * not updated when the operation fails.
 118 */
 119static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
 120{
 121        struct xvip_composite_device *xdev;
 122        int ret = 0;
 123
 124        mutex_lock(&pipe->lock);
 125        xdev = pipe->xdev;
 126
 127        if (on) {
 128                if (pipe->stream_count == pipe->num_dmas - 1 || xdev->atomic_streamon) {
 129                        ret = xvip_graph_pipeline_start_stop(xdev, pipe, true);
 130                        if (ret < 0)
 131                                goto done;
 132                }
 133                pipe->stream_count++;
 134        } else {
 135                if (--pipe->stream_count == 0)
 136                        xvip_graph_pipeline_start_stop(xdev, pipe, false);
 137        }
 138
 139done:
 140        mutex_unlock(&pipe->lock);
 141        return ret;
 142}
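     /*
      * Example of the counting in xvip_pipeline_set_stream(): in a pipeline
      * with two DMA engines and atomic_streamon disabled, the first call with
      * @on true only bumps stream_count to one; the second call sees
      * stream_count == num_dmas - 1, starts every entity in the graph and then
      * bumps the count to two. Stopping mirrors this: the graph is stopped
      * when the last engine brings the count back to zero.
      */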
 143
 144static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
 145                                  struct xvip_dma *start)
 146{
 147        struct media_graph graph;
 148        struct media_entity *entity = &start->video.entity;
 149        struct media_device *mdev = entity->graph_obj.mdev;
 150        unsigned int num_inputs = 0;
 151        unsigned int num_outputs = 0;
 152        int ret;
 153
 154        mutex_lock(&mdev->graph_mutex);
 155
 156        /* Walk the graph to locate the video nodes. */
 157        ret = media_graph_walk_init(&graph, mdev);
 158        if (ret) {
 159                mutex_unlock(&mdev->graph_mutex);
 160                return ret;
 161        }
 162
 163        media_graph_walk_start(&graph, entity);
 164
 165        while ((entity = media_graph_walk_next(&graph))) {
 166                struct xvip_dma *dma;
 167
 168                if (entity->function != MEDIA_ENT_F_IO_V4L)
 169                        continue;
 170
 171                dma = to_xvip_dma(media_entity_to_video_device(entity));
 172
 173                if (dma->pad.flags & MEDIA_PAD_FL_SINK)
 174                        num_outputs++;
 175                else
 176                        num_inputs++;
 177        }
 178
 179        mutex_unlock(&mdev->graph_mutex);
 180
 181        media_graph_walk_cleanup(&graph);
 182
 183        /* We need at least one DMA to proceed */
 184        if (num_outputs == 0 && num_inputs == 0)
 185                return -EPIPE;
 186
 187        pipe->num_dmas = num_inputs + num_outputs;
 188        pipe->xdev = start->xdev;
 189
 190        return 0;
 191}
 192
 193static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
 194{
 195        pipe->num_dmas = 0;
 196}
 197
 198/**
 199 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 200 * @pipe: the pipeline
 201 *
 202 * Decrease the pipeline use count and clean it up if we were the last user.
 203 */
 204static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
 205{
 206        mutex_lock(&pipe->lock);
 207
  208        /* If we're the last user, clean up the pipeline. */
 209        if (--pipe->use_count == 0)
 210                __xvip_pipeline_cleanup(pipe);
 211
 212        mutex_unlock(&pipe->lock);
 213}
 214
 215/**
 216 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 217 * @pipe: the pipeline
 218 * @dma: DMA engine at one end of the pipeline
 219 *
 220 * Validate the pipeline if no user exists yet, otherwise just increase the use
 221 * count.
 222 *
 223 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 224 */
 225static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
 226                                 struct xvip_dma *dma)
 227{
 228        int ret;
 229
 230        mutex_lock(&pipe->lock);
 231
  232        /* If we're the first user, validate and initialize the pipeline. */
 233        if (pipe->use_count == 0) {
 234                ret = xvip_pipeline_validate(pipe, dma);
 235                if (ret < 0) {
 236                        __xvip_pipeline_cleanup(pipe);
 237                        goto done;
 238                }
 239        }
 240
 241        pipe->use_count++;
 242        ret = 0;
 243
 244done:
 245        mutex_unlock(&pipe->lock);
 246        return ret;
 247}
 248
 249/* -----------------------------------------------------------------------------
 250 * videobuf2 queue operations
 251 */
 252
 253/**
 254 * struct xvip_dma_buffer - Video DMA buffer
 255 * @buf: vb2 buffer base object
 256 * @queue: buffer list entry in the DMA engine queued buffers list
 257 * @dma: DMA channel that uses the buffer
 258 * @desc: Descriptor associated with this structure
 259 */
 260struct xvip_dma_buffer {
 261        struct vb2_v4l2_buffer buf;
 262        struct list_head queue;
 263        struct xvip_dma *dma;
 264        struct dma_async_tx_descriptor *desc;
 265};
 266
 267#define to_xvip_dma_buffer(vb)  container_of(vb, struct xvip_dma_buffer, buf)
 268
 269static void xvip_dma_complete(void *param)
 270{
 271        struct xvip_dma_buffer *buf = param;
 272        struct xvip_dma *dma = buf->dma;
 273        int i, sizeimage;
 274        u32 fid;
 275        int status;
 276
 277        spin_lock(&dma->queued_lock);
 278        list_del(&buf->queue);
 279        spin_unlock(&dma->queued_lock);
 280
 281        buf->buf.field = V4L2_FIELD_NONE;
 282        buf->buf.sequence = dma->sequence++;
 283        buf->buf.vb2_buf.timestamp = ktime_get_ns();
 284
 285        status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
 286        if (!status) {
 287                if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
 288                     dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
 289                     dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
 290                        /*
 291                         * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
 292                         * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
 293                         */
 294                        buf->buf.field = fid ?
 295                                         V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
 296
 297                        if (fid == dma->prev_fid)
 298                                buf->buf.sequence = dma->sequence++;
 299
 300                        buf->buf.sequence >>= 1;
 301                        dma->prev_fid = fid;
 302                }
 303        }
 304
 305        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 306                for (i = 0; i < dma->fmtinfo->buffers; i++) {
 307                        sizeimage =
 308                                dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
 309                        vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
 310                }
 311        } else {
 312                sizeimage = dma->format.fmt.pix.sizeimage;
 313                vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
 314        }
 315
 316        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
 317}
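     /*
      * Example of the sequence numbering in xvip_dma_complete() for
      * V4L2_FIELD_ALTERNATE input: buffers completing with fid 1, 0, 1, 0 are
      * reported as TOP, BOTTOM, TOP, BOTTOM with sequence numbers 0, 0, 1, 1;
      * a repeated fid gets an extra increment so that the frame count keeps
      * advancing across the dropped field.
      */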
 318
 319static int
 320xvip_dma_queue_setup(struct vb2_queue *vq,
 321                     unsigned int *nbuffers, unsigned int *nplanes,
 322                     unsigned int sizes[], struct device *alloc_devs[])
 323{
 324        struct xvip_dma *dma = vb2_get_drv_priv(vq);
 325        unsigned int i;
 326        int sizeimage;
 327
 328        /* Multi planar case: Make sure the image size is large enough */
 329        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 330                if (*nplanes) {
 331                        if (*nplanes != dma->format.fmt.pix_mp.num_planes)
 332                                return -EINVAL;
 333
 334                        for (i = 0; i < *nplanes; i++) {
 335                                sizeimage =
 336                                  dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
 337                                if (sizes[i] < sizeimage)
 338                                        return -EINVAL;
 339                        }
 340                } else {
 341                        *nplanes = dma->fmtinfo->buffers;
 342                        for (i = 0; i < dma->fmtinfo->buffers; i++) {
 343                                sizeimage =
 344                                  dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
 345                                sizes[i] = sizeimage;
 346                        }
 347                }
 348                return 0;
 349        }
 350
 351        /* Single planar case: Make sure the image size is large enough */
 352        sizeimage = dma->format.fmt.pix.sizeimage;
 353        if (*nplanes == 1)
 354                return sizes[0] < sizeimage ? -EINVAL : 0;
 355
 356        *nplanes = 1;
 357        sizes[0] = sizeimage;
 358
 359        return 0;
 360}
 361
 362static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
 363{
 364        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 365        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
 366        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
 367
 368        buf->dma = dma;
 369
 370        return 0;
 371}
 372
 373static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
 374{
 375        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 376        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
 377        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
 378        struct dma_async_tx_descriptor *desc;
 379        dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 380        u32 flags = 0;
 381        u32 luma_size;
 382        u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
 383        u32 fid = ~0;
 384        u32 bpl;
 385
 386        if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
 387            dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
 388                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 389                dma->xt.dir = DMA_DEV_TO_MEM;
 390                dma->xt.src_sgl = false;
 391                dma->xt.dst_sgl = true;
 392                dma->xt.dst_start = addr;
 393        } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
 394                   dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 395                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
 396                dma->xt.dir = DMA_MEM_TO_DEV;
 397                dma->xt.src_sgl = true;
 398                dma->xt.dst_sgl = false;
 399                dma->xt.src_start = addr;
 400        }
 401
 402        /*
  403         * The DMA IP supports only two planes, so one data chunk is sufficient
  404         * to reach the start address of the second plane
 405         */
 406        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 407                struct v4l2_pix_format_mplane *pix_mp;
 408                size_t size;
 409
 410                pix_mp = &dma->format.fmt.pix_mp;
 411                bpl = pix_mp->plane_fmt[0].bytesperline;
 412
 413                xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
 414                xvip_width_padding_factor(pix_mp->pixelformat,
 415                                          &padding_factor_nume,
 416                                          &padding_factor_deno);
 417                xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
 418                                        &bpl_deno);
 419                dma->xt.frame_size = dma->fmtinfo->num_planes;
 420
 421                size = ((size_t)dma->r.width * dma->fmtinfo->bpl_factor *
 422                        padding_factor_nume * bpl_nume) /
 423                        ((size_t)padding_factor_deno * bpl_deno);
 424                dma->sgl[0].size = size;
 425
 426                dma->sgl[0].icg = bpl - dma->sgl[0].size;
 427                dma->xt.numf = dma->r.height;
 428
 429                /*
 430                 * dst_icg is the number of bytes to jump after last luma addr
 431                 * and before first chroma addr
 432                 */
 433
 434                /* Handling contiguous data with mplanes */
 435                if (dma->fmtinfo->buffers == 1) {
 436                        dma->sgl[0].dst_icg = (size_t)bpl *
 437                                              (pix_mp->height - dma->r.height);
 438                } else {
 439                        /* Handling non-contiguous data with mplanes */
 440                        if (dma->fmtinfo->buffers == 2 || dma->fmtinfo->buffers == 3) {
 441                                dma_addr_t chroma_addr =
 442                                        vb2_dma_contig_plane_dma_addr(vb, 1);
 443                                luma_size = bpl * dma->xt.numf;
 444                                if (chroma_addr > addr)
 445                                        dma->sgl[0].dst_icg = chroma_addr -
 446                                                addr - luma_size;
 447                        }
 448                        /* Handle the 3rd plane for Y_U_V8 */
 449                        if (dma->fmtinfo->buffers == 3) {
 450                                dma_addr_t chroma_addr =
 451                                        vb2_dma_contig_plane_dma_addr(vb, 1);
 452                                dma_addr_t third_plane_addr =
 453                                        vb2_dma_contig_plane_dma_addr(vb, 2);
 454                                u32 chroma_size = bpl * dma->xt.numf;
 455
 456                                if (third_plane_addr > chroma_addr)
 457                                        dma->sgl[0].dst_icg = third_plane_addr -
 458                                                chroma_addr - chroma_size;
 459                        }
 460                }
 461        } else {
 462                struct v4l2_pix_format *pix;
 463                size_t size;
 464                size_t dst_icg;
 465
 466                pix = &dma->format.fmt.pix;
 467                bpl = pix->bytesperline;
 468                xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
 469                xvip_width_padding_factor(pix->pixelformat,
 470                                          &padding_factor_nume,
 471                                          &padding_factor_deno);
 472                xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
 473                                        &bpl_deno);
 474                dma->xt.frame_size = dma->fmtinfo->num_planes;
 475                size = ((size_t)dma->r.width * dma->fmtinfo->bpl_factor *
 476                        padding_factor_nume * bpl_nume) /
 477                        ((size_t)padding_factor_deno * bpl_deno);
 478                dma->sgl[0].size = size;
 479                dma->sgl[0].icg = bpl - dma->sgl[0].size;
 480                dma->xt.numf = dma->r.height;
 481                dma->sgl[0].dst_icg = 0;
 482                dst_icg = (size_t)bpl * (pix->height - dma->r.height);
 483                dma->sgl[0].dst_icg = dst_icg;
 484        }
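             /*
              * Rough example of the interleaved template built above, assuming
              * NV12 maps to a single-buffer, two-plane entry with bpl_factor
              * and the padding/bpl scaling factors all equal to one: for a
              * 1920x1080 NV12 buffer (bytesperline = 1920) cropped to
              * 1280x720, sgl[0].size = 1280, sgl[0].icg = 640, numf = 720 and
              * dst_icg = 1920 * (1080 - 720) = 691200 bytes skipped between
              * the luma and chroma sections.
              */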
 485
 486        desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
 487        if (!desc) {
 488                dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
 489                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
 490                return;
 491        }
 492        desc->callback = xvip_dma_complete;
 493        desc->callback_param = buf;
 494        buf->desc = desc;
 495
 496        if (buf->buf.field == V4L2_FIELD_TOP)
 497                fid = 1;
 498        else if (buf->buf.field == V4L2_FIELD_BOTTOM)
 499                fid = 0;
 500        else if (buf->buf.field == V4L2_FIELD_NONE)
 501                fid = 0;
 502
 503        xilinx_xdma_set_fid(dma->dma, desc, fid);
 504
 505        spin_lock_irq(&dma->queued_lock);
 506        list_add_tail(&buf->queue, &dma->queued_bufs);
 507        spin_unlock_irq(&dma->queued_lock);
 508
 509        /*
  510         * Low latency capture: request the descriptor callback at the start
  511         * of descriptor processing
 512         */
 513        if (dma->low_latency_cap)
 514                xilinx_xdma_set_earlycb(dma->dma, desc,
 515                                        EARLY_CALLBACK_START_DESC);
 516        dmaengine_submit(desc);
 517
 518        if (vb2_is_streaming(&dma->queue))
 519                dma_async_issue_pending(dma->dma);
 520}
 521
 522static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
 523{
 524        struct xvip_dma *dma = vb2_get_drv_priv(vq);
 525        struct xvip_dma_buffer *buf, *nbuf;
 526        struct xvip_pipeline *pipe;
 527        int ret;
 528
 529        dma->sequence = 0;
 530        dma->prev_fid = ~0;
 531
 532        /*
 533         * Start streaming on the pipeline. No link touching an entity in the
 534         * pipeline can be activated or deactivated once streaming is started.
 535         *
 536         * Use the pipeline object embedded in the first DMA object that starts
 537         * streaming.
 538         */
 539        mutex_lock(&dma->xdev->lock);
 540        pipe = dma->video.entity.pipe
 541             ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
 542
 543        ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
 544        mutex_unlock(&dma->xdev->lock);
 545        if (ret < 0)
 546                goto error;
 547
 548        /* Verify that the configured format matches the output of the
 549         * connected subdev.
 550         */
 551        ret = xvip_dma_verify_format(dma);
 552        if (ret < 0)
 553                goto error_stop;
 554
 555        ret = xvip_pipeline_prepare(pipe, dma);
 556        if (ret < 0)
 557                goto error_stop;
 558
 559        /* Start the DMA engine. This must be done before starting the blocks
 560         * in the pipeline to avoid DMA synchronization issues.
  561         * We don't want to start the DMA in low latency capture mode;
  562         * applications will start the DMA using S_CTRL at a later point in time.
 563         */
 564        if (!dma->low_latency_cap) {
 565                dma_async_issue_pending(dma->dma);
 566        } else {
 567                /* For low latency capture, return the first buffer early
  568                 * so that the consumer can initialize before we start the DMA.
 569                 */
 570                buf = list_first_entry(&dma->queued_bufs,
 571                                       struct xvip_dma_buffer, queue);
 572                xvip_dma_complete(buf);
 573                buf->desc->callback = NULL;
 574        }
 575
 576        /* Start the pipeline. */
 577        ret = xvip_pipeline_set_stream(pipe, true);
 578        if (ret < 0)
 579                goto error_stop;
 580
 581        return 0;
 582
 583error_stop:
 584        media_pipeline_stop(&dma->video.entity);
 585
 586error:
 587        dmaengine_terminate_all(dma->dma);
 588        /* Give back all queued buffers to videobuf2. */
 589        spin_lock_irq(&dma->queued_lock);
 590        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
 591                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
 592                list_del(&buf->queue);
 593        }
 594        spin_unlock_irq(&dma->queued_lock);
 595
 596        return ret;
 597}
 598
 599static void xvip_dma_stop_streaming(struct vb2_queue *vq)
 600{
 601        struct xvip_dma *dma = vb2_get_drv_priv(vq);
 602        struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
 603        struct xvip_dma_buffer *buf, *nbuf;
 604
 605        /* Stop the pipeline. */
 606        xvip_pipeline_set_stream(pipe, false);
 607
 608        /* Stop and reset the DMA engine. */
 609        dmaengine_terminate_all(dma->dma);
 610
 611        /* Cleanup the pipeline and mark it as being stopped. */
 612        xvip_pipeline_cleanup(pipe);
 613        media_pipeline_stop(&dma->video.entity);
 614
 615        /* Give back all queued buffers to videobuf2. */
 616        spin_lock_irq(&dma->queued_lock);
 617        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
 618                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
 619                list_del(&buf->queue);
 620        }
 621        spin_unlock_irq(&dma->queued_lock);
 622}
 623
 624static const struct vb2_ops xvip_dma_queue_qops = {
 625        .queue_setup = xvip_dma_queue_setup,
 626        .buf_prepare = xvip_dma_buffer_prepare,
 627        .buf_queue = xvip_dma_buffer_queue,
 628        .wait_prepare = vb2_ops_wait_prepare,
 629        .wait_finish = vb2_ops_wait_finish,
 630        .start_streaming = xvip_dma_start_streaming,
 631        .stop_streaming = xvip_dma_stop_streaming,
 632};
 633
 634/* -----------------------------------------------------------------------------
 635 * V4L2 ioctls
 636 */
 637
 638static int
 639xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
 640{
 641        struct v4l2_fh *vfh = file->private_data;
 642        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 643
 644        cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
 645                            V4L2_CAP_DEVICE_CAPS;
 646
 647        strscpy((char *)cap->driver, "xilinx-vipp", sizeof(cap->driver));
 648        strscpy((char *)cap->card, (char *)dma->video.name, sizeof(cap->card));
 649        snprintf((char *)cap->bus_info, sizeof(cap->bus_info),
 650                 "platform:%pOFn:%u", dma->xdev->dev->of_node, dma->port);
 651
 652        return 0;
 653}
 654
 655static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
 656                              struct v4l2_subdev_format *v4l_fmt)
 657{
 658        const struct xvip_video_format *fmt;
 659        int ret;
 660        u32 i, fmt_cnt, *fmts;
 661
 662        ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
 663        if (ret)
 664                return ret;
 665
 666        /* Has media pad value changed? */
 667        if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
 668            !dma->remote_subdev_med_bus) {
 669                /* Re-generate legal list of fourcc codes */
 670                dma->poss_v4l2_fmt_cnt = 0;
 671                dma->remote_subdev_med_bus = v4l_fmt->format.code;
 672
 673                if (!dma->poss_v4l2_fmts) {
 674                        dma->poss_v4l2_fmts =
 675                                devm_kzalloc(&dma->video.dev,
 676                                             sizeof(u32) * fmt_cnt,
 677                                             GFP_KERNEL);
 678                        if (!dma->poss_v4l2_fmts)
 679                                return -ENOMEM;
 680                }
 681
 682                for (i = 0; i < fmt_cnt; i++) {
 683                        fmt = xvip_get_format_by_fourcc(fmts[i]);
 684                        if (IS_ERR(fmt))
 685                                return PTR_ERR(fmt);
 686
 687                        if (fmt->code != dma->remote_subdev_med_bus)
 688                                continue;
 689
 690                        dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
 691                }
 692        }
 693
  694        /* Return an error if the index is outside the range of legal values */
 695        if (f->index >= dma->poss_v4l2_fmt_cnt)
 696                return -EINVAL;
 697
 698        /* Else return pix format in table */
 699        fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
 700        if (IS_ERR(fmt))
 701                return PTR_ERR(fmt);
 702
 703        f->pixelformat = fmt->fourcc;
 704
 705        return 0;
 706}
 707
 708static int
 709xvip_dma_enum_input(struct file *file, void *priv, struct v4l2_input *i)
 710{
 711        struct v4l2_fh *vfh = file->private_data;
 712        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 713        struct v4l2_subdev *subdev;
 714
 715        if (i->index > 0)
 716                return -EINVAL;
 717
 718        subdev = xvip_dma_remote_subdev(&dma->pad, NULL);
 719        if (!subdev)
 720                return -EPIPE;
 721
 722        /*
  723         * FIXME: right now only the camera input type is handled.
  724         * There should be a mechanism to distinguish other types of
 725         * input like V4L2_INPUT_TYPE_TUNER and V4L2_INPUT_TYPE_TOUCH.
 726         */
 727        i->type = V4L2_INPUT_TYPE_CAMERA;
  728        strscpy((char *)i->name, (char *)subdev->name, sizeof(i->name));
 729
 730        return 0;
 731}
 732
 733static int
 734xvip_dma_get_input(struct file *file, void *fh, unsigned int *i)
 735{
 736        *i = 0;
 737        return 0;
 738}
 739
 740static int
 741xvip_dma_set_input(struct file *file, void *fh, unsigned int i)
 742{
 743        if (i > 0)
 744                return -EINVAL;
 745
 746        return 0;
 747}
 748
 749/* FIXME: without this callback function, some applications are not configured
  750 * with the correct formats, and they end up with frames in the wrong format.
  751 * Whether this callback is required is not clearly defined, so it should be
 752 * clarified through the mailing list.
 753 */
 754static int
 755xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
 756{
 757        struct v4l2_fh *vfh = file->private_data;
 758        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 759        struct v4l2_subdev *subdev;
 760        struct v4l2_subdev_format v4l_fmt;
 761        const struct xvip_video_format *fmt;
 762        int err, ret;
 763
 764        /* Establish media pad format */
 765        subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
 766        if (!subdev)
 767                return -EPIPE;
 768
 769        v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 770        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
 771        if (ret < 0)
 772                return ret == -ENOIOCTLCMD ? -EINVAL : ret;
 773
 774        /*
  775         * In the case of frmbuf DMA, this will invoke frmbuf driver-specific APIs
  776         * to enumerate formats; otherwise return the pix format corresponding
  777         * to the subdev's media bus format. This kind of separation would be
 778         * helpful for clean up and upstreaming.
 779         */
 780        err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
 781        if (!err)
 782                return err;
 783
 784        /*
 785         * This logic will just return one pix format based on subdev's
 786         * media bus format
 787         */
 788        if (f->index > 0)
 789                return -EINVAL;
 790
 791        fmt = xvip_get_format_by_code(v4l_fmt.format.code);
 792        if (IS_ERR(fmt))
 793                return PTR_ERR(fmt);
 794
 795        f->pixelformat = fmt->fourcc;
 796
 797        return 0;
 798}
 799
 800static int
 801xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
 802{
 803        struct v4l2_fh *vfh = file->private_data;
 804        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 805
 806        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
 807                format->fmt.pix_mp = dma->format.fmt.pix_mp;
 808        else
 809                format->fmt.pix = dma->format.fmt.pix;
 810
 811        return 0;
 812}
 813
 814static void
 815__xvip_dma_try_format(struct xvip_dma *dma,
 816                      struct v4l2_format *format,
 817                      const struct xvip_video_format **fmtinfo)
 818{
 819        const struct xvip_video_format *info;
 820        unsigned int min_width;
 821        unsigned int max_width;
 822        unsigned int min_bpl;
 823        unsigned int max_bpl;
 824        unsigned int width;
 825        unsigned int bpl;
 826        unsigned int i, hsub, vsub, plane_width, plane_height;
 827        unsigned int fourcc;
 828        unsigned int padding_factor_nume, padding_factor_deno;
 829        unsigned int bpl_nume, bpl_deno;
 830        struct v4l2_subdev_format fmt;
 831        struct v4l2_subdev *subdev;
 832        int ret;
 833
 834        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
 835        if (!subdev)
 836                return;
 837
 838        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 839        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
 840        if (ret < 0)
 841                return;
 842
 843        if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
 844                if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
 845                        dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
 846                else
 847                        dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
 848        } else {
 849                if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
 850                        dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
 851                else
 852                        dma->format.fmt.pix.field = V4L2_FIELD_NONE;
 853        }
 854
 855        /* Retrieve format information and select the default format if the
 856         * requested format isn't supported.
 857         */
 858        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
 859                fourcc = format->fmt.pix_mp.pixelformat;
 860        else
 861                fourcc = format->fmt.pix.pixelformat;
 862
 863        info = xvip_get_format_by_fourcc(fourcc);
 864
 865        if (IS_ERR(info))
 866                info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
 867
 868        xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
 869                                  &padding_factor_deno);
 870        xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);
 871
 872        /* The transfer alignment requirements are expressed in bytes. Compute
 873         * the minimum and maximum values, clamp the requested width and convert
 874         * it back to pixels.
 875         */
 876        min_width = roundup(XVIP_DMA_MIN_WIDTH, dma->width_align);
 877        max_width = rounddown(XVIP_DMA_MAX_WIDTH, dma->width_align);
 878
 879        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 880                struct v4l2_pix_format_mplane *pix_mp;
 881                struct v4l2_plane_pix_format *plane_fmt;
 882
 883                pix_mp = &format->fmt.pix_mp;
 884                plane_fmt = pix_mp->plane_fmt;
 885                pix_mp->field = dma->format.fmt.pix_mp.field;
 886                width = rounddown(pix_mp->width * info->bpl_factor,
 887                                  dma->width_align);
 888                pix_mp->width = clamp(width, min_width, max_width) /
 889                                info->bpl_factor;
 890                pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
 891                                       XVIP_DMA_MAX_HEIGHT);
 892
 893                /*
 894                 * Clamp the requested bytes per line value. If the maximum
 895                 * bytes per line value is zero, the module doesn't support
 896                 * user configurable line sizes. Override the requested value
 897                 * with the minimum in that case.
 898                 */
 899
 900                max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
 901
 902                /* Handling contiguous data with mplanes */
 903                if (info->buffers == 1) {
 904                        min_bpl = (pix_mp->width * info->bpl_factor *
 905                                   padding_factor_nume * bpl_nume) /
 906                                   (padding_factor_deno * bpl_deno);
 907                        min_bpl = roundup(min_bpl, dma->align);
 908                        bpl = roundup(plane_fmt[0].bytesperline, dma->align);
 909                        plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
 910                                                          max_bpl);
 911
 912                        if (info->num_planes == 1) {
 913                                /* Single plane formats */
 914                                plane_fmt[0].sizeimage =
 915                                                plane_fmt[0].bytesperline *
 916                                                pix_mp->height;
 917                        } else {
 918                                /* Multi plane formats */
 919                                plane_fmt[0].sizeimage =
 920                                        DIV_ROUND_UP(plane_fmt[0].bytesperline *
 921                                                     pix_mp->height *
 922                                                     info->bpp, 8);
 923                        }
 924                } else {
 925                        /* Handling non-contiguous data with mplanes */
 926                        hsub = info->hsub;
 927                        vsub = info->vsub;
 928                        for (i = 0; i < info->num_planes; i++) {
 929                                plane_width = pix_mp->width / (i ? hsub : 1);
 930                                plane_height = pix_mp->height / (i ? vsub : 1);
 931                                min_bpl = (plane_width * info->bpl_factor *
 932                                           padding_factor_nume * bpl_nume) /
 933                                           (padding_factor_deno * bpl_deno);
 934                                min_bpl = roundup(min_bpl, dma->align);
 935                                bpl = rounddown(plane_fmt[i].bytesperline,
 936                                                dma->align);
 937                                plane_fmt[i].bytesperline =
 938                                                clamp(bpl, min_bpl, max_bpl);
 939                                plane_fmt[i].sizeimage =
 940                                                plane_fmt[i].bytesperline *
 941                                                plane_height;
 942                        }
 943                }
 944        } else {
 945                struct v4l2_pix_format *pix;
 946
 947                pix = &format->fmt.pix;
 948                pix->field = dma->format.fmt.pix.field;
 949                width = rounddown(pix->width * info->bpl_factor,
 950                                  dma->width_align);
 951                pix->width = clamp(width, min_width, max_width) /
 952                             info->bpl_factor;
 953                pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
 954                                    XVIP_DMA_MAX_HEIGHT);
 955
 956                min_bpl = (pix->width * info->bpl_factor *
 957                          padding_factor_nume * bpl_nume) /
 958                          (padding_factor_deno * bpl_deno);
 959                min_bpl = roundup(min_bpl, dma->align);
 960                max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
 961                bpl = rounddown(pix->bytesperline, dma->align);
 962                pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
 963                pix->sizeimage = pix->width * pix->height * info->bpp / 8;
 964        }
 965
 966        if (fmtinfo)
 967                *fmtinfo = info;
 968}
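     /*
      * Worked example for __xvip_dma_try_format(), assuming the default YUYV
      * entry has bpl_factor = 2, bpp = 16 and padding/bpl scaling factors of
      * one: a requested 1920x1080 single-planar format keeps its resolution,
      * min_bpl = 1920 * 2 = 3840 bytes, bytesperline is clamped to at least
      * 3840 (rounded to dma->align) and sizeimage = 1920 * 1080 * 16 / 8 =
      * 4147200 bytes.
      */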
 969
 970static int
 971xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
 972{
 973        struct v4l2_fh *vfh = file->private_data;
 974        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 975
 976        __xvip_dma_try_format(dma, format, NULL);
 977        return 0;
 978}
 979
 980static int
 981xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
 982{
 983        struct v4l2_fh *vfh = file->private_data;
 984        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 985        const struct xvip_video_format *info;
 986
 987        __xvip_dma_try_format(dma, format, &info);
 988
 989        if (vb2_is_busy(&dma->queue))
 990                return -EBUSY;
 991
 992        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 993                dma->format.fmt.pix_mp = format->fmt.pix_mp;
 994
 995                /*
 996                 * Save format resolution in crop rectangle. This will be
  997                 * updated when s_selection is called.
 998                 */
 999                dma->r.width = format->fmt.pix_mp.width;
1000                dma->r.height = format->fmt.pix_mp.height;
1001        } else {
1002                dma->format.fmt.pix = format->fmt.pix;
1003                dma->r.width = format->fmt.pix.width;
1004                dma->r.height = format->fmt.pix.height;
1005        }
1006
1007        dma->fmtinfo = info;
1008
1009        return 0;
1010}
1011
1012static int
1013xvip_dma_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
1014{
1015        struct v4l2_fh *vfh = file->private_data;
1016        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1017        u32 width, height;
1018        bool crop_frame = false;
1019
1020        switch (sel->target) {
1021        case V4L2_SEL_TGT_COMPOSE:
1022                if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1023                        return -EINVAL;
1024
1025                crop_frame = true;
1026                break;
1027        case V4L2_SEL_TGT_COMPOSE_BOUNDS:
1028        case V4L2_SEL_TGT_COMPOSE_DEFAULT:
1029                if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1030                        return -EINVAL;
1031                break;
1032        case V4L2_SEL_TGT_CROP:
1033                if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
1034                        return -EINVAL;
1035
1036                crop_frame = true;
1037                break;
1038        case V4L2_SEL_TGT_CROP_BOUNDS:
1039        case V4L2_SEL_TGT_CROP_DEFAULT:
1040                if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
1041                        return -EINVAL;
1042                break;
1043        default:
1044                return -EINVAL;
1045        }
1046
1047        sel->r.left = 0;
1048        sel->r.top = 0;
1049
1050        if (crop_frame) {
1051                sel->r.width = dma->r.width;
1052                sel->r.height = dma->r.height;
1053        } else {
1054                if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
1055                        width = dma->format.fmt.pix_mp.width;
1056                        height = dma->format.fmt.pix_mp.height;
1057                } else {
1058                        width = dma->format.fmt.pix.width;
1059                        height = dma->format.fmt.pix.height;
1060                }
1061
1062                sel->r.width = width;
1063                sel->r.height = height;
1064        }
1065
1066        return 0;
1067}
1068
1069static int
1070xvip_dma_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
1071{
1072        struct v4l2_fh *vfh = file->private_data;
1073        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1074        u32 width, height;
1075
1076        switch (sel->target) {
1077        case V4L2_SEL_TGT_COMPOSE:
1078                /* COMPOSE target is only valid for capture buftype */
1079                if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
1080                        return -EINVAL;
1081                break;
1082        case V4L2_SEL_TGT_CROP:
1083                /* CROP target is only valid for output buftype */
1084                if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
1085                        return -EINVAL;
1086                break;
1087        default:
1088                return -EINVAL;
1089        }
1090
1091        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
1092                width = dma->format.fmt.pix_mp.width;
1093                height = dma->format.fmt.pix_mp.height;
1094        } else {
1095                width = dma->format.fmt.pix.width;
1096                height = dma->format.fmt.pix.height;
1097        }
1098
1099        if (sel->r.width > width || sel->r.height > height ||
1100            sel->r.top != 0 || sel->r.left != 0)
1101                return -EINVAL;
1102
1103        sel->r.width = rounddown(max(XVIP_DMA_MIN_WIDTH, sel->r.width),
1104                                 dma->width_align);
1105        sel->r.height = max(XVIP_DMA_MIN_HEIGHT, sel->r.height);
1106        dma->r.width = sel->r.width;
1107        dma->r.height = sel->r.height;
1108
1109        return 0;
1110}
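     /*
      * For illustration, a capture application could shrink the DMA window
      * after setting the format, roughly as follows (assuming a /dev/videoN
      * node backed by this driver):
      *
      *        struct v4l2_selection sel = {
      *                .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
      *                .target = V4L2_SEL_TGT_COMPOSE,
      *                .r = { .left = 0, .top = 0, .width = 1280, .height = 720 },
      *        };
      *        ioctl(fd, VIDIOC_S_SELECTION, &sel);
      *
      * The rectangle is clamped to the current format and stored in dma->r,
      * which xvip_dma_buffer_queue() then uses to size the DMA transfer.
      */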
1111
1112static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
1113        .vidioc_querycap                = xvip_dma_querycap,
1114        .vidioc_enum_fmt_vid_cap        = xvip_dma_enum_format,
1115        .vidioc_enum_fmt_vid_out        = xvip_dma_enum_format,
1116        .vidioc_g_fmt_vid_cap           = xvip_dma_get_format,
1117        .vidioc_g_fmt_vid_cap_mplane    = xvip_dma_get_format,
1118        .vidioc_g_fmt_vid_out           = xvip_dma_get_format,
1119        .vidioc_g_fmt_vid_out_mplane    = xvip_dma_get_format,
1120        .vidioc_s_fmt_vid_cap           = xvip_dma_set_format,
1121        .vidioc_s_fmt_vid_cap_mplane    = xvip_dma_set_format,
1122        .vidioc_s_fmt_vid_out           = xvip_dma_set_format,
1123        .vidioc_s_fmt_vid_out_mplane    = xvip_dma_set_format,
1124        .vidioc_try_fmt_vid_cap         = xvip_dma_try_format,
1125        .vidioc_try_fmt_vid_cap_mplane  = xvip_dma_try_format,
1126        .vidioc_try_fmt_vid_out         = xvip_dma_try_format,
1127        .vidioc_try_fmt_vid_out_mplane  = xvip_dma_try_format,
1128        .vidioc_s_selection             = xvip_dma_s_selection,
1129        .vidioc_g_selection             = xvip_dma_g_selection,
1130        .vidioc_reqbufs                 = vb2_ioctl_reqbufs,
1131        .vidioc_querybuf                = vb2_ioctl_querybuf,
1132        .vidioc_qbuf                    = vb2_ioctl_qbuf,
1133        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
1134        .vidioc_create_bufs             = vb2_ioctl_create_bufs,
1135        .vidioc_expbuf                  = vb2_ioctl_expbuf,
1136        .vidioc_streamon                = vb2_ioctl_streamon,
1137        .vidioc_streamoff               = vb2_ioctl_streamoff,
1138        .vidioc_enum_input      = &xvip_dma_enum_input,
1139        .vidioc_g_input         = &xvip_dma_get_input,
1140        .vidioc_s_input         = &xvip_dma_set_input,
1141};
1142
1143/* -----------------------------------------------------------------------------
1144 * V4L2 controls
1145 */
1146
1147static int xvip_dma_s_ctrl(struct v4l2_ctrl *ctl)
1148{
1149        struct xvip_dma *dma = container_of(ctl->handler, struct xvip_dma,
1150                                            ctrl_handler);
1151        int ret = 0;
1152
1153        switch (ctl->id)  {
1154        case V4L2_CID_XILINX_LOW_LATENCY:
1155                if (ctl->val == XVIP_LOW_LATENCY_ENABLE) {
1156                        if (vb2_is_busy(&dma->queue))
1157                                return -EBUSY;
1158
1159                        dma->low_latency_cap = true;
1160                        /*
1161                         * Don't use auto-restart for low latency
 1162                         * to avoid an extra one-frame delay between
 1163                         * programming and the actual writing of data
1164                         */
1165                        xilinx_xdma_set_mode(dma->dma, DEFAULT);
1166                } else if (ctl->val == XVIP_LOW_LATENCY_DISABLE) {
1167                        if (vb2_is_busy(&dma->queue))
1168                                return -EBUSY;
1169
1170                        dma->low_latency_cap = false;
1171                        xilinx_xdma_set_mode(dma->dma, AUTO_RESTART);
1172                } else if (ctl->val == XVIP_START_DMA) {
1173                        /*
 1174                         * In low latency capture, the driver allows the
 1175                         * application to start the DMA when the queue has
 1176                         * buffers. That's why we don't check for vb2_is_busy().
1177                         */
1178                        if (dma->low_latency_cap &&
1179                            vb2_is_streaming(&dma->queue))
1180                                dma_async_issue_pending(dma->dma);
1181                        else
1182                                ret = -EINVAL;
1183                } else {
1184                        ret = -EINVAL;
1185                }
1186
1187                break;
1188        default:
1189                ret = -EINVAL;
1190        }
1191
1192        return ret;
1193}
1194
1195static int xvip_dma_open(struct file *file)
1196{
1197        int ret;
1198
1199        ret = v4l2_fh_open(file);
1200        if (ret)
1201                return ret;
1202
 1203        /* Disable the low latency mode by default */
1204        if (v4l2_fh_is_singular_file(file)) {
1205                struct xvip_dma *dma = video_drvdata(file);
1206
1207                mutex_lock(&dma->lock);
1208                dma->low_latency_cap = false;
1209                xilinx_xdma_set_mode(dma->dma, AUTO_RESTART);
1210                mutex_unlock(&dma->lock);
1211        }
1212
1213        return 0;
1214}
1215
1216static const struct v4l2_ctrl_ops xvip_dma_ctrl_ops = {
1217        .s_ctrl = xvip_dma_s_ctrl,
1218};
1219
1220static const struct v4l2_ctrl_config xvip_dma_ctrls[] = {
1221        {
1222                .ops = &xvip_dma_ctrl_ops,
1223                .id = V4L2_CID_XILINX_LOW_LATENCY,
1224                .name = "Low Latency Controls",
1225                .type = V4L2_CTRL_TYPE_INTEGER,
1226                .min = XVIP_LOW_LATENCY_ENABLE,
1227                .max = XVIP_START_DMA,
1228                .step = 1,
1229                .def = XVIP_LOW_LATENCY_DISABLE,
1230        }
1231};
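     /*
      * For illustration, the low latency control above is expected to be
      * driven from userspace roughly as follows (assuming a /dev/videoN node
      * backed by this driver):
      *
      *        struct v4l2_control ctrl = {
      *                .id = V4L2_CID_XILINX_LOW_LATENCY,
      *                .value = XVIP_LOW_LATENCY_ENABLE,
      *        };
      *        ioctl(fd, VIDIOC_S_CTRL, &ctrl);    (before REQBUFS/STREAMON)
      *        ... VIDIOC_REQBUFS, VIDIOC_QBUF, VIDIOC_STREAMON ...
      *        ctrl.value = XVIP_START_DMA;
      *        ioctl(fd, VIDIOC_S_CTRL, &ctrl);    (kick the DMA engine)
      *
      * Enabling or disabling low latency while the queue is busy returns
      * -EBUSY, and XVIP_START_DMA is only accepted when low latency is
      * enabled and the queue is streaming.
      */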
1232
1233/* -----------------------------------------------------------------------------
1234 * V4L2 file operations
1235 */
1236
1237static const struct v4l2_file_operations xvip_dma_fops = {
1238        .owner          = THIS_MODULE,
1239        .unlocked_ioctl = video_ioctl2,
1240        .open           = xvip_dma_open,
1241        .release        = vb2_fop_release,
1242        .poll           = vb2_fop_poll,
1243        .mmap           = vb2_fop_mmap,
1244};
1245
1246/* -----------------------------------------------------------------------------
1247 * Xilinx Video DMA Core
1248 */
1249
1250int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
1251                  enum v4l2_buf_type type, unsigned int port)
1252{
1253        char name[16];
1254        int ret;
1255        u32 i, hsub, vsub, width, height;
1256
1257        dma->xdev = xdev;
1258        dma->port = port;
1259        mutex_init(&dma->lock);
1260        mutex_init(&dma->pipe.lock);
1261        INIT_LIST_HEAD(&dma->queued_bufs);
1262        spin_lock_init(&dma->queued_lock);
1263
1264        dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
1265        dma->format.type = type;
1266
1267        if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
1268                struct v4l2_pix_format_mplane *pix_mp;
1269
1270                pix_mp = &dma->format.fmt.pix_mp;
1271                pix_mp->pixelformat = dma->fmtinfo->fourcc;
1272                pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
1273                pix_mp->field = V4L2_FIELD_NONE;
 1274                pix_mp->width = XVIP_DMA_DEF_WIDTH;
 1275                pix_mp->height = XVIP_DMA_DEF_HEIGHT;
1276                /* Handling contiguous data with mplanes */
1277                if (dma->fmtinfo->buffers == 1) {
1278                        pix_mp->plane_fmt[0].bytesperline =
1279                                pix_mp->width * dma->fmtinfo->bpl_factor;
1280                        pix_mp->plane_fmt[0].sizeimage =
1281                                        pix_mp->width * pix_mp->height *
1282                                        dma->fmtinfo->bpp / 8;
1283                } else {
 1284                        /* Handling non-contiguous data with mplanes */
1285                        hsub = dma->fmtinfo->hsub;
1286                        vsub = dma->fmtinfo->vsub;
1287                        for (i = 0; i < dma->fmtinfo->buffers; i++) {
1288                                width = pix_mp->width / (i ? hsub : 1);
1289                                height = pix_mp->height / (i ? vsub : 1);
1290                                pix_mp->plane_fmt[i].bytesperline =
1291                                        width * dma->fmtinfo->bpl_factor;
1292                                pix_mp->plane_fmt[i].sizeimage = width * height;
1293                        }
1294                }
1295        } else {
1296                struct v4l2_pix_format *pix;
1297
1298                pix = &dma->format.fmt.pix;
1299                pix->pixelformat = dma->fmtinfo->fourcc;
1300                pix->colorspace = V4L2_COLORSPACE_SRGB;
1301                pix->field = V4L2_FIELD_NONE;
1302                pix->width = XVIP_DMA_DEF_WIDTH;
1303                pix->height = XVIP_DMA_DEF_HEIGHT;
1304                pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
1305                pix->sizeimage =
1306                        pix->width * pix->height * dma->fmtinfo->bpp / 8;
1307        }
1308
1309        /* Initialize the media entity... */
1310        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1311            type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1312                dma->pad.flags = MEDIA_PAD_FL_SINK;
1313        else
1314                dma->pad.flags = MEDIA_PAD_FL_SOURCE;
1315
1316        ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
1317        if (ret < 0)
1318                goto error;
1319
1320        ret = v4l2_ctrl_handler_init(&dma->ctrl_handler,
1321                                     ARRAY_SIZE(xvip_dma_ctrls));
1322        if (ret < 0) {
1323                dev_err(dma->xdev->dev, "failed to initialize V4L2 ctrl\n");
1324                goto error;
1325        }
1326
1327        for (i = 0; i < ARRAY_SIZE(xvip_dma_ctrls); i++) {
1328                struct v4l2_ctrl *ctrl;
1329
1330                dev_dbg(dma->xdev->dev, "%d ctrl = 0x%x\n", i,
1331                        xvip_dma_ctrls[i].id);
1332                ctrl = v4l2_ctrl_new_custom(&dma->ctrl_handler,
1333                                            &xvip_dma_ctrls[i], NULL);
1334                if (!ctrl) {
1335                        dev_err(dma->xdev->dev, "Failed for %s ctrl\n",
1336                                xvip_dma_ctrls[i].name);
1337                        goto error;
1338                }
1339        }
1340
1341        if (dma->ctrl_handler.error) {
1342                dev_err(dma->xdev->dev, "failed to add controls\n");
1343                ret = dma->ctrl_handler.error;
1344                goto error;
1345        }
1346
1347        ret = v4l2_ctrl_handler_setup(&dma->ctrl_handler);
1348        if (ret < 0) {
1349                dev_err(dma->xdev->dev, "failed to set controls\n");
1350                goto error;
1351        }
1352
1353        /* ... and the video node... */
1354        dma->video.fops = &xvip_dma_fops;
1355        dma->video.v4l2_dev = &xdev->v4l2_dev;
1356        dma->video.v4l2_dev->ctrl_handler = &dma->ctrl_handler;
1357        dma->video.queue = &dma->queue;
1358        snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
1359                 xdev->dev->of_node,
1360                 (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1361                  type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1362                                        ? "output" : "input",
1363                 port);
1364
1365        dma->video.vfl_type = VFL_TYPE_VIDEO;
1366        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1367            type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1368                dma->video.vfl_dir = VFL_DIR_RX;
1369        else
1370                dma->video.vfl_dir = VFL_DIR_TX;
1371
1372        dma->video.release = video_device_release_empty;
1373        dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
1374        dma->video.lock = &dma->lock;
1375        dma->video.device_caps = V4L2_CAP_STREAMING;
1376        switch (dma->format.type) {
1377        case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
1378                dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
1379                break;
1380        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1381                dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
1382                break;
1383        case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
1384                dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
1385                break;
1386        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1387                dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;
1388                break;
1389        }
1390
1391        video_set_drvdata(&dma->video, dma);
1392
1393        /* ... and the buffers queue... */
1394        /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
1395         * V4L2 APIs would be inefficient. Testing on the command line with a
1396         * 'cat /dev/video?' thus won't be possible, but given that the driver
1397         * anyway requires a test tool to setup the pipeline before any video
1398         * stream can be started, requiring a specific V4L2 test tool as well
1399         * instead of 'cat' isn't really a drawback.
1400         */
1401        dma->queue.type = type;
1402        dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1403        dma->queue.lock = &dma->lock;
1404        dma->queue.drv_priv = dma;
1405        dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
1406        dma->queue.ops = &xvip_dma_queue_qops;
1407        dma->queue.mem_ops = &vb2_dma_contig_memops;
1408        dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
1409                                   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
1410        dma->queue.dev = dma->xdev->dev;
1411        ret = vb2_queue_init(&dma->queue);
1412        if (ret < 0) {
1413                dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
1414                goto error;
1415        }
1416
1417        /* ... and the DMA channel. */
1418        snprintf(name, sizeof(name), "port%u", port);
1419        dma->dma = dma_request_chan(dma->xdev->dev, name);
1420        if (IS_ERR(dma->dma)) {
1421                ret = PTR_ERR(dma->dma);
1422                if (ret != -EPROBE_DEFER)
1423                        dev_err(dma->xdev->dev, "no VDMA channel found\n");
1424                goto error;
1425        }
1426
1427        xilinx_xdma_get_width_align(dma->dma, &dma->width_align);
1428        if (!dma->width_align) {
1429                dev_dbg(dma->xdev->dev,
1430                        "Using width align %d\n", XVIP_DMA_DEF_WIDTH_ALIGN);
1431                dma->width_align = XVIP_DMA_DEF_WIDTH_ALIGN;
1432        }
1433
1434        dma->align = 1 << dma->dma->device->copy_align;
1435
1436        ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
1437        if (ret < 0) {
1438                dev_err(dma->xdev->dev, "failed to register video device\n");
1439                goto error;
1440        }
1441
1442        return 0;
1443
1444error:
1445        xvip_dma_cleanup(dma);
1446        return ret;
1447}
1448
1449void xvip_dma_cleanup(struct xvip_dma *dma)
1450{
1451        if (video_is_registered(&dma->video))
1452                video_unregister_device(&dma->video);
1453
1454        if (!IS_ERR_OR_NULL(dma->dma))
1455                dma_release_channel(dma->dma);
1456
1457        v4l2_ctrl_handler_free(&dma->ctrl_handler);
1458        media_entity_cleanup(&dma->video.entity);
1459
1460        mutex_destroy(&dma->lock);
1461        mutex_destroy(&dma->pipe.lock);
1462}
1463