linux/drivers/media/platform/xilinx/xilinx-dma.c
<<
>>
Prefs
   1/*
   2 * Xilinx Video DMA
   3 *
   4 * Copyright (C) 2013-2015 Ideas on Board
   5 * Copyright (C) 2013-2015 Xilinx, Inc.
   6 *
   7 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
   8 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/dma/xilinx_dma.h>
  16#include <linux/dma/xilinx_frmbuf.h>
  17#include <linux/lcm.h>
  18#include <linux/list.h>
  19#include <linux/module.h>
  20#include <linux/of.h>
  21#include <linux/slab.h>
  22#include <linux/xilinx-v4l2-controls.h>
  23
  24#include <media/v4l2-dev.h>
  25#include <media/v4l2-fh.h>
  26#include <media/v4l2-ioctl.h>
  27#include <media/videobuf2-v4l2.h>
  28#include <media/videobuf2-dma-contig.h>
  29
  30#include "xilinx-dma.h"
  31#include "xilinx-vip.h"
  32#include "xilinx-vipp.h"
  33
  34#define XVIP_DMA_DEF_FORMAT             V4L2_PIX_FMT_YUYV
  35#define XVIP_DMA_DEF_WIDTH              1920
  36#define XVIP_DMA_DEF_HEIGHT             1080
  37
  38/* Minimum and maximum widths are expressed in bytes */
  39#define XVIP_DMA_MIN_WIDTH              1U
  40#define XVIP_DMA_MAX_WIDTH              65535U
  41#define XVIP_DMA_MIN_HEIGHT             1U
  42#define XVIP_DMA_MAX_HEIGHT             8191U
  43
/**
 * struct xventity_list - Node used to record entities during a graph walk
 * @list: entry in the caller's local list of entities
 * @entity: media entity recorded at this position of the walk
 */
struct xventity_list {
        struct list_head list;
        struct media_entity *entity;
};
  48
  49/* -----------------------------------------------------------------------------
  50 * Helper functions
  51 */
  52
  53static struct v4l2_subdev *
  54xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
  55{
  56        struct media_pad *remote;
  57
  58        remote = media_entity_remote_pad(local);
  59        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
  60                return NULL;
  61
  62        if (pad)
  63                *pad = remote->index;
  64
  65        return media_entity_to_v4l2_subdev(remote->entity);
  66}
  67
  68static int xvip_dma_verify_format(struct xvip_dma *dma)
  69{
  70        struct v4l2_subdev_format fmt;
  71        struct v4l2_subdev *subdev;
  72        int ret;
  73        int width, height;
  74
  75        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
  76        if (!subdev)
  77                return -EPIPE;
  78
  79        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  80        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  81        if (ret < 0)
  82                return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  83
  84        if (dma->fmtinfo->code != fmt.format.code)
  85                return -EINVAL;
  86
  87        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
  88                width = dma->format.fmt.pix_mp.width;
  89                height = dma->format.fmt.pix_mp.height;
  90        } else {
  91                width = dma->format.fmt.pix.width;
  92                height = dma->format.fmt.pix.height;
  93        }
  94
  95        if (width != fmt.format.width || height != fmt.format.height)
  96                return -EINVAL;
  97
  98        return 0;
  99}
 100
 101/* -----------------------------------------------------------------------------
 102 * Pipeline Stream Management
 103 */
 104
 105static int xvip_entity_start_stop(struct xvip_composite_device *xdev,
 106                                  struct media_entity *entity, bool start)
 107{
 108        struct v4l2_subdev *subdev;
 109        bool is_streaming;
 110        int ret = 0;
 111
 112        dev_dbg(xdev->dev, "%s entity %s\n",
 113                start ? "Starting" : "Stopping", entity->name);
 114        subdev = media_entity_to_v4l2_subdev(entity);
 115
 116        /* This is to maintain list of stream on/off devices */
 117        is_streaming = xvip_subdev_set_streaming(xdev, subdev, start);
 118
 119        /*
 120         * start or stop the subdev only once in case if they are
 121         * shared between sub-graphs
 122         */
 123        if (start && !is_streaming) {
 124                /* power-on subdevice */
 125                ret = v4l2_subdev_call(subdev, core, s_power, 1);
 126                if (ret < 0 && ret != -ENOIOCTLCMD) {
 127                        dev_err(xdev->dev,
 128                                "s_power on failed on subdev\n");
 129                        xvip_subdev_set_streaming(xdev, subdev, 0);
 130                        return ret;
 131                }
 132
 133                /* stream-on subdevice */
 134                ret = v4l2_subdev_call(subdev, video, s_stream, 1);
 135                if (ret < 0 && ret != -ENOIOCTLCMD) {
 136                        dev_err(xdev->dev,
 137                                "s_stream on failed on subdev\n");
 138                        v4l2_subdev_call(subdev, core, s_power, 0);
 139                        xvip_subdev_set_streaming(xdev, subdev, 0);
 140                }
 141        } else if (!start && is_streaming) {
 142                /* stream-off subdevice */
 143                ret = v4l2_subdev_call(subdev, video, s_stream, 0);
 144                if (ret < 0 && ret != -ENOIOCTLCMD) {
 145                        dev_err(xdev->dev,
 146                                "s_stream off failed on subdev\n");
 147                        xvip_subdev_set_streaming(xdev, subdev, 1);
 148                }
 149
 150                /* power-off subdevice */
 151                ret = v4l2_subdev_call(subdev, core, s_power, 0);
 152                if (ret < 0 && ret != -ENOIOCTLCMD)
 153                        dev_err(xdev->dev,
 154                                "s_power off failed on subdev\n");
 155        }
 156
 157        return ret;
 158}
 159
 160/**
 161 * xvip_pipeline_start_stop - Start ot stop streaming on a pipeline
 162 * @xdev: Composite video device
 163 * @dma: xvip dma
 164 * @start: Start (when true) or stop (when false) the pipeline
 165 *
 166 * Walk the entities chain starting @dma and start or stop all of them
 167 *
 168 * Return: 0 if successful, or the return value of the failed video::s_stream
 169 * operation otherwise.
 170 */
 171static int xvip_pipeline_start_stop(struct xvip_composite_device *xdev,
 172                                    struct xvip_dma *dma, bool start)
 173{
 174        struct media_graph graph;
 175        struct media_entity *entity = &dma->video.entity;
 176        struct media_device *mdev = entity->graph_obj.mdev;
 177        struct xventity_list *temp, *_temp;
 178        LIST_HEAD(ent_list);
 179        int ret = 0;
 180
 181        mutex_lock(&mdev->graph_mutex);
 182
 183        /* Walk the graph to locate the subdev nodes */
 184        ret = media_graph_walk_init(&graph, mdev);
 185        if (ret)
 186                goto error;
 187
 188        media_graph_walk_start(&graph, entity);
 189
 190        /* get the list of entities */
 191        while ((entity = media_graph_walk_next(&graph))) {
 192                struct xventity_list *ele;
 193
 194                /* We want to stream on/off only subdevs */
 195                if (!is_media_entity_v4l2_subdev(entity))
 196                        continue;
 197
 198                /* Maintain the pipeline sequence in a list */
 199                ele = kzalloc(sizeof(*ele), GFP_KERNEL);
 200                if (!ele) {
 201                        ret = -ENOMEM;
 202                        goto error;
 203                }
 204
 205                ele->entity = entity;
 206                list_add(&ele->list, &ent_list);
 207        }
 208
 209        if (start) {
 210                list_for_each_entry_safe(temp, _temp, &ent_list, list) {
 211                        /* Enable all subdevs from sink to source */
 212                        ret = xvip_entity_start_stop(xdev, temp->entity, start);
 213                        if (ret < 0) {
 214                                dev_err(xdev->dev, "ret = %d for entity %s\n",
 215                                        ret, temp->entity->name);
 216                                break;
 217                        }
 218                }
 219        } else {
 220                list_for_each_entry_safe_reverse(temp, _temp, &ent_list, list)
 221                        /* Enable all subdevs from source to sink */
 222                        xvip_entity_start_stop(xdev, temp->entity, start);
 223        }
 224
 225        list_for_each_entry_safe(temp, _temp, &ent_list, list) {
 226                list_del(&temp->list);
 227                kfree(temp);
 228        }
 229
 230error:
 231        mutex_unlock(&mdev->graph_mutex);
 232        media_graph_walk_cleanup(&graph);
 233        return ret;
 234}
 235
 236/**
 237 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 238 * @pipe: The pipeline
 239 * @on: Turn the stream on when true or off when false
 240 *
 241 * The pipeline is shared between all DMA engines connect at its input and
 242 * output. While the stream state of DMA engines can be controlled
 243 * independently, pipelines have a shared stream state that enable or disable
 244 * all entities in the pipeline. For this reason the pipeline uses a streaming
 245 * counter that tracks the number of DMA engines that have requested the stream
 246 * to be enabled. This will walk the graph starting from each DMA and enable or
 247 * disable the entities in the path.
 248 *
 249 * When called with the @on argument set to true, this function will increment
 250 * the pipeline streaming count. If the streaming count reaches the number of
 251 * DMA engines in the pipeline it will enable all entities that belong to the
 252 * pipeline.
 253 *
 254 * Similarly, when called with the @on argument set to false, this function will
 255 * decrement the pipeline streaming count and disable all entities in the
 256 * pipeline when the streaming count reaches zero.
 257 *
 258 * Return: 0 if successful, or the return value of the failed video::s_stream
 259 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 260 * not updated when the operation fails.
 261 */
 262static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
 263{
 264        struct xvip_composite_device *xdev;
 265        struct xvip_dma *dma;
 266        int ret = 0;
 267
 268        mutex_lock(&pipe->lock);
 269        xdev = pipe->xdev;
 270
 271        if (on) {
 272                if (pipe->stream_count == pipe->num_dmas - 1) {
 273                        /*
 274                         * This will iterate the DMAs and the stream-on of
 275                         * subdevs may not be sequential due to multiple
 276                         * sub-graph path
 277                         */
 278                        list_for_each_entry(dma, &xdev->dmas, list) {
 279                                ret = xvip_pipeline_start_stop(xdev, dma, true);
 280                                if (ret < 0)
 281                                        goto done;
 282                        }
 283                }
 284                pipe->stream_count++;
 285        } else {
 286                if (--pipe->stream_count == 0)
 287                        list_for_each_entry(dma, &xdev->dmas, list)
 288                                xvip_pipeline_start_stop(xdev, dma, false);
 289        }
 290
 291done:
 292        mutex_unlock(&pipe->lock);
 293        return ret;
 294}
 295
 296static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
 297                                  struct xvip_dma *start)
 298{
 299        struct media_graph graph;
 300        struct media_entity *entity = &start->video.entity;
 301        struct media_device *mdev = entity->graph_obj.mdev;
 302        unsigned int num_inputs = 0;
 303        unsigned int num_outputs = 0;
 304        int ret;
 305
 306        mutex_lock(&mdev->graph_mutex);
 307
 308        /* Walk the graph to locate the video nodes. */
 309        ret = media_graph_walk_init(&graph, mdev);
 310        if (ret) {
 311                mutex_unlock(&mdev->graph_mutex);
 312                return ret;
 313        }
 314
 315        media_graph_walk_start(&graph, entity);
 316
 317        while ((entity = media_graph_walk_next(&graph))) {
 318                struct xvip_dma *dma;
 319
 320                if (entity->function != MEDIA_ENT_F_IO_V4L)
 321                        continue;
 322
 323                dma = to_xvip_dma(media_entity_to_video_device(entity));
 324
 325                if (dma->pad.flags & MEDIA_PAD_FL_SINK)
 326                        num_outputs++;
 327                else
 328                        num_inputs++;
 329        }
 330
 331        mutex_unlock(&mdev->graph_mutex);
 332
 333        media_graph_walk_cleanup(&graph);
 334
 335        /* We need at least one DMA to proceed */
 336        if (num_outputs == 0 && num_inputs == 0)
 337                return -EPIPE;
 338
 339        pipe->num_dmas = num_inputs + num_outputs;
 340        pipe->xdev = start->xdev;
 341
 342        return 0;
 343}
 344
/* Reset the pipeline bookkeeping; both callers hold pipe->lock. */
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        pipe->num_dmas = 0;
}
 349
 350/**
 351 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 352 * @pipe: the pipeline
 353 *
 354 * Decrease the pipeline use count and clean it up if we were the last user.
 355 */
 356static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
 357{
 358        mutex_lock(&pipe->lock);
 359
 360        /* If we're the last user clean up the pipeline. */
 361        if (--pipe->use_count == 0)
 362                __xvip_pipeline_cleanup(pipe);
 363
 364        mutex_unlock(&pipe->lock);
 365}
 366
 367/**
 368 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 369 * @pipe: the pipeline
 370 * @dma: DMA engine at one end of the pipeline
 371 *
 372 * Validate the pipeline if no user exists yet, otherwise just increase the use
 373 * count.
 374 *
 375 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 376 */
 377static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
 378                                 struct xvip_dma *dma)
 379{
 380        int ret;
 381
 382        mutex_lock(&pipe->lock);
 383
 384        /* If we're the first user validate and initialize the pipeline. */
 385        if (pipe->use_count == 0) {
 386                ret = xvip_pipeline_validate(pipe, dma);
 387                if (ret < 0) {
 388                        __xvip_pipeline_cleanup(pipe);
 389                        goto done;
 390                }
 391        }
 392
 393        pipe->use_count++;
 394        ret = 0;
 395
 396done:
 397        mutex_unlock(&pipe->lock);
 398        return ret;
 399}
 400
 401/* -----------------------------------------------------------------------------
 402 * videobuf2 queue operations
 403 */
 404
/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 * @desc: Descriptor associated with this structure
 */
struct xvip_dma_buffer {
        struct vb2_v4l2_buffer buf;
        struct list_head queue;
        struct xvip_dma *dma;
        struct dma_async_tx_descriptor *desc;
};

/* Convert a vb2_v4l2_buffer pointer to its enclosing xvip_dma_buffer. */
#define to_xvip_dma_buffer(vb)  container_of(vb, struct xvip_dma_buffer, buf)
 420
 421static void xvip_dma_complete(void *param)
 422{
 423        struct xvip_dma_buffer *buf = param;
 424        struct xvip_dma *dma = buf->dma;
 425        int i, sizeimage;
 426        u32 fid;
 427        int status;
 428
 429        spin_lock(&dma->queued_lock);
 430        list_del(&buf->queue);
 431        spin_unlock(&dma->queued_lock);
 432
 433        buf->buf.field = V4L2_FIELD_NONE;
 434        buf->buf.sequence = dma->sequence++;
 435        buf->buf.vb2_buf.timestamp = ktime_get_ns();
 436
 437        status = xilinx_xdma_get_fid(dma->dma, buf->desc, &fid);
 438        if (!status) {
 439                if (((V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) &&
 440                     dma->format.fmt.pix_mp.field == V4L2_FIELD_ALTERNATE) ||
 441                     dma->format.fmt.pix.field == V4L2_FIELD_ALTERNATE) {
 442                        /*
 443                         * fid = 1 is odd field i.e. V4L2_FIELD_TOP.
 444                         * fid = 0 is even field i.e. V4L2_FIELD_BOTTOM.
 445                         */
 446                        buf->buf.field = fid ?
 447                                         V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
 448
 449                        if (fid == dma->prev_fid)
 450                                buf->buf.sequence = dma->sequence++;
 451
 452                        buf->buf.sequence >>= 1;
 453                        dma->prev_fid = fid;
 454                }
 455        }
 456
 457        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 458                for (i = 0; i < dma->fmtinfo->buffers; i++) {
 459                        sizeimage =
 460                                dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
 461                        vb2_set_plane_payload(&buf->buf.vb2_buf, i, sizeimage);
 462                }
 463        } else {
 464                sizeimage = dma->format.fmt.pix.sizeimage;
 465                vb2_set_plane_payload(&buf->buf.vb2_buf, 0, sizeimage);
 466        }
 467
 468        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
 469}
 470
 471static int
 472xvip_dma_queue_setup(struct vb2_queue *vq,
 473                     unsigned int *nbuffers, unsigned int *nplanes,
 474                     unsigned int sizes[], struct device *alloc_devs[])
 475{
 476        struct xvip_dma *dma = vb2_get_drv_priv(vq);
 477        u8 i;
 478        int sizeimage;
 479
 480        /* Multi planar case: Make sure the image size is large enough */
 481        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
 482                if (*nplanes) {
 483                        if (*nplanes != dma->format.fmt.pix_mp.num_planes)
 484                                return -EINVAL;
 485
 486                        for (i = 0; i < *nplanes; i++) {
 487                                sizeimage =
 488                                  dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
 489                                if (sizes[i] < sizeimage)
 490                                        return -EINVAL;
 491                        }
 492                } else {
 493                        *nplanes = dma->fmtinfo->buffers;
 494                        for (i = 0; i < dma->fmtinfo->buffers; i++) {
 495                                sizeimage =
 496                                  dma->format.fmt.pix_mp.plane_fmt[i].sizeimage;
 497                                sizes[i] = sizeimage;
 498                        }
 499                }
 500                return 0;
 501        }
 502
 503        /* Single planar case: Make sure the image size is large enough */
 504        sizeimage = dma->format.fmt.pix.sizeimage;
 505        if (*nplanes == 1)
 506                return sizes[0] < sizeimage ? -EINVAL : 0;
 507
 508        *nplanes = 1;
 509        sizes[0] = sizeimage;
 510
 511        return 0;
 512}
 513
/* vb2 buf_prepare operation: link the buffer back to its DMA channel. */
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

        /* Remember the owning channel for the completion callback. */
        buf->dma = dma;

        return 0;
}
 524
/*
 * vb2 buf_queue operation: build an interleaved DMA descriptor for the buffer
 * from the currently configured format, attach the completion callback and
 * submit the descriptor to the DMA engine.
 */
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
        struct dma_async_tx_descriptor *desc;
        dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
        u32 flags;
        u32 luma_size;
        u32 padding_factor_nume, padding_factor_deno, bpl_nume, bpl_deno;
        u32 fid = ~0;

        /*
         * Program the transfer direction from the queue type.
         * NOTE(review): flags is only assigned in these two branches; this
         * assumes queue.type is always one of the four types below - confirm.
         */
        if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
            dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_DEV_TO_MEM;
                dma->xt.src_sgl = false;
                dma->xt.dst_sgl = true;
                dma->xt.dst_start = addr;
        } else if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
                   dma->queue.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_MEM_TO_DEV;
                dma->xt.src_sgl = true;
                dma->xt.dst_sgl = false;
                dma->xt.src_start = addr;
        }

        /*
         * DMA IP supports only 2 planes, so one datachunk is sufficient
         * to get start address of 2nd plane
         */
        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
                struct v4l2_pix_format_mplane *pix_mp;

                pix_mp = &dma->format.fmt.pix_mp;
                xilinx_xdma_v4l2_config(dma->dma, pix_mp->pixelformat);
                xvip_width_padding_factor(pix_mp->pixelformat,
                                          &padding_factor_nume,
                                          &padding_factor_deno);
                xvip_bpl_scaling_factor(pix_mp->pixelformat, &bpl_nume,
                                        &bpl_deno);
                /* Active bytes per line, then the inter-line gap (icg). */
                dma->xt.frame_size = dma->fmtinfo->num_planes;
                dma->sgl[0].size = (pix_mp->width * dma->fmtinfo->bpl_factor *
                                    padding_factor_nume * bpl_nume) /
                                    (padding_factor_deno * bpl_deno);
                dma->sgl[0].icg = pix_mp->plane_fmt[0].bytesperline -
                                                        dma->sgl[0].size;
                dma->xt.numf = pix_mp->height;

                /*
                 * dst_icg is the number of bytes to jump after last luma addr
                 * and before first chroma addr
                 */

                /* Handling contiguous data with mplanes */
                if (dma->fmtinfo->buffers == 1) {
                        dma->sgl[0].dst_icg = 0;
                } else {
                        /* Handling non-contiguous data with mplanes */
                        if (dma->fmtinfo->buffers == 2) {
                                dma_addr_t chroma_addr =
                                        vb2_dma_contig_plane_dma_addr(vb, 1);
                                luma_size = pix_mp->plane_fmt[0].bytesperline *
                                            dma->xt.numf;
                                /*
                                 * NOTE(review): when chroma_addr <= addr,
                                 * dst_icg keeps its value from the previous
                                 * buffer - confirm this is intended.
                                 */
                                if (chroma_addr > addr)
                                        dma->sgl[0].dst_icg = chroma_addr -
                                                              addr - luma_size;
                                }
                }
        } else {
                struct v4l2_pix_format *pix;

                pix = &dma->format.fmt.pix;
                xilinx_xdma_v4l2_config(dma->dma, pix->pixelformat);
                xvip_width_padding_factor(pix->pixelformat,
                                          &padding_factor_nume,
                                          &padding_factor_deno);
                xvip_bpl_scaling_factor(pix->pixelformat, &bpl_nume,
                                        &bpl_deno);
                dma->xt.frame_size = dma->fmtinfo->num_planes;
                dma->sgl[0].size = (pix->width * dma->fmtinfo->bpl_factor *
                                    padding_factor_nume * bpl_nume) /
                                    (padding_factor_deno * bpl_deno);
                dma->sgl[0].icg = pix->bytesperline - dma->sgl[0].size;
                dma->xt.numf = pix->height;
                dma->sgl[0].dst_icg = 0;
        }

        desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
        if (!desc) {
                dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
        desc->callback = xvip_dma_complete;
        desc->callback_param = buf;
        buf->desc = desc;

        /* Map the V4L2 field to the hardware field ID (1 = top/odd). */
        if (buf->buf.field == V4L2_FIELD_TOP)
                fid = 1;
        else if (buf->buf.field == V4L2_FIELD_BOTTOM)
                fid = 0;
        else if (buf->buf.field == V4L2_FIELD_NONE)
                fid = 0;

        xilinx_xdma_set_fid(dma->dma, desc, fid);

        /* Set low latency capture mode */
        if (dma->earlycb_mode) {
                int ret;

                ret = xilinx_xdma_set_earlycb(dma->dma, desc,
                                              dma->earlycb_mode);
                if (ret < 0) {
                        dev_err(dma->xdev->dev,
                                "Failed enable low latency mode\n");
                }
        }

        spin_lock_irq(&dma->queued_lock);
        list_add_tail(&buf->queue, &dma->queued_bufs);
        spin_unlock_irq(&dma->queued_lock);

        dmaengine_submit(desc);

        /* Only kick the engine once streaming has actually started. */
        if (vb2_is_streaming(&dma->queue))
                dma_async_issue_pending(dma->dma);
}
 654
 655static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
 656{
 657        struct xvip_dma *dma = vb2_get_drv_priv(vq);
 658        struct xvip_dma_buffer *buf, *nbuf;
 659        struct xvip_pipeline *pipe;
 660        int ret;
 661
 662        dma->sequence = 0;
 663        dma->prev_fid = ~0;
 664
 665        /*
 666         * Start streaming on the pipeline. No link touching an entity in the
 667         * pipeline can be activated or deactivated once streaming is started.
 668         *
 669         * Use the pipeline object embedded in the first DMA object that starts
 670         * streaming.
 671         */
 672        mutex_lock(&dma->xdev->lock);
 673        pipe = dma->video.entity.pipe
 674             ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;
 675
 676        ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
 677        mutex_unlock(&dma->xdev->lock);
 678        if (ret < 0)
 679                goto error;
 680
 681        /* Verify that the configured format matches the output of the
 682         * connected subdev.
 683         */
 684        ret = xvip_dma_verify_format(dma);
 685        if (ret < 0)
 686                goto error_stop;
 687
 688        ret = xvip_pipeline_prepare(pipe, dma);
 689        if (ret < 0)
 690                goto error_stop;
 691
 692        /* Start the DMA engine. This must be done before starting the blocks
 693         * in the pipeline to avoid DMA synchronization issues.
 694         */
 695        dma_async_issue_pending(dma->dma);
 696
 697        /* Start the pipeline. */
 698        ret = xvip_pipeline_set_stream(pipe, true);
 699        if (ret < 0)
 700                goto error_stop;
 701
 702        return 0;
 703
 704error_stop:
 705        media_pipeline_stop(&dma->video.entity);
 706
 707error:
 708        dmaengine_terminate_all(dma->dma);
 709        /* Give back all queued buffers to videobuf2. */
 710        spin_lock_irq(&dma->queued_lock);
 711        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
 712                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
 713                list_del(&buf->queue);
 714        }
 715        spin_unlock_irq(&dma->queued_lock);
 716
 717        return ret;
 718}
 719
/*
 * vb2 stop_streaming operation: stop the pipeline entities, reset the DMA
 * engine, drop the pipeline user reference and return all still-queued
 * buffers to videobuf2 in the ERROR state.
 */
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
        struct xvip_dma_buffer *buf, *nbuf;

        /* Stop the pipeline. */
        xvip_pipeline_set_stream(pipe, false);

        /* Stop and reset the DMA engine. */
        dmaengine_terminate_all(dma->dma);

        /* Cleanup the pipeline and mark it as being stopped. */
        xvip_pipeline_cleanup(pipe);
        media_pipeline_stop(&dma->video.entity);

        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);
}
 744
/* videobuf2 queue operations for the Xilinx video DMA channels. */
static const struct vb2_ops xvip_dma_queue_qops = {
        .queue_setup = xvip_dma_queue_setup,
        .buf_prepare = xvip_dma_buffer_prepare,
        .buf_queue = xvip_dma_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = xvip_dma_start_streaming,
        .stop_streaming = xvip_dma_stop_streaming,
};
 754
 755/* -----------------------------------------------------------------------------
 756 * V4L2 ioctls
 757 */
 758
 759static int
 760xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
 761{
 762        struct v4l2_fh *vfh = file->private_data;
 763        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 764
 765        cap->device_caps = V4L2_CAP_STREAMING;
 766
 767        switch (dma->queue.type) {
 768        case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
 769                cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE_MPLANE;
 770                break;
 771        case V4L2_BUF_TYPE_VIDEO_CAPTURE:
 772                cap->device_caps |= V4L2_CAP_VIDEO_CAPTURE;
 773                break;
 774        case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
 775                cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT_MPLANE;
 776                break;
 777        case V4L2_BUF_TYPE_VIDEO_OUTPUT:
 778                cap->device_caps |= V4L2_CAP_VIDEO_OUTPUT;
 779                break;
 780        }
 781
 782        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS
 783                          | dma->xdev->v4l2_caps;
 784
 785        strlcpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
 786        strlcpy(cap->card, dma->video.name, sizeof(cap->card));
 787        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s:%u",
 788                 dma->xdev->dev->of_node->name, dma->port);
 789
 790        return 0;
 791}
 792
/*
 * Enumerate the V4L2 pixel formats supported by both the DMA hardware and
 * the remote subdevice's current media bus code. The legal fourcc list is
 * cached in dma->poss_v4l2_fmts and regenerated whenever the remote media
 * bus code changes.
 */
static int xvip_xdma_enum_fmt(struct xvip_dma *dma, struct v4l2_fmtdesc *f,
                              struct v4l2_subdev_format *v4l_fmt)
{
        const struct xvip_video_format *fmt;
        int ret;
        u32 i, fmt_cnt, *fmts;

        /* Query the DMA engine for the fourcc codes it supports. */
        ret = xilinx_xdma_get_v4l2_vid_fmts(dma->dma, &fmt_cnt, &fmts);
        if (ret)
                return ret;

        /* Has media pad value changed? */
        if (v4l_fmt->format.code != dma->remote_subdev_med_bus ||
            !dma->remote_subdev_med_bus) {
                /* Re-generate legal list of fourcc codes */
                dma->poss_v4l2_fmt_cnt = 0;
                dma->remote_subdev_med_bus = v4l_fmt->format.code;

                /*
                 * NOTE(review): the cache is sized once from the first
                 * fmt_cnt; assumes fmt_cnt cannot grow on later calls -
                 * confirm with the xilinx_frmbuf API.
                 */
                if (!dma->poss_v4l2_fmts) {
                        dma->poss_v4l2_fmts =
                                devm_kzalloc(&dma->video.dev,
                                             sizeof(u32) * fmt_cnt,
                                             GFP_KERNEL);
                        if (!dma->poss_v4l2_fmts)
                                return -ENOMEM;
                }

                /* Keep only fourccs whose media bus code matches the remote. */
                for (i = 0; i < fmt_cnt; i++) {
                        fmt = xvip_get_format_by_fourcc(fmts[i]);
                        if (IS_ERR(fmt))
                                return PTR_ERR(fmt);

                        if (fmt->code != dma->remote_subdev_med_bus)
                                continue;

                        dma->poss_v4l2_fmts[dma->poss_v4l2_fmt_cnt++] = fmts[i];
                }
        }

        /* Return err if index is greater than count of legal values */
        if (f->index >= dma->poss_v4l2_fmt_cnt)
                return -EINVAL;

        /* Else return pix format in table */
        fmt = xvip_get_format_by_fourcc(dma->poss_v4l2_fmts[f->index]);
        if (IS_ERR(fmt))
                return PTR_ERR(fmt);

        f->pixelformat = fmt->fourcc;
        strlcpy(f->description, fmt->description,
                sizeof(f->description));

        return 0;
}
 847
 848/* FIXME: without this callback function, some applications are not configured
 849 * with correct formats, and it results in frames in wrong format. Whether this
 850 * callback needs to be required is not clearly defined, so it should be
 851 * clarified through the mailing list.
 852 */
 853static int
 854xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
 855{
 856        struct v4l2_fh *vfh = file->private_data;
 857        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 858        struct v4l2_subdev *subdev;
 859        struct v4l2_subdev_format v4l_fmt;
 860        const struct xvip_video_format *fmt;
 861        int err, ret;
 862
 863        /* Establish media pad format */
 864        subdev = xvip_dma_remote_subdev(&dma->pad, &v4l_fmt.pad);
 865        if (!subdev)
 866                return -EPIPE;
 867
 868        v4l_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
 869        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &v4l_fmt);
 870        if (ret < 0)
 871                return ret == -ENOIOCTLCMD ? -EINVAL : ret;
 872
 873        /*
 874         * In case of frmbuf DMA, this will invoke frambuf driver specific APIs
 875         * to enumerate formats otherwise return the pix format corresponding
 876         * to subdev's media bus format. This kind of separation would be
 877         * helpful for clean up and upstreaming.
 878         */
 879        err = xvip_xdma_enum_fmt(dma, f, &v4l_fmt);
 880        if (!err)
 881                return err;
 882
 883        /*
 884         * This logic will just return one pix format based on subdev's
 885         * media bus format
 886         */
 887        if (f->index > 0)
 888                return -EINVAL;
 889
 890        fmt = xvip_get_format_by_code(v4l_fmt.format.code);
 891        if (IS_ERR(fmt))
 892                return PTR_ERR(fmt);
 893
 894        f->pixelformat = fmt->fourcc;
 895        strlcpy(f->description, fmt->description,
 896                sizeof(f->description));
 897
 898        return 0;
 899}
 900
 901static int
 902xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
 903{
 904        struct v4l2_fh *vfh = file->private_data;
 905        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
 906
 907        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
 908                format->fmt.pix_mp = dma->format.fmt.pix_mp;
 909        else
 910                format->fmt.pix = dma->format.fmt.pix;
 911
 912        return 0;
 913}
 914
/*
 * __xvip_dma_try_format - Adjust a requested format to hardware constraints
 * @dma: video DMA channel
 * @format: format to adjust in place
 * @fmtinfo: if non-NULL, receives the resolved format descriptor
 *
 * Clamp width/height, bytesperline and sizeimage of @format to the DMA
 * engine's alignment and the driver's min/max limits, falling back to
 * XVIP_DMA_DEF_FORMAT when the requested fourcc is unknown. The interlace
 * field is taken from the remote subdev's active format. Returns silently
 * (leaving @format untouched) if no remote subdev is connected or its
 * format cannot be queried.
 *
 * NOTE(review): despite being a "try" helper (also called from the
 * VIDIOC_TRY_FMT path), this function writes dma->format.*.field as a side
 * effect - confirm whether that mutation is intended for try_fmt.
 */
static void
__xvip_dma_try_format(struct xvip_dma *dma,
                      struct v4l2_format *format,
                      const struct xvip_video_format **fmtinfo)
{
        const struct xvip_video_format *info;
        unsigned int min_width;
        unsigned int max_width;
        unsigned int min_bpl;
        unsigned int max_bpl;
        unsigned int width;
        unsigned int align;
        unsigned int bpl;
        unsigned int i, hsub, vsub, plane_width, plane_height;
        unsigned int fourcc;
        unsigned int padding_factor_nume, padding_factor_deno;
        unsigned int bpl_nume, bpl_deno;
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        int ret;

        /* fmt.pad is filled in by the remote-subdev lookup */
        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
        if (!subdev)
                return;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret < 0)
                return;

        /* Propagate the subdev's interlace mode into the stored format */
        if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
                if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
                        dma->format.fmt.pix_mp.field = V4L2_FIELD_ALTERNATE;
                else
                        dma->format.fmt.pix.field = V4L2_FIELD_ALTERNATE;
        } else {
                if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
                        dma->format.fmt.pix_mp.field = V4L2_FIELD_NONE;
                else
                        dma->format.fmt.pix.field = V4L2_FIELD_NONE;
        }

        /* Retrieve format information and select the default format if the
         * requested format isn't supported.
         */
        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
                fourcc = format->fmt.pix_mp.pixelformat;
        else
                fourcc = format->fmt.pix.pixelformat;

        info = xvip_get_format_by_fourcc(fourcc);

        if (IS_ERR(info))
                info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

        /* Per-format width padding and bytes-per-line scaling ratios */
        xvip_width_padding_factor(info->fourcc, &padding_factor_nume,
                                  &padding_factor_deno);
        xvip_bpl_scaling_factor(info->fourcc, &bpl_nume, &bpl_deno);

        /* The transfer alignment requirements are expressed in bytes. Compute
         * the minimum and maximum values, clamp the requested width and convert
         * it back to pixels.
         */
        align = lcm(dma->align, info->bpp >> 3);
        min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
        max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);

        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type)) {
                struct v4l2_pix_format_mplane *pix_mp;
                struct v4l2_plane_pix_format *plane_fmt;

                pix_mp = &format->fmt.pix_mp;
                plane_fmt = pix_mp->plane_fmt;
                pix_mp->field = dma->format.fmt.pix_mp.field;
                /* Clamp width in bytes, then convert back to pixels */
                width = rounddown(pix_mp->width * info->bpl_factor, align);
                pix_mp->width = clamp(width, min_width, max_width) /
                                info->bpl_factor;
                pix_mp->height = clamp(pix_mp->height, XVIP_DMA_MIN_HEIGHT,
                                       XVIP_DMA_MAX_HEIGHT);

                /*
                 * Clamp the requested bytes per line value. If the maximum
                 * bytes per line value is zero, the module doesn't support
                 * user configurable line sizes. Override the requested value
                 * with the minimum in that case.
                 */

                max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);

                /* Handling contiguous data with mplanes */
                if (info->buffers == 1) {
                        min_bpl = (pix_mp->width * info->bpl_factor *
                                   padding_factor_nume * bpl_nume) /
                                   (padding_factor_deno * bpl_deno);
                        min_bpl = roundup(min_bpl, dma->align);
                        bpl = roundup(plane_fmt[0].bytesperline, dma->align);
                        plane_fmt[0].bytesperline = clamp(bpl, min_bpl,
                                                          max_bpl);

                        if (info->num_planes == 1) {
                                /* Single plane formats */
                                plane_fmt[0].sizeimage =
                                                plane_fmt[0].bytesperline *
                                                pix_mp->height;
                        } else {
                                /* Multi plane formats */
                                plane_fmt[0].sizeimage =
                                        DIV_ROUND_UP(plane_fmt[0].bytesperline *
                                                     pix_mp->height *
                                                     info->bpp, 8);
                        }
                } else {
                        /* Handling non-contiguous data with mplanes */
                        hsub = info->hsub;
                        vsub = info->vsub;
                        /* Chroma planes (i > 0) are subsampled by hsub/vsub */
                        for (i = 0; i < info->num_planes; i++) {
                                plane_width = pix_mp->width / (i ? hsub : 1);
                                plane_height = pix_mp->height / (i ? vsub : 1);
                                min_bpl = (plane_width * info->bpl_factor *
                                           padding_factor_nume * bpl_nume) /
                                           (padding_factor_deno * bpl_deno);
                                min_bpl = roundup(min_bpl, dma->align);
                                bpl = rounddown(plane_fmt[i].bytesperline,
                                                dma->align);
                                plane_fmt[i].bytesperline =
                                                clamp(bpl, min_bpl, max_bpl);
                                plane_fmt[i].sizeimage =
                                                plane_fmt[i].bytesperline *
                                                plane_height;
                        }
                }
        } else {
                struct v4l2_pix_format *pix;

                pix = &format->fmt.pix;
                pix->field = dma->format.fmt.pix.field;
                /* Clamp width in bytes, then convert back to pixels */
                width = rounddown(pix->width * info->bpl_factor, align);
                pix->width = clamp(width, min_width, max_width) /
                             info->bpl_factor;
                pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
                                    XVIP_DMA_MAX_HEIGHT);

                min_bpl = (pix->width * info->bpl_factor *
                          padding_factor_nume * bpl_nume) /
                          (padding_factor_deno * bpl_deno);
                min_bpl = roundup(min_bpl, dma->align);
                max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
                bpl = rounddown(pix->bytesperline, dma->align);
                pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
                pix->sizeimage = pix->width * pix->height * info->bpp / 8;
        }

        if (fmtinfo)
                *fmtinfo = info;
}
1070
1071static int
1072xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
1073{
1074        struct v4l2_fh *vfh = file->private_data;
1075        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1076
1077        __xvip_dma_try_format(dma, format, NULL);
1078        return 0;
1079}
1080
1081static int
1082xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
1083{
1084        struct v4l2_fh *vfh = file->private_data;
1085        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1086        const struct xvip_video_format *info;
1087
1088        __xvip_dma_try_format(dma, format, &info);
1089
1090        if (vb2_is_busy(&dma->queue))
1091                return -EBUSY;
1092
1093        if (V4L2_TYPE_IS_MULTIPLANAR(dma->format.type))
1094                dma->format.fmt.pix_mp = format->fmt.pix_mp;
1095        else
1096                dma->format.fmt.pix = format->fmt.pix;
1097
1098        dma->fmtinfo = info;
1099
1100        return 0;
1101}
1102
1103static int
1104xvip_dma_set_ctrl(struct file *file, void *fh, struct v4l2_control *ctl)
1105{
1106        struct v4l2_fh *vfh = file->private_data;
1107        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
1108
1109        if (vb2_is_busy(&dma->queue))
1110                return -EBUSY;
1111
1112        if (ctl->id == V4L2_CID_XILINX_LOW_LATENCY) {
1113                if (ctl->value)
1114                        dma->earlycb_mode = EARLY_CALLBACK_LOW_LATENCY;
1115                else
1116                        dma->earlycb_mode = 0;
1117        }
1118
1119        return 0;
1120}
1121
/*
 * ioctl dispatch table for the DMA video nodes. Format handling is
 * implemented above; buffer management is delegated to the videobuf2
 * helper implementations.
 */
static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
        /* Device capability and format handling */
        .vidioc_querycap                = xvip_dma_querycap,
        .vidioc_enum_fmt_vid_cap        = xvip_dma_enum_format,
        .vidioc_enum_fmt_vid_cap_mplane = xvip_dma_enum_format,
        .vidioc_enum_fmt_vid_out        = xvip_dma_enum_format,
        .vidioc_enum_fmt_vid_out_mplane = xvip_dma_enum_format,
        .vidioc_g_fmt_vid_cap           = xvip_dma_get_format,
        .vidioc_g_fmt_vid_cap_mplane    = xvip_dma_get_format,
        .vidioc_g_fmt_vid_out           = xvip_dma_get_format,
        .vidioc_g_fmt_vid_out_mplane    = xvip_dma_get_format,
        .vidioc_s_fmt_vid_cap           = xvip_dma_set_format,
        .vidioc_s_fmt_vid_cap_mplane    = xvip_dma_set_format,
        .vidioc_s_fmt_vid_out           = xvip_dma_set_format,
        .vidioc_s_fmt_vid_out_mplane    = xvip_dma_set_format,
        .vidioc_s_ctrl                  = xvip_dma_set_ctrl,
        .vidioc_try_fmt_vid_cap         = xvip_dma_try_format,
        .vidioc_try_fmt_vid_cap_mplane  = xvip_dma_try_format,
        .vidioc_try_fmt_vid_out         = xvip_dma_try_format,
        .vidioc_try_fmt_vid_out_mplane  = xvip_dma_try_format,
        /* Buffer queue management, handled by videobuf2 */
        .vidioc_reqbufs                 = vb2_ioctl_reqbufs,
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
        .vidioc_create_bufs             = vb2_ioctl_create_bufs,
        .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_streamon                = vb2_ioctl_streamon,
        .vidioc_streamoff               = vb2_ioctl_streamoff,
};
1150
1151/* -----------------------------------------------------------------------------
1152 * V4L2 file operations
1153 */
1154
/*
 * File operations for the DMA video nodes: ioctls are routed through
 * video_ioctl2() to xvip_dma_ioctl_ops; mmap/poll/release are handled
 * by the videobuf2 file-operation helpers.
 */
static const struct v4l2_file_operations xvip_dma_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open           = v4l2_fh_open,
        .release        = vb2_fop_release,
        .poll           = vb2_fop_poll,
        .mmap           = vb2_fop_mmap,
};
1163
1164/* -----------------------------------------------------------------------------
1165 * Xilinx Video DMA Core
1166 */
1167
1168int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
1169                  enum v4l2_buf_type type, unsigned int port)
1170{
1171        char name[16];
1172        int ret;
1173        u32 i, hsub, vsub, width, height;
1174
1175        dma->xdev = xdev;
1176        dma->port = port;
1177        mutex_init(&dma->lock);
1178        mutex_init(&dma->pipe.lock);
1179        INIT_LIST_HEAD(&dma->queued_bufs);
1180        spin_lock_init(&dma->queued_lock);
1181
1182        dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
1183        dma->format.type = type;
1184
1185        if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
1186                struct v4l2_pix_format_mplane *pix_mp;
1187
1188                pix_mp = &dma->format.fmt.pix_mp;
1189                pix_mp->pixelformat = dma->fmtinfo->fourcc;
1190                pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
1191                pix_mp->field = V4L2_FIELD_NONE;
1192                pix_mp->width = XVIP_DMA_DEF_WIDTH;
1193
1194                /* Handling contiguous data with mplanes */
1195                if (dma->fmtinfo->buffers == 1) {
1196                        pix_mp->plane_fmt[0].bytesperline =
1197                                pix_mp->width * dma->fmtinfo->bpl_factor;
1198                        pix_mp->plane_fmt[0].sizeimage =
1199                                        pix_mp->width * pix_mp->height *
1200                                        dma->fmtinfo->bpp / 8;
1201                } else {
1202                    /* Handling non-contiguous data with mplanes */
1203                        hsub = dma->fmtinfo->hsub;
1204                        vsub = dma->fmtinfo->vsub;
1205                        for (i = 0; i < dma->fmtinfo->buffers; i++) {
1206                                width = pix_mp->width / (i ? hsub : 1);
1207                                height = pix_mp->height / (i ? vsub : 1);
1208                                pix_mp->plane_fmt[i].bytesperline =
1209                                        width * dma->fmtinfo->bpl_factor;
1210                                pix_mp->plane_fmt[i].sizeimage = width * height;
1211                        }
1212                }
1213        } else {
1214                struct v4l2_pix_format *pix;
1215
1216                pix = &dma->format.fmt.pix;
1217                pix->pixelformat = dma->fmtinfo->fourcc;
1218                pix->colorspace = V4L2_COLORSPACE_SRGB;
1219                pix->field = V4L2_FIELD_NONE;
1220                pix->width = XVIP_DMA_DEF_WIDTH;
1221                pix->height = XVIP_DMA_DEF_HEIGHT;
1222                pix->bytesperline = pix->width * dma->fmtinfo->bpl_factor;
1223                pix->sizeimage =
1224                        pix->width * pix->height * dma->fmtinfo->bpp / 8;
1225        }
1226
1227        /* Initialize the media entity... */
1228        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1229            type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1230                dma->pad.flags = MEDIA_PAD_FL_SINK;
1231        else
1232                dma->pad.flags = MEDIA_PAD_FL_SOURCE;
1233
1234        ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
1235        if (ret < 0)
1236                goto error;
1237
1238        /* ... and the video node... */
1239        dma->video.fops = &xvip_dma_fops;
1240        dma->video.v4l2_dev = &xdev->v4l2_dev;
1241        dma->video.queue = &dma->queue;
1242        snprintf(dma->video.name, sizeof(dma->video.name), "%s %s %u",
1243                 xdev->dev->of_node->name,
1244                 (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1245                  type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1246                                        ? "output" : "input",
1247                 port);
1248
1249        dma->video.vfl_type = VFL_TYPE_GRABBER;
1250        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
1251            type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
1252                dma->video.vfl_dir = VFL_DIR_RX;
1253        else
1254                dma->video.vfl_dir = VFL_DIR_TX;
1255
1256        dma->video.release = video_device_release_empty;
1257        dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
1258        dma->video.lock = &dma->lock;
1259
1260        video_set_drvdata(&dma->video, dma);
1261
1262        /* ... and the buffers queue... */
1263        /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
1264         * V4L2 APIs would be inefficient. Testing on the command line with a
1265         * 'cat /dev/video?' thus won't be possible, but given that the driver
1266         * anyway requires a test tool to setup the pipeline before any video
1267         * stream can be started, requiring a specific V4L2 test tool as well
1268         * instead of 'cat' isn't really a drawback.
1269         */
1270        dma->queue.type = type;
1271        dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1272        dma->queue.lock = &dma->lock;
1273        dma->queue.drv_priv = dma;
1274        dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
1275        dma->queue.ops = &xvip_dma_queue_qops;
1276        dma->queue.mem_ops = &vb2_dma_contig_memops;
1277        dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
1278                                   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
1279        dma->queue.dev = dma->xdev->dev;
1280        ret = vb2_queue_init(&dma->queue);
1281        if (ret < 0) {
1282                dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
1283                goto error;
1284        }
1285
1286        /* ... and the DMA channel. */
1287        snprintf(name, sizeof(name), "port%u", port);
1288        dma->dma = dma_request_chan(dma->xdev->dev, name);
1289        if (IS_ERR(dma->dma)) {
1290                ret = PTR_ERR(dma->dma);
1291                if (ret != -EPROBE_DEFER)
1292                        dev_err(dma->xdev->dev,
1293                                "No Video DMA channel found");
1294                goto error;
1295        }
1296
1297        dma->align = 1 << dma->dma->device->copy_align;
1298
1299        ret = video_register_device(&dma->video, VFL_TYPE_GRABBER, -1);
1300        if (ret < 0) {
1301                dev_err(dma->xdev->dev, "failed to register video device\n");
1302                goto error;
1303        }
1304
1305        return 0;
1306
1307error:
1308        xvip_dma_cleanup(dma);
1309        return ret;
1310}
1311
1312void xvip_dma_cleanup(struct xvip_dma *dma)
1313{
1314        if (video_is_registered(&dma->video))
1315                video_unregister_device(&dma->video);
1316
1317        if (!IS_ERR(dma->dma))
1318                dma_release_channel(dma->dma);
1319
1320        media_entity_cleanup(&dma->video.entity);
1321
1322        mutex_destroy(&dma->lock);
1323        mutex_destroy(&dma->pipe.lock);
1324}
1325