linux/drivers/media/platform/xilinx/xilinx-dma.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Video DMA
 *
 * Copyright (C) 2013-2015 Ideas on Board
 * Copyright (C) 2013-2015 Xilinx, Inc.
 *
 * Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 */

#include <linux/dma/xilinx_dma.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "xilinx-dma.h"
#include "xilinx-vip.h"
#include "xilinx-vipp.h"

#define XVIP_DMA_DEF_FORMAT             V4L2_PIX_FMT_YUYV
#define XVIP_DMA_DEF_WIDTH              1920
#define XVIP_DMA_DEF_HEIGHT             1080

/* Minimum and maximum widths are expressed in bytes */
#define XVIP_DMA_MIN_WIDTH              1U
#define XVIP_DMA_MAX_WIDTH              65535U
#define XVIP_DMA_MIN_HEIGHT             1U
#define XVIP_DMA_MAX_HEIGHT             8191U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

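/**
 * xvip_dma_remote_subdev - Get the subdevice connected to a DMA video node pad
 * @local: pad on the DMA video node
 * @pad: index of the remote pad, filled in when not NULL
 *
 * Return: the V4L2 subdevice connected to @local, or NULL if the remote pad
 * doesn't belong to a subdevice.
 */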
static struct v4l2_subdev *
xvip_dma_remote_subdev(struct media_pad *local, u32 *pad)
{
        struct media_pad *remote;

        remote = media_entity_remote_pad(local);
        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
                return NULL;

        if (pad)
                *pad = remote->index;

        return media_entity_to_v4l2_subdev(remote->entity);
}

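/**
 * xvip_dma_verify_format - Check the DMA format against the connected subdev
 * @dma: DMA engine
 *
 * Compare the active format on the remote subdevice pad with the format
 * configured on the video node (media bus code, width, height and colorspace).
 *
 * Return: 0 when the formats match, -EPIPE when no subdevice is connected,
 * -EINVAL when the formats differ, or another negative error code returned by
 * the subdevice get_fmt operation.
 */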
static int xvip_dma_verify_format(struct xvip_dma *dma)
{
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        int ret;

        subdev = xvip_dma_remote_subdev(&dma->pad, &fmt.pad);
        if (subdev == NULL)
                return -EPIPE;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        if (dma->fmtinfo->code != fmt.format.code ||
            dma->format.height != fmt.format.height ||
            dma->format.width != fmt.format.width ||
            dma->format.colorspace != fmt.format.colorspace)
                return -EINVAL;

        return 0;
}

/* -----------------------------------------------------------------------------
 * Pipeline Stream Management
 */

/**
 * xvip_pipeline_start_stop - Start or stop streaming on a pipeline
 * @pipe: The pipeline
 * @start: Start (when true) or stop (when false) the pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * or stop all of them.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int xvip_pipeline_start_stop(struct xvip_pipeline *pipe, bool start)
{
        struct xvip_dma *dma = pipe->output;
        struct media_entity *entity;
        struct media_pad *pad;
        struct v4l2_subdev *subdev;
        int ret;

        entity = &dma->video.entity;
        while (1) {
                pad = &entity->pads[0];
                if (!(pad->flags & MEDIA_PAD_FL_SINK))
                        break;

                pad = media_entity_remote_pad(pad);
                if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
                        break;

                entity = pad->entity;
                subdev = media_entity_to_v4l2_subdev(entity);

                ret = v4l2_subdev_call(subdev, video, s_stream, start);
                if (start && ret < 0 && ret != -ENOIOCTLCMD)
                        return ret;
        }

        return 0;
}

/**
 * xvip_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: The pipeline
 * @on: Turn the stream on when true or off when false
 *
 * The pipeline is shared between all DMA engines connected at its input and
 * output. While the stream state of DMA engines can be controlled
 * independently, pipelines have a shared stream state that enables or disables
 * all entities in the pipeline. For this reason the pipeline uses a streaming
 * counter that tracks the number of DMA engines that have requested the stream
 * to be enabled.
 *
 * When called with the @on argument set to true, this function will increment
 * the pipeline streaming count. If the streaming count reaches the number of
 * DMA engines in the pipeline it will enable all entities that belong to the
 * pipeline.
 *
 * Similarly, when called with the @on argument set to false, this function will
 * decrement the pipeline streaming count and disable all entities in the
 * pipeline when the streaming count reaches zero.
 *
 * Return: 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise. Stopping the pipeline never fails. The pipeline state is
 * not updated when the operation fails.
 */
static int xvip_pipeline_set_stream(struct xvip_pipeline *pipe, bool on)
{
        int ret = 0;

        mutex_lock(&pipe->lock);

        if (on) {
                if (pipe->stream_count == pipe->num_dmas - 1) {
                        ret = xvip_pipeline_start_stop(pipe, true);
                        if (ret < 0)
                                goto done;
                }
                pipe->stream_count++;
        } else {
                if (--pipe->stream_count == 0)
                        xvip_pipeline_start_stop(pipe, false);
        }

done:
        mutex_unlock(&pipe->lock);
        return ret;
}

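/**
 * xvip_pipeline_validate - Validate the pipeline topology
 * @pipe: the pipeline
 * @start: DMA engine at one end of the pipeline
 *
 * Walk the media graph connected to @start, count the DMA video nodes and
 * record the output (capture) node in @pipe. A valid pipeline contains exactly
 * one output and at most one input video node.
 *
 * Return: 0 if the pipeline is valid, -EPIPE if it isn't, or a negative error
 * code if the graph walk can't be initialized.
 */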
static int xvip_pipeline_validate(struct xvip_pipeline *pipe,
                                  struct xvip_dma *start)
{
        struct media_graph graph;
        struct media_entity *entity = &start->video.entity;
        struct media_device *mdev = entity->graph_obj.mdev;
        unsigned int num_inputs = 0;
        unsigned int num_outputs = 0;
        int ret;

        mutex_lock(&mdev->graph_mutex);

        /* Walk the graph to locate the video nodes. */
        ret = media_graph_walk_init(&graph, mdev);
        if (ret) {
                mutex_unlock(&mdev->graph_mutex);
                return ret;
        }

        media_graph_walk_start(&graph, entity);

        while ((entity = media_graph_walk_next(&graph))) {
                struct xvip_dma *dma;

                if (entity->function != MEDIA_ENT_F_IO_V4L)
                        continue;

                dma = to_xvip_dma(media_entity_to_video_device(entity));

                if (dma->pad.flags & MEDIA_PAD_FL_SINK) {
                        pipe->output = dma;
                        num_outputs++;
                } else {
                        num_inputs++;
                }
        }

        mutex_unlock(&mdev->graph_mutex);

        media_graph_walk_cleanup(&graph);

        /* We need exactly one output and zero or one input. */
        if (num_outputs != 1 || num_inputs > 1)
                return -EPIPE;

        pipe->num_dmas = num_inputs + num_outputs;

        return 0;
}

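/* Reset the pipeline state. Called with the pipeline lock held. */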
static void __xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        pipe->num_dmas = 0;
        pipe->output = NULL;
}

/**
 * xvip_pipeline_cleanup - Cleanup the pipeline after streaming
 * @pipe: the pipeline
 *
 * Decrease the pipeline use count and clean it up if we were the last user.
 */
static void xvip_pipeline_cleanup(struct xvip_pipeline *pipe)
{
        mutex_lock(&pipe->lock);

        /* If we're the last user clean up the pipeline. */
        if (--pipe->use_count == 0)
                __xvip_pipeline_cleanup(pipe);

        mutex_unlock(&pipe->lock);
}

/**
 * xvip_pipeline_prepare - Prepare the pipeline for streaming
 * @pipe: the pipeline
 * @dma: DMA engine at one end of the pipeline
 *
 * Validate the pipeline if no user exists yet, otherwise just increase the use
 * count.
 *
 * Return: 0 if successful or -EPIPE if the pipeline is not valid.
 */
static int xvip_pipeline_prepare(struct xvip_pipeline *pipe,
                                 struct xvip_dma *dma)
{
        int ret;

        mutex_lock(&pipe->lock);

        /* If we're the first user validate and initialize the pipeline. */
        if (pipe->use_count == 0) {
                ret = xvip_pipeline_validate(pipe, dma);
                if (ret < 0) {
                        __xvip_pipeline_cleanup(pipe);
                        goto done;
                }
        }

        pipe->use_count++;
        ret = 0;

done:
        mutex_unlock(&pipe->lock);
        return ret;
}

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

/**
 * struct xvip_dma_buffer - Video DMA buffer
 * @buf: vb2 buffer base object
 * @queue: buffer list entry in the DMA engine queued buffers list
 * @dma: DMA channel that uses the buffer
 */
struct xvip_dma_buffer {
        struct vb2_v4l2_buffer buf;
        struct list_head queue;
        struct xvip_dma *dma;
};

#define to_xvip_dma_buffer(vb)  container_of(vb, struct xvip_dma_buffer, buf)

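/*
 * DMA transfer completion callback, invoked by the DMA engine when a frame
 * transfer finishes. Remove the buffer from the queued list, fill in the
 * buffer metadata and return it to videobuf2 as done.
 */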
static void xvip_dma_complete(void *param)
{
        struct xvip_dma_buffer *buf = param;
        struct xvip_dma *dma = buf->dma;

        spin_lock(&dma->queued_lock);
        list_del(&buf->queue);
        spin_unlock(&dma->queued_lock);

        buf->buf.field = V4L2_FIELD_NONE;
        buf->buf.sequence = dma->sequence++;
        buf->buf.vb2_buf.timestamp = ktime_get_ns();
        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, dma->format.sizeimage);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

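/*
 * Negotiate the number of planes and the plane size with videobuf2. The
 * driver uses a single plane sized to hold one full image.
 */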
static int
xvip_dma_queue_setup(struct vb2_queue *vq,
                     unsigned int *nbuffers, unsigned int *nplanes,
                     unsigned int sizes[], struct device *alloc_devs[])
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);

        /* Make sure the image size is large enough. */
        if (*nplanes)
                return sizes[0] < dma->format.sizeimage ? -EINVAL : 0;

        *nplanes = 1;
        sizes[0] = dma->format.sizeimage;

        return 0;
}

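/* Record the DMA engine that owns the buffer before it gets queued. */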
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);

        buf->dma = dma;

        return 0;
}

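/*
 * Queue a buffer for transfer. Prepare an interleaved DMA descriptor covering
 * one full frame, with the direction and scatter-gather parameters selected
 * from the queue type, then submit it. The transfer is only issued here when
 * the queue is already streaming; otherwise it is issued by
 * xvip_dma_start_streaming().
 */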
static void xvip_dma_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct xvip_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
        struct xvip_dma_buffer *buf = to_xvip_dma_buffer(vbuf);
        struct dma_async_tx_descriptor *desc;
        dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
        u32 flags;

        if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_DEV_TO_MEM;
                dma->xt.src_sgl = false;
                dma->xt.dst_sgl = true;
                dma->xt.dst_start = addr;
        } else {
                flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
                dma->xt.dir = DMA_MEM_TO_DEV;
                dma->xt.src_sgl = true;
                dma->xt.dst_sgl = false;
                dma->xt.src_start = addr;
        }

        dma->xt.frame_size = 1;
        dma->sgl[0].size = dma->format.width * dma->fmtinfo->bpp;
        dma->sgl[0].icg = dma->format.bytesperline - dma->sgl[0].size;
        dma->xt.numf = dma->format.height;

        desc = dmaengine_prep_interleaved_dma(dma->dma, &dma->xt, flags);
        if (!desc) {
                dev_err(dma->xdev->dev, "Failed to prepare DMA transfer\n");
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                return;
        }
        desc->callback = xvip_dma_complete;
        desc->callback_param = buf;

        spin_lock_irq(&dma->queued_lock);
        list_add_tail(&buf->queue, &dma->queued_bufs);
        spin_unlock_irq(&dma->queued_lock);

        dmaengine_submit(desc);

        if (vb2_is_streaming(&dma->queue))
                dma_async_issue_pending(dma->dma);
}

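/*
 * Start streaming: start the media pipeline, verify the format against the
 * connected subdevice, prepare the shared pipeline object and issue the
 * pending DMA transfers before enabling the entities in the pipeline. On
 * failure all queued buffers are returned to videobuf2 in the QUEUED state.
 */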
static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_dma_buffer *buf, *nbuf;
        struct xvip_pipeline *pipe;
        int ret;

        dma->sequence = 0;

        /*
         * Start streaming on the pipeline. No link touching an entity in the
         * pipeline can be activated or deactivated once streaming is started.
         *
         * Use the pipeline object embedded in the first DMA object that starts
         * streaming.
         */
        pipe = dma->video.entity.pipe
             ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe;

        ret = media_pipeline_start(&dma->video.entity, &pipe->pipe);
        if (ret < 0)
                goto error;

        /* Verify that the configured format matches the output of the
         * connected subdev.
         */
        ret = xvip_dma_verify_format(dma);
        if (ret < 0)
                goto error_stop;

        ret = xvip_pipeline_prepare(pipe, dma);
        if (ret < 0)
                goto error_stop;

        /* Start the DMA engine. This must be done before starting the blocks
         * in the pipeline to avoid DMA synchronization issues.
         */
        dma_async_issue_pending(dma->dma);

        /* Start the pipeline. */
        xvip_pipeline_set_stream(pipe, true);

        return 0;

error_stop:
        media_pipeline_stop(&dma->video.entity);

error:
        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_QUEUED);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);

        return ret;
}

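/*
 * Stop streaming: disable the entities in the pipeline, terminate the DMA
 * engine, release the shared pipeline object and return all queued buffers to
 * videobuf2 in the ERROR state.
 */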
static void xvip_dma_stop_streaming(struct vb2_queue *vq)
{
        struct xvip_dma *dma = vb2_get_drv_priv(vq);
        struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity);
        struct xvip_dma_buffer *buf, *nbuf;

        /* Stop the pipeline. */
        xvip_pipeline_set_stream(pipe, false);

        /* Stop and reset the DMA engine. */
        dmaengine_terminate_all(dma->dma);

        /* Cleanup the pipeline and mark it as being stopped. */
        xvip_pipeline_cleanup(pipe);
        media_pipeline_stop(&dma->video.entity);

        /* Give back all queued buffers to videobuf2. */
        spin_lock_irq(&dma->queued_lock);
        list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) {
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
                list_del(&buf->queue);
        }
        spin_unlock_irq(&dma->queued_lock);
}

static const struct vb2_ops xvip_dma_queue_qops = {
        .queue_setup = xvip_dma_queue_setup,
        .buf_prepare = xvip_dma_buffer_prepare,
        .buf_queue = xvip_dma_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = xvip_dma_start_streaming,
        .stop_streaming = xvip_dma_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
xvip_dma_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        cap->capabilities = dma->xdev->v4l2_caps | V4L2_CAP_STREAMING |
                            V4L2_CAP_DEVICE_CAPS;

        strscpy(cap->driver, "xilinx-vipp", sizeof(cap->driver));
        strscpy(cap->card, dma->video.name, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%pOFn:%u",
                 dma->xdev->dev->of_node, dma->port);

        return 0;
}

/* FIXME: without this callback function, some applications are not configured
 * with correct formats, which results in frames in the wrong format. Whether
 * this callback needs to be required is not clearly defined, so it should be
 * clarified through the mailing list.
 */
static int
xvip_dma_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        if (f->index > 0)
                return -EINVAL;

        f->pixelformat = dma->format.pixelformat;

        return 0;
}

static int
xvip_dma_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        format->fmt.pix = dma->format;

        return 0;
}

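/**
 * __xvip_dma_try_format - Try or adjust a pixel format
 * @dma: DMA engine
 * @pix: pixel format to adjust
 * @fmtinfo: format information for the adjusted format, filled in when not NULL
 *
 * Adjust @pix to the closest format supported by the DMA engine: fall back to
 * the default pixel format when the requested one isn't supported, and clamp
 * the width, height and bytes per line to the hardware limits and DMA
 * alignment constraints.
 */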
static void
__xvip_dma_try_format(struct xvip_dma *dma, struct v4l2_pix_format *pix,
                      const struct xvip_video_format **fmtinfo)
{
        const struct xvip_video_format *info;
        unsigned int min_width;
        unsigned int max_width;
        unsigned int min_bpl;
        unsigned int max_bpl;
        unsigned int width;
        unsigned int align;
        unsigned int bpl;

        /* Retrieve format information and select the default format if the
         * requested format isn't supported.
         */
        info = xvip_get_format_by_fourcc(pix->pixelformat);
        if (IS_ERR(info))
                info = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);

        pix->pixelformat = info->fourcc;
        pix->field = V4L2_FIELD_NONE;

        /* The transfer alignment requirements are expressed in bytes. Compute
         * the minimum and maximum values, clamp the requested width and convert
         * it back to pixels.
         */
        align = lcm(dma->align, info->bpp);
        min_width = roundup(XVIP_DMA_MIN_WIDTH, align);
        max_width = rounddown(XVIP_DMA_MAX_WIDTH, align);
        width = rounddown(pix->width * info->bpp, align);

        pix->width = clamp(width, min_width, max_width) / info->bpp;
        pix->height = clamp(pix->height, XVIP_DMA_MIN_HEIGHT,
                            XVIP_DMA_MAX_HEIGHT);

        /* Clamp the requested bytes per line value, rounded down to the DMA
         * alignment, between the number of bytes needed for one line at the
         * clamped width and the maximum supported transfer width.
         */
        min_bpl = pix->width * info->bpp;
        max_bpl = rounddown(XVIP_DMA_MAX_WIDTH, dma->align);
        bpl = rounddown(pix->bytesperline, dma->align);

        pix->bytesperline = clamp(bpl, min_bpl, max_bpl);
        pix->sizeimage = pix->bytesperline * pix->height;

        if (fmtinfo)
                *fmtinfo = info;
}

static int
xvip_dma_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);

        __xvip_dma_try_format(dma, &format->fmt.pix, NULL);
        return 0;
}

static int
xvip_dma_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct xvip_dma *dma = to_xvip_dma(vfh->vdev);
        const struct xvip_video_format *info;

        __xvip_dma_try_format(dma, &format->fmt.pix, &info);

        if (vb2_is_busy(&dma->queue))
                return -EBUSY;

        dma->format = format->fmt.pix;
        dma->fmtinfo = info;

        return 0;
}

static const struct v4l2_ioctl_ops xvip_dma_ioctl_ops = {
        .vidioc_querycap                = xvip_dma_querycap,
        .vidioc_enum_fmt_vid_cap        = xvip_dma_enum_format,
        .vidioc_g_fmt_vid_cap           = xvip_dma_get_format,
        .vidioc_g_fmt_vid_out           = xvip_dma_get_format,
        .vidioc_s_fmt_vid_cap           = xvip_dma_set_format,
        .vidioc_s_fmt_vid_out           = xvip_dma_set_format,
        .vidioc_try_fmt_vid_cap         = xvip_dma_try_format,
        .vidioc_try_fmt_vid_out         = xvip_dma_try_format,
        .vidioc_reqbufs                 = vb2_ioctl_reqbufs,
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
        .vidioc_create_bufs             = vb2_ioctl_create_bufs,
        .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_streamon                = vb2_ioctl_streamon,
        .vidioc_streamoff               = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

static const struct v4l2_file_operations xvip_dma_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open           = v4l2_fh_open,
        .release        = vb2_fop_release,
        .poll           = vb2_fop_poll,
        .mmap           = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Xilinx Video DMA Core
 */

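/**
 * xvip_dma_init - Initialize a DMA engine and its video node
 * @xdev: composite video device the DMA engine belongs to
 * @dma: DMA engine to initialize
 * @type: buffer type, capture or output
 * @port: port number, used to name the video node and the DMA channel
 *
 * Set up the default format, the media entity, the video node and the
 * videobuf2 queue, request the "portN" DMA channel (where N is @port) and
 * register the video device.
 *
 * Return: 0 on success or a negative error code otherwise. On failure all
 * acquired resources are released through xvip_dma_cleanup().
 */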
int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
                  enum v4l2_buf_type type, unsigned int port)
{
        char name[16];
        int ret;

        dma->xdev = xdev;
        dma->port = port;
        mutex_init(&dma->lock);
        mutex_init(&dma->pipe.lock);
        INIT_LIST_HEAD(&dma->queued_bufs);
        spin_lock_init(&dma->queued_lock);

        dma->fmtinfo = xvip_get_format_by_fourcc(XVIP_DMA_DEF_FORMAT);
        dma->format.pixelformat = dma->fmtinfo->fourcc;
        dma->format.colorspace = V4L2_COLORSPACE_SRGB;
        dma->format.field = V4L2_FIELD_NONE;
        dma->format.width = XVIP_DMA_DEF_WIDTH;
        dma->format.height = XVIP_DMA_DEF_HEIGHT;
        dma->format.bytesperline = dma->format.width * dma->fmtinfo->bpp;
        dma->format.sizeimage = dma->format.bytesperline * dma->format.height;

        /* Initialize the media entity... */
        dma->pad.flags = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                       ? MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

        ret = media_entity_pads_init(&dma->video.entity, 1, &dma->pad);
        if (ret < 0)
                goto error;

        /* ... and the video node... */
        dma->video.fops = &xvip_dma_fops;
        dma->video.v4l2_dev = &xdev->v4l2_dev;
        dma->video.queue = &dma->queue;
        snprintf(dma->video.name, sizeof(dma->video.name), "%pOFn %s %u",
                 xdev->dev->of_node,
                 type == V4L2_BUF_TYPE_VIDEO_CAPTURE ? "output" : "input",
                 port);
        dma->video.vfl_type = VFL_TYPE_VIDEO;
        dma->video.vfl_dir = type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                           ? VFL_DIR_RX : VFL_DIR_TX;
        dma->video.release = video_device_release_empty;
        dma->video.ioctl_ops = &xvip_dma_ioctl_ops;
        dma->video.lock = &dma->lock;
        dma->video.device_caps = V4L2_CAP_STREAMING;
        if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                dma->video.device_caps |= V4L2_CAP_VIDEO_CAPTURE;
        else
                dma->video.device_caps |= V4L2_CAP_VIDEO_OUTPUT;

        video_set_drvdata(&dma->video, dma);

        /* ... and the buffers queue... */
        /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write()
         * V4L2 APIs would be inefficient. Testing on the command line with a
         * 'cat /dev/video?' thus won't be possible, but given that the driver
         * requires a test tool to set up the pipeline before any video stream
         * can be started anyway, requiring a specific V4L2 test tool instead
         * of 'cat' isn't really a drawback.
         */
        dma->queue.type = type;
        dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        dma->queue.lock = &dma->lock;
        dma->queue.drv_priv = dma;
        dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer);
        dma->queue.ops = &xvip_dma_queue_qops;
        dma->queue.mem_ops = &vb2_dma_contig_memops;
        dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                                   | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
        dma->queue.dev = dma->xdev->dev;
        ret = vb2_queue_init(&dma->queue);
        if (ret < 0) {
                dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n");
                goto error;
        }

        /* ... and the DMA channel. */
        snprintf(name, sizeof(name), "port%u", port);
        dma->dma = dma_request_chan(dma->xdev->dev, name);
        if (IS_ERR(dma->dma)) {
                ret = PTR_ERR(dma->dma);
                if (ret != -EPROBE_DEFER)
                        dev_err(dma->xdev->dev, "no VDMA channel found\n");
                goto error;
        }

        dma->align = 1 << dma->dma->device->copy_align;

        ret = video_register_device(&dma->video, VFL_TYPE_VIDEO, -1);
        if (ret < 0) {
                dev_err(dma->xdev->dev, "failed to register video device\n");
                goto error;
        }

        return 0;

error:
        xvip_dma_cleanup(dma);
        return ret;
}

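/**
 * xvip_dma_cleanup - Release the resources associated with a DMA engine
 * @dma: DMA engine to clean up
 *
 * Unregister the video node, release the DMA channel and clean up the media
 * entity. The function is safe to call on a partially initialized DMA engine.
 */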
void xvip_dma_cleanup(struct xvip_dma *dma)
{
        if (video_is_registered(&dma->video))
                video_unregister_device(&dma->video);

        if (!IS_ERR_OR_NULL(dma->dma))
                dma_release_channel(dma->dma);

        media_entity_cleanup(&dma->video.entity);

        mutex_destroy(&dma->lock);
        mutex_destroy(&dma->pipe.lock);
}