linux/drivers/media/platform/vsp1/vsp1_video.c
/*
 * vsp1_video.c  --  R-Car VSP1 Video Node
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <linux/videodev2.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

#include "vsp1.h"
#include "vsp1_bru.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"
#include "vsp1_video.h"

#define VSP1_VIDEO_DEF_FORMAT           V4L2_PIX_FMT_YUYV
#define VSP1_VIDEO_DEF_WIDTH            1024
#define VSP1_VIDEO_DEF_HEIGHT           768

#define VSP1_VIDEO_MIN_WIDTH            2U
#define VSP1_VIDEO_MAX_WIDTH            8190U
#define VSP1_VIDEO_MIN_HEIGHT           2U
#define VSP1_VIDEO_MAX_HEIGHT           8190U

/* -----------------------------------------------------------------------------
 * Helper functions
 */

static struct v4l2_subdev *
vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
{
        struct media_pad *remote;

        remote = media_entity_remote_pad(local);
        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
                return NULL;

        if (pad)
                *pad = remote->index;

        return media_entity_to_v4l2_subdev(remote->entity);
}

static int vsp1_video_verify_format(struct vsp1_video *video)
{
        struct v4l2_subdev_format fmt;
        struct v4l2_subdev *subdev;
        int ret;

        subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
        if (subdev == NULL)
                return -EINVAL;

        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
        if (ret < 0)
                return ret == -ENOIOCTLCMD ? -EINVAL : ret;

        if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
            video->rwpf->format.height != fmt.format.height ||
            video->rwpf->format.width != fmt.format.width)
                return -EINVAL;

        return 0;
}

static int __vsp1_video_try_format(struct vsp1_video *video,
                                   struct v4l2_pix_format_mplane *pix,
                                   const struct vsp1_format_info **fmtinfo)
{
        static const u32 xrgb_formats[][2] = {
                { V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
                { V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
                { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
                { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
        };

        const struct vsp1_format_info *info;
        unsigned int width = pix->width;
        unsigned int height = pix->height;
        unsigned int i;

        /* Backward compatibility: replace deprecated RGB formats by their XRGB
         * equivalent. This selects the format older userspace applications want
         * while still exposing the new format.
         */
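        /* For instance, a request for the deprecated V4L2_PIX_FMT_RGB32 is
         * transparently replaced with V4L2_PIX_FMT_XRGB32 by the loop below.
         */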
        for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
                if (xrgb_formats[i][0] == pix->pixelformat) {
                        pix->pixelformat = xrgb_formats[i][1];
                        break;
                }
        }

        /* Retrieve format information and select the default format if the
         * requested format isn't supported.
         */
        info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
        if (info == NULL)
                info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);

        pix->pixelformat = info->fourcc;
        pix->colorspace = V4L2_COLORSPACE_SRGB;
        pix->field = V4L2_FIELD_NONE;

        if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
            info->fourcc == V4L2_PIX_FMT_HSV32)
                pix->hsv_enc = V4L2_HSV_ENC_256;

        memset(pix->reserved, 0, sizeof(pix->reserved));

        /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
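        /* Example (illustrative values only): a 1023x767 request for a 4:2:0
         * format such as NV12 (hsub = 2, vsub = 2) is rounded down to 1022x766
         * here before being clamped against the hardware limits below.
         */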
        width = round_down(width, info->hsub);
        height = round_down(height, info->vsub);

        /* Clamp the width and height. */
        pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
        pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
                            VSP1_VIDEO_MAX_HEIGHT);

        /* Compute and clamp the stride and image size. While not documented in
         * the datasheet, strides not aligned to a multiple of 128 bytes result
         * in image corruption.
         */
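        /* Example (illustrative values only): a 1022-pixel-wide single-plane
         * YUYV image (16 bits per pixel) needs at least 1022 * 16 / 8 = 2044
         * bytes per line, which round_up(..., 128) below turns into a
         * 2048-byte stride; sizeimage then becomes 2048 * height.
         */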
        for (i = 0; i < min(info->planes, 2U); ++i) {
                unsigned int hsub = i > 0 ? info->hsub : 1;
                unsigned int vsub = i > 0 ? info->vsub : 1;
                unsigned int align = 128;
                unsigned int bpl;

                bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
                              pix->width / hsub * info->bpp[i] / 8,
                              round_down(65535U, align));

                pix->plane_fmt[i].bytesperline = round_up(bpl, align);
                pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
                                            * pix->height / vsub;
        }

        if (info->planes == 3) {
                /* The second and third planes must have the same stride. */
                pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
                pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
        }

        pix->num_planes = info->planes;

        if (fmtinfo)
                *fmtinfo = info;

        return 0;
}

/* -----------------------------------------------------------------------------
 * VSP1 Partition Algorithm support
 */

static void vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
{
        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
        const struct v4l2_mbus_framefmt *format;
        struct vsp1_entity *entity;
        unsigned int div_size;

        format = vsp1_entity_get_pad_format(&pipe->output->entity,
                                            pipe->output->entity.config,
                                            RWPF_PAD_SOURCE);
        div_size = format->width;

        /* Gen2 hardware doesn't require image partitioning. */
        if (vsp1->info->gen == 2) {
                pipe->div_size = div_size;
                pipe->partitions = 1;
                return;
        }

        list_for_each_entry(entity, &pipe->entities, list_pipe) {
                unsigned int entity_max = VSP1_VIDEO_MAX_WIDTH;

                if (entity->ops->max_width) {
                        entity_max = entity->ops->max_width(entity, pipe);
                        if (entity_max)
                                div_size = min(div_size, entity_max);
                }
        }

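        /* Example (hypothetical figures): with a 3840-pixel-wide WPF output
         * and one entity capping its maximum width at 1024, the frame is
         * processed in DIV_ROUND_UP(3840, 1024) = 4 partitions.
         */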
        pipe->div_size = div_size;
        pipe->partitions = DIV_ROUND_UP(format->width, div_size);
}

/**
 * vsp1_video_partition - Calculate the active partition output window
 *
 * @pipe: the pipeline
 * @div_size: pre-determined maximum partition division size
 * @index: partition index
 *
 * Returns a v4l2_rect describing the partition window.
 */
static struct v4l2_rect vsp1_video_partition(struct vsp1_pipeline *pipe,
                                             unsigned int div_size,
                                             unsigned int index)
{
        const struct v4l2_mbus_framefmt *format;
        struct v4l2_rect partition;
        unsigned int modulus;

        format = vsp1_entity_get_pad_format(&pipe->output->entity,
                                            pipe->output->entity.config,
                                            RWPF_PAD_SOURCE);

        /* A single partition simply processes the output size in full. */
        if (pipe->partitions <= 1) {
                partition.left = 0;
                partition.top = 0;
                partition.width = format->width;
                partition.height = format->height;
                return partition;
        }

        /* Initialise the partition with sane starting conditions. */
        partition.left = index * div_size;
        partition.top = 0;
        partition.width = div_size;
        partition.height = format->height;

        modulus = format->width % div_size;

        /*
         * We need to prevent the last partition from being smaller than the
         * *minimum* width supported by the hardware.
         *
         * If the modulus is less than half of the partition size,
         * the penultimate partition is reduced to half, which is then added
         * to the final partition: |1234|1234|1234|12|341|
         * to prevent this:        |1234|1234|1234|1234|1|.
         */
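        /* Worked example (illustrative values only): for a 2100-pixel-wide
         * frame with div_size = 1024, partitions = 3 and modulus = 52 < 512,
         * so the partitions become 1024, 512 and 512 + 52 = 564 pixels wide.
         * For a 3840-pixel-wide frame the modulus is 768 >= 512 and the last
         * partition simply keeps that width.
         */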
        if (modulus) {
                /*
                 * pipe->partitions is 1 based, whilst index is a 0 based index.
                 * Normalise this locally.
                 */
                unsigned int partitions = pipe->partitions - 1;

                if (modulus < div_size / 2) {
                        if (index == partitions - 1) {
                                /* Halve the penultimate partition. */
                                partition.width = div_size / 2;
                        } else if (index == partitions) {
                                /* Increase the final partition. */
                                partition.width = (div_size / 2) + modulus;
                                partition.left -= div_size / 2;
                        }
                } else if (index == partitions) {
                        partition.width = modulus;
                }
        }

        return partition;
}

/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * vsp1_video_complete_buffer - Complete the current buffer
 * @video: the video node
 *
 * This function completes the current buffer by filling its sequence number,
 * time stamp and payload size, and hands it back to the videobuf core.
 *
 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 * the VSP1 needs to constantly supply frames to the display. In that case, if
 * no other buffer is queued, reuse the one that has just been processed instead
 * of handing it back to the videobuf core.
 *
 * Return the next queued buffer or NULL if the queue is empty.
 */
static struct vsp1_vb2_buffer *
vsp1_video_complete_buffer(struct vsp1_video *video)
{
        struct vsp1_pipeline *pipe = video->rwpf->pipe;
        struct vsp1_vb2_buffer *next = NULL;
        struct vsp1_vb2_buffer *done;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&video->irqlock, flags);

        if (list_empty(&video->irqqueue)) {
                spin_unlock_irqrestore(&video->irqlock, flags);
                return NULL;
        }

        done = list_first_entry(&video->irqqueue,
                                struct vsp1_vb2_buffer, queue);

        /* In DU output mode reuse the buffer if the list is singular. */
        if (pipe->lif && list_is_singular(&video->irqqueue)) {
                spin_unlock_irqrestore(&video->irqlock, flags);
                return done;
        }

        list_del(&done->queue);

        if (!list_empty(&video->irqqueue))
                next = list_first_entry(&video->irqqueue,
                                        struct vsp1_vb2_buffer, queue);

        spin_unlock_irqrestore(&video->irqlock, flags);

        done->buf.sequence = pipe->sequence;
        done->buf.vb2_buf.timestamp = ktime_get_ns();
        for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
                vb2_set_plane_payload(&done->buf.vb2_buf, i,
                                      vb2_plane_size(&done->buf.vb2_buf, i));
        vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);

        return next;
}

static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
                                 struct vsp1_rwpf *rwpf)
{
        struct vsp1_video *video = rwpf->video;
        struct vsp1_vb2_buffer *buf;

        buf = vsp1_video_complete_buffer(video);
        if (buf == NULL)
                return;

        video->rwpf->mem = buf->mem;
        pipe->buffers_ready |= 1 << video->pipe_index;
}

static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
                                              struct vsp1_dl_list *dl)
{
        struct vsp1_entity *entity;

        pipe->partition = vsp1_video_partition(pipe, pipe->div_size,
                                               pipe->current_partition);

        list_for_each_entry(entity, &pipe->entities, list_pipe) {
                if (entity->ops->configure)
                        entity->ops->configure(entity, pipe, dl,
                                               VSP1_ENTITY_PARAMS_PARTITION);
        }
}

static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
{
        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
        struct vsp1_entity *entity;

        if (!pipe->dl)
                pipe->dl = vsp1_dl_list_get(pipe->output->dlm);

        /*
         * Start with the runtime parameters as the configure operation can
         * compute/cache information needed when configuring partitions. This
         * is the case with flipping in the WPF.
         */
        list_for_each_entry(entity, &pipe->entities, list_pipe) {
                if (entity->ops->configure)
                        entity->ops->configure(entity, pipe, pipe->dl,
                                               VSP1_ENTITY_PARAMS_RUNTIME);
        }

        /* Run the first partition */
        pipe->current_partition = 0;
        vsp1_video_pipeline_run_partition(pipe, pipe->dl);

        /* Process consecutive partitions as necessary */
        for (pipe->current_partition = 1;
             pipe->current_partition < pipe->partitions;
             pipe->current_partition++) {
                struct vsp1_dl_list *dl;

                /*
                 * Partition configuration operations will utilise
                 * the pipe->current_partition variable to determine
                 * the work they should complete.
                 */
                dl = vsp1_dl_list_get(pipe->output->dlm);

                /*
                 * An incomplete chain will still function, but output only
                 * the partitions that had a dl available. The frame end
                 * interrupt will be marked on the last dl in the chain.
                 */
                if (!dl) {
                        dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
                        break;
                }

                vsp1_video_pipeline_run_partition(pipe, dl);
                vsp1_dl_list_add_chain(pipe->dl, dl);
        }

        /* Complete, and commit the head display list. */
        vsp1_dl_list_commit(pipe->dl);
        pipe->dl = NULL;

        vsp1_pipeline_run(pipe);
}

static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
        enum vsp1_pipeline_state state;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&pipe->irqlock, flags);

        /* Complete buffers on all video nodes. */
        for (i = 0; i < vsp1->info->rpf_count; ++i) {
                if (!pipe->inputs[i])
                        continue;

                vsp1_video_frame_end(pipe, pipe->inputs[i]);
        }

        vsp1_video_frame_end(pipe, pipe->output);

        state = pipe->state;
        pipe->state = VSP1_PIPELINE_STOPPED;

        /* If a stop has been requested, mark the pipeline as stopped and
         * return. Otherwise restart the pipeline if ready.
         */
        if (state == VSP1_PIPELINE_STOPPING)
                wake_up(&pipe->wq);
        else if (vsp1_pipeline_ready(pipe))
                vsp1_video_pipeline_run(pipe);

        spin_unlock_irqrestore(&pipe->irqlock, flags);
}

static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
                                            struct vsp1_rwpf *input,
                                            struct vsp1_rwpf *output)
{
        struct media_entity_enum ent_enum;
        struct vsp1_entity *entity;
        struct media_pad *pad;
        bool bru_found = false;
        int ret;

        ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
        if (ret < 0)
                return ret;

        pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);

        while (1) {
                if (pad == NULL) {
                        ret = -EPIPE;
                        goto out;
                }

                /* We've reached a video node; that shouldn't have happened. */
                if (!is_media_entity_v4l2_subdev(pad->entity)) {
                        ret = -EPIPE;
                        goto out;
                }

                entity = to_vsp1_entity(
                        media_entity_to_v4l2_subdev(pad->entity));

                /* A BRU is present in the pipeline; store the BRU input pad
                 * number in the input RPF for use when configuring the RPF.
                 */
                if (entity->type == VSP1_ENTITY_BRU) {
                        struct vsp1_bru *bru = to_bru(&entity->subdev);

                        bru->inputs[pad->index].rpf = input;
                        input->bru_input = pad->index;

                        bru_found = true;
                }

                /* We've reached the WPF, we're done. */
                if (entity->type == VSP1_ENTITY_WPF)
                        break;

                /* Ensure the branch has no loop. */
                if (media_entity_enum_test_and_set(&ent_enum,
                                                   &entity->subdev.entity)) {
                        ret = -EPIPE;
                        goto out;
                }

                /* UDS can't be chained. */
                if (entity->type == VSP1_ENTITY_UDS) {
                        if (pipe->uds) {
                                ret = -EPIPE;
                                goto out;
                        }

                        pipe->uds = entity;
                        pipe->uds_input = bru_found ? pipe->bru
                                        : &input->entity;
                }

                /* Follow the source link. The link setup operations ensure
                 * that the output fan-out can't be more than one; there is thus
                 * no need to verify here that only a single source link is
                 * activated.
                 */
                pad = &entity->pads[entity->source_pad];
                pad = media_entity_remote_pad(pad);
        }

        /* The last entity must be the output WPF. */
        if (entity != &output->entity)
                ret = -EPIPE;

out:
        media_entity_enum_cleanup(&ent_enum);

        return ret;
}

static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
                                     struct vsp1_video *video)
{
        struct media_graph graph;
        struct media_entity *entity = &video->video.entity;
        struct media_device *mdev = entity->graph_obj.mdev;
        unsigned int i;
        int ret;

        /* Walk the graph to locate the entities and video nodes. */
        ret = media_graph_walk_init(&graph, mdev);
        if (ret)
                return ret;

        media_graph_walk_start(&graph, entity);

        while ((entity = media_graph_walk_next(&graph))) {
                struct v4l2_subdev *subdev;
                struct vsp1_rwpf *rwpf;
                struct vsp1_entity *e;

                if (!is_media_entity_v4l2_subdev(entity))
                        continue;

                subdev = media_entity_to_v4l2_subdev(entity);
                e = to_vsp1_entity(subdev);
                list_add_tail(&e->list_pipe, &pipe->entities);

                if (e->type == VSP1_ENTITY_RPF) {
                        rwpf = to_rwpf(subdev);
                        pipe->inputs[rwpf->entity.index] = rwpf;
                        rwpf->video->pipe_index = ++pipe->num_inputs;
                        rwpf->pipe = pipe;
                } else if (e->type == VSP1_ENTITY_WPF) {
                        rwpf = to_rwpf(subdev);
                        pipe->output = rwpf;
                        rwpf->video->pipe_index = 0;
                        rwpf->pipe = pipe;
                } else if (e->type == VSP1_ENTITY_LIF) {
                        pipe->lif = e;
                } else if (e->type == VSP1_ENTITY_BRU) {
                        pipe->bru = e;
                }
        }

        media_graph_walk_cleanup(&graph);

        /* We need one output and at least one input. */
        if (pipe->num_inputs == 0 || !pipe->output)
                return -EPIPE;

        /* Follow links downstream for each input and make sure the graph
         * contains no loop and that all branches end at the output WPF.
         */
        for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
                if (!pipe->inputs[i])
                        continue;

                ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
                                                       pipe->output);
                if (ret < 0)
                        return ret;
        }

        return 0;
}

static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
                                    struct vsp1_video *video)
{
        vsp1_pipeline_init(pipe);

        pipe->frame_end = vsp1_video_pipeline_frame_end;

        return vsp1_video_pipeline_build(pipe, video);
}

static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
{
        struct vsp1_pipeline *pipe;
        int ret;

        /* Get a pipeline object for the video node. If a pipeline has already
         * been allocated, just increment its reference count and return it.
         * Otherwise allocate a new pipeline and initialize it; it will be freed
         * when the last reference is released.
         */
        if (!video->rwpf->pipe) {
                pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
                if (!pipe)
                        return ERR_PTR(-ENOMEM);

                ret = vsp1_video_pipeline_init(pipe, video);
                if (ret < 0) {
                        vsp1_pipeline_reset(pipe);
                        kfree(pipe);
                        return ERR_PTR(ret);
                }
        } else {
                pipe = video->rwpf->pipe;
                kref_get(&pipe->kref);
        }

        return pipe;
}

static void vsp1_video_pipeline_release(struct kref *kref)
{
        struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);

        vsp1_pipeline_reset(pipe);
        kfree(pipe);
}

static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
{
        struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;

        mutex_lock(&mdev->graph_mutex);
        kref_put(&pipe->kref, vsp1_video_pipeline_release);
        mutex_unlock(&mdev->graph_mutex);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */

static int
vsp1_video_queue_setup(struct vb2_queue *vq,
                       unsigned int *nbuffers, unsigned int *nplanes,
                       unsigned int sizes[], struct device *alloc_devs[])
{
        struct vsp1_video *video = vb2_get_drv_priv(vq);
        const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
        unsigned int i;

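        /* A non-zero *nplanes means the request comes from VIDIOC_CREATE_BUFS
         * with caller-provided plane sizes; only validate them against the
         * currently configured format in that case.
         */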
        if (*nplanes) {
                if (*nplanes != format->num_planes)
                        return -EINVAL;

                for (i = 0; i < *nplanes; i++)
                        if (sizes[i] < format->plane_fmt[i].sizeimage)
                                return -EINVAL;
                return 0;
        }

        *nplanes = format->num_planes;

        for (i = 0; i < format->num_planes; ++i)
                sizes[i] = format->plane_fmt[i].sizeimage;

        return 0;
}

static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
        struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
        const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
        unsigned int i;

        if (vb->num_planes < format->num_planes)
                return -EINVAL;

        for (i = 0; i < vb->num_planes; ++i) {
                buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);

                if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
                        return -EINVAL;
        }

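        /* Clear the DMA addresses of the planes not used by the format. */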
        for ( ; i < 3; ++i)
                buf->mem.addr[i] = 0;

        return 0;
}

static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
        struct vsp1_pipeline *pipe = video->rwpf->pipe;
        struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
        unsigned long flags;
        bool empty;

        spin_lock_irqsave(&video->irqlock, flags);
        empty = list_empty(&video->irqqueue);
        list_add_tail(&buf->queue, &video->irqqueue);
        spin_unlock_irqrestore(&video->irqlock, flags);

        if (!empty)
                return;

        spin_lock_irqsave(&pipe->irqlock, flags);

        video->rwpf->mem = buf->mem;
        pipe->buffers_ready |= 1 << video->pipe_index;

        if (vb2_is_streaming(&video->queue) &&
            vsp1_pipeline_ready(pipe))
                vsp1_video_pipeline_run(pipe);

        spin_unlock_irqrestore(&pipe->irqlock, flags);
}

static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
{
        struct vsp1_entity *entity;

        /* Determine this pipeline's sizes for image partitioning support. */
        vsp1_video_pipeline_setup_partitions(pipe);

        /* Prepare the display list. */
        pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
        if (!pipe->dl)
                return -ENOMEM;

        if (pipe->uds) {
                struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);

                /* If a BRU is present in the pipeline before the UDS, the alpha
                 * component doesn't need to be scaled as the BRU output alpha
                 * value is fixed to 255. Otherwise we need to scale the alpha
                 * component only when available at the input RPF.
                 */
                if (pipe->uds_input->type == VSP1_ENTITY_BRU) {
                        uds->scale_alpha = false;
                } else {
                        struct vsp1_rwpf *rpf =
                                to_rwpf(&pipe->uds_input->subdev);

                        uds->scale_alpha = rpf->fmtinfo->alpha;
                }
        }

        list_for_each_entry(entity, &pipe->entities, list_pipe) {
                vsp1_entity_route_setup(entity, pipe->dl);

                if (entity->ops->configure)
                        entity->ops->configure(entity, pipe, pipe->dl,
                                               VSP1_ENTITY_PARAMS_INIT);
        }

        return 0;
}

static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct vsp1_video *video = vb2_get_drv_priv(vq);
        struct vsp1_pipeline *pipe = video->rwpf->pipe;
        unsigned long flags;
        int ret;

        mutex_lock(&pipe->lock);
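        /* The pipeline has one video node per input plus one for the output,
         * so stream_count equals num_inputs exactly when the last video node
         * starts streaming; set up the pipeline hardware at that point.
         */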
        if (pipe->stream_count == pipe->num_inputs) {
                ret = vsp1_video_setup_pipeline(pipe);
                if (ret < 0) {
                        mutex_unlock(&pipe->lock);
                        return ret;
                }
        }

        pipe->stream_count++;
        mutex_unlock(&pipe->lock);

        spin_lock_irqsave(&pipe->irqlock, flags);
        if (vsp1_pipeline_ready(pipe))
                vsp1_video_pipeline_run(pipe);
        spin_unlock_irqrestore(&pipe->irqlock, flags);

        return 0;
}

static void vsp1_video_stop_streaming(struct vb2_queue *vq)
{
        struct vsp1_video *video = vb2_get_drv_priv(vq);
        struct vsp1_pipeline *pipe = video->rwpf->pipe;
        struct vsp1_vb2_buffer *buffer;
        unsigned long flags;
        int ret;

        /*
         * Clear the buffers ready flag to make sure the device won't be started
         * by a QBUF on the video node on the other side of the pipeline.
         */
        spin_lock_irqsave(&video->irqlock, flags);
        pipe->buffers_ready &= ~(1 << video->pipe_index);
        spin_unlock_irqrestore(&video->irqlock, flags);

        mutex_lock(&pipe->lock);
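        /* The first video node to stop streaming brings stream_count back
         * down to num_inputs; stop the hardware and release the display list
         * at that point.
         */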
        if (--pipe->stream_count == pipe->num_inputs) {
                /* Stop the pipeline. */
                ret = vsp1_pipeline_stop(pipe);
                if (ret == -ETIMEDOUT)
                        dev_err(video->vsp1->dev, "pipeline stop timeout\n");

                vsp1_dl_list_put(pipe->dl);
                pipe->dl = NULL;
        }
        mutex_unlock(&pipe->lock);

        media_pipeline_stop(&video->video.entity);
        vsp1_video_pipeline_put(pipe);

        /* Remove all buffers from the IRQ queue. */
        spin_lock_irqsave(&video->irqlock, flags);
        list_for_each_entry(buffer, &video->irqqueue, queue)
                vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
        INIT_LIST_HEAD(&video->irqqueue);
        spin_unlock_irqrestore(&video->irqlock, flags);
}

static const struct vb2_ops vsp1_video_queue_qops = {
        .queue_setup = vsp1_video_queue_setup,
        .buf_prepare = vsp1_video_buffer_prepare,
        .buf_queue = vsp1_video_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = vsp1_video_start_streaming,
        .stop_streaming = vsp1_video_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int
vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);

        cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
                          | V4L2_CAP_VIDEO_CAPTURE_MPLANE
                          | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
                cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
                                 | V4L2_CAP_STREAMING;
        else
                cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
                                 | V4L2_CAP_STREAMING;

        strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
        strlcpy(cap->card, video->video.name, sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
                 dev_name(video->vsp1->dev));

        return 0;
}

static int
vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);

        if (format->type != video->queue.type)
                return -EINVAL;

        mutex_lock(&video->lock);
        format->fmt.pix_mp = video->rwpf->format;
        mutex_unlock(&video->lock);

        return 0;
}

static int
vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);

        if (format->type != video->queue.type)
                return -EINVAL;

        return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
}

static int
vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
        const struct vsp1_format_info *info;
        int ret;

        if (format->type != video->queue.type)
                return -EINVAL;

        ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
        if (ret < 0)
                return ret;

        mutex_lock(&video->lock);

        if (vb2_is_busy(&video->queue)) {
                ret = -EBUSY;
                goto done;
        }

        video->rwpf->format = format->fmt.pix_mp;
        video->rwpf->fmtinfo = info;

done:
        mutex_unlock(&video->lock);
        return ret;
}

static int
vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
        struct v4l2_fh *vfh = file->private_data;
        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
        struct media_device *mdev = &video->vsp1->media_dev;
        struct vsp1_pipeline *pipe;
        int ret;

        if (video->queue.owner && video->queue.owner != file->private_data)
                return -EBUSY;

        /* Get a pipeline for the video node and start streaming on it. No link
         * touching an entity in the pipeline can be activated or deactivated
         * once streaming is started.
         */
        mutex_lock(&mdev->graph_mutex);

        pipe = vsp1_video_pipeline_get(video);
        if (IS_ERR(pipe)) {
                mutex_unlock(&mdev->graph_mutex);
                return PTR_ERR(pipe);
        }

        ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
        if (ret < 0) {
                mutex_unlock(&mdev->graph_mutex);
                goto err_pipe;
        }

        mutex_unlock(&mdev->graph_mutex);

        /* Verify that the configured format matches the output of the connected
         * subdev.
         */
        ret = vsp1_video_verify_format(video);
        if (ret < 0)
                goto err_stop;

        /* Start the queue. */
        ret = vb2_streamon(&video->queue, type);
        if (ret < 0)
                goto err_stop;

        return 0;

err_stop:
        media_pipeline_stop(&video->video.entity);
err_pipe:
        vsp1_video_pipeline_put(pipe);
        return ret;
}

static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
        .vidioc_querycap                = vsp1_video_querycap,
        .vidioc_g_fmt_vid_cap_mplane    = vsp1_video_get_format,
        .vidioc_s_fmt_vid_cap_mplane    = vsp1_video_set_format,
        .vidioc_try_fmt_vid_cap_mplane  = vsp1_video_try_format,
        .vidioc_g_fmt_vid_out_mplane    = vsp1_video_get_format,
        .vidioc_s_fmt_vid_out_mplane    = vsp1_video_set_format,
        .vidioc_try_fmt_vid_out_mplane  = vsp1_video_try_format,
        .vidioc_reqbufs                 = vb2_ioctl_reqbufs,
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
        .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_create_bufs             = vb2_ioctl_create_bufs,
        .vidioc_prepare_buf             = vb2_ioctl_prepare_buf,
        .vidioc_streamon                = vsp1_video_streamon,
        .vidioc_streamoff               = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

static int vsp1_video_open(struct file *file)
{
        struct vsp1_video *video = video_drvdata(file);
        struct v4l2_fh *vfh;
        int ret = 0;

        vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
        if (vfh == NULL)
                return -ENOMEM;

        v4l2_fh_init(vfh, &video->video);
        v4l2_fh_add(vfh);

        file->private_data = vfh;

        ret = vsp1_device_get(video->vsp1);
        if (ret < 0) {
                v4l2_fh_del(vfh);
                kfree(vfh);
        }

        return ret;
}

static int vsp1_video_release(struct file *file)
{
        struct vsp1_video *video = video_drvdata(file);
        struct v4l2_fh *vfh = file->private_data;

        mutex_lock(&video->lock);
        if (video->queue.owner == vfh) {
                vb2_queue_release(&video->queue);
                video->queue.owner = NULL;
        }
        mutex_unlock(&video->lock);

        vsp1_device_put(video->vsp1);

        v4l2_fh_release(file);

        file->private_data = NULL;

        return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = video_ioctl2,
        .open = vsp1_video_open,
        .release = vsp1_video_release,
        .poll = vb2_fop_poll,
        .mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
                                     struct vsp1_rwpf *rwpf)
{
        struct vsp1_video *video;
        const char *direction;
        int ret;

        video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
        if (!video)
                return ERR_PTR(-ENOMEM);

        rwpf->video = video;

        video->vsp1 = vsp1;
        video->rwpf = rwpf;

        if (rwpf->entity.type == VSP1_ENTITY_RPF) {
                direction = "input";
                video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
                video->pad.flags = MEDIA_PAD_FL_SOURCE;
                video->video.vfl_dir = VFL_DIR_TX;
        } else {
                direction = "output";
                video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
                video->pad.flags = MEDIA_PAD_FL_SINK;
                video->video.vfl_dir = VFL_DIR_RX;
        }

        mutex_init(&video->lock);
        spin_lock_init(&video->irqlock);
        INIT_LIST_HEAD(&video->irqqueue);

        /* Initialize the media entity... */
        ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
        if (ret < 0)
                return ERR_PTR(ret);

        /* ... and the format ... */
        rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
        rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
        rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
        __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);

        /* ... and the video node... */
        video->video.v4l2_dev = &video->vsp1->v4l2_dev;
        video->video.fops = &vsp1_video_fops;
        snprintf(video->video.name, sizeof(video->video.name), "%s %s",
                 rwpf->entity.subdev.name, direction);
        video->video.vfl_type = VFL_TYPE_GRABBER;
        video->video.release = video_device_release_empty;
        video->video.ioctl_ops = &vsp1_video_ioctl_ops;

        video_set_drvdata(&video->video, video);

        video->queue.type = video->type;
        video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        video->queue.lock = &video->lock;
        video->queue.drv_priv = video;
        video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
        video->queue.ops = &vsp1_video_queue_qops;
        video->queue.mem_ops = &vb2_dma_contig_memops;
        video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        video->queue.dev = video->vsp1->dev;
        ret = vb2_queue_init(&video->queue);
        if (ret < 0) {
                dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
                goto error;
        }

        /* ... and register the video device. */
        video->video.queue = &video->queue;
        ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
        if (ret < 0) {
                dev_err(video->vsp1->dev, "failed to register video device\n");
                goto error;
        }

        return video;

error:
        vsp1_video_cleanup(video);
        return ERR_PTR(ret);
}

void vsp1_video_cleanup(struct vsp1_video *video)
{
        if (video_is_registered(&video->video))
                video_unregister_device(&video->video);

        media_entity_cleanup(&video->video.entity);
}