linux/drivers/media/platform/vsp1/vsp1_video.c
   1/*
   2 * vsp1_video.c  --  R-Car VSP1 Video Node
   3 *
   4 * Copyright (C) 2013-2015 Renesas Electronics Corporation
   5 *
   6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 */
  13
  14#include <linux/list.h>
  15#include <linux/module.h>
  16#include <linux/mutex.h>
  17#include <linux/slab.h>
  18#include <linux/v4l2-mediabus.h>
  19#include <linux/videodev2.h>
  20#include <linux/wait.h>
  21
  22#include <media/media-entity.h>
  23#include <media/v4l2-dev.h>
  24#include <media/v4l2-fh.h>
  25#include <media/v4l2-ioctl.h>
  26#include <media/v4l2-subdev.h>
  27#include <media/videobuf2-v4l2.h>
  28#include <media/videobuf2-dma-contig.h>
  29
  30#include "vsp1.h"
  31#include "vsp1_bru.h"
  32#include "vsp1_dl.h"
  33#include "vsp1_entity.h"
  34#include "vsp1_hgo.h"
  35#include "vsp1_hgt.h"
  36#include "vsp1_pipe.h"
  37#include "vsp1_rwpf.h"
  38#include "vsp1_uds.h"
  39#include "vsp1_video.h"
  40
  41#define VSP1_VIDEO_DEF_FORMAT           V4L2_PIX_FMT_YUYV
  42#define VSP1_VIDEO_DEF_WIDTH            1024
  43#define VSP1_VIDEO_DEF_HEIGHT           768
  44
  45#define VSP1_VIDEO_MIN_WIDTH            2U
  46#define VSP1_VIDEO_MAX_WIDTH            8190U
  47#define VSP1_VIDEO_MIN_HEIGHT           2U
  48#define VSP1_VIDEO_MAX_HEIGHT           8190U
  49
  50/* -----------------------------------------------------------------------------
  51 * Helper functions
  52 */
  53
  54static struct v4l2_subdev *
  55vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
  56{
  57        struct media_pad *remote;
  58
  59        remote = media_entity_remote_pad(local);
  60        if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
  61                return NULL;
  62
  63        if (pad)
  64                *pad = remote->index;
  65
  66        return media_entity_to_v4l2_subdev(remote->entity);
  67}
  68
  69static int vsp1_video_verify_format(struct vsp1_video *video)
  70{
  71        struct v4l2_subdev_format fmt;
  72        struct v4l2_subdev *subdev;
  73        int ret;
  74
  75        subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
  76        if (subdev == NULL)
  77                return -EINVAL;
  78
  79        fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
  80        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
  81        if (ret < 0)
  82                return ret == -ENOIOCTLCMD ? -EINVAL : ret;
  83
  84        if (video->rwpf->fmtinfo->mbus != fmt.format.code ||
  85            video->rwpf->format.height != fmt.format.height ||
  86            video->rwpf->format.width != fmt.format.width)
  87                return -EINVAL;
  88
  89        return 0;
  90}
  91
  92static int __vsp1_video_try_format(struct vsp1_video *video,
  93                                   struct v4l2_pix_format_mplane *pix,
  94                                   const struct vsp1_format_info **fmtinfo)
  95{
  96        static const u32 xrgb_formats[][2] = {
  97                { V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 },
  98                { V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 },
  99                { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 },
 100                { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 },
 101        };
 102
 103        const struct vsp1_format_info *info;
 104        unsigned int width = pix->width;
 105        unsigned int height = pix->height;
 106        unsigned int i;
 107
 108        /*
 109         * Backward compatibility: replace deprecated RGB formats by their XRGB
 110         * equivalent. This selects the format older userspace applications want
 111         * while still exposing the new format.
 112         */
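             /*
              * For instance, a V4L2_PIX_FMT_RGB32 request from an older
              * application is silently converted to V4L2_PIX_FMT_XRGB32 here
              * before the format information is looked up.
              */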
 113        for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) {
 114                if (xrgb_formats[i][0] == pix->pixelformat) {
 115                        pix->pixelformat = xrgb_formats[i][1];
 116                        break;
 117                }
 118        }
 119
 120        /*
 121         * Retrieve format information and select the default format if the
 122         * requested format isn't supported.
 123         */
 124        info = vsp1_get_format_info(video->vsp1, pix->pixelformat);
 125        if (info == NULL)
 126                info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT);
 127
 128        pix->pixelformat = info->fourcc;
 129        pix->colorspace = V4L2_COLORSPACE_SRGB;
 130        pix->field = V4L2_FIELD_NONE;
 131
 132        if (info->fourcc == V4L2_PIX_FMT_HSV24 ||
 133            info->fourcc == V4L2_PIX_FMT_HSV32)
 134                pix->hsv_enc = V4L2_HSV_ENC_256;
 135
 136        memset(pix->reserved, 0, sizeof(pix->reserved));
 137
 138        /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
 139        width = round_down(width, info->hsub);
 140        height = round_down(height, info->vsub);
 141
 142        /* Clamp the width and height. */
 143        pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
 144        pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
 145                            VSP1_VIDEO_MAX_HEIGHT);
 146
 147        /*
 148         * Compute and clamp the stride and image size. While not documented in
 149         * the datasheet, strides not aligned to a multiple of 128 bytes result
 150         * in image corruption.
 151         */
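             /*
              * For example, with the default 1024x768 YUYV format (packed,
              * 16 bits per pixel, single plane) and no stride requested, the
              * minimum stride is 1024 * 16 / 8 = 2048 bytes. As 2048 is already
              * a multiple of 128 it is used as-is, and sizeimage becomes
              * 2048 * 768 = 1572864 bytes.
              */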
 152        for (i = 0; i < min(info->planes, 2U); ++i) {
 153                unsigned int hsub = i > 0 ? info->hsub : 1;
 154                unsigned int vsub = i > 0 ? info->vsub : 1;
 155                unsigned int align = 128;
 156                unsigned int bpl;
 157
 158                bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
 159                              pix->width / hsub * info->bpp[i] / 8,
 160                              round_down(65535U, align));
 161
 162                pix->plane_fmt[i].bytesperline = round_up(bpl, align);
 163                pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
 164                                            * pix->height / vsub;
 165        }
 166
 167        if (info->planes == 3) {
 168                /* The second and third planes must have the same stride. */
 169                pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
 170                pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
 171        }
 172
 173        pix->num_planes = info->planes;
 174
 175        if (fmtinfo)
 176                *fmtinfo = info;
 177
 178        return 0;
 179}
 180
 181/* -----------------------------------------------------------------------------
 182 * VSP1 Partition Algorithm support
 183 */
 184
 185/**
 186 * vsp1_video_calculate_partition - Calculate the active partition output window
 187 *
 188 * @pipe: the pipeline
 189 * @partition: partition that will hold the calculated values
 190 * @div_size: pre-determined maximum partition division size
 191 * @index: partition index
 192 */
 193static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe,
 194                                           struct vsp1_partition *partition,
 195                                           unsigned int div_size,
 196                                           unsigned int index)
 197{
 198        const struct v4l2_mbus_framefmt *format;
 199        struct vsp1_partition_window window;
 200        unsigned int modulus;
 201
 202        /*
  203         * Partitions are computed on the size before rotation, so use the format
 204         * at the WPF sink.
 205         */
 206        format = vsp1_entity_get_pad_format(&pipe->output->entity,
 207                                            pipe->output->entity.config,
 208                                            RWPF_PAD_SINK);
 209
 210        /* A single partition simply processes the output size in full. */
 211        if (pipe->partitions <= 1) {
 212                window.left = 0;
 213                window.width = format->width;
 214
 215                vsp1_pipeline_propagate_partition(pipe, partition, index,
 216                                                  &window);
 217                return;
 218        }
 219
 220        /* Initialise the partition with sane starting conditions. */
 221        window.left = index * div_size;
 222        window.width = div_size;
 223
 224        modulus = format->width % div_size;
 225
 226        /*
  227         * We need to prevent the last partition from being smaller than the
  228         * *minimum* width supported by the hardware.
  229         *
  230         * If the modulus is less than half of the partition size, the
  231         * penultimate partition is reduced to half and that half is added
  232         * to the final partition: |1234|1234|1234|12|341|
  233         * preventing this:        |1234|1234|1234|1234|1|.
 234         */
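             /*
              * For illustration, assume a 1024 pixel div_size and a 4224 pixel
              * wide frame: pipe->partitions is 5 and the modulus is 128, below
              * div_size / 2, so partition 3 is halved to 512 pixels and
              * partition 4 grows to 512 + 128 = 640 pixels, starting 512 pixels
              * earlier. The resulting widths are 1024, 1024, 1024, 512 and 640.
              */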
 235        if (modulus) {
 236                /*
  237                 * pipe->partitions is 1-based, whilst index is 0-based.
 238                 * Normalise this locally.
 239                 */
 240                unsigned int partitions = pipe->partitions - 1;
 241
 242                if (modulus < div_size / 2) {
 243                        if (index == partitions - 1) {
 244                                /* Halve the penultimate partition. */
 245                                window.width = div_size / 2;
 246                        } else if (index == partitions) {
 247                                /* Increase the final partition. */
 248                                window.width = (div_size / 2) + modulus;
 249                                window.left -= div_size / 2;
 250                        }
 251                } else if (index == partitions) {
 252                        window.width = modulus;
 253                }
 254        }
 255
 256        vsp1_pipeline_propagate_partition(pipe, partition, index, &window);
 257}
 258
 259static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
 260{
 261        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
 262        const struct v4l2_mbus_framefmt *format;
 263        struct vsp1_entity *entity;
 264        unsigned int div_size;
 265        unsigned int i;
 266
 267        /*
  268         * Partitions are computed on the size before rotation, so use the format
 269         * at the WPF sink.
 270         */
 271        format = vsp1_entity_get_pad_format(&pipe->output->entity,
 272                                            pipe->output->entity.config,
 273                                            RWPF_PAD_SINK);
 274        div_size = format->width;
 275
 276        /*
  277         * Only Gen3 hardware requires image partitioning; Gen2 will operate
 278         * with a single partition that covers the whole output.
 279         */
 280        if (vsp1->info->gen == 3) {
 281                list_for_each_entry(entity, &pipe->entities, list_pipe) {
 282                        unsigned int entity_max;
 283
 284                        if (!entity->ops->max_width)
 285                                continue;
 286
 287                        entity_max = entity->ops->max_width(entity, pipe);
 288                        if (entity_max)
 289                                div_size = min(div_size, entity_max);
 290                }
 291        }
 292
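             /*
              * If, say, a UDS in a Gen3 pipeline reports a 2048 pixel limit and
              * the output is 3840 pixels wide, div_size drops to 2048 and the
              * division below yields two partitions.
              */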
 293        pipe->partitions = DIV_ROUND_UP(format->width, div_size);
 294        pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table),
 295                                   GFP_KERNEL);
 296        if (!pipe->part_table)
 297                return -ENOMEM;
 298
 299        for (i = 0; i < pipe->partitions; ++i)
 300                vsp1_video_calculate_partition(pipe, &pipe->part_table[i],
 301                                               div_size, i);
 302
 303        return 0;
 304}
 305
 306/* -----------------------------------------------------------------------------
 307 * Pipeline Management
 308 */
 309
 310/*
 311 * vsp1_video_complete_buffer - Complete the current buffer
 312 * @video: the video node
 313 *
 314 * This function completes the current buffer by filling its sequence number,
 315 * time stamp and payload size, and hands it back to the videobuf core.
 316 *
 317 * When operating in DU output mode (deep pipeline to the DU through the LIF),
 318 * the VSP1 needs to constantly supply frames to the display. In that case, if
 319 * no other buffer is queued, reuse the one that has just been processed instead
 320 * of handing it back to the videobuf core.
 321 *
 322 * Return the next queued buffer or NULL if the queue is empty.
 323 */
 324static struct vsp1_vb2_buffer *
 325vsp1_video_complete_buffer(struct vsp1_video *video)
 326{
 327        struct vsp1_pipeline *pipe = video->rwpf->pipe;
 328        struct vsp1_vb2_buffer *next = NULL;
 329        struct vsp1_vb2_buffer *done;
 330        unsigned long flags;
 331        unsigned int i;
 332
 333        spin_lock_irqsave(&video->irqlock, flags);
 334
 335        if (list_empty(&video->irqqueue)) {
 336                spin_unlock_irqrestore(&video->irqlock, flags);
 337                return NULL;
 338        }
 339
 340        done = list_first_entry(&video->irqqueue,
 341                                struct vsp1_vb2_buffer, queue);
 342
  343        /* In DU output mode, reuse the buffer if the list is singular. */
 344        if (pipe->lif && list_is_singular(&video->irqqueue)) {
 345                spin_unlock_irqrestore(&video->irqlock, flags);
 346                return done;
 347        }
 348
 349        list_del(&done->queue);
 350
 351        if (!list_empty(&video->irqqueue))
 352                next = list_first_entry(&video->irqqueue,
 353                                        struct vsp1_vb2_buffer, queue);
 354
 355        spin_unlock_irqrestore(&video->irqlock, flags);
 356
 357        done->buf.sequence = pipe->sequence;
 358        done->buf.vb2_buf.timestamp = ktime_get_ns();
 359        for (i = 0; i < done->buf.vb2_buf.num_planes; ++i)
 360                vb2_set_plane_payload(&done->buf.vb2_buf, i,
 361                                      vb2_plane_size(&done->buf.vb2_buf, i));
 362        vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE);
 363
 364        return next;
 365}
 366
 367static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
 368                                 struct vsp1_rwpf *rwpf)
 369{
 370        struct vsp1_video *video = rwpf->video;
 371        struct vsp1_vb2_buffer *buf;
 372
 373        buf = vsp1_video_complete_buffer(video);
 374        if (buf == NULL)
 375                return;
 376
 377        video->rwpf->mem = buf->mem;
 378        pipe->buffers_ready |= 1 << video->pipe_index;
 379}
 380
 381static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe,
 382                                              struct vsp1_dl_list *dl,
 383                                              unsigned int partition)
 384{
 385        struct vsp1_entity *entity;
 386
 387        pipe->partition = &pipe->part_table[partition];
 388
 389        list_for_each_entry(entity, &pipe->entities, list_pipe) {
 390                if (entity->ops->configure)
 391                        entity->ops->configure(entity, pipe, dl,
 392                                               VSP1_ENTITY_PARAMS_PARTITION);
 393        }
 394}
 395
 396static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe)
 397{
 398        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
 399        struct vsp1_entity *entity;
 400        unsigned int partition;
 401
 402        if (!pipe->dl)
 403                pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
 404
 405        /*
 406         * Start with the runtime parameters as the configure operation can
 407         * compute/cache information needed when configuring partitions. This
 408         * is the case with flipping in the WPF.
 409         */
 410        list_for_each_entry(entity, &pipe->entities, list_pipe) {
 411                if (entity->ops->configure)
 412                        entity->ops->configure(entity, pipe, pipe->dl,
 413                                               VSP1_ENTITY_PARAMS_RUNTIME);
 414        }
 415
 416        /* Run the first partition */
 417        vsp1_video_pipeline_run_partition(pipe, pipe->dl, 0);
 418
 419        /* Process consecutive partitions as necessary */
 420        for (partition = 1; partition < pipe->partitions; ++partition) {
 421                struct vsp1_dl_list *dl;
 422
 423                dl = vsp1_dl_list_get(pipe->output->dlm);
 424
 425                /*
  426                 * An incomplete chain will still function, but will only output
  427                 * the partitions for which a dl was available. The frame end
 428                 * interrupt will be marked on the last dl in the chain.
 429                 */
 430                if (!dl) {
 431                        dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n");
 432                        break;
 433                }
 434
 435                vsp1_video_pipeline_run_partition(pipe, dl, partition);
 436                vsp1_dl_list_add_chain(pipe->dl, dl);
 437        }
 438
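             /*
              * With three partitions, for instance, the head display list
              * carries partition 0 and two chained lists carry partitions 1
              * and 2.
              */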
  439        /* The chain is complete; commit the head display list. */
 440        vsp1_dl_list_commit(pipe->dl);
 441        pipe->dl = NULL;
 442
 443        vsp1_pipeline_run(pipe);
 444}
 445
 446static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe,
 447                                          bool completed)
 448{
 449        struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
 450        enum vsp1_pipeline_state state;
 451        unsigned long flags;
 452        unsigned int i;
 453
  454        /* M2M pipelines should never reach this point with an incomplete frame. */
 455        WARN_ON_ONCE(!completed);
 456
 457        spin_lock_irqsave(&pipe->irqlock, flags);
 458
 459        /* Complete buffers on all video nodes. */
 460        for (i = 0; i < vsp1->info->rpf_count; ++i) {
 461                if (!pipe->inputs[i])
 462                        continue;
 463
 464                vsp1_video_frame_end(pipe, pipe->inputs[i]);
 465        }
 466
 467        vsp1_video_frame_end(pipe, pipe->output);
 468
 469        state = pipe->state;
 470        pipe->state = VSP1_PIPELINE_STOPPED;
 471
 472        /*
 473         * If a stop has been requested, mark the pipeline as stopped and
 474         * return. Otherwise restart the pipeline if ready.
 475         */
 476        if (state == VSP1_PIPELINE_STOPPING)
 477                wake_up(&pipe->wq);
 478        else if (vsp1_pipeline_ready(pipe))
 479                vsp1_video_pipeline_run(pipe);
 480
 481        spin_unlock_irqrestore(&pipe->irqlock, flags);
 482}
 483
 484static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe,
 485                                            struct vsp1_rwpf *input,
 486                                            struct vsp1_rwpf *output)
 487{
 488        struct media_entity_enum ent_enum;
 489        struct vsp1_entity *entity;
 490        struct media_pad *pad;
 491        struct vsp1_bru *bru = NULL;
 492        int ret;
 493
 494        ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev);
 495        if (ret < 0)
 496                return ret;
 497
 498        /*
  499         * The main data path doesn't include the HGO or HGT, so use
 500         * vsp1_entity_remote_pad() to traverse the graph.
 501         */
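             /*
              * A branch might look like RPF -> UDS -> BRU -> WPF, for instance,
              * with any HGO or HGT connections skipped during the walk.
              */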
 502
 503        pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
 504
 505        while (1) {
 506                if (pad == NULL) {
 507                        ret = -EPIPE;
 508                        goto out;
 509                }
 510
  511                /* We've reached a video node; that shouldn't have happened. */
 512                if (!is_media_entity_v4l2_subdev(pad->entity)) {
 513                        ret = -EPIPE;
 514                        goto out;
 515                }
 516
 517                entity = to_vsp1_entity(
 518                        media_entity_to_v4l2_subdev(pad->entity));
 519
 520                /*
  521                 * A BRU or BRS is present in the pipeline; store its input pad
 522                 * number in the input RPF for use when configuring the RPF.
 523                 */
 524                if (entity->type == VSP1_ENTITY_BRU ||
 525                    entity->type == VSP1_ENTITY_BRS) {
 526                        /* BRU and BRS can't be chained. */
 527                        if (bru) {
 528                                ret = -EPIPE;
 529                                goto out;
 530                        }
 531
 532                        bru = to_bru(&entity->subdev);
 533                        bru->inputs[pad->index].rpf = input;
 534                        input->bru_input = pad->index;
 535                }
 536
 537                /* We've reached the WPF, we're done. */
 538                if (entity->type == VSP1_ENTITY_WPF)
 539                        break;
 540
 541                /* Ensure the branch has no loop. */
 542                if (media_entity_enum_test_and_set(&ent_enum,
 543                                                   &entity->subdev.entity)) {
 544                        ret = -EPIPE;
 545                        goto out;
 546                }
 547
 548                /* UDS can't be chained. */
 549                if (entity->type == VSP1_ENTITY_UDS) {
 550                        if (pipe->uds) {
 551                                ret = -EPIPE;
 552                                goto out;
 553                        }
 554
 555                        pipe->uds = entity;
 556                        pipe->uds_input = bru ? &bru->entity : &input->entity;
 557                }
 558
 559                /* Follow the source link, ignoring any HGO or HGT. */
 560                pad = &entity->pads[entity->source_pad];
 561                pad = vsp1_entity_remote_pad(pad);
 562        }
 563
 564        /* The last entity must be the output WPF. */
 565        if (entity != &output->entity)
 566                ret = -EPIPE;
 567
 568out:
 569        media_entity_enum_cleanup(&ent_enum);
 570
 571        return ret;
 572}
 573
 574static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe,
 575                                     struct vsp1_video *video)
 576{
 577        struct media_graph graph;
 578        struct media_entity *entity = &video->video.entity;
 579        struct media_device *mdev = entity->graph_obj.mdev;
 580        unsigned int i;
 581        int ret;
 582
 583        /* Walk the graph to locate the entities and video nodes. */
 584        ret = media_graph_walk_init(&graph, mdev);
 585        if (ret)
 586                return ret;
 587
 588        media_graph_walk_start(&graph, entity);
 589
 590        while ((entity = media_graph_walk_next(&graph))) {
 591                struct v4l2_subdev *subdev;
 592                struct vsp1_rwpf *rwpf;
 593                struct vsp1_entity *e;
 594
 595                if (!is_media_entity_v4l2_subdev(entity))
 596                        continue;
 597
 598                subdev = media_entity_to_v4l2_subdev(entity);
 599                e = to_vsp1_entity(subdev);
 600                list_add_tail(&e->list_pipe, &pipe->entities);
 601
 602                switch (e->type) {
 603                case VSP1_ENTITY_RPF:
 604                        rwpf = to_rwpf(subdev);
 605                        pipe->inputs[rwpf->entity.index] = rwpf;
 606                        rwpf->video->pipe_index = ++pipe->num_inputs;
 607                        rwpf->pipe = pipe;
 608                        break;
 609
 610                case VSP1_ENTITY_WPF:
 611                        rwpf = to_rwpf(subdev);
 612                        pipe->output = rwpf;
 613                        rwpf->video->pipe_index = 0;
 614                        rwpf->pipe = pipe;
 615                        break;
 616
 617                case VSP1_ENTITY_LIF:
 618                        pipe->lif = e;
 619                        break;
 620
 621                case VSP1_ENTITY_BRU:
 622                case VSP1_ENTITY_BRS:
 623                        pipe->bru = e;
 624                        break;
 625
 626                case VSP1_ENTITY_HGO:
 627                        pipe->hgo = e;
 628                        to_hgo(subdev)->histo.pipe = pipe;
 629                        break;
 630
 631                case VSP1_ENTITY_HGT:
 632                        pipe->hgt = e;
 633                        to_hgt(subdev)->histo.pipe = pipe;
 634                        break;
 635
 636                default:
 637                        break;
 638                }
 639        }
 640
 641        media_graph_walk_cleanup(&graph);
 642
 643        /* We need one output and at least one input. */
 644        if (pipe->num_inputs == 0 || !pipe->output)
 645                return -EPIPE;
 646
 647        /*
 648         * Follow links downstream for each input and make sure the graph
 649         * contains no loop and that all branches end at the output WPF.
 650         */
 651        for (i = 0; i < video->vsp1->info->rpf_count; ++i) {
 652                if (!pipe->inputs[i])
 653                        continue;
 654
 655                ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i],
 656                                                       pipe->output);
 657                if (ret < 0)
 658                        return ret;
 659        }
 660
 661        return 0;
 662}
 663
 664static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe,
 665                                    struct vsp1_video *video)
 666{
 667        vsp1_pipeline_init(pipe);
 668
 669        pipe->frame_end = vsp1_video_pipeline_frame_end;
 670
 671        return vsp1_video_pipeline_build(pipe, video);
 672}
 673
 674static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video)
 675{
 676        struct vsp1_pipeline *pipe;
 677        int ret;
 678
 679        /*
 680         * Get a pipeline object for the video node. If a pipeline has already
 681         * been allocated just increment its reference count and return it.
  682         * Otherwise allocate a new pipeline and initialize it; it will be freed
 683         * when the last reference is released.
 684         */
 685        if (!video->rwpf->pipe) {
 686                pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
 687                if (!pipe)
 688                        return ERR_PTR(-ENOMEM);
 689
 690                ret = vsp1_video_pipeline_init(pipe, video);
 691                if (ret < 0) {
 692                        vsp1_pipeline_reset(pipe);
 693                        kfree(pipe);
 694                        return ERR_PTR(ret);
 695                }
 696        } else {
 697                pipe = video->rwpf->pipe;
 698                kref_get(&pipe->kref);
 699        }
 700
 701        return pipe;
 702}
 703
 704static void vsp1_video_pipeline_release(struct kref *kref)
 705{
 706        struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref);
 707
 708        vsp1_pipeline_reset(pipe);
 709        kfree(pipe);
 710}
 711
 712static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe)
 713{
 714        struct media_device *mdev = &pipe->output->entity.vsp1->media_dev;
 715
 716        mutex_lock(&mdev->graph_mutex);
 717        kref_put(&pipe->kref, vsp1_video_pipeline_release);
 718        mutex_unlock(&mdev->graph_mutex);
 719}
 720
 721/* -----------------------------------------------------------------------------
 722 * videobuf2 Queue Operations
 723 */
 724
 725static int
 726vsp1_video_queue_setup(struct vb2_queue *vq,
 727                       unsigned int *nbuffers, unsigned int *nplanes,
 728                       unsigned int sizes[], struct device *alloc_devs[])
 729{
 730        struct vsp1_video *video = vb2_get_drv_priv(vq);
 731        const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
 732        unsigned int i;
 733
 734        if (*nplanes) {
 735                if (*nplanes != format->num_planes)
 736                        return -EINVAL;
 737
 738                for (i = 0; i < *nplanes; i++)
 739                        if (sizes[i] < format->plane_fmt[i].sizeimage)
 740                                return -EINVAL;
 741                return 0;
 742        }
 743
 744        *nplanes = format->num_planes;
 745
 746        for (i = 0; i < format->num_planes; ++i)
 747                sizes[i] = format->plane_fmt[i].sizeimage;
 748
 749        return 0;
 750}
 751
 752static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
 753{
 754        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 755        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
 756        struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
 757        const struct v4l2_pix_format_mplane *format = &video->rwpf->format;
 758        unsigned int i;
 759
 760        if (vb->num_planes < format->num_planes)
 761                return -EINVAL;
 762
 763        for (i = 0; i < vb->num_planes; ++i) {
 764                buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
 765
 766                if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage)
 767                        return -EINVAL;
 768        }
 769
 770        for ( ; i < 3; ++i)
 771                buf->mem.addr[i] = 0;
 772
 773        return 0;
 774}
 775
 776static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
 777{
 778        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
 779        struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
 780        struct vsp1_pipeline *pipe = video->rwpf->pipe;
 781        struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf);
 782        unsigned long flags;
 783        bool empty;
 784
 785        spin_lock_irqsave(&video->irqlock, flags);
 786        empty = list_empty(&video->irqqueue);
 787        list_add_tail(&buf->queue, &video->irqqueue);
 788        spin_unlock_irqrestore(&video->irqlock, flags);
 789
 790        if (!empty)
 791                return;
 792
 793        spin_lock_irqsave(&pipe->irqlock, flags);
 794
 795        video->rwpf->mem = buf->mem;
 796        pipe->buffers_ready |= 1 << video->pipe_index;
 797
 798        if (vb2_is_streaming(&video->queue) &&
 799            vsp1_pipeline_ready(pipe))
 800                vsp1_video_pipeline_run(pipe);
 801
 802        spin_unlock_irqrestore(&pipe->irqlock, flags);
 803}
 804
 805static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
 806{
 807        struct vsp1_entity *entity;
 808        int ret;
 809
  810        /* Determine this pipeline's sizes for image partitioning support. */
 811        ret = vsp1_video_pipeline_setup_partitions(pipe);
 812        if (ret < 0)
 813                return ret;
 814
 815        /* Prepare the display list. */
 816        pipe->dl = vsp1_dl_list_get(pipe->output->dlm);
 817        if (!pipe->dl)
 818                return -ENOMEM;
 819
 820        if (pipe->uds) {
 821                struct vsp1_uds *uds = to_uds(&pipe->uds->subdev);
 822
 823                /*
 824                 * If a BRU or BRS is present in the pipeline before the UDS,
 825                 * the alpha component doesn't need to be scaled as the BRU and
 826                 * BRS output alpha value is fixed to 255. Otherwise we need to
 827                 * scale the alpha component only when available at the input
 828                 * RPF.
 829                 */
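                     /*
                      * For example, an ARGB32 RPF feeding the UDS directly needs
                      * alpha scaling, while the same RPF routed through a BRU
                      * first does not.
                      */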
 830                if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
 831                    pipe->uds_input->type == VSP1_ENTITY_BRS) {
 832                        uds->scale_alpha = false;
 833                } else {
 834                        struct vsp1_rwpf *rpf =
 835                                to_rwpf(&pipe->uds_input->subdev);
 836
 837                        uds->scale_alpha = rpf->fmtinfo->alpha;
 838                }
 839        }
 840
 841        list_for_each_entry(entity, &pipe->entities, list_pipe) {
 842                vsp1_entity_route_setup(entity, pipe, pipe->dl);
 843
 844                if (entity->ops->configure)
 845                        entity->ops->configure(entity, pipe, pipe->dl,
 846                                               VSP1_ENTITY_PARAMS_INIT);
 847        }
 848
 849        return 0;
 850}
 851
 852static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
 853{
 854        struct vsp1_video *video = pipe->output->video;
 855        struct vsp1_vb2_buffer *buffer;
 856        unsigned long flags;
 857
 858        /* Remove all buffers from the IRQ queue. */
 859        spin_lock_irqsave(&video->irqlock, flags);
 860        list_for_each_entry(buffer, &video->irqqueue, queue)
 861                vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
 862        INIT_LIST_HEAD(&video->irqqueue);
 863        spin_unlock_irqrestore(&video->irqlock, flags);
 864
 865        /* Release our partition table allocation */
 866        mutex_lock(&pipe->lock);
 867        kfree(pipe->part_table);
 868        pipe->part_table = NULL;
 869        mutex_unlock(&pipe->lock);
 870}
 871
 872static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
 873{
 874        struct vsp1_video *video = vb2_get_drv_priv(vq);
 875        struct vsp1_pipeline *pipe = video->rwpf->pipe;
 876        bool start_pipeline = false;
 877        unsigned long flags;
 878        int ret;
 879
 880        mutex_lock(&pipe->lock);
 881        if (pipe->stream_count == pipe->num_inputs) {
 882                ret = vsp1_video_setup_pipeline(pipe);
 883                if (ret < 0) {
 884                        mutex_unlock(&pipe->lock);
 885                        vsp1_video_cleanup_pipeline(pipe);
 886                        return ret;
 887                }
 888
 889                start_pipeline = true;
 890        }
 891
 892        pipe->stream_count++;
 893        mutex_unlock(&pipe->lock);
 894
 895        /*
 896         * vsp1_pipeline_ready() is not sufficient to establish that all streams
 897         * are prepared and the pipeline is configured, as multiple streams
  898         * can race through streamon with buffers already queued; therefore we
 899         * don't even attempt to start the pipeline until the last stream has
 900         * called through here.
 901         */
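             /*
              * With two RPF inputs, for instance, num_inputs is 2, so the
              * pipeline is set up and started only once the last of the three
              * video nodes (two RPF nodes plus the WPF node) has reached this
              * point.
              */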
 902        if (!start_pipeline)
 903                return 0;
 904
 905        spin_lock_irqsave(&pipe->irqlock, flags);
 906        if (vsp1_pipeline_ready(pipe))
 907                vsp1_video_pipeline_run(pipe);
 908        spin_unlock_irqrestore(&pipe->irqlock, flags);
 909
 910        return 0;
 911}
 912
 913static void vsp1_video_stop_streaming(struct vb2_queue *vq)
 914{
 915        struct vsp1_video *video = vb2_get_drv_priv(vq);
 916        struct vsp1_pipeline *pipe = video->rwpf->pipe;
 917        unsigned long flags;
 918        int ret;
 919
 920        /*
 921         * Clear the buffers ready flag to make sure the device won't be started
 922         * by a QBUF on the video node on the other side of the pipeline.
 923         */
 924        spin_lock_irqsave(&video->irqlock, flags);
 925        pipe->buffers_ready &= ~(1 << video->pipe_index);
 926        spin_unlock_irqrestore(&video->irqlock, flags);
 927
 928        mutex_lock(&pipe->lock);
 929        if (--pipe->stream_count == pipe->num_inputs) {
 930                /* Stop the pipeline. */
 931                ret = vsp1_pipeline_stop(pipe);
 932                if (ret == -ETIMEDOUT)
 933                        dev_err(video->vsp1->dev, "pipeline stop timeout\n");
 934
 935                vsp1_dl_list_put(pipe->dl);
 936                pipe->dl = NULL;
 937        }
 938        mutex_unlock(&pipe->lock);
 939
 940        media_pipeline_stop(&video->video.entity);
 941        vsp1_video_cleanup_pipeline(pipe);
 942        vsp1_video_pipeline_put(pipe);
 943}
 944
 945static const struct vb2_ops vsp1_video_queue_qops = {
 946        .queue_setup = vsp1_video_queue_setup,
 947        .buf_prepare = vsp1_video_buffer_prepare,
 948        .buf_queue = vsp1_video_buffer_queue,
 949        .wait_prepare = vb2_ops_wait_prepare,
 950        .wait_finish = vb2_ops_wait_finish,
 951        .start_streaming = vsp1_video_start_streaming,
 952        .stop_streaming = vsp1_video_stop_streaming,
 953};
 954
 955/* -----------------------------------------------------------------------------
 956 * V4L2 ioctls
 957 */
 958
 959static int
 960vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
 961{
 962        struct v4l2_fh *vfh = file->private_data;
 963        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
 964
 965        cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
 966                          | V4L2_CAP_VIDEO_CAPTURE_MPLANE
 967                          | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
 968
 969        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
 970                cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
 971                                 | V4L2_CAP_STREAMING;
 972        else
 973                cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
 974                                 | V4L2_CAP_STREAMING;
 975
 976        strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
 977        strlcpy(cap->card, video->video.name, sizeof(cap->card));
 978        snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
 979                 dev_name(video->vsp1->dev));
 980
 981        return 0;
 982}
 983
 984static int
 985vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
 986{
 987        struct v4l2_fh *vfh = file->private_data;
 988        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
 989
 990        if (format->type != video->queue.type)
 991                return -EINVAL;
 992
 993        mutex_lock(&video->lock);
 994        format->fmt.pix_mp = video->rwpf->format;
 995        mutex_unlock(&video->lock);
 996
 997        return 0;
 998}
 999
1000static int
1001vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
1002{
1003        struct v4l2_fh *vfh = file->private_data;
1004        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
1005
1006        if (format->type != video->queue.type)
1007                return -EINVAL;
1008
1009        return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
1010}
1011
1012static int
1013vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
1014{
1015        struct v4l2_fh *vfh = file->private_data;
1016        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
1017        const struct vsp1_format_info *info;
1018        int ret;
1019
1020        if (format->type != video->queue.type)
1021                return -EINVAL;
1022
1023        ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
1024        if (ret < 0)
1025                return ret;
1026
1027        mutex_lock(&video->lock);
1028
1029        if (vb2_is_busy(&video->queue)) {
1030                ret = -EBUSY;
1031                goto done;
1032        }
1033
1034        video->rwpf->format = format->fmt.pix_mp;
1035        video->rwpf->fmtinfo = info;
1036
1037done:
1038        mutex_unlock(&video->lock);
1039        return ret;
1040}
1041
1042static int
1043vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1044{
1045        struct v4l2_fh *vfh = file->private_data;
1046        struct vsp1_video *video = to_vsp1_video(vfh->vdev);
1047        struct media_device *mdev = &video->vsp1->media_dev;
1048        struct vsp1_pipeline *pipe;
1049        int ret;
1050
1051        if (video->queue.owner && video->queue.owner != file->private_data)
1052                return -EBUSY;
1053
1054        /*
1055         * Get a pipeline for the video node and start streaming on it. No link
1056         * touching an entity in the pipeline can be activated or deactivated
1057         * once streaming is started.
1058         */
1059        mutex_lock(&mdev->graph_mutex);
1060
1061        pipe = vsp1_video_pipeline_get(video);
1062        if (IS_ERR(pipe)) {
1063                mutex_unlock(&mdev->graph_mutex);
1064                return PTR_ERR(pipe);
1065        }
1066
1067        ret = __media_pipeline_start(&video->video.entity, &pipe->pipe);
1068        if (ret < 0) {
1069                mutex_unlock(&mdev->graph_mutex);
1070                goto err_pipe;
1071        }
1072
1073        mutex_unlock(&mdev->graph_mutex);
1074
1075        /*
1076         * Verify that the configured format matches the output of the connected
1077         * subdev.
1078         */
1079        ret = vsp1_video_verify_format(video);
1080        if (ret < 0)
1081                goto err_stop;
1082
1083        /* Start the queue. */
1084        ret = vb2_streamon(&video->queue, type);
1085        if (ret < 0)
1086                goto err_stop;
1087
1088        return 0;
1089
1090err_stop:
1091        media_pipeline_stop(&video->video.entity);
1092err_pipe:
1093        vsp1_video_pipeline_put(pipe);
1094        return ret;
1095}
1096
1097static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
1098        .vidioc_querycap                = vsp1_video_querycap,
1099        .vidioc_g_fmt_vid_cap_mplane    = vsp1_video_get_format,
1100        .vidioc_s_fmt_vid_cap_mplane    = vsp1_video_set_format,
1101        .vidioc_try_fmt_vid_cap_mplane  = vsp1_video_try_format,
1102        .vidioc_g_fmt_vid_out_mplane    = vsp1_video_get_format,
1103        .vidioc_s_fmt_vid_out_mplane    = vsp1_video_set_format,
1104        .vidioc_try_fmt_vid_out_mplane  = vsp1_video_try_format,
1105        .vidioc_reqbufs                 = vb2_ioctl_reqbufs,
1106        .vidioc_querybuf                = vb2_ioctl_querybuf,
1107        .vidioc_qbuf                    = vb2_ioctl_qbuf,
1108        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
1109        .vidioc_expbuf                  = vb2_ioctl_expbuf,
1110        .vidioc_create_bufs             = vb2_ioctl_create_bufs,
1111        .vidioc_prepare_buf             = vb2_ioctl_prepare_buf,
1112        .vidioc_streamon                = vsp1_video_streamon,
1113        .vidioc_streamoff               = vb2_ioctl_streamoff,
1114};
1115
1116/* -----------------------------------------------------------------------------
1117 * V4L2 File Operations
1118 */
1119
1120static int vsp1_video_open(struct file *file)
1121{
1122        struct vsp1_video *video = video_drvdata(file);
1123        struct v4l2_fh *vfh;
1124        int ret = 0;
1125
1126        vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
1127        if (vfh == NULL)
1128                return -ENOMEM;
1129
1130        v4l2_fh_init(vfh, &video->video);
1131        v4l2_fh_add(vfh);
1132
1133        file->private_data = vfh;
1134
1135        ret = vsp1_device_get(video->vsp1);
1136        if (ret < 0) {
1137                v4l2_fh_del(vfh);
1138                v4l2_fh_exit(vfh);
1139                kfree(vfh);
1140        }
1141
1142        return ret;
1143}
1144
1145static int vsp1_video_release(struct file *file)
1146{
1147        struct vsp1_video *video = video_drvdata(file);
1148        struct v4l2_fh *vfh = file->private_data;
1149
1150        mutex_lock(&video->lock);
1151        if (video->queue.owner == vfh) {
1152                vb2_queue_release(&video->queue);
1153                video->queue.owner = NULL;
1154        }
1155        mutex_unlock(&video->lock);
1156
1157        vsp1_device_put(video->vsp1);
1158
1159        v4l2_fh_release(file);
1160
1161        file->private_data = NULL;
1162
1163        return 0;
1164}
1165
1166static const struct v4l2_file_operations vsp1_video_fops = {
1167        .owner = THIS_MODULE,
1168        .unlocked_ioctl = video_ioctl2,
1169        .open = vsp1_video_open,
1170        .release = vsp1_video_release,
1171        .poll = vb2_fop_poll,
1172        .mmap = vb2_fop_mmap,
1173};
1174
1175/* -----------------------------------------------------------------------------
1176 * Initialization and Cleanup
1177 */
1178
1179struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1,
1180                                     struct vsp1_rwpf *rwpf)
1181{
1182        struct vsp1_video *video;
1183        const char *direction;
1184        int ret;
1185
1186        video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL);
1187        if (!video)
1188                return ERR_PTR(-ENOMEM);
1189
1190        rwpf->video = video;
1191
1192        video->vsp1 = vsp1;
1193        video->rwpf = rwpf;
1194
1195        if (rwpf->entity.type == VSP1_ENTITY_RPF) {
1196                direction = "input";
1197                video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
1198                video->pad.flags = MEDIA_PAD_FL_SOURCE;
1199                video->video.vfl_dir = VFL_DIR_TX;
1200        } else {
1201                direction = "output";
1202                video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
1203                video->pad.flags = MEDIA_PAD_FL_SINK;
1204                video->video.vfl_dir = VFL_DIR_RX;
1205        }
1206
1207        mutex_init(&video->lock);
1208        spin_lock_init(&video->irqlock);
1209        INIT_LIST_HEAD(&video->irqqueue);
1210
1211        /* Initialize the media entity... */
1212        ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
1213        if (ret < 0)
1214                return ERR_PTR(ret);
1215
1216        /* ... and the format ... */
1217        rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT;
1218        rwpf->format.width = VSP1_VIDEO_DEF_WIDTH;
1219        rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT;
1220        __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo);
1221
1222        /* ... and the video node... */
1223        video->video.v4l2_dev = &video->vsp1->v4l2_dev;
1224        video->video.fops = &vsp1_video_fops;
1225        snprintf(video->video.name, sizeof(video->video.name), "%s %s",
1226                 rwpf->entity.subdev.name, direction);
1227        video->video.vfl_type = VFL_TYPE_GRABBER;
1228        video->video.release = video_device_release_empty;
1229        video->video.ioctl_ops = &vsp1_video_ioctl_ops;
1230
1231        video_set_drvdata(&video->video, video);
1232
1233        video->queue.type = video->type;
1234        video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
1235        video->queue.lock = &video->lock;
1236        video->queue.drv_priv = video;
1237        video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer);
1238        video->queue.ops = &vsp1_video_queue_qops;
1239        video->queue.mem_ops = &vb2_dma_contig_memops;
1240        video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
1241        video->queue.dev = video->vsp1->bus_master;
1242        ret = vb2_queue_init(&video->queue);
1243        if (ret < 0) {
1244                dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
1245                goto error;
1246        }
1247
1248        /* ... and register the video device. */
1249        video->video.queue = &video->queue;
1250        ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1251        if (ret < 0) {
1252                dev_err(video->vsp1->dev, "failed to register video device\n");
1253                goto error;
1254        }
1255
1256        return video;
1257
1258error:
1259        vsp1_video_cleanup(video);
1260        return ERR_PTR(ret);
1261}
1262
1263void vsp1_video_cleanup(struct vsp1_video *video)
1264{
1265        if (video_is_registered(&video->video))
1266                video_unregister_device(&video->video);
1267
1268        media_entity_cleanup(&video->video.entity);
1269}
1270