/* linux/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c */
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (C) 2016 NextThing Co
   4 * Copyright (C) 2016-2019 Bootlin
   5 *
   6 * Author: Maxime Ripard <maxime.ripard@bootlin.com>
   7 */
   8
   9#include <linux/device.h>
  10#include <linux/interrupt.h>
  11#include <linux/list.h>
  12#include <linux/mutex.h>
  13#include <linux/spinlock.h>
  14#include <media/videobuf2-dma-contig.h>
  15#include <media/videobuf2-v4l2.h>
  16
  17#include "sun4i_csi.h"
  18
/*
 * Per-buffer driver state: embeds the vb2_v4l2_buffer and adds a list
 * node so the buffer can sit on the driver's pending-buffer queue
 * (csi->buf_list).
 */
struct sun4i_csi_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};
  23
/* Back-cast from the embedded vb2_v4l2_buffer to its sun4i_csi_buffer. */
static inline struct sun4i_csi_buffer *
vb2_v4l2_to_csi_buffer(const struct vb2_v4l2_buffer *p)
{
	return container_of(p, struct sun4i_csi_buffer, vb);
}
  29
/* Back-cast from a plain vb2_buffer all the way to its sun4i_csi_buffer. */
static inline struct sun4i_csi_buffer *
vb2_to_csi_buffer(const struct vb2_buffer *p)
{
	return vb2_v4l2_to_csi_buffer(to_vb2_v4l2_buffer(p));
}
  35
/* Start video capture on the CSI engine. */
static void sun4i_csi_capture_start(struct sun4i_csi *csi)
{
	writel(CSI_CPT_CTRL_VIDEO_START, csi->regs + CSI_CPT_CTRL_REG);
}
  40
/* Stop video capture by clearing the capture control register. */
static void sun4i_csi_capture_stop(struct sun4i_csi *csi)
{
	writel(0, csi->regs + CSI_CPT_CTRL_REG);
}
  45
  46static int sun4i_csi_queue_setup(struct vb2_queue *vq,
  47                                 unsigned int *nbuffers,
  48                                 unsigned int *nplanes,
  49                                 unsigned int sizes[],
  50                                 struct device *alloc_devs[])
  51{
  52        struct sun4i_csi *csi = vb2_get_drv_priv(vq);
  53        unsigned int num_planes = csi->fmt.num_planes;
  54        unsigned int i;
  55
  56        if (*nplanes) {
  57                if (*nplanes != num_planes)
  58                        return -EINVAL;
  59
  60                for (i = 0; i < num_planes; i++)
  61                        if (sizes[i] < csi->fmt.plane_fmt[i].sizeimage)
  62                                return -EINVAL;
  63                return 0;
  64        }
  65
  66        *nplanes = num_planes;
  67        for (i = 0; i < num_planes; i++)
  68                sizes[i] = csi->fmt.plane_fmt[i].sizeimage;
  69
  70        return 0;
  71};
  72
  73static int sun4i_csi_buffer_prepare(struct vb2_buffer *vb)
  74{
  75        struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
  76        unsigned int i;
  77
  78        for (i = 0; i < csi->fmt.num_planes; i++) {
  79                unsigned long size = csi->fmt.plane_fmt[i].sizeimage;
  80
  81                if (vb2_plane_size(vb, i) < size) {
  82                        dev_err(csi->dev, "buffer too small (%lu < %lu)\n",
  83                                vb2_plane_size(vb, i), size);
  84                        return -EINVAL;
  85                }
  86
  87                vb2_set_plane_payload(vb, i, size);
  88        }
  89
  90        return 0;
  91}
  92
  93static int sun4i_csi_setup_scratch_buffer(struct sun4i_csi *csi,
  94                                          unsigned int slot)
  95{
  96        dma_addr_t addr = csi->scratch.paddr;
  97        unsigned int plane;
  98
  99        dev_dbg(csi->dev,
 100                "No more available buffer, using the scratch buffer\n");
 101
 102        for (plane = 0; plane < csi->fmt.num_planes; plane++) {
 103                writel(addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
 104                addr += csi->fmt.plane_fmt[plane].sizeimage;
 105        }
 106
 107        csi->current_buf[slot] = NULL;
 108        return 0;
 109}
 110
 111static int sun4i_csi_buffer_fill_slot(struct sun4i_csi *csi, unsigned int slot)
 112{
 113        struct sun4i_csi_buffer *c_buf;
 114        struct vb2_v4l2_buffer *v_buf;
 115        unsigned int plane;
 116
 117        /*
 118         * We should never end up in a situation where we overwrite an
 119         * already filled slot.
 120         */
 121        if (WARN_ON(csi->current_buf[slot]))
 122                return -EINVAL;
 123
 124        if (list_empty(&csi->buf_list))
 125                return sun4i_csi_setup_scratch_buffer(csi, slot);
 126
 127        c_buf = list_first_entry(&csi->buf_list, struct sun4i_csi_buffer, list);
 128        list_del_init(&c_buf->list);
 129
 130        v_buf = &c_buf->vb;
 131        csi->current_buf[slot] = v_buf;
 132
 133        for (plane = 0; plane < csi->fmt.num_planes; plane++) {
 134                dma_addr_t buf_addr;
 135
 136                buf_addr = vb2_dma_contig_plane_dma_addr(&v_buf->vb2_buf,
 137                                                         plane);
 138                writel(buf_addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
 139        }
 140
 141        return 0;
 142}
 143
 144static int sun4i_csi_buffer_fill_all(struct sun4i_csi *csi)
 145{
 146        unsigned int slot;
 147        int ret;
 148
 149        for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
 150                ret = sun4i_csi_buffer_fill_slot(csi, slot);
 151                if (ret)
 152                        return ret;
 153        }
 154
 155        return 0;
 156}
 157
 158static void sun4i_csi_buffer_mark_done(struct sun4i_csi *csi,
 159                                       unsigned int slot,
 160                                       unsigned int sequence)
 161{
 162        struct vb2_v4l2_buffer *v_buf;
 163
 164        if (!csi->current_buf[slot]) {
 165                dev_dbg(csi->dev, "Scratch buffer was used, ignoring..\n");
 166                return;
 167        }
 168
 169        v_buf = csi->current_buf[slot];
 170        v_buf->field = csi->fmt.field;
 171        v_buf->sequence = sequence;
 172        v_buf->vb2_buf.timestamp = ktime_get_ns();
 173        vb2_buffer_done(&v_buf->vb2_buf, VB2_BUF_STATE_DONE);
 174
 175        csi->current_buf[slot] = NULL;
 176}
 177
 178static int sun4i_csi_buffer_flip(struct sun4i_csi *csi, unsigned int sequence)
 179{
 180        u32 reg = readl(csi->regs + CSI_BUF_CTRL_REG);
 181        unsigned int next;
 182
 183        /* Our next buffer is not the current buffer */
 184        next = !(reg & CSI_BUF_CTRL_DBS);
 185
 186        /* Report the previous buffer as done */
 187        sun4i_csi_buffer_mark_done(csi, next, sequence);
 188
 189        /* Put a new buffer in there */
 190        return sun4i_csi_buffer_fill_slot(csi, next);
 191}
 192
 193static void sun4i_csi_buffer_queue(struct vb2_buffer *vb)
 194{
 195        struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
 196        struct sun4i_csi_buffer *buf = vb2_to_csi_buffer(vb);
 197        unsigned long flags;
 198
 199        spin_lock_irqsave(&csi->qlock, flags);
 200        list_add_tail(&buf->list, &csi->buf_list);
 201        spin_unlock_irqrestore(&csi->qlock, flags);
 202}
 203
 204static void return_all_buffers(struct sun4i_csi *csi,
 205                               enum vb2_buffer_state state)
 206{
 207        struct sun4i_csi_buffer *buf, *node;
 208        unsigned int slot;
 209
 210        list_for_each_entry_safe(buf, node, &csi->buf_list, list) {
 211                vb2_buffer_done(&buf->vb.vb2_buf, state);
 212                list_del(&buf->list);
 213        }
 214
 215        for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
 216                struct vb2_v4l2_buffer *v_buf = csi->current_buf[slot];
 217
 218                if (!v_buf)
 219                        continue;
 220
 221                vb2_buffer_done(&v_buf->vb2_buf, state);
 222                csi->current_buf[slot] = NULL;
 223        }
 224}
 225
 226static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
 227{
 228        struct sun4i_csi *csi = vb2_get_drv_priv(vq);
 229        struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
 230        const struct sun4i_csi_format *csi_fmt;
 231        unsigned long href_pol, pclk_pol, vref_pol;
 232        unsigned long flags;
 233        unsigned int i;
 234        int ret;
 235
 236        csi_fmt = sun4i_csi_find_format(&csi->fmt.pixelformat, NULL);
 237        if (!csi_fmt)
 238                return -EINVAL;
 239
 240        dev_dbg(csi->dev, "Starting capture\n");
 241
 242        csi->sequence = 0;
 243
 244        /*
 245         * We need a scratch buffer in case where we'll not have any
 246         * more buffer queued so that we don't error out. One of those
 247         * cases is when you end up at the last frame to capture, you
 248         * don't havea any buffer queued any more, and yet it doesn't
 249         * really matter since you'll never reach the next buffer.
 250         *
 251         * Since we support the multi-planar API, we need to have a
 252         * buffer for each plane. Allocating a single one large enough
 253         * to hold all the buffers is simpler, so let's go for that.
 254         */
 255        csi->scratch.size = 0;
 256        for (i = 0; i < csi->fmt.num_planes; i++)
 257                csi->scratch.size += csi->fmt.plane_fmt[i].sizeimage;
 258
 259        csi->scratch.vaddr = dma_alloc_coherent(csi->dev,
 260                                                csi->scratch.size,
 261                                                &csi->scratch.paddr,
 262                                                GFP_KERNEL);
 263        if (!csi->scratch.vaddr) {
 264                dev_err(csi->dev, "Failed to allocate scratch buffer\n");
 265                ret = -ENOMEM;
 266                goto err_clear_dma_queue;
 267        }
 268
 269        ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe);
 270        if (ret < 0)
 271                goto err_free_scratch_buffer;
 272
 273        spin_lock_irqsave(&csi->qlock, flags);
 274
 275        /* Setup timings */
 276        writel(CSI_WIN_CTRL_W_ACTIVE(csi->fmt.width * 2),
 277               csi->regs + CSI_WIN_CTRL_W_REG);
 278        writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
 279               csi->regs + CSI_WIN_CTRL_H_REG);
 280
 281        /*
 282         * This hardware uses [HV]REF instead of [HV]SYNC. Based on the
 283         * provided timing diagrams in the manual, positive polarity
 284         * equals active high [HV]REF.
 285         *
 286         * When the back porch is 0, [HV]REF is more or less equivalent
 287         * to [HV]SYNC inverted.
 288         */
 289        href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
 290        vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
 291        pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
 292        writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
 293               CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
 294               CSI_CFG_VREF_POL(vref_pol) |
 295               CSI_CFG_HREF_POL(href_pol) |
 296               CSI_CFG_PCLK_POL(pclk_pol),
 297               csi->regs + CSI_CFG_REG);
 298
 299        /* Setup buffer length */
 300        writel(csi->fmt.plane_fmt[0].bytesperline,
 301               csi->regs + CSI_BUF_LEN_REG);
 302
 303        /* Prepare our buffers in hardware */
 304        ret = sun4i_csi_buffer_fill_all(csi);
 305        if (ret) {
 306                spin_unlock_irqrestore(&csi->qlock, flags);
 307                goto err_disable_pipeline;
 308        }
 309
 310        /* Enable double buffering */
 311        writel(CSI_BUF_CTRL_DBE, csi->regs + CSI_BUF_CTRL_REG);
 312
 313        /* Clear the pending interrupts */
 314        writel(CSI_INT_FRM_DONE, csi->regs + 0x34);
 315
 316        /* Enable frame done interrupt */
 317        writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_EN_REG);
 318
 319        sun4i_csi_capture_start(csi);
 320
 321        spin_unlock_irqrestore(&csi->qlock, flags);
 322
 323        ret = v4l2_subdev_call(csi->src_subdev, video, s_stream, 1);
 324        if (ret < 0 && ret != -ENOIOCTLCMD)
 325                goto err_disable_device;
 326
 327        return 0;
 328
 329err_disable_device:
 330        sun4i_csi_capture_stop(csi);
 331
 332err_disable_pipeline:
 333        media_pipeline_stop(&csi->vdev.entity);
 334
 335err_free_scratch_buffer:
 336        dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
 337                          csi->scratch.paddr);
 338
 339err_clear_dma_queue:
 340        spin_lock_irqsave(&csi->qlock, flags);
 341        return_all_buffers(csi, VB2_BUF_STATE_QUEUED);
 342        spin_unlock_irqrestore(&csi->qlock, flags);
 343
 344        return ret;
 345}
 346
/*
 * vb2 stop_streaming: tear down capture in the reverse order of
 * start_streaming.  The sequence matters: stop the source subdevice
 * and the capture engine first so no further DMA or interrupts touch
 * the buffers being returned, then release the buffers, stop the media
 * pipeline, and finally free the scratch buffer.
 */
static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
	unsigned long flags;

	dev_dbg(csi->dev, "Stopping capture\n");

	/* Best-effort: the subdev may not implement s_stream. */
	v4l2_subdev_call(csi->src_subdev, video, s_stream, 0);
	sun4i_csi_capture_stop(csi);

	/* Release all active buffers */
	spin_lock_irqsave(&csi->qlock, flags);
	return_all_buffers(csi, VB2_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&csi->qlock, flags);

	media_pipeline_stop(&csi->vdev.entity);

	dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
			  csi->scratch.paddr);
}
 367
/* vb2 queue operations: wire the queue to the handlers defined above. */
static const struct vb2_ops sun4i_csi_qops = {
	.queue_setup		= sun4i_csi_queue_setup,
	.buf_prepare		= sun4i_csi_buffer_prepare,
	.buf_queue		= sun4i_csi_buffer_queue,
	.start_streaming	= sun4i_csi_start_streaming,
	.stop_streaming		= sun4i_csi_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};
 377
 378static irqreturn_t sun4i_csi_irq(int irq, void *data)
 379{
 380        struct sun4i_csi *csi = data;
 381        u32 reg;
 382
 383        reg = readl(csi->regs + CSI_INT_STA_REG);
 384
 385        /* Acknowledge the interrupts */
 386        writel(reg, csi->regs + CSI_INT_STA_REG);
 387
 388        if (!(reg & CSI_INT_FRM_DONE))
 389                return IRQ_HANDLED;
 390
 391        spin_lock(&csi->qlock);
 392        if (sun4i_csi_buffer_flip(csi, csi->sequence++)) {
 393                dev_warn(csi->dev, "%s: Flip failed\n", __func__);
 394                sun4i_csi_capture_stop(csi);
 395        }
 396        spin_unlock(&csi->qlock);
 397
 398        return IRQ_HANDLED;
 399}
 400
 401int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
 402{
 403        struct vb2_queue *q = &csi->queue;
 404        int ret;
 405        int i;
 406
 407        spin_lock_init(&csi->qlock);
 408        mutex_init(&csi->lock);
 409
 410        INIT_LIST_HEAD(&csi->buf_list);
 411        for (i = 0; i < CSI_MAX_BUFFER; i++)
 412                csi->current_buf[i] = NULL;
 413
 414        q->min_buffers_needed = 3;
 415        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
 416        q->io_modes = VB2_MMAP;
 417        q->lock = &csi->lock;
 418        q->drv_priv = csi;
 419        q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
 420        q->ops = &sun4i_csi_qops;
 421        q->mem_ops = &vb2_dma_contig_memops;
 422        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 423        q->dev = csi->dev;
 424
 425        ret = vb2_queue_init(q);
 426        if (ret < 0) {
 427                dev_err(csi->dev, "failed to initialize VB2 queue\n");
 428                goto err_free_mutex;
 429        }
 430
 431        ret = v4l2_device_register(csi->dev, &csi->v4l);
 432        if (ret) {
 433                dev_err(csi->dev, "Couldn't register the v4l2 device\n");
 434                goto err_free_queue;
 435        }
 436
 437        ret = devm_request_irq(csi->dev, irq, sun4i_csi_irq, 0,
 438                               dev_name(csi->dev), csi);
 439        if (ret) {
 440                dev_err(csi->dev, "Couldn't register our interrupt\n");
 441                goto err_unregister_device;
 442        }
 443
 444        return 0;
 445
 446err_unregister_device:
 447        v4l2_device_unregister(&csi->v4l);
 448
 449err_free_queue:
 450        vb2_queue_release(q);
 451
 452err_free_mutex:
 453        mutex_destroy(&csi->lock);
 454        return ret;
 455}
 456
/*
 * Undo sun4i_csi_dma_register() in reverse order.  The interrupt is
 * devm-managed and released automatically with the device.
 */
void sun4i_csi_dma_unregister(struct sun4i_csi *csi)
{
	v4l2_device_unregister(&csi->v4l);
	vb2_queue_release(&csi->queue);
	mutex_destroy(&csi->lock);
}
 463