linux/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 NextThing Co
 * Copyright (C) 2016-2019 Bootlin
 *
 * Author: Maxime Ripard <maxime.ripard@bootlin.com>
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-v4l2.h>

#include "sun4i_csi.h"

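/*
 * struct sun4i_csi_buffer - buffer tracked by the driver
 * @vb:         videobuf2 buffer handed to us by the vb2 core
 * @list:       entry in the driver's list of buffers waiting to be
 *              programmed into one of the hardware slots
 */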
struct sun4i_csi_buffer {
        struct vb2_v4l2_buffer  vb;
        struct list_head        list;
};

static inline struct sun4i_csi_buffer *
vb2_v4l2_to_csi_buffer(const struct vb2_v4l2_buffer *p)
{
        return container_of(p, struct sun4i_csi_buffer, vb);
}

static inline struct sun4i_csi_buffer *
vb2_to_csi_buffer(const struct vb2_buffer *p)
{
        return vb2_v4l2_to_csi_buffer(to_vb2_v4l2_buffer(p));
}

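/*
 * Start and stop the capture engine by setting or clearing the
 * VIDEO_START bit in the capture control register.
 */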
static void sun4i_csi_capture_start(struct sun4i_csi *csi)
{
        writel(CSI_CPT_CTRL_VIDEO_START, csi->regs + CSI_CPT_CTRL_REG);
}

static void sun4i_csi_capture_stop(struct sun4i_csi *csi)
{
        writel(0, csi->regs + CSI_CPT_CTRL_REG);
}

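/*
 * vb2 queue_setup callback: validate or report the number of planes
 * and the minimum size of each plane, based on the currently
 * configured multi-planar format.
 */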
static int sun4i_csi_queue_setup(struct vb2_queue *vq,
                                 unsigned int *nbuffers,
                                 unsigned int *nplanes,
                                 unsigned int sizes[],
                                 struct device *alloc_devs[])
{
        struct sun4i_csi *csi = vb2_get_drv_priv(vq);
        unsigned int num_planes = csi->fmt.num_planes;
        unsigned int i;

        if (*nplanes) {
                if (*nplanes != num_planes)
                        return -EINVAL;

                for (i = 0; i < num_planes; i++)
                        if (sizes[i] < csi->fmt.plane_fmt[i].sizeimage)
                                return -EINVAL;
                return 0;
        }

        *nplanes = num_planes;
        for (i = 0; i < num_planes; i++)
                sizes[i] = csi->fmt.plane_fmt[i].sizeimage;

        return 0;
}

static int sun4i_csi_buffer_prepare(struct vb2_buffer *vb)
{
        struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
        unsigned int i;

        for (i = 0; i < csi->fmt.num_planes; i++) {
                unsigned long size = csi->fmt.plane_fmt[i].sizeimage;

                if (vb2_plane_size(vb, i) < size) {
                        dev_err(csi->dev, "buffer too small (%lu < %lu)\n",
                                vb2_plane_size(vb, i), size);
                        return -EINVAL;
                }

                vb2_set_plane_payload(vb, i, size);
        }

        return 0;
}

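/*
 * Point every plane of the given hardware slot at the scratch buffer.
 * This is used when no buffer is queued, so that the device always
 * has a valid DMA address to write to.
 */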
static int sun4i_csi_setup_scratch_buffer(struct sun4i_csi *csi,
                                          unsigned int slot)
{
        dma_addr_t addr = csi->scratch.paddr;
        unsigned int plane;

        dev_dbg(csi->dev,
                "No more available buffers, using the scratch buffer\n");

        for (plane = 0; plane < csi->fmt.num_planes; plane++) {
                writel(addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
                addr += csi->fmt.plane_fmt[plane].sizeimage;
        }

        csi->current_buf[slot] = NULL;
        return 0;
}

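/*
 * Program a hardware slot with the next queued buffer, writing each
 * plane's DMA address into the matching buffer address register. If
 * the queue is empty, fall back to the scratch buffer instead.
 */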
static int sun4i_csi_buffer_fill_slot(struct sun4i_csi *csi, unsigned int slot)
{
        struct sun4i_csi_buffer *c_buf;
        struct vb2_v4l2_buffer *v_buf;
        unsigned int plane;

        /*
         * We should never end up in a situation where we overwrite an
         * already filled slot.
         */
        if (WARN_ON(csi->current_buf[slot]))
                return -EINVAL;

        if (list_empty(&csi->buf_list))
                return sun4i_csi_setup_scratch_buffer(csi, slot);

        c_buf = list_first_entry(&csi->buf_list, struct sun4i_csi_buffer, list);
        list_del_init(&c_buf->list);

        v_buf = &c_buf->vb;
        csi->current_buf[slot] = v_buf;

        for (plane = 0; plane < csi->fmt.num_planes; plane++) {
                dma_addr_t buf_addr;

                buf_addr = vb2_dma_contig_plane_dma_addr(&v_buf->vb2_buf,
                                                         plane);
                writel(buf_addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
        }

        return 0;
}

static int sun4i_csi_buffer_fill_all(struct sun4i_csi *csi)
{
        unsigned int slot;
        int ret;

        for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
                ret = sun4i_csi_buffer_fill_slot(csi, slot);
                if (ret)
                        return ret;
        }

        return 0;
}

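/*
 * Complete the buffer currently attached to a slot: fill in the
 * field, sequence number and timestamp, then hand it back to vb2. If
 * the slot was backed by the scratch buffer, there is nothing to
 * return.
 */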
static void sun4i_csi_buffer_mark_done(struct sun4i_csi *csi,
                                       unsigned int slot,
                                       unsigned int sequence)
{
        struct vb2_v4l2_buffer *v_buf;

        if (!csi->current_buf[slot]) {
                dev_dbg(csi->dev, "Scratch buffer was used, ignoring...\n");
                return;
        }

        v_buf = csi->current_buf[slot];
        v_buf->field = csi->fmt.field;
        v_buf->sequence = sequence;
        v_buf->vb2_buf.timestamp = ktime_get_ns();
        vb2_buffer_done(&v_buf->vb2_buf, VB2_BUF_STATE_DONE);

        csi->current_buf[slot] = NULL;
}

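/*
 * Flip between the two hardware slots on a frame done event. The DBS
 * field of the buffer control register identifies the current slot,
 * so the other slot is the one that just completed: report its buffer
 * as done and refill the slot with the next queued buffer.
 */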
static int sun4i_csi_buffer_flip(struct sun4i_csi *csi, unsigned int sequence)
{
        u32 reg = readl(csi->regs + CSI_BUF_CTRL_REG);
        unsigned int next;

        /* Our next buffer is not the current buffer */
        next = !(reg & CSI_BUF_CTRL_DBS);

        /* Report the previous buffer as done */
        sun4i_csi_buffer_mark_done(csi, next, sequence);

        /* Put a new buffer in there */
        return sun4i_csi_buffer_fill_slot(csi, next);
}

static void sun4i_csi_buffer_queue(struct vb2_buffer *vb)
{
        struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
        struct sun4i_csi_buffer *buf = vb2_to_csi_buffer(vb);
        unsigned long flags;

        spin_lock_irqsave(&csi->qlock, flags);
        list_add_tail(&buf->list, &csi->buf_list);
        spin_unlock_irqrestore(&csi->qlock, flags);
}

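/*
 * Return every buffer to vb2 with the given state: first the buffers
 * still waiting on the queue, then the ones attached to the hardware
 * slots. Must be called with the qlock held.
 */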
static void return_all_buffers(struct sun4i_csi *csi,
                               enum vb2_buffer_state state)
{
        struct sun4i_csi_buffer *buf, *node;
        unsigned int slot;

        list_for_each_entry_safe(buf, node, &csi->buf_list, list) {
                vb2_buffer_done(&buf->vb.vb2_buf, state);
                list_del(&buf->list);
        }

        for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
                struct vb2_v4l2_buffer *v_buf = csi->current_buf[slot];

                if (!v_buf)
                        continue;

                vb2_buffer_done(&v_buf->vb2_buf, state);
                csi->current_buf[slot] = NULL;
        }
}

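/*
 * Start streaming: allocate the scratch buffer, start the media
 * pipeline, program the capture window, bus polarities and line
 * length, prime both hardware slots, enable the frame done interrupt,
 * and finally start the capture engine and the source subdevice.
 */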
static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct sun4i_csi *csi = vb2_get_drv_priv(vq);
        struct v4l2_fwnode_bus_parallel *bus = &csi->bus;
        const struct sun4i_csi_format *csi_fmt;
        unsigned long hsync_pol, pclk_pol, vsync_pol;
        unsigned long flags;
        unsigned int i;
        int ret;

        csi_fmt = sun4i_csi_find_format(&csi->fmt.pixelformat, NULL);
        if (!csi_fmt)
                return -EINVAL;

        dev_dbg(csi->dev, "Starting capture\n");

        csi->sequence = 0;

        /*
         * We need a scratch buffer in case we don't have any buffer
         * queued anymore, so that we don't error out. One of those
         * cases is when you reach the last frame to capture: there is
         * no buffer queued anymore, and it doesn't really matter since
         * the next buffer will never be reached.
         *
         * Since we support the multi-planar API, we need to have a
         * buffer for each plane. Allocating a single one large enough
         * to hold all the planes is simpler, so let's go for that.
         */
        csi->scratch.size = 0;
        for (i = 0; i < csi->fmt.num_planes; i++)
                csi->scratch.size += csi->fmt.plane_fmt[i].sizeimage;

        csi->scratch.vaddr = dma_alloc_coherent(csi->dev,
                                                csi->scratch.size,
                                                &csi->scratch.paddr,
                                                GFP_KERNEL);
        if (!csi->scratch.vaddr) {
                dev_err(csi->dev, "Failed to allocate scratch buffer\n");
                ret = -ENOMEM;
                goto err_clear_dma_queue;
        }

        ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe);
        if (ret < 0)
                goto err_free_scratch_buffer;

        spin_lock_irqsave(&csi->qlock, flags);

        /* Setup timings */
        writel(CSI_WIN_CTRL_W_ACTIVE(csi->fmt.width * 2),
               csi->regs + CSI_WIN_CTRL_W_REG);
        writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
               csi->regs + CSI_WIN_CTRL_H_REG);

        hsync_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
        pclk_pol = !!(bus->flags & V4L2_MBUS_DATA_ACTIVE_HIGH);
        vsync_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
        writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
               CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
               CSI_CFG_VSYNC_POL(vsync_pol) |
               CSI_CFG_HSYNC_POL(hsync_pol) |
               CSI_CFG_PCLK_POL(pclk_pol),
               csi->regs + CSI_CFG_REG);

        /* Setup buffer length */
        writel(csi->fmt.plane_fmt[0].bytesperline,
               csi->regs + CSI_BUF_LEN_REG);

        /* Prepare our buffers in hardware */
        ret = sun4i_csi_buffer_fill_all(csi);
        if (ret) {
                spin_unlock_irqrestore(&csi->qlock, flags);
                goto err_disable_pipeline;
        }

        /* Enable double buffering */
        writel(CSI_BUF_CTRL_DBE, csi->regs + CSI_BUF_CTRL_REG);

        /* Clear the pending interrupts */
        writel(CSI_INT_FRM_DONE, csi->regs + 0x34);

        /* Enable frame done interrupt */
        writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_EN_REG);

        sun4i_csi_capture_start(csi);

        spin_unlock_irqrestore(&csi->qlock, flags);

        ret = v4l2_subdev_call(csi->src_subdev, video, s_stream, 1);
        if (ret < 0 && ret != -ENOIOCTLCMD)
                goto err_disable_device;

        return 0;

err_disable_device:
        sun4i_csi_capture_stop(csi);

err_disable_pipeline:
        media_pipeline_stop(&csi->vdev.entity);

err_free_scratch_buffer:
        dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
                          csi->scratch.paddr);

err_clear_dma_queue:
        spin_lock_irqsave(&csi->qlock, flags);
        return_all_buffers(csi, VB2_BUF_STATE_QUEUED);
        spin_unlock_irqrestore(&csi->qlock, flags);

        return ret;
}

static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
{
        struct sun4i_csi *csi = vb2_get_drv_priv(vq);
        unsigned long flags;

        dev_dbg(csi->dev, "Stopping capture\n");

        v4l2_subdev_call(csi->src_subdev, video, s_stream, 0);
        sun4i_csi_capture_stop(csi);

        /* Release all active buffers */
        spin_lock_irqsave(&csi->qlock, flags);
        return_all_buffers(csi, VB2_BUF_STATE_ERROR);
        spin_unlock_irqrestore(&csi->qlock, flags);

        media_pipeline_stop(&csi->vdev.entity);

        dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
                          csi->scratch.paddr);
}

static const struct vb2_ops sun4i_csi_qops = {
        .queue_setup            = sun4i_csi_queue_setup,
        .buf_prepare            = sun4i_csi_buffer_prepare,
        .buf_queue              = sun4i_csi_buffer_queue,
        .start_streaming        = sun4i_csi_start_streaming,
        .stop_streaming         = sun4i_csi_stop_streaming,
        .wait_prepare           = vb2_ops_wait_prepare,
        .wait_finish            = vb2_ops_wait_finish,
};

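/*
 * Interrupt handler: acknowledge the pending interrupts, and on a
 * frame done event flip the hardware slots under the queue lock,
 * stopping the capture engine if the flip fails.
 */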
static irqreturn_t sun4i_csi_irq(int irq, void *data)
{
        struct sun4i_csi *csi = data;
        u32 reg;

        reg = readl(csi->regs + CSI_INT_STA_REG);

        /* Acknowledge the interrupts */
        writel(reg, csi->regs + CSI_INT_STA_REG);

        if (!(reg & CSI_INT_FRM_DONE))
                return IRQ_HANDLED;

        spin_lock(&csi->qlock);
        if (sun4i_csi_buffer_flip(csi, csi->sequence++)) {
                dev_warn(csi->dev, "%s: Flip failed\n", __func__);
                sun4i_csi_capture_stop(csi);
        }
        spin_unlock(&csi->qlock);

        return IRQ_HANDLED;
}

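/*
 * Register the DMA side of the driver: initialize the locks, the
 * buffer list and the vb2 queue, register the v4l2 device and request
 * the interrupt.
 */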
int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
{
        struct vb2_queue *q = &csi->queue;
        int ret;
        int i;

        spin_lock_init(&csi->qlock);
        mutex_init(&csi->lock);

        INIT_LIST_HEAD(&csi->buf_list);
        for (i = 0; i < CSI_MAX_BUFFER; i++)
                csi->current_buf[i] = NULL;

        q->min_buffers_needed = 3;
        q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        q->io_modes = VB2_MMAP;
        q->lock = &csi->lock;
        q->drv_priv = csi;
        q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
        q->ops = &sun4i_csi_qops;
        q->mem_ops = &vb2_dma_contig_memops;
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
        q->dev = csi->dev;

        ret = vb2_queue_init(q);
        if (ret < 0) {
                dev_err(csi->dev, "failed to initialize VB2 queue\n");
                goto err_free_mutex;
        }

        ret = v4l2_device_register(csi->dev, &csi->v4l);
        if (ret) {
                dev_err(csi->dev, "Couldn't register the v4l2 device\n");
                goto err_free_queue;
        }

        ret = devm_request_irq(csi->dev, irq, sun4i_csi_irq, 0,
                               dev_name(csi->dev), csi);
        if (ret) {
                dev_err(csi->dev, "Couldn't register our interrupt\n");
                goto err_unregister_device;
        }

        return 0;

err_unregister_device:
        v4l2_device_unregister(&csi->v4l);

err_free_queue:
        vb2_queue_release(q);

err_free_mutex:
        mutex_destroy(&csi->lock);
        return ret;
}

void sun4i_csi_dma_unregister(struct sun4i_csi *csi)
{
        v4l2_device_unregister(&csi->v4l);
        vb2_queue_release(&csi->queue);
        mutex_destroy(&csi->lock);
}