linux/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Cedrus VPU driver
 *
 * Copyright (C) 2016 Florent Revest <florent.revest@free-electrons.com>
 * Copyright (C) 2018 Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 * Copyright (C) 2018 Bootlin
 *
 * Based on the vim2m driver, that is:
 *
 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
 * Pawel Osciak, <pawel@osciak.com>
 * Marek Szyprowski, <m.szyprowski@samsung.com>
 */

#include <linux/platform_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/soc/sunxi/sunxi_sram.h>

#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>

#include "cedrus.h"
#include "cedrus_hw.h"
#include "cedrus_regs.h"

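/*
 * Enable the decoding engine: program VE_MODE with the reconstruction write
 * mode, the DDR bandwidth setting and the codec-specific decode mode.
 * Returns -EINVAL for unsupported codecs.
 */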
int cedrus_engine_enable(struct cedrus_dev *dev, enum cedrus_codec codec)
{
        u32 reg = 0;

        /*
         * FIXME: This is only valid on 32-bit DDRs; we should test
         * it on the A13/A33.
         */
        reg |= VE_MODE_REC_WR_MODE_2MB;
        reg |= VE_MODE_DDR_MODE_BW_128;

        switch (codec) {
        case CEDRUS_CODEC_MPEG2:
                reg |= VE_MODE_DEC_MPEG;
                break;

        case CEDRUS_CODEC_H264:
                reg |= VE_MODE_DEC_H264;
                break;

        default:
                return -EINVAL;
        }

        cedrus_write(dev, VE_MODE, reg);

        return 0;
}

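/* Return the video engine to its disabled (idle) state. */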
void cedrus_engine_disable(struct cedrus_dev *dev)
{
        cedrus_write(dev, VE_MODE, VE_MODE_DISABLED);
}

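/*
 * Configure the destination (capture) format: either linear NV12, which
 * needs explicit chroma buffer lengths and luma/chroma line strides, or the
 * VPU's native 32x32 tiled NV12 layout.
 */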
void cedrus_dst_format_set(struct cedrus_dev *dev,
                           struct v4l2_pix_format *fmt)
{
        unsigned int width = fmt->width;
        unsigned int height = fmt->height;
        u32 chroma_size;
        u32 reg;

        switch (fmt->pixelformat) {
        case V4L2_PIX_FMT_NV12:
                chroma_size = ALIGN(width, 16) * ALIGN(height, 16) / 2;

                reg = VE_PRIMARY_OUT_FMT_NV12;
                cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

                reg = VE_CHROMA_BUF_LEN_SDRT(chroma_size / 2);
                cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);

                reg = chroma_size / 2;
                cedrus_write(dev, VE_PRIMARY_CHROMA_BUF_LEN, reg);

                reg = VE_PRIMARY_FB_LINE_STRIDE_LUMA(ALIGN(width, 16)) |
                      VE_PRIMARY_FB_LINE_STRIDE_CHROMA(ALIGN(width, 16) / 2);
                cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);

                break;
        case V4L2_PIX_FMT_SUNXI_TILED_NV12:
        default:
                reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
                cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);

                reg = VE_SECONDARY_OUT_FMT_TILED_32_NV12;
                cedrus_write(dev, VE_CHROMA_BUF_LEN, reg);

                break;
        }
}

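/*
 * Decoding interrupt handler: read the codec-specific status, acknowledge
 * the interrupt, then complete the current mem2mem job, marking both
 * buffers as done or in error depending on the reported status.
 */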
static irqreturn_t cedrus_irq(int irq, void *data)
{
        struct cedrus_dev *dev = data;
        struct cedrus_ctx *ctx;
        struct vb2_v4l2_buffer *src_buf, *dst_buf;
        enum vb2_buffer_state state;
        enum cedrus_irq_status status;

        ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
        if (!ctx) {
                v4l2_err(&dev->v4l2_dev,
                         "Instance released before the end of transaction\n");
                return IRQ_NONE;
        }

        status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
        if (status == CEDRUS_IRQ_NONE)
                return IRQ_NONE;

        dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
        dev->dec_ops[ctx->current_codec]->irq_clear(ctx);

        src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
        dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

        if (!src_buf || !dst_buf) {
                v4l2_err(&dev->v4l2_dev,
                         "Missing source and/or destination buffers\n");
                return IRQ_HANDLED;
        }

        if (status == CEDRUS_IRQ_ERROR)
                state = VB2_BUF_STATE_ERROR;
        else
                state = VB2_BUF_STATE_DONE;

        v4l2_m2m_buf_done(src_buf, state);
        v4l2_m2m_buf_done(dst_buf, state);

        v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);

        return IRQ_HANDLED;
}

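/*
 * Bring up the decoder hardware: request the decoding IRQ, configure the
 * DMA offset and reserved memory, claim the SRAM section, acquire and
 * enable the AHB, MOD and RAM clocks, map the registers and reset the
 * engine. On failure, everything acquired so far is released in reverse
 * order.
 */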
int cedrus_hw_probe(struct cedrus_dev *dev)
{
        const struct cedrus_variant *variant;
        struct resource *res;
        int irq_dec;
        int ret;

        variant = of_device_get_match_data(dev->dev);
        if (!variant)
                return -EINVAL;

        dev->capabilities = variant->capabilities;

        irq_dec = platform_get_irq(dev->pdev, 0);
        if (irq_dec <= 0) {
                dev_err(dev->dev, "Failed to get IRQ\n");

                return irq_dec;
        }
        ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
                               0, dev_name(dev->dev), dev);
        if (ret) {
                dev_err(dev->dev, "Failed to request IRQ\n");

                return ret;
        }

        /*
         * The VPU is only able to handle bus addresses, so we have to
         * subtract the RAM offset from the physical addresses.
         *
         * This information will eventually be obtained from device-tree.
         */

#ifdef PHYS_PFN_OFFSET
        if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET))
                dev->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
#endif

        ret = of_reserved_mem_device_init(dev->dev);
        if (ret && ret != -ENODEV) {
                dev_err(dev->dev, "Failed to reserve memory\n");

                return ret;
        }

        ret = sunxi_sram_claim(dev->dev);
        if (ret) {
                dev_err(dev->dev, "Failed to claim SRAM\n");

                goto err_mem;
        }

        dev->ahb_clk = devm_clk_get(dev->dev, "ahb");
        if (IS_ERR(dev->ahb_clk)) {
                dev_err(dev->dev, "Failed to get AHB clock\n");

                ret = PTR_ERR(dev->ahb_clk);
                goto err_sram;
        }

        dev->mod_clk = devm_clk_get(dev->dev, "mod");
        if (IS_ERR(dev->mod_clk)) {
                dev_err(dev->dev, "Failed to get MOD clock\n");

                ret = PTR_ERR(dev->mod_clk);
                goto err_sram;
        }

        dev->ram_clk = devm_clk_get(dev->dev, "ram");
        if (IS_ERR(dev->ram_clk)) {
                dev_err(dev->dev, "Failed to get RAM clock\n");

                ret = PTR_ERR(dev->ram_clk);
                goto err_sram;
        }

        dev->rstc = devm_reset_control_get(dev->dev, NULL);
        if (IS_ERR(dev->rstc)) {
                dev_err(dev->dev, "Failed to get reset control\n");

                ret = PTR_ERR(dev->rstc);
                goto err_sram;
        }

        res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
        dev->base = devm_ioremap_resource(dev->dev, res);
        if (IS_ERR(dev->base)) {
                dev_err(dev->dev, "Failed to map registers\n");

                ret = PTR_ERR(dev->base);
                goto err_sram;
        }

        ret = clk_set_rate(dev->mod_clk, variant->mod_rate);
        if (ret) {
                dev_err(dev->dev, "Failed to set clock rate\n");

                goto err_sram;
        }

        ret = clk_prepare_enable(dev->ahb_clk);
        if (ret) {
                dev_err(dev->dev, "Failed to enable AHB clock\n");

                goto err_sram;
        }

        ret = clk_prepare_enable(dev->mod_clk);
        if (ret) {
                dev_err(dev->dev, "Failed to enable MOD clock\n");

                goto err_ahb_clk;
        }

        ret = clk_prepare_enable(dev->ram_clk);
        if (ret) {
                dev_err(dev->dev, "Failed to enable RAM clock\n");

                goto err_mod_clk;
        }

        ret = reset_control_reset(dev->rstc);
        if (ret) {
                dev_err(dev->dev, "Failed to apply reset\n");

                goto err_ram_clk;
        }

        return 0;

err_ram_clk:
        clk_disable_unprepare(dev->ram_clk);
err_mod_clk:
        clk_disable_unprepare(dev->mod_clk);
err_ahb_clk:
        clk_disable_unprepare(dev->ahb_clk);
err_sram:
        sunxi_sram_release(dev->dev);
err_mem:
        of_reserved_mem_device_release(dev->dev);

        return ret;
}

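/*
 * Tear down the hardware state set up by cedrus_hw_probe(): assert the
 * reset, disable the clocks and release the SRAM section and reserved
 * memory.
 */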
void cedrus_hw_remove(struct cedrus_dev *dev)
{
        reset_control_assert(dev->rstc);

        clk_disable_unprepare(dev->ram_clk);
        clk_disable_unprepare(dev->mod_clk);
        clk_disable_unprepare(dev->ahb_clk);

        sunxi_sram_release(dev->dev);

        of_reserved_mem_device_release(dev->dev);
}