   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * VPDMA helper library
   4 *
   5 * Copyright (c) 2013 Texas Instruments Inc.
   6 *
   7 * David Griego, <dagriego@biglakesoftware.com>
   8 * Dale Farnsworth, <dale@farnsworth.org>
   9 * Archit Taneja, <archit@ti.com>
  10 */
  11
  12#include <linux/delay.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/err.h>
  15#include <linux/firmware.h>
  16#include <linux/io.h>
  17#include <linux/module.h>
  18#include <linux/platform_device.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/videodev2.h>
  22
  23#include "vpdma.h"
  24#include "vpdma_priv.h"
  25
  26#define VPDMA_FIRMWARE  "vpdma-1b8.bin"
  27
/*
 * VPDMA data-format table for YUV clients, indexed by the
 * VPDMA_DATA_FMT_* YUV enum values.  .depth is the bits-per-pixel used
 * for address/stride arithmetic when setting up transfers.
 */
const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
		/* subsampled chroma: average 4 bpp over the full frame */
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_CB420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CB420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCR422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CRY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCB422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);
  91
/*
 * VPDMA data-format table for RGB clients, indexed by the
 * VPDMA_DATA_FMT_* RGB enum values.  .depth is bits per pixel.
 */
const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
		.depth		= 32,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);
 195
 196/*
 197 * To handle RAW format we are re-using the CBY422
 198 * vpdma data type so that we use the vpdma to re-order
 199 * the incoming bytes, as the parser assumes that the
 200 * first byte presented on the bus is the MSB of a 2
 201 * bytes value.
 202 * RAW8 handles from 1 to 8 bits
 203 * RAW16 handles from 9 to 16 bits
 204 */
/*
 * RAW formats are carried over the CBY422 YUV data type (see the block
 * comment above): the VPDMA byte re-ordering gives the parser the MSB
 * first, as it expects for 2-byte values.
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);
 218
/* Miscellaneous VPDMA formats: currently only the motion-vector plane. */
const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
		.depth		= 4,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);
 227
/*
 * Per-channel lookup data: translates an enum vpdma_channel into the
 * hardware channel number and the client's CSTAT register offset.
 */
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};
 232
/* Channel lookup table, indexed by enum vpdma_channel. */
static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		/* RGB out shares the VIP upper Y CSTAT with LUMA_OUT */
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};
 279
/* Read a 32-bit VPDMA register at byte offset @offset from the base. */
static u32 read_reg(struct vpdma_data *vpdma, int offset)
{
	return ioread32(vpdma->base + offset);
}
 284
/* Write a 32-bit VPDMA register at byte offset @offset from the base. */
static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
{
	iowrite32(value, vpdma->base + offset);
}
 289
 290static int read_field_reg(struct vpdma_data *vpdma, int offset,
 291                u32 mask, int shift)
 292{
 293        return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
 294}
 295
 296static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
 297                u32 mask, int shift)
 298{
 299        u32 val = read_reg(vpdma, offset);
 300
 301        val &= ~(mask << shift);
 302        val |= (field & mask) << shift;
 303
 304        write_reg(vpdma, offset, val);
 305}
 306
/* Dump the interesting VPDMA registers at debug log level. */
void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

/* expands the short register name to its VPDMA_* offset macro */
#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dumping registers of only group0 and group3, because VPE channels
	 * lie within group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients, we can make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);
 359
/*
 * Allocate a CPU-side buffer to hold a descriptor list or payload.  The
 * buffer is not DMA-mapped yet; see vpdma_map_desc_buf().
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
{
	buf->size = size;
	buf->mapped = false;
	buf->addr = kzalloc(size, GFP_KERNEL);
	if (!buf->addr)
		return -ENOMEM;

	/*
	 * NOTE(review): this tests only the single bit VPDMA_DESC_ALIGN; if
	 * that macro is the alignment value (a power of two) rather than a
	 * mask, the intended check would be '& (VPDMA_DESC_ALIGN - 1)' —
	 * verify against vpdma.h.
	 */
	WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);

	return 0;
}
EXPORT_SYMBOL(vpdma_alloc_desc_buf);
 376
/*
 * Free a buffer allocated with vpdma_alloc_desc_buf(); it must have been
 * unmapped first (warns if still mapped).
 */
void vpdma_free_desc_buf(struct vpdma_buf *buf)
{
	WARN_ON(buf->mapped);
	kfree(buf->addr);
	buf->addr = NULL;
	buf->size = 0;
}
EXPORT_SYMBOL(vpdma_free_desc_buf);
 385
 386/*
 387 * map descriptor/payload DMA buffer, enabling DMA access
 388 */
 389int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 390{
 391        struct device *dev = &vpdma->pdev->dev;
 392
 393        WARN_ON(buf->mapped);
 394        buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
 395                                DMA_BIDIRECTIONAL);
 396        if (dma_mapping_error(dev, buf->dma_addr)) {
 397                dev_err(dev, "failed to map buffer\n");
 398                return -EINVAL;
 399        }
 400
 401        buf->mapped = true;
 402
 403        return 0;
 404}
 405EXPORT_SYMBOL(vpdma_map_desc_buf);
 406
 407/*
 408 * unmap descriptor/payload DMA buffer, disabling DMA access and
 409 * allowing the main processor to access the data
 410 */
 411void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 412{
 413        struct device *dev = &vpdma->pdev->dev;
 414
 415        if (buf->mapped)
 416                dma_unmap_single(dev, buf->dma_addr, buf->size,
 417                                DMA_BIDIRECTIONAL);
 418
 419        buf->mapped = false;
 420}
 421EXPORT_SYMBOL(vpdma_unmap_desc_buf);
 422
/*
 * Clean up all pending descriptors of a list.
 *
 * First stop the list currently being processed; if the VPDMA was busy,
 * this makes it accept newly posted lists again.  Then, to reset the
 * internal FSM, post an abort control descriptor for each of the @size
 * channels in @channels and busy-wait (bounded) for the abort list to
 * complete.
 *
 * Returns 0 on success (or when there is nothing to abort), -EBUSY on
 * timeout, or a negative error from list creation/mapping/submission.
 */
int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
		int *channels, int size)
{
	struct vpdma_desc_list abort_list;
	int i, ret, timeout = 500;

	/* stop the list currently being processed */
	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(1 << VPDMA_LIST_STOP_SHFT));

	/* no channels to abort */
	if (size <= 0 || !channels)
		return 0;

	/*
	 * NOTE(review): the buffer is sized in struct vpdma_dtd units while
	 * CTDs are appended below — assumes sizeof(struct vpdma_ctd) <=
	 * sizeof(struct vpdma_dtd); verify against vpdma.h.
	 */
	ret = vpdma_create_desc_list(&abort_list,
		size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
	if (ret)
		return ret;

	/* one abort control descriptor per channel */
	for (i = 0; i < size; i++)
		vpdma_add_abort_channel_ctd(&abort_list, channels[i]);

	ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
	if (ret)
		goto free_desc;
	ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
	if (ret)
		goto unmap_desc;

	/* bounded busy-wait for the abort list to be consumed */
	while (vpdma_list_busy(vpdma, list_num) && --timeout)
		;

	if (timeout == 0) {
		dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
		ret = -EBUSY;
	}

unmap_desc:
	vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
free_desc:
	vpdma_free_desc_buf(&abort_list.buf);

	return ret;
}
EXPORT_SYMBOL(vpdma_list_cleanup);
 474
 475/*
 476 * create a descriptor list, the user of this list will append configuration,
 477 * control and data descriptors to this list, this list will be submitted to
 478 * VPDMA. VPDMA's list parser will go through each descriptor and perform the
 479 * required DMA operations
 480 */
 481int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
 482{
 483        int r;
 484
 485        r = vpdma_alloc_desc_buf(&list->buf, size);
 486        if (r)
 487                return r;
 488
 489        list->next = list->buf.addr;
 490
 491        list->type = type;
 492
 493        return 0;
 494}
 495EXPORT_SYMBOL(vpdma_create_desc_list);
 496
/*
 * Once a descriptor list has been parsed by VPDMA, empty it by rewinding
 * the append pointer so new descriptors can be added.  The underlying
 * buffer is kept.
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);
 506
 507/*
 508 * free the buffer allocated for the VPDMA descriptor list, this should be
 509 * called when the user doesn't want to use VPDMA any more.
 510 */
 511void vpdma_free_desc_list(struct vpdma_desc_list *list)
 512{
 513        vpdma_free_desc_buf(&list->buf);
 514
 515        list->next = NULL;
 516}
 517EXPORT_SYMBOL(vpdma_free_desc_list);
 518
 519bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
 520{
 521        return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
 522}
 523EXPORT_SYMBOL(vpdma_list_busy);
 524
/*
 * Submit a list of DMA descriptors to the VPE VPDMA; does not wait for
 * completion.
 *
 * Returns -EBUSY if the hardware is still processing @list_num.
 * NOTE(review): the busy check runs before vpdma->lock is taken, so two
 * concurrent submitters of the same list could both pass it — assumes
 * callers serialize submissions per list; confirm with callers.
 */
int vpdma_submit_descs(struct vpdma_data *vpdma,
			struct vpdma_desc_list *list, int list_num)
{
	int list_size;
	unsigned long flags;

	if (vpdma_list_busy(vpdma, list_num))
		return -EBUSY;

	/* 16-byte granularity */
	list_size = (list->next - list->buf.addr) >> 4;

	/* the ADDR/ATTR register pair must be written atomically */
	spin_lock_irqsave(&vpdma->lock, flags);
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);

	/* writing LIST_ATTR kicks off processing of the list */
	write_reg(vpdma, VPDMA_LIST_ATTR,
			(list_num << VPDMA_LIST_NUM_SHFT) |
			(list->type << VPDMA_LIST_TYPE_SHFT) |
			list_size);
	spin_unlock_irqrestore(&vpdma->lock, flags);

	return 0;
}
EXPORT_SYMBOL(vpdma_submit_descs);
 552
 553static void dump_dtd(struct vpdma_dtd *dtd);
 554
/*
 * Patch the idx'th data transfer descriptor of an already-built list:
 * set a new source/destination address and enable descriptor write-back
 * to the location of @write_dtd, optionally dropping the data (@drop).
 *
 * The list buffer is unmapped around the CPU modification and re-mapped
 * for device access afterwards.
 */
void vpdma_update_dma_addr(struct vpdma_data *vpdma,
	struct vpdma_desc_list *list, dma_addr_t dma_addr,
	void *write_dtd, int drop, int idx)
{
	struct vpdma_dtd *dtd = list->buf.addr;
	dma_addr_t write_desc_addr;
	int offset;

	/* point at the idx'th descriptor in the list */
	dtd += idx;
	/* hand the buffer back to the CPU before modifying it */
	vpdma_unmap_desc_buf(vpdma, &list->buf);

	dtd->start_addr = dma_addr;

	/* Calculate write address from the offset of write_dtd from start
	 * of the list->buf
	 */
	offset = (void *)write_dtd - list->buf.addr;
	write_desc_addr = list->buf.dma_addr + offset;

	/* enable write-back; @drop selects whether the data is dropped */
	if (drop)
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 1, 0);
	else
		dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
							   1, 0, 0);

	/*
	 * NOTE(review): the return value is ignored — a mapping failure
	 * here would leave the list unusable; verify callers can tolerate
	 * this or consider propagating the error.
	 */
	vpdma_map_desc_buf(vpdma, &list->buf);

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_update_dma_addr);
 586
 587void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
 588                        u32 width, u32 height)
 589{
 590        if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
 591            reg_addr != VPDMA_MAX_SIZE3)
 592                reg_addr = VPDMA_MAX_SIZE1;
 593
 594        write_field_reg(vpdma, reg_addr, width - 1,
 595                        VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);
 596
 597        write_field_reg(vpdma, reg_addr, height - 1,
 598                        VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
 599
 600}
 601EXPORT_SYMBOL(vpdma_set_max_size);
 602
 603static void dump_cfd(struct vpdma_cfd *cfd)
 604{
 605        int class;
 606
 607        class = cfd_get_class(cfd);
 608
 609        pr_debug("config descriptor of payload class: %s\n",
 610                class == CFD_CLS_BLOCK ? "simple block" :
 611                "address data block");
 612
 613        if (class == CFD_CLS_BLOCK)
 614                pr_debug("word0: dst_addr_offset = 0x%08x\n",
 615                        cfd->dest_addr_offset);
 616
 617        if (class == CFD_CLS_BLOCK)
 618                pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
 619
 620        pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
 621
 622        pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
 623                 cfd_get_pkt_type(cfd),
 624                 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
 625                 cfd_get_payload_len(cfd));
 626}
 627
 628/*
 629 * append a configuration descriptor to the given descriptor list, where the
 630 * payload is in the form of a simple data block specified in the descriptor
 631 * header, this is used to upload scaler coefficients to the scaler module
 632 */
 633void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
 634                struct vpdma_buf *blk, u32 dest_offset)
 635{
 636        struct vpdma_cfd *cfd;
 637        int len = blk->size;
 638
 639        WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
 640
 641        cfd = list->next;
 642        WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
 643
 644        cfd->dest_addr_offset = dest_offset;
 645        cfd->block_len = len;
 646        cfd->payload_addr = (u32) blk->dma_addr;
 647        cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
 648                                client, len >> 4);
 649
 650        list->next = cfd + 1;
 651
 652        dump_cfd(cfd);
 653}
 654EXPORT_SYMBOL(vpdma_add_cfd_block);
 655
 656/*
 657 * append a configuration descriptor to the given descriptor list, where the
 658 * payload is in the address data block format, this is used to a configure a
 659 * discontiguous set of MMRs
 660 */
 661void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
 662                struct vpdma_buf *adb)
 663{
 664        struct vpdma_cfd *cfd;
 665        unsigned int len = adb->size;
 666
 667        WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
 668        WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
 669
 670        cfd = list->next;
 671        BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
 672
 673        cfd->w0 = 0;
 674        cfd->w1 = 0;
 675        cfd->payload_addr = (u32) adb->dma_addr;
 676        cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
 677                                client, len >> 4);
 678
 679        list->next = cfd + 1;
 680
 681        dump_cfd(cfd);
 682};
 683EXPORT_SYMBOL(vpdma_add_cfd_adb);
 684
 685/*
 686 * control descriptor format change based on what type of control descriptor it
 687 * is, we only use 'sync on channel' control descriptors for now, so assume it's
 688 * that
 689 */
 690static void dump_ctd(struct vpdma_ctd *ctd)
 691{
 692        pr_debug("control descriptor\n");
 693
 694        pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
 695                ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
 696}
 697
 698/*
 699 * append a 'sync on channel' type control descriptor to the given descriptor
 700 * list, this descriptor stalls the VPDMA list till the time DMA is completed
 701 * on the specified channel
 702 */
 703void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
 704                enum vpdma_channel chan)
 705{
 706        struct vpdma_ctd *ctd;
 707
 708        ctd = list->next;
 709        WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
 710
 711        ctd->w0 = 0;
 712        ctd->w1 = 0;
 713        ctd->w2 = 0;
 714        ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
 715                                CTD_TYPE_SYNC_ON_CHANNEL);
 716
 717        list->next = ctd + 1;
 718
 719        dump_ctd(ctd);
 720}
 721EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
 722
 723/*
 724 * append an 'abort_channel' type control descriptor to the given descriptor
 725 * list, this descriptor aborts any DMA transaction happening using the
 726 * specified channel
 727 */
 728void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
 729                int chan_num)
 730{
 731        struct vpdma_ctd *ctd;
 732
 733        ctd = list->next;
 734        WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
 735
 736        ctd->w0 = 0;
 737        ctd->w1 = 0;
 738        ctd->w2 = 0;
 739        ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
 740                                CTD_TYPE_ABORT_CHANNEL);
 741
 742        list->next = ctd + 1;
 743
 744        dump_ctd(ctd);
 745}
 746EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
 747
/*
 * Log a data transfer descriptor at debug level.  Inbound (memory to
 * client) and outbound (client to memory) descriptors interpret words
 * 1, 4 and 5 differently, hence the direction checks below.
 */
static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %x\n", dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		 dtd_get_pkt_type(dtd),
		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		 dtd_get_next_chan(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}
 792
/*
 * append an outbound data transfer descriptor to the given descriptor list,
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @stride: line stride of the buffer in memory, in bytes
 * @c_rect: compose params of output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for maximum width of data transfer
 * @max_h: enum for maximum height of data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	/* translate the channel enum to its raw number and delegate */
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
				  max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);
 816
/*
 * Same as vpdma_add_out_dtd() but takes a raw VPDMA channel number
 * instead of an enum vpdma_channel.
 */
void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, int raw_vpdma_chan, u32 flags)
{
	int priority = 0;
	int field = 0;
	int notify = 1;
	int channel, next_chan;
	struct v4l2_rect rect = *c_rect;
	int depth = fmt->depth;
	struct vpdma_dtd *dtd;

	channel = next_chan = raw_vpdma_chan;

	/*
	 * For vertically subsampled chroma planes (NV12-style), halve the
	 * vertical coordinates and use 8 bpp per chroma line instead of the
	 * table's full-frame average depth of 4.
	 */
	if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
	    (fmt->data_type == DATA_TYPE_C420 ||
	     fmt->data_type == DATA_TYPE_CB420)) {
		rect.height >>= 1;
		rect.top >>= 1;
		depth = 8;
	}

	/* advance the base address to the top-left corner of the rect */
	dma_addr += rect.top * stride + (rect.left * depth >> 3);

	dtd = list->next;
	/* appending past the end of the list buffer is a caller bug */
	WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));

	dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
					notify,
					field,
					!!(flags & VPDMA_DATA_FRAME_1D),
					!!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
					!!(flags & VPDMA_DATA_ODD_LINE_SKIP),
					stride);
	/* word 1 is unused for outbound descriptors */
	dtd->w1 = 0;
	dtd->start_addr = (u32) dma_addr;
	dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
				DTD_DIR_OUT, channel, priority, next_chan);
	/* no descriptor write-back by default */
	dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
	dtd->max_width_height = dtd_max_width_height(max_w, max_h);
	dtd->client_attr0 = 0;
	dtd->client_attr1 = 0;

	list->next = dtd + 1;

	dump_dtd(dtd);
}
EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
 866
 867/*
 868 * append an inbound data transfer descriptor to the given descriptor list,
 869 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 870 *
 871 * @list: vpdma desc list to which we add this descriptor
 872 * @width: width of the image in pixels in memory(not the cropped width)
 873 * @c_rect: crop params of input image
 874 * @fmt: vpdma data format of the buffer
 875 * dma_addr: dma address as seen by VPDMA
 876 * chan: VPDMA channel
 877 * field: top or bottom field info of the input image
 878 * flags: VPDMA flags to configure some descriptor fields
 879 * frame_width/height: the complete width/height of the image presented to the
 880 *                      client (this makes sense when multiple channels are
 881 *                      connected to the same client, forming a larger frame)
 882 * start_h, start_v: position where the given channel starts providing pixel
 883 *                      data to the client (makes sense when multiple channels
 884 *                      contribute to the client)
 885 */
 886void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
 887                int stride, const struct v4l2_rect *c_rect,
 888                const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
 889                enum vpdma_channel chan, int field, u32 flags, int frame_width,
 890                int frame_height, int start_h, int start_v)
 891{
 892        int priority = 0;
 893        int notify = 1;
 894        int depth = fmt->depth;
 895        int channel, next_chan;
 896        struct v4l2_rect rect = *c_rect;
 897        struct vpdma_dtd *dtd;
 898
 899        channel = next_chan = chan_info[chan].num;
 900
 901        if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
 902            (fmt->data_type == DATA_TYPE_C420 ||
 903             fmt->data_type == DATA_TYPE_CB420)) {
 904                rect.height >>= 1;
 905                rect.top >>= 1;
 906                depth = 8;
 907        }
 908
 909        dma_addr += rect.top * stride + (rect.left * depth >> 3);
 910
 911        dtd = list->next;
 912        WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
 913
 914        dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
 915                                        notify,
 916                                        field,
 917                                        !!(flags & VPDMA_DATA_FRAME_1D),
 918                                        !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
 919                                        !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
 920                                        stride);
 921
 922        dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
 923                                        rect.height);
 924        dtd->start_addr = (u32) dma_addr;
 925        dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
 926                                DTD_DIR_IN, channel, priority, next_chan);
 927        dtd->frame_width_height = dtd_frame_width_height(frame_width,
 928                                        frame_height);
 929        dtd->start_h_v = dtd_start_h_v(start_h, start_v);
 930        dtd->client_attr0 = 0;
 931        dtd->client_attr1 = 0;
 932
 933        list->next = dtd + 1;
 934
 935        dump_dtd(dtd);
 936}
 937EXPORT_SYMBOL(vpdma_add_in_dtd);
 938
 939int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
 940{
 941        int i, list_num = -1;
 942        unsigned long flags;
 943
 944        spin_lock_irqsave(&vpdma->lock, flags);
 945        for (i = 0; i < VPDMA_MAX_NUM_LIST && vpdma->hwlist_used[i]; i++)
 946                ;
 947
 948        if (i < VPDMA_MAX_NUM_LIST) {
 949                list_num = i;
 950                vpdma->hwlist_used[i] = true;
 951                vpdma->hwlist_priv[i] = priv;
 952        }
 953        spin_unlock_irqrestore(&vpdma->lock, flags);
 954
 955        return list_num;
 956}
 957EXPORT_SYMBOL(vpdma_hwlist_alloc);
 958
 959void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
 960{
 961        if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
 962                return NULL;
 963
 964        return vpdma->hwlist_priv[list_num];
 965}
 966EXPORT_SYMBOL(vpdma_hwlist_get_priv);
 967
 968void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
 969{
 970        void *priv;
 971        unsigned long flags;
 972
 973        spin_lock_irqsave(&vpdma->lock, flags);
 974        vpdma->hwlist_used[list_num] = false;
 975        priv = vpdma->hwlist_priv;
 976        spin_unlock_irqrestore(&vpdma->lock, flags);
 977
 978        return priv;
 979}
 980EXPORT_SYMBOL(vpdma_hwlist_release);
 981
 982/* set or clear the mask for list complete interrupt */
 983void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
 984                int list_num, bool enable)
 985{
 986        u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
 987        u32 val;
 988
 989        val = read_reg(vpdma, reg_addr);
 990        if (enable)
 991                val |= (1 << (list_num * 2));
 992        else
 993                val &= ~(1 << (list_num * 2));
 994        write_reg(vpdma, reg_addr, val);
 995}
 996EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
 997
 998/* get the LIST_STAT register */
 999unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
1000{
1001        u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
1002
1003        return read_reg(vpdma, reg_addr);
1004}
1005EXPORT_SYMBOL(vpdma_get_list_stat);
1006
1007/* get the LIST_MASK register */
1008unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
1009{
1010        u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
1011
1012        return read_reg(vpdma, reg_addr);
1013}
1014EXPORT_SYMBOL(vpdma_get_list_mask);
1015
1016/* clear previously occurred list interrupts in the LIST_STAT register */
1017void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
1018                           int list_num)
1019{
1020        u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
1021
1022        write_reg(vpdma, reg_addr, 3 << (list_num * 2));
1023}
1024EXPORT_SYMBOL(vpdma_clear_list_stat);
1025
1026void vpdma_set_bg_color(struct vpdma_data *vpdma,
1027                struct vpdma_data_format *fmt, u32 color)
1028{
1029        if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
1030                write_reg(vpdma, VPDMA_BG_RGB, color);
1031        else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
1032                write_reg(vpdma, VPDMA_BG_YUV, color);
1033}
1034EXPORT_SYMBOL(vpdma_set_bg_color);
1035
1036/*
1037 * configures the output mode of the line buffer for the given client, the
1038 * line buffer content can either be mirrored(each line repeated twice) or
1039 * passed to the client as is
1040 */
1041void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
1042                enum vpdma_channel chan)
1043{
1044        int client_cstat = chan_info[chan].cstat_offset;
1045
1046        write_field_reg(vpdma, client_cstat, line_mode,
1047                VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
1048}
1049EXPORT_SYMBOL(vpdma_set_line_mode);
1050
1051/*
1052 * configures the event which should trigger VPDMA transfer for the given
1053 * client
1054 */
1055void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
1056                enum vpdma_frame_start_event fs_event,
1057                enum vpdma_channel chan)
1058{
1059        int client_cstat = chan_info[chan].cstat_offset;
1060
1061        write_field_reg(vpdma, client_cstat, fs_event,
1062                VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
1063}
1064EXPORT_SYMBOL(vpdma_set_frame_start_event);
1065
/*
 * vpdma_firmware_cb - request_firmware_nowait() completion callback
 *
 * Copies the firmware image into a DMA-able buffer, points the VPDMA list
 * manager at it, and polls for up to ~1 second until the VPDMA_LIST_RDY
 * bit is set.  On success - or if the bit was already set, i.e. the
 * firmware was loaded earlier - the driver's completion callback
 * vpdma->cb() is invoked.  The firmware and the temporary DMA buffer are
 * released on every path out of this function.
 */
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	/* request_firmware_nowait() reports a missing image via a NULL f */
	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	/* stage the firmware image in a buffer VPDMA can DMA from */
	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	/*
	 * NOTE(review): the return value of vpdma_map_desc_buf() is ignored
	 * here - confirm whether a mapping failure needs handling.
	 */
	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	/* hand the firmware list to the VPDMA list manager */
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	/* wait for the list-ready bit to signal the upload finished */
	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
1121
1122static int vpdma_load_firmware(struct vpdma_data *vpdma)
1123{
1124        int r;
1125        struct device *dev = &vpdma->pdev->dev;
1126
1127        r = request_firmware_nowait(THIS_MODULE, 1,
1128                (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
1129                vpdma_firmware_cb);
1130        if (r) {
1131                dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
1132                return r;
1133        } else {
1134                dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
1135        }
1136
1137        return 0;
1138}
1139
1140int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
1141                void (*cb)(struct platform_device *pdev))
1142{
1143        struct resource *res;
1144        int r;
1145
1146        dev_dbg(&pdev->dev, "vpdma_create\n");
1147
1148        vpdma->pdev = pdev;
1149        vpdma->cb = cb;
1150        spin_lock_init(&vpdma->lock);
1151
1152        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
1153        if (res == NULL) {
1154                dev_err(&pdev->dev, "missing platform resources data\n");
1155                return -ENODEV;
1156        }
1157
1158        vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1159        if (!vpdma->base) {
1160                dev_err(&pdev->dev, "failed to ioremap\n");
1161                return -ENOMEM;
1162        }
1163
1164        r = vpdma_load_firmware(vpdma);
1165        if (r) {
1166                pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
1167                return r;
1168        }
1169
1170        return 0;
1171}
1172EXPORT_SYMBOL(vpdma_create);
1173
1174MODULE_AUTHOR("Texas Instruments Inc.");
1175MODULE_FIRMWARE(VPDMA_FIRMWARE);
1176MODULE_LICENSE("GPL v2");
1177