/* linux/drivers/media/platform/ti-vpe/vpdma.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * VPDMA helper library
   4 *
   5 * Copyright (c) 2013 Texas Instruments Inc.
   6 *
   7 * David Griego, <dagriego@biglakesoftware.com>
   8 * Dale Farnsworth, <dale@farnsworth.org>
   9 * Archit Taneja, <archit@ti.com>
  10 */
  11
  12#include <linux/delay.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/err.h>
  15#include <linux/firmware.h>
  16#include <linux/io.h>
  17#include <linux/module.h>
  18#include <linux/platform_device.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/videodev2.h>
  22
  23#include "vpdma.h"
  24#include "vpdma_priv.h"
  25
  26#define VPDMA_FIRMWARE  "vpdma-1b8.bin"
  27
/*
 * VPDMA data-format descriptors for YUV-type clients, indexed by the
 * VPDMA_DATA_FMT_* enum values declared in vpdma.h.
 * NOTE(review): .depth appears to be bits per pixel for the plane the
 * format describes (4 for C420, presumably because its chroma is 4:2:0
 * subsampled) -- confirm against the VPDMA functional spec.
 */
const struct vpdma_data_format vpdma_yuv_fmts[] = {
	[VPDMA_DATA_FMT_Y444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_Y420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_Y420,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C444,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C422,
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_C420] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_C420,
		.depth		= 4,
	},
	[VPDMA_DATA_FMT_YCR422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCR422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YC444] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YC444,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_CRY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CRY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_CBY422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_YCB422] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_YCB422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_yuv_fmts);
  86
/*
 * VPDMA data-format descriptors for RGB-type clients, indexed by the
 * VPDMA_DATA_FMT_* enum values declared in vpdma.h.
 * .depth is the per-pixel size in bits as implied by the DATA_TYPE name
 * (e.g. RGB16_565 -> 16, ARGB32_8888 -> 32).
 */
const struct vpdma_data_format vpdma_rgb_fmts[] = {
	[VPDMA_DATA_FMT_RGB565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_RGBA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ARGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGB24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGB24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ARGB32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ARGB32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_RGBA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_RGBA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_RGBA32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGR565] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR16_565,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16_1555] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_1555,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16_5551] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_5551,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_BGRA16] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA_4444,
		.depth		= 16,
	},
	[VPDMA_DATA_FMT_ABGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGR24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGR24_888,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_ABGR32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_ABGR32_8888,
		.depth		= 32,
	},
	[VPDMA_DATA_FMT_BGRA24] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA24_6666,
		.depth		= 24,
	},
	[VPDMA_DATA_FMT_BGRA32] = {
		.type		= VPDMA_DATA_FMT_TYPE_RGB,
		.data_type	= DATA_TYPE_BGRA32_8888,
		.depth		= 32,
	},
};
EXPORT_SYMBOL(vpdma_rgb_fmts);
 190
/*
 * To handle RAW format we are re-using the CBY422
 * vpdma data type so that we use the vpdma to re-order
 * the incoming bytes, as the parser assumes that the
 * first byte presented on the bus is the MSB of a 2
 * bytes value.
 * RAW8 handles from 1 to 8 bits
 * RAW16 handles from 9 to 16 bits
 */
const struct vpdma_data_format vpdma_raw_fmts[] = {
	[VPDMA_DATA_FMT_RAW8] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,	/* see comment above: CBY422 reused for byte reordering */
		.depth		= 8,
	},
	[VPDMA_DATA_FMT_RAW16] = {
		.type		= VPDMA_DATA_FMT_TYPE_YUV,
		.data_type	= DATA_TYPE_CBY422,
		.depth		= 16,
	},
};
EXPORT_SYMBOL(vpdma_raw_fmts);
 213
/*
 * Miscellaneous VPDMA data formats; currently only the motion vector
 * (MV) data used by the deinterlacer.
 */
const struct vpdma_data_format vpdma_misc_fmts[] = {
	[VPDMA_DATA_FMT_MV] = {
		.type		= VPDMA_DATA_FMT_TYPE_MISC,
		.data_type	= DATA_TYPE_MV,
		.depth		= 4,
	},
};
EXPORT_SYMBOL(vpdma_misc_fmts);
 222
/* per-channel static info: hardware channel number and its client's CSTAT register */
struct vpdma_channel_info {
	int num;		/* VPDMA channel number */
	int cstat_offset;	/* client CSTAT register offset */
};
 227
/*
 * Map from the logical VPE channels (enum vpdma_channel) to the hardware
 * channel number and the CSTAT register of the client it services.
 * NOTE(review): VPE_CHAN_RGB_OUT shares VPDMA_VIP_UP_Y_CSTAT with
 * VPE_CHAN_LUMA_OUT -- presumably intentional (same client port); confirm
 * against the VPE TRM.
 */
static const struct vpdma_channel_info chan_info[] = {
	[VPE_CHAN_LUMA1_IN] = {
		.num		= VPE_CHAN_NUM_LUMA1_IN,
		.cstat_offset	= VPDMA_DEI_LUMA1_CSTAT,
	},
	[VPE_CHAN_CHROMA1_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA1_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA1_CSTAT,
	},
	[VPE_CHAN_LUMA2_IN] = {
		.num		= VPE_CHAN_NUM_LUMA2_IN,
		.cstat_offset	= VPDMA_DEI_LUMA2_CSTAT,
	},
	[VPE_CHAN_CHROMA2_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA2_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA2_CSTAT,
	},
	[VPE_CHAN_LUMA3_IN] = {
		.num		= VPE_CHAN_NUM_LUMA3_IN,
		.cstat_offset	= VPDMA_DEI_LUMA3_CSTAT,
	},
	[VPE_CHAN_CHROMA3_IN] = {
		.num		= VPE_CHAN_NUM_CHROMA3_IN,
		.cstat_offset	= VPDMA_DEI_CHROMA3_CSTAT,
	},
	[VPE_CHAN_MV_IN] = {
		.num		= VPE_CHAN_NUM_MV_IN,
		.cstat_offset	= VPDMA_DEI_MV_IN_CSTAT,
	},
	[VPE_CHAN_MV_OUT] = {
		.num		= VPE_CHAN_NUM_MV_OUT,
		.cstat_offset	= VPDMA_DEI_MV_OUT_CSTAT,
	},
	[VPE_CHAN_LUMA_OUT] = {
		.num		= VPE_CHAN_NUM_LUMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
	[VPE_CHAN_CHROMA_OUT] = {
		.num		= VPE_CHAN_NUM_CHROMA_OUT,
		.cstat_offset	= VPDMA_VIP_UP_UV_CSTAT,
	},
	[VPE_CHAN_RGB_OUT] = {
		.num		= VPE_CHAN_NUM_RGB_OUT,
		.cstat_offset	= VPDMA_VIP_UP_Y_CSTAT,
	},
};
 274
 275static u32 read_reg(struct vpdma_data *vpdma, int offset)
 276{
 277        return ioread32(vpdma->base + offset);
 278}
 279
 280static void write_reg(struct vpdma_data *vpdma, int offset, u32 value)
 281{
 282        iowrite32(value, vpdma->base + offset);
 283}
 284
 285static int read_field_reg(struct vpdma_data *vpdma, int offset,
 286                u32 mask, int shift)
 287{
 288        return (read_reg(vpdma, offset) & (mask << shift)) >> shift;
 289}
 290
 291static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field,
 292                u32 mask, int shift)
 293{
 294        u32 val = read_reg(vpdma, offset);
 295
 296        val &= ~(mask << shift);
 297        val |= (field & mask) << shift;
 298
 299        write_reg(vpdma, offset, val);
 300}
 301
/*
 * Dump the VPDMA global and VPE-client registers via dev_dbg().
 * Debug aid only; has no side effects beyond the register reads.
 */
void vpdma_dump_regs(struct vpdma_data *vpdma)
{
	struct device *dev = &vpdma->pdev->dev;

/* local helper: print a register's name and current value */
#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r))

	dev_dbg(dev, "VPDMA Registers:\n");

	DUMPREG(PID);
	DUMPREG(LIST_ADDR);
	DUMPREG(LIST_ATTR);
	DUMPREG(LIST_STAT_SYNC);
	DUMPREG(BG_RGB);
	DUMPREG(BG_YUV);
	DUMPREG(SETUP);
	DUMPREG(MAX_SIZE1);
	DUMPREG(MAX_SIZE2);
	DUMPREG(MAX_SIZE3);

	/*
	 * dumping registers of only group0 and group3, because VPE channels
	 * lie within group0 and group3 registers
	 */
	DUMPREG(INT_CHAN_STAT(0));
	DUMPREG(INT_CHAN_MASK(0));
	DUMPREG(INT_CHAN_STAT(3));
	DUMPREG(INT_CHAN_MASK(3));
	DUMPREG(INT_CLIENT0_STAT);
	DUMPREG(INT_CLIENT0_MASK);
	DUMPREG(INT_CLIENT1_STAT);
	DUMPREG(INT_CLIENT1_MASK);
	DUMPREG(INT_LIST0_STAT);
	DUMPREG(INT_LIST0_MASK);

	/*
	 * these are registers specific to VPE clients, we can make this
	 * function dump client registers specific to VPE or VIP based on
	 * who is using it
	 */
	DUMPREG(DEI_CHROMA1_CSTAT);
	DUMPREG(DEI_LUMA1_CSTAT);
	DUMPREG(DEI_CHROMA2_CSTAT);
	DUMPREG(DEI_LUMA2_CSTAT);
	DUMPREG(DEI_CHROMA3_CSTAT);
	DUMPREG(DEI_LUMA3_CSTAT);
	DUMPREG(DEI_MV_IN_CSTAT);
	DUMPREG(DEI_MV_OUT_CSTAT);
	DUMPREG(VIP_UP_Y_CSTAT);
	DUMPREG(VIP_UP_UV_CSTAT);
	DUMPREG(VPI_CTL_CSTAT);
}
EXPORT_SYMBOL(vpdma_dump_regs);
 354
 355/*
 356 * Allocate a DMA buffer
 357 */
 358int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size)
 359{
 360        buf->size = size;
 361        buf->mapped = false;
 362        buf->addr = kzalloc(size, GFP_KERNEL);
 363        if (!buf->addr)
 364                return -ENOMEM;
 365
 366        WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0);
 367
 368        return 0;
 369}
 370EXPORT_SYMBOL(vpdma_alloc_desc_buf);
 371
 372void vpdma_free_desc_buf(struct vpdma_buf *buf)
 373{
 374        WARN_ON(buf->mapped);
 375        kfree(buf->addr);
 376        buf->addr = NULL;
 377        buf->size = 0;
 378}
 379EXPORT_SYMBOL(vpdma_free_desc_buf);
 380
 381/*
 382 * map descriptor/payload DMA buffer, enabling DMA access
 383 */
 384int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 385{
 386        struct device *dev = &vpdma->pdev->dev;
 387
 388        WARN_ON(buf->mapped);
 389        buf->dma_addr = dma_map_single(dev, buf->addr, buf->size,
 390                                DMA_BIDIRECTIONAL);
 391        if (dma_mapping_error(dev, buf->dma_addr)) {
 392                dev_err(dev, "failed to map buffer\n");
 393                return -EINVAL;
 394        }
 395
 396        buf->mapped = true;
 397
 398        return 0;
 399}
 400EXPORT_SYMBOL(vpdma_map_desc_buf);
 401
 402/*
 403 * unmap descriptor/payload DMA buffer, disabling DMA access and
 404 * allowing the main processor to access the data
 405 */
 406void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf)
 407{
 408        struct device *dev = &vpdma->pdev->dev;
 409
 410        if (buf->mapped)
 411                dma_unmap_single(dev, buf->dma_addr, buf->size,
 412                                DMA_BIDIRECTIONAL);
 413
 414        buf->mapped = false;
 415}
 416EXPORT_SYMBOL(vpdma_unmap_desc_buf);
 417
 418/*
 419 * Cleanup all pending descriptors of a list
 420 * First, stop the current list being processed.
 421 * If the VPDMA was busy, this step makes vpdma to accept post lists.
 422 * To cleanup the internal FSM, post abort list descriptor for all the
 423 * channels from @channels array of size @size.
 424 */
 425int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num,
 426                int *channels, int size)
 427{
 428        struct vpdma_desc_list abort_list;
 429        int i, ret, timeout = 500;
 430
 431        write_reg(vpdma, VPDMA_LIST_ATTR,
 432                        (list_num << VPDMA_LIST_NUM_SHFT) |
 433                        (1 << VPDMA_LIST_STOP_SHFT));
 434
 435        if (size <= 0 || !channels)
 436                return 0;
 437
 438        ret = vpdma_create_desc_list(&abort_list,
 439                size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL);
 440        if (ret)
 441                return ret;
 442
 443        for (i = 0; i < size; i++)
 444                vpdma_add_abort_channel_ctd(&abort_list, channels[i]);
 445
 446        ret = vpdma_map_desc_buf(vpdma, &abort_list.buf);
 447        if (ret)
 448                return ret;
 449        ret = vpdma_submit_descs(vpdma, &abort_list, list_num);
 450        if (ret)
 451                return ret;
 452
 453        while (vpdma_list_busy(vpdma, list_num) && --timeout)
 454                ;
 455
 456        if (timeout == 0) {
 457                dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n");
 458                return -EBUSY;
 459        }
 460
 461        vpdma_unmap_desc_buf(vpdma, &abort_list.buf);
 462        vpdma_free_desc_buf(&abort_list.buf);
 463
 464        return 0;
 465}
 466EXPORT_SYMBOL(vpdma_list_cleanup);
 467
 468/*
 469 * create a descriptor list, the user of this list will append configuration,
 470 * control and data descriptors to this list, this list will be submitted to
 471 * VPDMA. VPDMA's list parser will go through each descriptor and perform the
 472 * required DMA operations
 473 */
 474int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type)
 475{
 476        int r;
 477
 478        r = vpdma_alloc_desc_buf(&list->buf, size);
 479        if (r)
 480                return r;
 481
 482        list->next = list->buf.addr;
 483
 484        list->type = type;
 485
 486        return 0;
 487}
 488EXPORT_SYMBOL(vpdma_create_desc_list);
 489
/*
 * once a descriptor list is parsed by VPDMA, we reset the list by emptying it,
 * to allow new descriptors to be added to the list.
 * (Only the write cursor is rewound; the buffer contents are not cleared.)
 */
void vpdma_reset_desc_list(struct vpdma_desc_list *list)
{
	list->next = list->buf.addr;
}
EXPORT_SYMBOL(vpdma_reset_desc_list);
 499
 500/*
 501 * free the buffer allocated for the VPDMA descriptor list, this should be
 502 * called when the user doesn't want to use VPDMA any more.
 503 */
 504void vpdma_free_desc_list(struct vpdma_desc_list *list)
 505{
 506        vpdma_free_desc_buf(&list->buf);
 507
 508        list->next = NULL;
 509}
 510EXPORT_SYMBOL(vpdma_free_desc_list);
 511
 512bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num)
 513{
 514        return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16);
 515}
 516EXPORT_SYMBOL(vpdma_list_busy);
 517
 518/*
 519 * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion
 520 */
 521int vpdma_submit_descs(struct vpdma_data *vpdma,
 522                        struct vpdma_desc_list *list, int list_num)
 523{
 524        int list_size;
 525        unsigned long flags;
 526
 527        if (vpdma_list_busy(vpdma, list_num))
 528                return -EBUSY;
 529
 530        /* 16-byte granularity */
 531        list_size = (list->next - list->buf.addr) >> 4;
 532
 533        spin_lock_irqsave(&vpdma->lock, flags);
 534        write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr);
 535
 536        write_reg(vpdma, VPDMA_LIST_ATTR,
 537                        (list_num << VPDMA_LIST_NUM_SHFT) |
 538                        (list->type << VPDMA_LIST_TYPE_SHFT) |
 539                        list_size);
 540        spin_unlock_irqrestore(&vpdma->lock, flags);
 541
 542        return 0;
 543}
 544EXPORT_SYMBOL(vpdma_submit_descs);
 545
 546static void dump_dtd(struct vpdma_dtd *dtd);
 547
 548void vpdma_update_dma_addr(struct vpdma_data *vpdma,
 549        struct vpdma_desc_list *list, dma_addr_t dma_addr,
 550        void *write_dtd, int drop, int idx)
 551{
 552        struct vpdma_dtd *dtd = list->buf.addr;
 553        dma_addr_t write_desc_addr;
 554        int offset;
 555
 556        dtd += idx;
 557        vpdma_unmap_desc_buf(vpdma, &list->buf);
 558
 559        dtd->start_addr = dma_addr;
 560
 561        /* Calculate write address from the offset of write_dtd from start
 562         * of the list->buf
 563         */
 564        offset = (void *)write_dtd - list->buf.addr;
 565        write_desc_addr = list->buf.dma_addr + offset;
 566
 567        if (drop)
 568                dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
 569                                                           1, 1, 0);
 570        else
 571                dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr,
 572                                                           1, 0, 0);
 573
 574        vpdma_map_desc_buf(vpdma, &list->buf);
 575
 576        dump_dtd(dtd);
 577}
 578EXPORT_SYMBOL(vpdma_update_dma_addr);
 579
 580void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr,
 581                        u32 width, u32 height)
 582{
 583        if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 &&
 584            reg_addr != VPDMA_MAX_SIZE3)
 585                reg_addr = VPDMA_MAX_SIZE1;
 586
 587        write_field_reg(vpdma, reg_addr, width - 1,
 588                        VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT);
 589
 590        write_field_reg(vpdma, reg_addr, height - 1,
 591                        VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT);
 592
 593}
 594EXPORT_SYMBOL(vpdma_set_max_size);
 595
 596static void dump_cfd(struct vpdma_cfd *cfd)
 597{
 598        int class;
 599
 600        class = cfd_get_class(cfd);
 601
 602        pr_debug("config descriptor of payload class: %s\n",
 603                class == CFD_CLS_BLOCK ? "simple block" :
 604                "address data block");
 605
 606        if (class == CFD_CLS_BLOCK)
 607                pr_debug("word0: dst_addr_offset = 0x%08x\n",
 608                        cfd->dest_addr_offset);
 609
 610        if (class == CFD_CLS_BLOCK)
 611                pr_debug("word1: num_data_wrds = %d\n", cfd->block_len);
 612
 613        pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr);
 614
 615        pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n",
 616                 cfd_get_pkt_type(cfd),
 617                 cfd_get_direct(cfd), class, cfd_get_dest(cfd),
 618                 cfd_get_payload_len(cfd));
 619}
 620
 621/*
 622 * append a configuration descriptor to the given descriptor list, where the
 623 * payload is in the form of a simple data block specified in the descriptor
 624 * header, this is used to upload scaler coefficients to the scaler module
 625 */
 626void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client,
 627                struct vpdma_buf *blk, u32 dest_offset)
 628{
 629        struct vpdma_cfd *cfd;
 630        int len = blk->size;
 631
 632        WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN);
 633
 634        cfd = list->next;
 635        WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
 636
 637        cfd->dest_addr_offset = dest_offset;
 638        cfd->block_len = len;
 639        cfd->payload_addr = (u32) blk->dma_addr;
 640        cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK,
 641                                client, len >> 4);
 642
 643        list->next = cfd + 1;
 644
 645        dump_cfd(cfd);
 646}
 647EXPORT_SYMBOL(vpdma_add_cfd_block);
 648
 649/*
 650 * append a configuration descriptor to the given descriptor list, where the
 651 * payload is in the address data block format, this is used to a configure a
 652 * discontiguous set of MMRs
 653 */
 654void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client,
 655                struct vpdma_buf *adb)
 656{
 657        struct vpdma_cfd *cfd;
 658        unsigned int len = adb->size;
 659
 660        WARN_ON(len & VPDMA_ADB_SIZE_ALIGN);
 661        WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN);
 662
 663        cfd = list->next;
 664        BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size));
 665
 666        cfd->w0 = 0;
 667        cfd->w1 = 0;
 668        cfd->payload_addr = (u32) adb->dma_addr;
 669        cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB,
 670                                client, len >> 4);
 671
 672        list->next = cfd + 1;
 673
 674        dump_cfd(cfd);
 675};
 676EXPORT_SYMBOL(vpdma_add_cfd_adb);
 677
/*
 * control descriptor format change based on what type of control descriptor it
 * is, we only use 'sync on channel' control descriptors for now, so assume it's
 * that
 * (debug helper; output goes through pr_debug only)
 */
static void dump_ctd(struct vpdma_ctd *ctd)
{
	pr_debug("control descriptor\n");

	pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n",
		ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd));
}
 690
 691/*
 692 * append a 'sync on channel' type control descriptor to the given descriptor
 693 * list, this descriptor stalls the VPDMA list till the time DMA is completed
 694 * on the specified channel
 695 */
 696void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list,
 697                enum vpdma_channel chan)
 698{
 699        struct vpdma_ctd *ctd;
 700
 701        ctd = list->next;
 702        WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
 703
 704        ctd->w0 = 0;
 705        ctd->w1 = 0;
 706        ctd->w2 = 0;
 707        ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num,
 708                                CTD_TYPE_SYNC_ON_CHANNEL);
 709
 710        list->next = ctd + 1;
 711
 712        dump_ctd(ctd);
 713}
 714EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd);
 715
 716/*
 717 * append an 'abort_channel' type control descriptor to the given descriptor
 718 * list, this descriptor aborts any DMA transaction happening using the
 719 * specified channel
 720 */
 721void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list,
 722                int chan_num)
 723{
 724        struct vpdma_ctd *ctd;
 725
 726        ctd = list->next;
 727        WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size));
 728
 729        ctd->w0 = 0;
 730        ctd->w1 = 0;
 731        ctd->w2 = 0;
 732        ctd->type_source_ctl = ctd_type_source_ctl(chan_num,
 733                                CTD_TYPE_ABORT_CHANNEL);
 734
 735        list->next = ctd + 1;
 736
 737        dump_ctd(ctd);
 738}
 739EXPORT_SYMBOL(vpdma_add_abort_channel_ctd);
 740
/*
 * debug helper: pretty-print a data transfer descriptor via pr_debug().
 * Words 1, 4 and 5 are interpreted differently for inbound vs outbound
 * transfers, hence the direction checks below.
 */
static void dump_dtd(struct vpdma_dtd *dtd)
{
	int dir, chan;

	dir = dtd_get_dir(dtd);
	chan = dtd_get_chan(dtd);

	pr_debug("%s data transfer descriptor for channel %d\n",
		dir == DTD_DIR_OUT ? "outbound" : "inbound", chan);

	pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n",
		dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd),
		dtd_get_1d(dtd), dtd_get_even_line_skip(dtd),
		dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd));

	/* word1 only carries transfer dimensions for inbound descriptors */
	if (dir == DTD_DIR_IN)
		pr_debug("word1: line_length = %d, xfer_height = %d\n",
			dtd_get_line_length(dtd), dtd_get_xfer_height(dtd));

	pr_debug("word2: start_addr = %pad\n", &dtd->start_addr);

	pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n",
		 dtd_get_pkt_type(dtd),
		 dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd),
		 dtd_get_next_chan(dtd));

	/* word4: frame geometry (in) vs descriptor write-back info (out) */
	if (dir == DTD_DIR_IN)
		pr_debug("word4: frame_width = %d, frame_height = %d\n",
			dtd_get_frame_width(dtd), dtd_get_frame_height(dtd));
	else
		pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n",
			dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd),
			dtd_get_drop_data(dtd), dtd_get_use_desc(dtd));

	/* word5: start position (in) vs max transfer size (out) */
	if (dir == DTD_DIR_IN)
		pr_debug("word5: hor_start = %d, ver_start = %d\n",
			dtd_get_h_start(dtd), dtd_get_v_start(dtd));
	else
		pr_debug("word5: max_width %d, max_height %d\n",
			dtd_get_max_width(dtd), dtd_get_max_height(dtd));

	pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0);
	pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1);
}
 785
/*
 * append an outbound data transfer descriptor to the given descriptor list,
 * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory
 * @stride: line stride in bytes of the image in memory
 * @c_rect: compose params of output image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @max_w: enum for maximum width of data transfer
 * @max_h: enum for maximum height of data transfer
 * @chan: VPDMA channel
 * @flags: VPDMA flags to configure some descriptor fields
 *
 * Thin wrapper: resolves @chan to its hardware channel number and
 * delegates to vpdma_rawchan_add_out_dtd().
 */
void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width,
		int stride, const struct v4l2_rect *c_rect,
		const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
		int max_w, int max_h, enum vpdma_channel chan, u32 flags)
{
	vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr,
				  max_w, max_h, chan_info[chan].num, flags);
}
EXPORT_SYMBOL(vpdma_add_out_dtd);
 809
 810void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width,
 811                int stride, const struct v4l2_rect *c_rect,
 812                const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
 813                int max_w, int max_h, int raw_vpdma_chan, u32 flags)
 814{
 815        int priority = 0;
 816        int field = 0;
 817        int notify = 1;
 818        int channel, next_chan;
 819        struct v4l2_rect rect = *c_rect;
 820        int depth = fmt->depth;
 821        struct vpdma_dtd *dtd;
 822
 823        channel = next_chan = raw_vpdma_chan;
 824
 825        if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
 826                        fmt->data_type == DATA_TYPE_C420) {
 827                rect.height >>= 1;
 828                rect.top >>= 1;
 829                depth = 8;
 830        }
 831
 832        dma_addr += rect.top * stride + (rect.left * depth >> 3);
 833
 834        dtd = list->next;
 835        WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
 836
 837        dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
 838                                        notify,
 839                                        field,
 840                                        !!(flags & VPDMA_DATA_FRAME_1D),
 841                                        !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
 842                                        !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
 843                                        stride);
 844        dtd->w1 = 0;
 845        dtd->start_addr = (u32) dma_addr;
 846        dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
 847                                DTD_DIR_OUT, channel, priority, next_chan);
 848        dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0);
 849        dtd->max_width_height = dtd_max_width_height(max_w, max_h);
 850        dtd->client_attr0 = 0;
 851        dtd->client_attr1 = 0;
 852
 853        list->next = dtd + 1;
 854
 855        dump_dtd(dtd);
 856}
 857EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd);
 858
/*
 * append an inbound data transfer descriptor to the given descriptor list,
 * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel
 *
 * @list: vpdma desc list to which we add this descriptor
 * @width: width of the image in pixels in memory (not the cropped width)
 * @stride: line stride of the source buffer in bytes
 * @c_rect: crop params of input image
 * @fmt: vpdma data format of the buffer
 * @dma_addr: dma address as seen by VPDMA
 * @chan: VPDMA channel
 * @field: top or bottom field info of the input image
 * @flags: VPDMA flags to configure some descriptor fields
 * @frame_width/height: the complete width/height of the image presented to the
 *			client (this makes sense when multiple channels are
 *			connected to the same client, forming a larger frame)
 * @start_h, @start_v: position where the given channel starts providing pixel
 *			data to the client (makes sense when multiple channels
 *			contribute to the client)
 */
 878void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width,
 879                int stride, const struct v4l2_rect *c_rect,
 880                const struct vpdma_data_format *fmt, dma_addr_t dma_addr,
 881                enum vpdma_channel chan, int field, u32 flags, int frame_width,
 882                int frame_height, int start_h, int start_v)
 883{
 884        int priority = 0;
 885        int notify = 1;
 886        int depth = fmt->depth;
 887        int channel, next_chan;
 888        struct v4l2_rect rect = *c_rect;
 889        struct vpdma_dtd *dtd;
 890
 891        channel = next_chan = chan_info[chan].num;
 892
 893        if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV &&
 894                        fmt->data_type == DATA_TYPE_C420) {
 895                rect.height >>= 1;
 896                rect.top >>= 1;
 897                depth = 8;
 898        }
 899
 900        dma_addr += rect.top * stride + (rect.left * depth >> 3);
 901
 902        dtd = list->next;
 903        WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size));
 904
 905        dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type,
 906                                        notify,
 907                                        field,
 908                                        !!(flags & VPDMA_DATA_FRAME_1D),
 909                                        !!(flags & VPDMA_DATA_EVEN_LINE_SKIP),
 910                                        !!(flags & VPDMA_DATA_ODD_LINE_SKIP),
 911                                        stride);
 912
 913        dtd->xfer_length_height = dtd_xfer_length_height(rect.width,
 914                                        rect.height);
 915        dtd->start_addr = (u32) dma_addr;
 916        dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED),
 917                                DTD_DIR_IN, channel, priority, next_chan);
 918        dtd->frame_width_height = dtd_frame_width_height(frame_width,
 919                                        frame_height);
 920        dtd->start_h_v = dtd_start_h_v(start_h, start_v);
 921        dtd->client_attr0 = 0;
 922        dtd->client_attr1 = 0;
 923
 924        list->next = dtd + 1;
 925
 926        dump_dtd(dtd);
 927}
 928EXPORT_SYMBOL(vpdma_add_in_dtd);
 929
 930int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv)
 931{
 932        int i, list_num = -1;
 933        unsigned long flags;
 934
 935        spin_lock_irqsave(&vpdma->lock, flags);
 936        for (i = 0; i < VPDMA_MAX_NUM_LIST &&
 937            vpdma->hwlist_used[i] == true; i++)
 938                ;
 939
 940        if (i < VPDMA_MAX_NUM_LIST) {
 941                list_num = i;
 942                vpdma->hwlist_used[i] = true;
 943                vpdma->hwlist_priv[i] = priv;
 944        }
 945        spin_unlock_irqrestore(&vpdma->lock, flags);
 946
 947        return list_num;
 948}
 949EXPORT_SYMBOL(vpdma_hwlist_alloc);
 950
 951void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num)
 952{
 953        if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST)
 954                return NULL;
 955
 956        return vpdma->hwlist_priv[list_num];
 957}
 958EXPORT_SYMBOL(vpdma_hwlist_get_priv);
 959
 960void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num)
 961{
 962        void *priv;
 963        unsigned long flags;
 964
 965        spin_lock_irqsave(&vpdma->lock, flags);
 966        vpdma->hwlist_used[list_num] = false;
 967        priv = vpdma->hwlist_priv;
 968        spin_unlock_irqrestore(&vpdma->lock, flags);
 969
 970        return priv;
 971}
 972EXPORT_SYMBOL(vpdma_hwlist_release);
 973
 974/* set or clear the mask for list complete interrupt */
 975void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num,
 976                int list_num, bool enable)
 977{
 978        u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
 979        u32 val;
 980
 981        val = read_reg(vpdma, reg_addr);
 982        if (enable)
 983                val |= (1 << (list_num * 2));
 984        else
 985                val &= ~(1 << (list_num * 2));
 986        write_reg(vpdma, reg_addr, val);
 987}
 988EXPORT_SYMBOL(vpdma_enable_list_complete_irq);
 989
 990/* get the LIST_STAT register */
 991unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num)
 992{
 993        u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
 994
 995        return read_reg(vpdma, reg_addr);
 996}
 997EXPORT_SYMBOL(vpdma_get_list_stat);
 998
 999/* get the LIST_MASK register */
1000unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num)
1001{
1002        u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num;
1003
1004        return read_reg(vpdma, reg_addr);
1005}
1006EXPORT_SYMBOL(vpdma_get_list_mask);
1007
1008/* clear previously occurred list interrupts in the LIST_STAT register */
1009void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num,
1010                           int list_num)
1011{
1012        u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num;
1013
1014        write_reg(vpdma, reg_addr, 3 << (list_num * 2));
1015}
1016EXPORT_SYMBOL(vpdma_clear_list_stat);
1017
1018void vpdma_set_bg_color(struct vpdma_data *vpdma,
1019                struct vpdma_data_format *fmt, u32 color)
1020{
1021        if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB)
1022                write_reg(vpdma, VPDMA_BG_RGB, color);
1023        else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV)
1024                write_reg(vpdma, VPDMA_BG_YUV, color);
1025}
1026EXPORT_SYMBOL(vpdma_set_bg_color);
1027
1028/*
1029 * configures the output mode of the line buffer for the given client, the
1030 * line buffer content can either be mirrored(each line repeated twice) or
1031 * passed to the client as is
1032 */
1033void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode,
1034                enum vpdma_channel chan)
1035{
1036        int client_cstat = chan_info[chan].cstat_offset;
1037
1038        write_field_reg(vpdma, client_cstat, line_mode,
1039                VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT);
1040}
1041EXPORT_SYMBOL(vpdma_set_line_mode);
1042
1043/*
1044 * configures the event which should trigger VPDMA transfer for the given
1045 * client
1046 */
1047void vpdma_set_frame_start_event(struct vpdma_data *vpdma,
1048                enum vpdma_frame_start_event fs_event,
1049                enum vpdma_channel chan)
1050{
1051        int client_cstat = chan_info[chan].cstat_offset;
1052
1053        write_field_reg(vpdma, client_cstat, fs_event,
1054                VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT);
1055}
1056EXPORT_SYMBOL(vpdma_set_frame_start_event);
1057
/*
 * request_firmware_nowait() completion callback: upload the VPDMA microcode
 * to the hardware and invoke the driver's ready callback.
 *
 * Runs asynchronously once the firmware image is (or fails to be) loaded.
 * On any exit path the firmware and the temporary DMA buffer are released.
 */
static void vpdma_firmware_cb(const struct firmware *f, void *context)
{
	struct vpdma_data *vpdma = context;
	struct vpdma_buf fw_dma_buf;
	int i, r;

	dev_dbg(&vpdma->pdev->dev, "firmware callback\n");

	/* f is NULL (or empty) when the firmware could not be found */
	if (!f || !f->data) {
		dev_err(&vpdma->pdev->dev, "couldn't get firmware\n");
		return;
	}

	/* already initialized */
	if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
			VPDMA_LIST_RDY_SHFT)) {
		vpdma->cb(vpdma->pdev);
		return;
	}

	/* stage the firmware in a DMA-able buffer the VPDMA engine can read */
	r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size);
	if (r) {
		dev_err(&vpdma->pdev->dev,
			"failed to allocate dma buffer for firmware\n");
		goto rel_fw;
	}

	memcpy(fw_dma_buf.addr, f->data, f->size);

	vpdma_map_desc_buf(vpdma, &fw_dma_buf);

	/* writing the list address kicks off the firmware upload */
	write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr);

	/* poll the list-ready bit until the engine signals it consumed it */
	for (i = 0; i < 100; i++) {		/* max 1 second */
		msleep_interruptible(10);

		if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK,
				VPDMA_LIST_RDY_SHFT))
			break;
	}

	if (i == 100) {
		dev_err(&vpdma->pdev->dev, "firmware upload failed\n");
		goto free_buf;
	}

	vpdma->cb(vpdma->pdev);

	/* success path intentionally falls through: the staging buffer is
	 * no longer needed once the hardware has consumed the firmware */
free_buf:
	vpdma_unmap_desc_buf(vpdma, &fw_dma_buf);

	vpdma_free_desc_buf(&fw_dma_buf);
rel_fw:
	release_firmware(f);
}
1113
1114static int vpdma_load_firmware(struct vpdma_data *vpdma)
1115{
1116        int r;
1117        struct device *dev = &vpdma->pdev->dev;
1118
1119        r = request_firmware_nowait(THIS_MODULE, 1,
1120                (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma,
1121                vpdma_firmware_cb);
1122        if (r) {
1123                dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE);
1124                return r;
1125        } else {
1126                dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE);
1127        }
1128
1129        return 0;
1130}
1131
1132int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma,
1133                void (*cb)(struct platform_device *pdev))
1134{
1135        struct resource *res;
1136        int r;
1137
1138        dev_dbg(&pdev->dev, "vpdma_create\n");
1139
1140        vpdma->pdev = pdev;
1141        vpdma->cb = cb;
1142        spin_lock_init(&vpdma->lock);
1143
1144        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma");
1145        if (res == NULL) {
1146                dev_err(&pdev->dev, "missing platform resources data\n");
1147                return -ENODEV;
1148        }
1149
1150        vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1151        if (!vpdma->base) {
1152                dev_err(&pdev->dev, "failed to ioremap\n");
1153                return -ENOMEM;
1154        }
1155
1156        r = vpdma_load_firmware(vpdma);
1157        if (r) {
1158                pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE);
1159                return r;
1160        }
1161
1162        return 0;
1163}
1164EXPORT_SYMBOL(vpdma_create);
1165
1166MODULE_AUTHOR("Texas Instruments Inc.");
1167MODULE_FIRMWARE(VPDMA_FIRMWARE);
1168MODULE_LICENSE("GPL v2");
1169