linux/drivers/dma/xilinx/xilinx_dpdma.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Xilinx ZynqMP DPDMA Engine driver
   4 *
   5 *  Copyright (C) 2015 - 2018 Xilinx, Inc.
   6 *
   7 *  Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
   8 *
   9 * This software is licensed under the terms of the GNU General Public
  10 * License version 2, as published by the Free Software Foundation, and
  11 * may be copied, distributed, and modified under those terms.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 */
  18
  19#include <linux/bitops.h>
  20#include <linux/clk.h>
  21#include <linux/debugfs.h>
  22#include <linux/delay.h>
  23#include <linux/device.h>
  24#include <linux/dmaengine.h>
  25#include <linux/dmapool.h>
  26#include <linux/gfp.h>
  27#include <linux/interrupt.h>
  28#include <linux/irqreturn.h>
  29#include <linux/module.h>
  30#include <linux/of.h>
  31#include <linux/of_dma.h>
  32#include <linux/platform_device.h>
  33#include <linux/sched.h>
  34#include <linux/slab.h>
  35#include <linux/spinlock.h>
  36#include <linux/types.h>
  37#include <linux/uaccess.h>
  38#include <linux/wait.h>
  39
  40#include "../dmaengine.h"
  41
  42/* DPDMA registers */
  43#define XILINX_DPDMA_ERR_CTRL                           0x0
  44#define XILINX_DPDMA_ISR                                0x4
  45#define XILINX_DPDMA_IMR                                0x8
  46#define XILINX_DPDMA_IEN                                0xc
  47#define XILINX_DPDMA_IDS                                0x10
  48#define XILINX_DPDMA_INTR_DESC_DONE_MASK                (0x3f << 0)
  49#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT               0
  50#define XILINX_DPDMA_INTR_NO_OSTAND_MASK                (0x3f << 6)
  51#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT               6
  52#define XILINX_DPDMA_INTR_AXI_ERR_MASK                  (0x3f << 12)
  53#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT                 12
  54#define XILINX_DPDMA_INTR_DESC_ERR_MASK                 (0x3f << 18)
   55#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT                 18
  56#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL              BIT(24)
  57#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL             BIT(25)
  58#define XILINX_DPDMA_INTR_AXI_4K_CROSS                  BIT(26)
  59#define XILINX_DPDMA_INTR_VSYNC                         BIT(27)
  60#define XILINX_DPDMA_INTR_CHAN_ERR_MASK                 0x41000
  61#define XILINX_DPDMA_INTR_CHAN_ERR                      0xfff000
  62#define XILINX_DPDMA_INTR_GLOBAL_ERR                    0x7000000
  63#define XILINX_DPDMA_INTR_ERR_ALL                       0x7fff000
  64#define XILINX_DPDMA_INTR_CHAN_MASK                     0x41041
  65#define XILINX_DPDMA_INTR_GLOBAL_MASK                   0xf000000
  66#define XILINX_DPDMA_INTR_ALL                           0xfffffff
  67#define XILINX_DPDMA_EISR                               0x14
  68#define XILINX_DPDMA_EIMR                               0x18
  69#define XILINX_DPDMA_EIEN                               0x1c
  70#define XILINX_DPDMA_EIDS                               0x20
  71#define XILINX_DPDMA_EINTR_INV_APB                      BIT(0)
  72#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK              (0x3f << 1)
  73#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT             1
  74#define XILINX_DPDMA_EINTR_PRE_ERR_MASK                 (0x3f << 7)
  75#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT                7
  76#define XILINX_DPDMA_EINTR_CRC_ERR_MASK                 (0x3f << 13)
  77#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT                13
  78#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK              (0x3f << 19)
  79#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT             19
  80#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK           (0x3f << 25)
  81#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT          25
   82#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL             BIT(31)
  83#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK                0x2082082
  84#define XILINX_DPDMA_EINTR_CHAN_ERR                     0x7ffffffe
  85#define XILINX_DPDMA_EINTR_GLOBAL_ERR                   0x80000001
  86#define XILINX_DPDMA_EINTR_ALL                          0xffffffff
  87#define XILINX_DPDMA_CNTL                               0x100
  88#define XILINX_DPDMA_GBL                                0x104
  89#define XILINX_DPDMA_GBL_TRIG_SHIFT                     0
  90#define XILINX_DPDMA_GBL_RETRIG_SHIFT                   6
  91#define XILINX_DPDMA_ALC0_CNTL                          0x108
  92#define XILINX_DPDMA_ALC0_STATUS                        0x10c
  93#define XILINX_DPDMA_ALC0_MAX                           0x110
  94#define XILINX_DPDMA_ALC0_MIN                           0x114
  95#define XILINX_DPDMA_ALC0_ACC                           0x118
  96#define XILINX_DPDMA_ALC0_ACC_TRAN                      0x11c
  97#define XILINX_DPDMA_ALC1_CNTL                          0x120
  98#define XILINX_DPDMA_ALC1_STATUS                        0x124
  99#define XILINX_DPDMA_ALC1_MAX                           0x128
 100#define XILINX_DPDMA_ALC1_MIN                           0x12c
 101#define XILINX_DPDMA_ALC1_ACC                           0x130
 102#define XILINX_DPDMA_ALC1_ACC_TRAN                      0x134
 103
 104/* Channel register */
 105#define XILINX_DPDMA_CH_BASE                            0x200
 106#define XILINX_DPDMA_CH_OFFSET                          0x100
 107#define XILINX_DPDMA_CH_DESC_START_ADDRE                0x0
 108#define XILINX_DPDMA_CH_DESC_START_ADDR                 0x4
 109#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE                 0x8
 110#define XILINX_DPDMA_CH_DESC_NEXT_ADDR                  0xc
 111#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE                  0x10
 112#define XILINX_DPDMA_CH_PYLD_CUR_ADDR                   0x14
 113#define XILINX_DPDMA_CH_CNTL                            0x18
 114#define XILINX_DPDMA_CH_CNTL_ENABLE                     BIT(0)
 115#define XILINX_DPDMA_CH_CNTL_PAUSE                      BIT(1)
 116#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT          2
 117#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT          6
 118#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT          10
 119#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS              11
 120#define XILINX_DPDMA_CH_STATUS                          0x1c
 121#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK           (0xf << 21)
 122#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT          21
 123#define XILINX_DPDMA_CH_VDO                             0x20
 124#define XILINX_DPDMA_CH_PYLD_SZ                         0x24
 125#define XILINX_DPDMA_CH_DESC_ID                         0x28
 126
 127/* DPDMA descriptor fields */
 128#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE              (0xa5)
 129#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR         BIT(8)
 130#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE           BIT(9)
 131#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE           BIT(10)
 132#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE             BIT(18)
 133#define XILINX_DPDMA_DESC_CONTROL_LAST                  BIT(19)
 134#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC            BIT(20)
 135#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME         BIT(21)
 136#define XILINX_DPDMA_DESC_ID_MASK                       (0xffff << 0)
 137#define XILINX_DPDMA_DESC_ID_SHIFT                      (0)
 138#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK       (0x3ffff << 0)
 139#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT      (0)
 140#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK      (0x3fff << 18)
 141#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT     (18)
 142#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK            (0xfff)
 143#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT           (16)
 144
 145#define XILINX_DPDMA_ALIGN_BYTES                        256
 146#define XILINX_DPDMA_LINESIZE_ALIGN_BITS                128
 147
 148#define XILINX_DPDMA_NUM_CHAN                           6
 149#define XILINX_DPDMA_PAGE_MASK                          ((1 << 12) - 1)
 150#define XILINX_DPDMA_PAGE_SHIFT                         12
 151
 152/**
 153 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 154 * @control: control configuration field
 155 * @desc_id: descriptor ID
 156 * @xfer_size: transfer size
 157 * @hsize_stride: horizontal size and stride
 158 * @timestamp_lsb: LSB of time stamp
 159 * @timestamp_msb: MSB of time stamp
 160 * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
 161 * @next_desc: next descriptor 32 bit address
 162 * @src_addr: payload source address (lower 32 bit of 1st 4KB page)
 163 * @addr_ext_23: upper 16 bit of 48 bit address (src_addr2 and src_addr3)
 164 * @addr_ext_45: upper 16 bit of 48 bit address (src_addr4 and src_addr5)
 165 * @src_addr2: payload source address (lower 32 bit of 2nd 4KB page)
 166 * @src_addr3: payload source address (lower 32 bit of 3rd 4KB page)
 167 * @src_addr4: payload source address (lower 32 bit of 4th 4KB page)
 168 * @src_addr5: payload source address (lower 32 bit of 5th 4KB page)
 169 * @crc: descriptor CRC
 170 */
 171struct xilinx_dpdma_hw_desc {
 172        u32 control;
 173        u32 desc_id;
 174        u32 xfer_size;
 175        u32 hsize_stride;
 176        u32 timestamp_lsb;
 177        u32 timestamp_msb;
 178        u32 addr_ext;
 179        u32 next_desc;
 180        u32 src_addr;
 181        u32 addr_ext_23;
 182        u32 addr_ext_45;
 183        u32 src_addr2;
 184        u32 src_addr3;
 185        u32 src_addr4;
 186        u32 src_addr5;
 187        u32 crc;
 188} __aligned(XILINX_DPDMA_ALIGN_BYTES);
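
/*
 * Illustration (a sketch, not part of the original sources): with extended
 * addressing enabled, xilinx_dpdma_sw_desc_addr_64() below splits a 48 bit
 * payload address such as 0x123456789abc into src_addr = 0x56789abc with the
 * upper 16 bits (0x1234) placed in addr_ext[31:16], while addr_ext[15:0]
 * holds the upper bits of next_desc. addr_ext_23 and addr_ext_45 carry the
 * corresponding upper bits for src_addr2/src_addr3 and src_addr4/src_addr5.
 */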
 189
 190/**
 191 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 192 * @hw: DPDMA hardware descriptor
 193 * @node: list node for software descriptors
 194 * @phys: physical address of the software descriptor
 195 */
 196struct xilinx_dpdma_sw_desc {
 197        struct xilinx_dpdma_hw_desc hw;
 198        struct list_head node;
 199        dma_addr_t phys;
 200};
 201
 202/**
 203 * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
 204 * @PREPARED: descriptor is prepared for transaction
  205 * @ACTIVE: transaction is in progress or has completed successfully
  206 * @ERRORED: descriptor encountered one or more errors
 207 */
 208enum xilinx_dpdma_tx_desc_status {
 209        PREPARED,
 210        ACTIVE,
 211        ERRORED
 212};
 213
 214/**
 215 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 216 * @async_tx: DMA async transaction descriptor
 217 * @descriptors: list of software descriptors
 218 * @node: list node for transaction descriptors
 219 * @status: tx descriptor status
  220 * @done_cnt: number of completion notifications to deliver
 221 */
 222struct xilinx_dpdma_tx_desc {
 223        struct dma_async_tx_descriptor async_tx;
 224        struct list_head descriptors;
 225        struct list_head node;
 226        enum xilinx_dpdma_tx_desc_status status;
 227        unsigned int done_cnt;
 228};
 229
 230/**
 231 * enum xilinx_dpdma_chan_id - DPDMA channel ID
  232 * @VIDEO0: first video channel
  233 * @VIDEO1: second video channel, used for multi-plane YUV formats
  234 * @VIDEO2: third video channel, used for multi-plane YUV formats
 235 * @GRAPHICS: graphics channel
 236 * @AUDIO0: 1st audio channel
 237 * @AUDIO1: 2nd audio channel
 238 */
 239enum xilinx_dpdma_chan_id {
 240        VIDEO0,
 241        VIDEO1,
 242        VIDEO2,
 243        GRAPHICS,
 244        AUDIO0,
 245        AUDIO1
 246};
 247
 248/**
 249 * enum xilinx_dpdma_chan_status - DPDMA channel status
 250 * @IDLE: idle state
 251 * @STREAMING: actively streaming state
 252 */
 253enum xilinx_dpdma_chan_status {
 254        IDLE,
 255        STREAMING
 256};
 257
 258/*
 259 * DPDMA descriptor placement
 260 * --------------------------
  261 * DPDMA descriptor lifetime is described with the following placements:
 262 *
 263 * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
 264 *
  265 * Transitions are triggered as follows:
 266 *
 267 * -> allocated_desc : a descriptor allocation
 268 * allocated_desc -> submitted_desc: a descriptor submission
  269 * submitted_desc -> pending_desc: a request to issue the pending descriptor
 270 * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
 271 * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
 272 */
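
/*
 * Illustrative call flow (a sketch assuming a standard dmaengine client; the
 * dmaengine_*() helpers below are the generic framework entry points, not
 * part of this file):
 *
 *   dmaengine_prep_*()        -> allocated_desc  (xilinx_dpdma_chan_prep_*)
 *   dmaengine_submit()        -> submitted_desc  (tx_submit)
 *   dma_async_issue_pending() -> pending_desc    (issue_pending)
 *   VSYNC interrupt           -> active_desc, previous active -> done_list
 *   done_task tasklet         -> callbacks invoked, done_list entries freed
 */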
 273
 274/**
 275 * struct xilinx_dpdma_chan - DPDMA channel
 276 * @common: generic dma channel structure
 277 * @reg: register base address
 278 * @id: channel ID
  279 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 280 * @status: channel status
 281 * @first_frame: flag for the first frame of stream
 282 * @video_group: flag if multi-channel operation is needed for video channels
 283 * @lock: lock to access struct xilinx_dpdma_chan
 284 * @desc_pool: descriptor allocation pool
 285 * @done_task: done IRQ bottom half handler
 286 * @err_task: error IRQ bottom half handler
 287 * @allocated_desc: allocated descriptor
 288 * @submitted_desc: submitted descriptor
 289 * @pending_desc: pending descriptor to be scheduled in next period
 290 * @active_desc: descriptor that the DPDMA channel is active on
 291 * @done_list: done descriptor list
 292 * @xdev: DPDMA device
 293 */
 294struct xilinx_dpdma_chan {
 295        struct dma_chan common;
 296        void __iomem *reg;
 297        enum xilinx_dpdma_chan_id id;
 298
 299        wait_queue_head_t wait_to_stop;
 300        enum xilinx_dpdma_chan_status status;
 301        bool first_frame;
 302        bool video_group;
 303
 304        spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
 305        struct dma_pool *desc_pool;
 306        struct tasklet_struct done_task;
 307        struct tasklet_struct err_task;
 308
 309        struct xilinx_dpdma_tx_desc *allocated_desc;
 310        struct xilinx_dpdma_tx_desc *submitted_desc;
 311        struct xilinx_dpdma_tx_desc *pending_desc;
 312        struct xilinx_dpdma_tx_desc *active_desc;
 313        struct list_head done_list;
 314
 315        struct xilinx_dpdma_device *xdev;
 316};
 317
 318/**
 319 * struct xilinx_dpdma_device - DPDMA device
 320 * @common: generic dma device structure
 321 * @reg: register base address
 322 * @dev: generic device structure
 323 * @axi_clk: axi clock
 324 * @chan: DPDMA channels
 325 * @ext_addr: flag for 64 bit system (48 bit addressing)
 326 * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
 327 */
 328struct xilinx_dpdma_device {
 329        struct dma_device common;
 330        void __iomem *reg;
 331        struct device *dev;
 332
 333        struct clk *axi_clk;
 334        struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
 335
 336        bool ext_addr;
 337        void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
 338                          struct xilinx_dpdma_sw_desc *prev,
 339                          dma_addr_t dma_addr[], unsigned int num_src_addr);
 340};
 341
 342#ifdef CONFIG_XILINX_DPDMA_DEBUG_FS
 343#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE      32
 344#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR     "65535"
 345#define IN_RANGE(x, min, max) ({                \
 346                typeof(x) _x = (x);             \
 347                _x >= (min) && _x <= (max); })
 348
 349/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
 350enum xilinx_dpdma_testcases {
 351        DPDMA_TC_INTR_DONE,
 352        DPDMA_TC_NONE
 353};
 354
 355struct xilinx_dpdma_debugfs {
 356        enum xilinx_dpdma_testcases testcase;
 357        u16 xilinx_dpdma_intr_done_count;
 358        enum xilinx_dpdma_chan_id chan_id;
 359};
 360
 361static struct xilinx_dpdma_debugfs dpdma_debugfs;
 362struct xilinx_dpdma_debugfs_request {
 363        const char *req;
 364        enum xilinx_dpdma_testcases tc;
 365        ssize_t (*read_handler)(char **kern_buff);
 366        ssize_t (*write_handler)(char **cmd);
 367};
 368
 369static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
 370{
 371        if (chan_id == dpdma_debugfs.chan_id)
 372                dpdma_debugfs.xilinx_dpdma_intr_done_count++;
 373}
 374
 375static s64 xilinx_dpdma_debugfs_argument_value(char *arg)
 376{
 377        s64 value;
 378
 379        if (!arg)
 380                return -1;
 381
 382        if (!kstrtos64(arg, 0, &value))
 383                return value;
 384
 385        return -1;
 386}
 387
 388static ssize_t
 389xilinx_dpdma_debugfs_desc_done_intr_write(char **dpdma_test_arg)
 390{
 391        char *arg;
 392        char *arg_chan_id;
 393        s64 id;
 394
 395        arg = strsep(dpdma_test_arg, " ");
 396        if (strncasecmp(arg, "start", 5) != 0)
 397                return -EINVAL;
 398
 399        arg_chan_id = strsep(dpdma_test_arg, " ");
 400        id = xilinx_dpdma_debugfs_argument_value(arg_chan_id);
 401
 402        if (id < 0 || !IN_RANGE(id, VIDEO0, AUDIO1))
 403                return -EINVAL;
 404
 405        dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
 406        dpdma_debugfs.xilinx_dpdma_intr_done_count = 0;
 407        dpdma_debugfs.chan_id = id;
 408
 409        return 0;
 410}
 411
 412static ssize_t xilinx_dpdma_debugfs_desc_done_intr_read(char **kern_buff)
 413{
 414        size_t out_str_len;
 415
 416        dpdma_debugfs.testcase = DPDMA_TC_NONE;
 417
  418        out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR) + 1; /* include NUL */
 419        out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
 420                            out_str_len);
 421        snprintf(*kern_buff, out_str_len, "%d",
 422                 dpdma_debugfs.xilinx_dpdma_intr_done_count);
 423
 424        return 0;
 425}
 426
 427/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
 428struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
 429        {"DESCRIPTOR_DONE_INTR", DPDMA_TC_INTR_DONE,
 430                        xilinx_dpdma_debugfs_desc_done_intr_read,
 431                        xilinx_dpdma_debugfs_desc_done_intr_write},
 432};
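
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * caller is allowed to write to the file, e.g. root):
 *
 *   echo "DESCRIPTOR_DONE_INTR start 0" > /sys/kernel/debug/dpdma/testcase
 *   ... let the VIDEO0 (id 0) channel stream for a while ...
 *   cat /sys/kernel/debug/dpdma/testcase
 *
 * The read returns the number of done interrupts counted for the selected
 * channel and resets the testcase to DPDMA_TC_NONE.
 */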
 433
 434static ssize_t xilinx_dpdma_debugfs_write(struct file *f, const char __user
 435                                               *buf, size_t size, loff_t *pos)
 436{
 437        char *kern_buff, *kern_buff_start;
 438        char *dpdma_test_req;
 439        int ret;
 440        int i;
 441
 442        if (*pos != 0 || size <= 0)
 443                return -EINVAL;
 444
  445        /* Supporting a single test instance for now */
 446        if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
 447                return -EBUSY;
 448
  449        kern_buff = kzalloc(size + 1, GFP_KERNEL); /* +1 for NUL termination */
 450        if (!kern_buff)
 451                return -ENOMEM;
 452        kern_buff_start = kern_buff;
 453
 454        ret = strncpy_from_user(kern_buff, buf, size);
 455        if (ret < 0) {
 456                kfree(kern_buff_start);
 457                return ret;
 458        }
 459
 460        /* Read the testcase name from a user request */
 461        dpdma_test_req = strsep(&kern_buff, " ");
 462
 463        for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
 464                if (!strcasecmp(dpdma_test_req, dpdma_debugfs_reqs[i].req)) {
 465                        if (!dpdma_debugfs_reqs[i].write_handler(&kern_buff)) {
 466                                kfree(kern_buff_start);
 467                                return size;
 468                        }
 469                        break;
 470                }
 471        }
 472        kfree(kern_buff_start);
 473        return -EINVAL;
 474}
 475
 476static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
 477                                         size_t size, loff_t *pos)
 478{
 479        char *kern_buff = NULL;
 480        size_t kern_buff_len, out_str_len;
 481        enum xilinx_dpdma_testcases tc;
 482        int ret;
 483
 484        if (size <= 0)
 485                return -EINVAL;
 486
 487        if (*pos != 0)
 488                return 0;
 489
 490        kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
 491        if (!kern_buff) {
 492                dpdma_debugfs.testcase = DPDMA_TC_NONE;
 493                return -ENOMEM;
 494        }
 495
 496        tc = dpdma_debugfs.testcase;
 497        if (tc == DPDMA_TC_NONE) {
  498                out_str_len = strlen("No testcase executed") + 1;
 499                out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
 500                                    out_str_len);
 501                snprintf(kern_buff, out_str_len, "%s", "No testcase executed");
 502        } else {
 503                ret = dpdma_debugfs_reqs[tc].read_handler(&kern_buff);
 504                if (ret) {
 505                        kfree(kern_buff);
 506                        return ret;
 507                }
 508        }
 509
 510        kern_buff_len = strlen(kern_buff);
 511        size = min(size, kern_buff_len);
 512
 513        ret = copy_to_user(buf, kern_buff, size);
 514
 515        kfree(kern_buff);
 516        if (ret)
  517                return -EFAULT;
 518
 519        *pos = size + 1;
 520        return size;
 521}
 522
 523static const struct file_operations fops_xilinx_dpdma_dbgfs = {
 524        .owner = THIS_MODULE,
 525        .read = xilinx_dpdma_debugfs_read,
 526        .write = xilinx_dpdma_debugfs_write,
 527};
 528
 529static int xilinx_dpdma_debugfs_init(struct device *dev)
 530{
 531        int err;
 532        struct dentry *xilinx_dpdma_debugfs_dir, *xilinx_dpdma_debugfs_file;
 533
 534        dpdma_debugfs.testcase = DPDMA_TC_NONE;
 535
 536        xilinx_dpdma_debugfs_dir = debugfs_create_dir("dpdma", NULL);
 537        if (!xilinx_dpdma_debugfs_dir) {
 538                dev_err(dev, "debugfs_create_dir failed\n");
 539                return -ENODEV;
 540        }
 541
 542        xilinx_dpdma_debugfs_file =
 543                debugfs_create_file("testcase", 0444,
 544                                    xilinx_dpdma_debugfs_dir, NULL,
 545                                    &fops_xilinx_dpdma_dbgfs);
 546        if (!xilinx_dpdma_debugfs_file) {
 547                dev_err(dev, "debugfs_create_file testcase failed\n");
 548                err = -ENODEV;
 549                goto err_dbgfs;
 550        }
 551        return 0;
 552
 553err_dbgfs:
 554        debugfs_remove_recursive(xilinx_dpdma_debugfs_dir);
 555        xilinx_dpdma_debugfs_dir = NULL;
 556        return err;
 557}
 558
 559#else
 560static int xilinx_dpdma_debugfs_init(struct device *dev)
 561{
 562        return 0;
 563}
 564
 565static void xilinx_dpdma_debugfs_intr_done_count_incr(int chan_id)
 566{
 567}
 568#endif /* CONFIG_XILINX_DPDMA_DEBUG_FS */
 569
 570#define to_dpdma_tx_desc(tx) \
 571        container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)
 572
 573#define to_xilinx_chan(chan) \
 574        container_of(chan, struct xilinx_dpdma_chan, common)
 575
 576/* IO operations */
 577
 578static inline u32 dpdma_read(void __iomem *base, u32 offset)
 579{
 580        return ioread32(base + offset);
 581}
 582
 583static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
 584{
 585        iowrite32(val, base + offset);
 586}
 587
 588static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
 589{
 590        dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
 591}
 592
 593static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
 594{
 595        dpdma_write(base, offset, dpdma_read(base, offset) | set);
 596}
 597
 598/* Xilinx DPDMA descriptor operations */
 599
 600/**
  601 * xilinx_dpdma_sw_desc_next_32 - Set the 32 bit address of the next sw descriptor
 602 * @sw_desc: current software descriptor
 603 * @next: next descriptor
 604 *
 605 * Update the current sw descriptor @sw_desc with 32 bit address of the next
 606 * descriptor @next.
 607 */
 608static inline void
 609xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
 610                             struct xilinx_dpdma_sw_desc *next)
 611{
 612        sw_desc->hw.next_desc = next->phys;
 613}
 614
 615/**
 616 * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
 617 * @sw_desc: software descriptor
 618 * @prev: previous descriptor
 619 * @dma_addr: array of dma addresses
 620 * @num_src_addr: number of addresses in @dma_addr
 621 *
 622 * Update the descriptor @sw_desc with 32 bit address.
 623 */
 624static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
 625                                         struct xilinx_dpdma_sw_desc *prev,
 626                                         dma_addr_t dma_addr[],
 627                                         unsigned int num_src_addr)
 628{
 629        struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
 630        unsigned int i;
 631
 632        hw_desc->src_addr = dma_addr[0];
 633
 634        if (prev)
 635                xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
 636
 637        for (i = 1; i < num_src_addr; i++) {
 638                u32 *addr = &hw_desc->src_addr2;
 639                u32 frag_addr;
 640
 641                frag_addr = dma_addr[i];
 642                addr[i - 1] = frag_addr;
 643        }
 644}
 645
 646/**
  647 * xilinx_dpdma_sw_desc_next_64 - Set the 64 bit address of the next sw descriptor
 648 * @sw_desc: current software descriptor
 649 * @next: next descriptor
 650 *
 651 * Update the current sw descriptor @sw_desc with 64 bit address of the next
 652 * descriptor @next.
 653 */
 654static inline void
 655xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
 656                             struct xilinx_dpdma_sw_desc *next)
 657{
 658        sw_desc->hw.next_desc = lower_32_bits(next->phys);
 659        sw_desc->hw.addr_ext |= upper_32_bits(next->phys) &
 660                                XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
 661}
 662
 663/**
 664 * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
 665 * @sw_desc: software descriptor
 666 * @prev: previous descriptor
 667 * @dma_addr: array of dma addresses
 668 * @num_src_addr: number of addresses in @dma_addr
 669 *
 670 * Update the descriptor @sw_desc with 64 bit address.
 671 */
 672static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
 673                                         struct xilinx_dpdma_sw_desc *prev,
 674                                         dma_addr_t dma_addr[],
 675                                         unsigned int num_src_addr)
 676{
 677        struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
 678        unsigned int i;
 679        u32 src_addr_extn;
 680
 681        hw_desc->src_addr = lower_32_bits(dma_addr[0]);
 682        src_addr_extn = upper_32_bits(dma_addr[0]) &
 683                        XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
 684        hw_desc->addr_ext |= (src_addr_extn <<
 685                              XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT);
 686
 687        if (prev)
 688                xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
 689
 690        for (i = 1; i < num_src_addr; i++) {
 691                u32 *addr = &hw_desc->src_addr2;
 692                u32 *addr_ext = &hw_desc->addr_ext_23;
 693                u64 frag_addr;
 694
 695                frag_addr = dma_addr[i];
  696                addr[i - 1] = lower_32_bits(frag_addr);
  697
  698                frag_addr >>= 32;
  699                frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
  700                frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT * ((i - 1) % 2);
  701                addr_ext[(i - 1) / 2] |= frag_addr;
 702        }
 703}
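
/*
 * Worked example (illustrative): for num_src_addr = 3, dma_addr[0] lands in
 * src_addr with its upper 16 bits in addr_ext[31:16]; dma_addr[1] lands in
 * src_addr2 with its upper bits in addr_ext_23[15:0]; dma_addr[2] lands in
 * src_addr3 with its upper bits in addr_ext_23[31:16].
 */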
 704
 705/* Xilinx DPDMA channel descriptor operations */
 706
 707/**
 708 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 709 * @chan: DPDMA channel
 710 *
 711 * Allocate a software descriptor from the channel's descriptor pool.
 712 *
 713 * Return: a software descriptor or NULL.
 714 */
 715static struct xilinx_dpdma_sw_desc *
 716xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
 717{
 718        struct xilinx_dpdma_sw_desc *sw_desc;
 719        dma_addr_t phys;
 720
 721        sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 722        if (!sw_desc)
 723                return NULL;
 724
 725        sw_desc->phys = phys;
 726
 727        return sw_desc;
 728}
 729
 730/**
 731 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 732 * @chan: DPDMA channel
 733 * @sw_desc: software descriptor to free
 734 *
 735 * Free a software descriptor from the channel's descriptor pool.
 736 */
 737static void
 738xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
 739                               struct xilinx_dpdma_sw_desc *sw_desc)
 740{
 741        dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
 742}
 743
 744/**
 745 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 746 * @chan: DPDMA channel
 747 * @tx_desc: tx descriptor to dump
 748 *
 749 * Dump contents of a tx descriptor
 750 */
 751static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
 752                                           struct xilinx_dpdma_tx_desc *tx_desc)
 753{
 754        struct xilinx_dpdma_sw_desc *sw_desc;
 755        struct device *dev = chan->xdev->dev;
 756        unsigned int i = 0;
 757
 758        dev_dbg(dev, "------- TX descriptor dump start -------\n");
 759        dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
 760
 761        list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
 762                struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
 763
 764                dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
 765                dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
 766                dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
 767                dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
 768                dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
 769                dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
 770                dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
 771                dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
 772                dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
 773                dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
 774                dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
 775                dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
 776                dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
 777                dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
 778                dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
 779                dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
 780                dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
 781                dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
 782        }
 783
 784        dev_dbg(dev, "------- TX descriptor dump end -------\n");
 785}
 786
 787/**
 788 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 789 * @chan: DPDMA channel
 790 *
 791 * Allocate a tx descriptor.
 792 *
 793 * Return: a tx descriptor or NULL.
 794 */
 795static struct xilinx_dpdma_tx_desc *
 796xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
 797{
 798        struct xilinx_dpdma_tx_desc *tx_desc;
 799
 800        tx_desc = kzalloc(sizeof(*tx_desc), GFP_KERNEL);
 801        if (!tx_desc)
 802                return NULL;
 803
 804        INIT_LIST_HEAD(&tx_desc->descriptors);
 805        tx_desc->status = PREPARED;
 806
 807        return tx_desc;
 808}
 809
 810/**
 811 * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
 812 * @chan: DPDMA channel
 813 * @tx_desc: tx descriptor
 814 *
 815 * Free the tx descriptor @tx_desc including its software descriptors.
 816 */
 817static void
 818xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
 819                               struct xilinx_dpdma_tx_desc *tx_desc)
 820{
 821        struct xilinx_dpdma_sw_desc *sw_desc, *next;
 822
 823        if (!tx_desc)
 824                return;
 825
 826        list_for_each_entry_safe(sw_desc, next, &tx_desc->descriptors, node) {
 827                list_del(&sw_desc->node);
 828                xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
 829        }
 830
 831        kfree(tx_desc);
 832}
 833
 834/**
 835 * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
 836 * @chan: DPDMA channel
 837 * @tx_desc: tx descriptor
 838 *
 839 * Submit the tx descriptor @tx_desc to the channel @chan.
 840 *
 841 * Return: a cookie assigned to the tx descriptor
 842 */
 843static dma_cookie_t
 844xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
 845                                 struct xilinx_dpdma_tx_desc *tx_desc)
 846{
 847        struct xilinx_dpdma_sw_desc *sw_desc;
 848        dma_cookie_t cookie;
 849        unsigned long flags;
 850
 851        spin_lock_irqsave(&chan->lock, flags);
 852
 853        if (chan->submitted_desc) {
 854                cookie = chan->submitted_desc->async_tx.cookie;
 855                goto out_unlock;
 856        }
 857
 858        cookie = dma_cookie_assign(&tx_desc->async_tx);
 859
 860        /* Assign the cookie to descriptors in this transaction */
 861        /* Only 16 bit will be used, but it should be enough */
 862        list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
 863                sw_desc->hw.desc_id = cookie;
 864
 865        if (tx_desc != chan->allocated_desc)
 866                dev_err(chan->xdev->dev, "desc != allocated_desc\n");
 867        else
 868                chan->allocated_desc = NULL;
 869        chan->submitted_desc = tx_desc;
 870
 871        if (chan->id == VIDEO1 || chan->id == VIDEO2) {
 872                chan->video_group = true;
 873                chan->xdev->chan[VIDEO0]->video_group = true;
 874        }
 875
 876out_unlock:
 877        spin_unlock_irqrestore(&chan->lock, flags);
 878
 879        return cookie;
 880}
 881
 882/**
 883 * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
 884 * @chan: DPDMA channel
 885 * @list: tx descriptor list
 886 *
 887 * Free tx descriptors in the list @list.
 888 */
 889static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
 890                                             struct list_head *list)
 891{
 892        struct xilinx_dpdma_tx_desc *tx_desc, *next;
 893
 894        list_for_each_entry_safe(tx_desc, next, list, node) {
 895                list_del(&tx_desc->node);
 896                xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
 897        }
 898}
 899
 900/**
 901 * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
 902 * @chan: DPDMA channel
 903 *
 904 * Free all descriptors associated with the channel. The channel should be
 905 * disabled before this function is called, otherwise, this function may
 906 * result in misbehavior of the system due to remaining outstanding
 907 * transactions.
 908 */
 909static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
 910{
 911        unsigned long flags;
 912
 913        spin_lock_irqsave(&chan->lock, flags);
 914
 915        dev_dbg(chan->xdev->dev, "chan->status = %s\n",
 916                chan->status == STREAMING ? "STREAMING" : "IDLE");
 917
 918        xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
 919        chan->allocated_desc = NULL;
 920        xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
 921        chan->submitted_desc = NULL;
 922        xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
 923        chan->pending_desc = NULL;
 924        xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
 925        chan->active_desc = NULL;
 926        xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
 927
 928        spin_unlock_irqrestore(&chan->lock, flags);
 929}
 930
 931/**
 932 * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
 933 * @chan: DPDMA channel
 934 *
 935 * Trigger the complete callbacks of descriptors with finished transactions.
 936 * Free descriptors which are no longer in use.
 937 */
 938static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
 939{
 940        struct xilinx_dpdma_tx_desc *desc;
 941        dma_async_tx_callback callback;
 942        void *callback_param;
 943        unsigned long flags;
 944        unsigned int cnt, i;
 945
 946        spin_lock_irqsave(&chan->lock, flags);
 947
 948        while (!list_empty(&chan->done_list)) {
 949                desc = list_first_entry(&chan->done_list,
 950                                        struct xilinx_dpdma_tx_desc, node);
 951                list_del(&desc->node);
 952
 953                cnt = desc->done_cnt;
 954                desc->done_cnt = 0;
 955                callback = desc->async_tx.callback;
 956                callback_param = desc->async_tx.callback_param;
 957                if (callback) {
 958                        spin_unlock_irqrestore(&chan->lock, flags);
 959                        for (i = 0; i < cnt; i++)
 960                                callback(callback_param);
 961                        spin_lock_irqsave(&chan->lock, flags);
 962                }
 963
 964                xilinx_dpdma_chan_free_tx_desc(chan, desc);
 965        }
 966
 967        if (chan->active_desc) {
 968                cnt = chan->active_desc->done_cnt;
 969                chan->active_desc->done_cnt = 0;
 970                callback = chan->active_desc->async_tx.callback;
 971                callback_param = chan->active_desc->async_tx.callback_param;
 972                if (callback) {
 973                        spin_unlock_irqrestore(&chan->lock, flags);
 974                        for (i = 0; i < cnt; i++)
 975                                callback(callback_param);
 976                        spin_lock_irqsave(&chan->lock, flags);
 977                }
 978        }
 979
 980        spin_unlock_irqrestore(&chan->lock, flags);
 981}
 982
 983/**
 984 * xilinx_dpdma_chan_desc_active - Set the descriptor as active
 985 * @chan: DPDMA channel
 986 *
  987 * Make the pending descriptor @chan->pending_desc the active one. This function
 988 * should be called when the channel starts operating on the pending descriptor.
 989 */
 990static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
 991{
 992        unsigned long flags;
 993
 994        spin_lock_irqsave(&chan->lock, flags);
 995
 996        if (!chan->pending_desc)
 997                goto out_unlock;
 998
 999        if (chan->active_desc)
1000                list_add_tail(&chan->active_desc->node, &chan->done_list);
1001
1002        chan->active_desc = chan->pending_desc;
1003        chan->pending_desc = NULL;
1004
1005out_unlock:
1006        spin_unlock_irqrestore(&chan->lock, flags);
1007}
1008
1009/**
1010 * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
1011 * @chan: DPDMA channel
1012 *
1013 * Mark the current active descriptor @chan->active_desc as 'done'. This
1014 * function should be called to mark completion of the currently active
1015 * descriptor.
1016 */
1017static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
1018{
1019        unsigned long flags;
1020
1021        spin_lock_irqsave(&chan->lock, flags);
1022
1023        xilinx_dpdma_debugfs_intr_done_count_incr(chan->id);
1024
1025        if (!chan->active_desc) {
1026                dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
1027                goto out_unlock;
1028        }
1029
1030        chan->active_desc->done_cnt++;
1031        if (chan->active_desc->status ==  PREPARED) {
1032                dma_cookie_complete(&chan->active_desc->async_tx);
1033                chan->active_desc->status = ACTIVE;
1034        }
1035
1036out_unlock:
1037        spin_unlock_irqrestore(&chan->lock, flags);
1038        tasklet_schedule(&chan->done_task);
1039}
1040
1041/**
1042 * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
1043 * @chan: DPDMA channel
1044 * @sgl: scatter-gather list
1045 *
 1046 * Prepare a tx descriptor including internal software/hardware descriptors
1047 * for the given scatter-gather transaction.
1048 *
1049 * Return: A dma async tx descriptor on success, or NULL.
1050 */
1051static struct dma_async_tx_descriptor *
1052xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
1053                                struct scatterlist *sgl)
1054{
1055        struct xilinx_dpdma_tx_desc *tx_desc;
1056        struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
1057        struct scatterlist *iter = sgl;
1058        u32 line_size = 0;
1059
1060        if (chan->allocated_desc)
1061                return &chan->allocated_desc->async_tx;
1062
1063        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
1064        if (!tx_desc)
1065                return NULL;
1066
1067        while (!sg_is_chain(iter))
1068                line_size += sg_dma_len(iter++);
1069
1070        while (sgl) {
1071                struct xilinx_dpdma_hw_desc *hw_desc;
1072                dma_addr_t dma_addr[4];
1073                unsigned int num_pages = 0;
1074
1075                sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
1076                if (!sw_desc)
1077                        goto error;
1078
1079                while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
1080                        dma_addr[num_pages] = sg_dma_address(sgl++);
1081                        if (!IS_ALIGNED(dma_addr[num_pages++],
1082                                        XILINX_DPDMA_ALIGN_BYTES)) {
1083                                dev_err(chan->xdev->dev,
1084                                        "buffer should be aligned at %d B\n",
1085                                        XILINX_DPDMA_ALIGN_BYTES);
1086                                goto error;
1087                        }
1088                }
1089
1090                chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
1091                hw_desc = &sw_desc->hw;
1092                hw_desc->xfer_size = line_size;
1093                hw_desc->hsize_stride =
1094                        line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
1095                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
1096                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
1097                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
1098
1099                list_add_tail(&sw_desc->node, &tx_desc->descriptors);
1100                last = sw_desc;
1101                if (sg_is_last(sgl))
1102                        break;
1103                sgl = sg_chain_ptr(sgl);
1104        }
1105
1106        sw_desc = list_first_entry(&tx_desc->descriptors,
1107                                   struct xilinx_dpdma_sw_desc, node);
1108        if (chan->xdev->ext_addr)
1109                xilinx_dpdma_sw_desc_next_64(last, sw_desc);
1110        else
1111                xilinx_dpdma_sw_desc_next_32(last, sw_desc);
1112        last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
1113        last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
1114
1115        chan->allocated_desc = tx_desc;
1116
1117        return &tx_desc->async_tx;
1118
1119error:
1120        xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
1121
1122        return NULL;
1123}
1124
1125/**
1126 * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
1127 * @chan: DPDMA channel
1128 * @buf_addr: buffer address
1129 * @buf_len: buffer length
 1130 * @period_len: length of one period in bytes
 1131 *
 1132 * Prepare a tx descriptor including internal software/hardware descriptors
1133 * for the given cyclic transaction.
1134 *
1135 * Return: A dma async tx descriptor on success, or NULL.
1136 */
1137static struct dma_async_tx_descriptor *
1138xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
1139                              dma_addr_t buf_addr, size_t buf_len,
1140                              size_t period_len)
1141{
1142        struct xilinx_dpdma_tx_desc *tx_desc;
1143        struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
1144        unsigned int periods = buf_len / period_len;
1145        unsigned int i;
1146
1147        if (chan->allocated_desc)
1148                return &chan->allocated_desc->async_tx;
1149
1150        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
1151        if (!tx_desc)
1152                return NULL;
1153
1154        for (i = 0; i < periods; i++) {
1155                struct xilinx_dpdma_hw_desc *hw_desc;
1156
1157                if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
1158                        dev_err(chan->xdev->dev,
1159                                "buffer should be aligned at %d B\n",
1160                                XILINX_DPDMA_ALIGN_BYTES);
1161                        goto error;
1162                }
1163
1164                sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
1165                if (!sw_desc)
1166                        goto error;
1167
1168                chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
1169                hw_desc = &sw_desc->hw;
1170                hw_desc->xfer_size = period_len;
1171                hw_desc->hsize_stride =
1172                        period_len <<
1173                        XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
1174                hw_desc->hsize_stride |=
1175                        period_len <<
1176                        XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
1177                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
1178                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
1179                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
1180
1181                list_add_tail(&sw_desc->node, &tx_desc->descriptors);
1182
1183                buf_addr += period_len;
1184                last = sw_desc;
1185        }
1186
1187        sw_desc = list_first_entry(&tx_desc->descriptors,
1188                                   struct xilinx_dpdma_sw_desc, node);
1189        if (chan->xdev->ext_addr)
1190                xilinx_dpdma_sw_desc_next_64(last, sw_desc);
1191        else
1192                xilinx_dpdma_sw_desc_next_32(last, sw_desc);
1193        last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
1194
1195        chan->allocated_desc = tx_desc;
1196
1197        return &tx_desc->async_tx;
1198
1199error:
1200        xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
1201
1202        return NULL;
1203}
1204
1205/**
 1206 * xilinx_dpdma_chan_prep_interleaved - Prepare an interleaved dma descriptor
1207 * @chan: DPDMA channel
1208 * @xt: dma interleaved template
1209 *
 1210 * Prepare a tx descriptor including internal software/hardware descriptors
1211 * based on @xt.
1212 *
1213 * Return: A dma async tx descriptor on success, or NULL.
1214 */
1215static struct dma_async_tx_descriptor *
1216xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
1217                                   struct dma_interleaved_template *xt)
1218{
1219        struct xilinx_dpdma_tx_desc *tx_desc;
1220        struct xilinx_dpdma_sw_desc *sw_desc;
1221        struct xilinx_dpdma_hw_desc *hw_desc;
1222        size_t hsize = xt->sgl[0].size;
1223        size_t stride = hsize + xt->sgl[0].icg;
1224
1225        if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
1226                dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
1227                        XILINX_DPDMA_ALIGN_BYTES);
1228                return NULL;
1229        }
1230
1231        if (chan->allocated_desc)
1232                return &chan->allocated_desc->async_tx;
1233
1234        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
1235        if (!tx_desc)
1236                return NULL;
1237
1238        sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
1239        if (!sw_desc)
1240                goto error;
1241
1242        chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
1243        hw_desc = &sw_desc->hw;
1244        hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
1245        hw_desc->xfer_size = hsize * xt->numf;
1246        hw_desc->hsize_stride = hsize <<
1247                                XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
1248        hw_desc->hsize_stride |= (stride / 16) <<
1249                                 XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
1250        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
1251        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
1252        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
1253        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
1254
1255        list_add_tail(&sw_desc->node, &tx_desc->descriptors);
1256        chan->allocated_desc = tx_desc;
1257
1258        return &tx_desc->async_tx;
1259
1260error:
1261        xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
1262
1263        return NULL;
1264}
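
/*
 * Worked example (hypothetical numbers, for illustration only): for a
 * 1920x1080 XRGB8888 plane with no inter-chunk gap, sgl[0].size = 1920 * 4 =
 * 7680 bytes and icg = 0, so hsize = 7680 (already a multiple of 16 bytes),
 * stride = 7680, xfer_size = 7680 * 1080, and the stride is programmed as
 * 7680 / 16 = 480 in the upper bits of hsize_stride.
 */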
1265
1266/* Xilinx DPDMA channel operations */
1267
1268/**
1269 * xilinx_dpdma_chan_enable - Enable the channel
1270 * @chan: DPDMA channel
1271 *
1272 * Enable the channel and its interrupts. Set the QoS values for video class.
1273 */
1274static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
1275{
1276        u32 reg;
1277
1278        reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
1279        reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
1280        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
1281        reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
 1282        reg |= XILINX_DPDMA_EINTR_GLOBAL_ERR;
1283        dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
1284
1285        reg = XILINX_DPDMA_CH_CNTL_ENABLE;
1286        reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
1287               XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
1288        reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
1289               XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
1290        reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
1291               XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
1292        dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
1293}
1294
1295/**
1296 * xilinx_dpdma_chan_disable - Disable the channel
1297 * @chan: DPDMA channel
1298 *
1299 * Disable the channel and its interrupts.
1300 */
1301static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
1302{
1303        u32 reg;
1304
1305        reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
 1306        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS, reg);
 1307        reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
 1308        dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIDS, reg);
1309
1310        dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
1311}
1312
1313/**
1314 * xilinx_dpdma_chan_pause - Pause the channel
1315 * @chan: DPDMA channel
1316 *
1317 * Pause the channel.
1318 */
1319static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
1320{
1321        dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
1322}
1323
1324/**
1325 * xilinx_dpdma_chan_unpause - Unpause the channel
1326 * @chan: DPDMA channel
1327 *
1328 * Unpause the channel.
1329 */
1330static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
1331{
1332        dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
1333}
1334
1335static u32
1336xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
1337{
1338        struct xilinx_dpdma_device *xdev = chan->xdev;
1339        u32 i = 0, ret = 0;
1340
1341        for (i = VIDEO0; i < GRAPHICS; i++) {
1342                if (xdev->chan[i]->video_group &&
1343                    xdev->chan[i]->status != STREAMING)
1344                        return 0;
1345
1346                if (xdev->chan[i]->video_group)
1347                        ret |= BIT(i);
1348        }
1349
1350        return ret;
1351}
1352
1353/**
1354 * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
1355 * @chan: DPDMA channel
1356 *
1357 * Issue the first pending descriptor from @chan->submitted_desc. If the channel
1358 * is already streaming, the channel is re-triggered with the pending
1359 * descriptor.
1360 */
1361static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
1362{
1363        struct xilinx_dpdma_device *xdev = chan->xdev;
1364        struct xilinx_dpdma_sw_desc *sw_desc;
1365        unsigned long flags;
1366        u32 reg, channels;
1367
1368        spin_lock_irqsave(&chan->lock, flags);
1369
1370        if (!chan->submitted_desc || chan->pending_desc)
1371                goto out_unlock;
1372
1373        chan->pending_desc = chan->submitted_desc;
1374        chan->submitted_desc = NULL;
1375
1376        sw_desc = list_first_entry(&chan->pending_desc->descriptors,
1377                                   struct xilinx_dpdma_sw_desc, node);
1378        dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
1379                    (u32)sw_desc->phys);
1380        if (xdev->ext_addr)
1381                dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
1382                            ((u64)sw_desc->phys >> 32) &
1383                            XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
1384
1385        if (chan->first_frame) {
1386                chan->first_frame = false;
1387                if (chan->video_group) {
1388                        channels = xilinx_dpdma_chan_video_group_ready(chan);
1389                        if (!channels)
1390                                goto out_unlock;
1391                        reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
1392                } else {
1393                        reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
1394                }
1395        } else {
1396                if (chan->video_group) {
1397                        channels = xilinx_dpdma_chan_video_group_ready(chan);
1398                        if (!channels)
1399                                goto out_unlock;
1400                        reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
1401                } else {
1402                        reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
1403                }
1404        }
1405
1406        dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
1407
1408out_unlock:
1409        spin_unlock_irqrestore(&chan->lock, flags);
1410}
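
/*
 * Trigger value example (illustrative): on the first frame of the GRAPHICS
 * channel (id 3), reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + 3) = 0x8; on
 * later frames it is retriggered with 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT +
 * 3) = 0x200. For a ready video group of VIDEO0 and VIDEO1, channels =
 * BIT(0) | BIT(1) = 0x3 is shifted the same way.
 */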
1411
1412/**
1413 * xilinx_dpdma_chan_start - Start the channel
1414 * @chan: DPDMA channel
1415 *
1416 * Start the channel by enabling interrupts and triggering the channel.
1417 * If the channel is enabled already or there's no pending descriptor, this
1418 * function won't do anything on the channel.
1419 */
1420static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
1421{
1422        unsigned long flags;
1423
1424        spin_lock_irqsave(&chan->lock, flags);
1425
1426        if (!chan->submitted_desc || chan->status == STREAMING)
1427                goto out_unlock;
1428
1429        xilinx_dpdma_chan_unpause(chan);
1430        xilinx_dpdma_chan_enable(chan);
1431        chan->first_frame = true;
1432        chan->status = STREAMING;
1433
1434out_unlock:
1435        spin_unlock_irqrestore(&chan->lock, flags);
1436}
1437
1438/**
1439 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
1440 * @chan: DPDMA channel
1441 *
1442 * Read and return the number of outstanding transactions from register.
1443 *
1444 * Return: Number of outstanding transactions from the status register.
1445 */
1446static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
1447{
 1448        return (dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS) &
 1449                XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK) >>
 1450               XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT;
1451}
1452
1453/**
1454 * xilinx_dpdma_chan_no_ostand - Notify no outstanding transaction event
1455 * @chan: DPDMA channel
1456 *
 1457 * Notify waiters of the 'no outstanding transaction' event, so waiters can
 1458 * stop the channel safely. This function is supposed to be called when the
 1459 * 'no outstanding' interrupt is generated. That interrupt is disabled and
1460 * should be re-enabled when this event is handled. If the channel status
1461 * register still shows some number of outstanding transactions, the interrupt
1462 * remains enabled.
1463 *
1464 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
1465 * transaction(s).
1466 */
1467static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
1468{
1469        u32 cnt;
1470
1471        cnt = xilinx_dpdma_chan_ostand(chan);
1472        if (cnt) {
1473                dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
1474                return -EWOULDBLOCK;
1475        }
1476
1477        /* Disable 'no outstanding' interrupt */
1478        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
1479                    1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
1480        wake_up(&chan->wait_to_stop);
1481
1482        return 0;
1483}
1484
1485/**
1486 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding intr
1487 * @chan: DPDMA channel
1488 *
1489 * Wait for the no outstanding transaction interrupt. This function can sleep
1490 * for up to 50 ms.
1491 *
1492 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
1493 * from wait_event_interruptible_timeout().
1494 */
1495static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
1496{
1497        int ret;
1498
1499        /* Wait up to 50 ms for the no outstanding transaction interrupt */
1500        ret = wait_event_interruptible_timeout(chan->wait_to_stop,
1501                                               !xilinx_dpdma_chan_ostand(chan),
1502                                               msecs_to_jiffies(50));
1503        if (ret > 0) {
1504                dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
1505                            1 <<
1506                            (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
1507                return 0;
1508        }
1509
1510        dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
1511                xilinx_dpdma_chan_ostand(chan));
1512
1513        if (ret == 0)
1514                return -ETIMEDOUT;
1515
1516        return ret;
1517}
1518
1519/**
1520 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
1521 * @chan: DPDMA channel
1522 *
1523 * Poll the outstanding transaction status, and return when there's no
1524 * outstanding transaction. This function can be used in interrupt context or
1525 * where atomicity is required. The calling thread may busy-wait over 50 ms.
1526 *
1527 * Return: 0 on success, or -ETIMEDOUT.
1528 */
1529static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
1530{
1531        u32 cnt, loop = 50000;
1532
1533        /* Poll at least for 50ms (20 fps). */
1534        do {
1535                cnt = xilinx_dpdma_chan_ostand(chan);
1536                udelay(1);
1537        } while (loop-- > 0 && cnt);
1538
1539        if (!cnt) {
1540                dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
1541                            1 <<
1542                            (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
1543                return 0;
1544        }
1545
1546        dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
1547                xilinx_dpdma_chan_ostand(chan));
1548
1549        return -ETIMEDOUT;
1550}
1551
1552/**
1553 * xilinx_dpdma_chan_stop - Stop the channel
1554 * @chan: DPDMA channel
1555 * @poll: flag whether to poll or wait
1556 *
1557 * Stop the channel with the following sequence: 1. Pause, 2. Wait (sleep) for
1558 * no outstanding transaction interrupt, 3. Disable the channel.
1559 *
1560 * Return: 0 on success, or an error from xilinx_dpdma_chan_{poll,wait}_no_ostand().
1561 */
1562static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan, bool poll)
1563{
1564        unsigned long flags;
1565        int ret;
1566
1567        xilinx_dpdma_chan_pause(chan);
1568        if (poll)
1569                ret = xilinx_dpdma_chan_poll_no_ostand(chan);
1570        else
1571                ret = xilinx_dpdma_chan_wait_no_ostand(chan);
1572        if (ret)
1573                return ret;
1574
1575        spin_lock_irqsave(&chan->lock, flags);
1576        xilinx_dpdma_chan_disable(chan);
1577        chan->status = IDLE;
1578        spin_unlock_irqrestore(&chan->lock, flags);
1579
1580        return 0;
1581}
1582
1583/**
1584 * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
1585 * @chan: DPDMA channel
1586 *
1587 * Allocate a descriptor pool for the channel.
1588 *
1589 * Return: 0 on success, or -ENOMEM if the descriptor pool allocation fails.
1590 */
1591static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
1592{
1593        chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
1594                                chan->xdev->dev,
1595                                sizeof(struct xilinx_dpdma_sw_desc),
1596                                __alignof__(struct xilinx_dpdma_sw_desc), 0);
1597        if (!chan->desc_pool) {
1598                dev_err(chan->xdev->dev,
1599                        "failed to allocate a descriptor pool\n");
1600                return -ENOMEM;
1601        }
1602
1603        return 0;
1604}
1605
1606/**
1607 * xilinx_dpdma_chan_free_resources - Free all resources for the channel
1608 * @chan: DPDMA channel
1609 *
1610 * Free all descriptors and the descriptor pool for the channel.
1611 */
1612static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
1613{
1614        xilinx_dpdma_chan_free_all_desc(chan);
1615        dma_pool_destroy(chan->desc_pool);
1616        chan->desc_pool = NULL;
1617}
1618
1619/**
1620 * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
1621 * @chan: DPDMA channel
1622 *
1623 * Stop the channel and free all associated descriptors. Poll for the no
1624 * outstanding transaction state, as this can be called from an atomic context.
1625 *
1626 * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
1627 */
1628static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
1629{
1630        struct xilinx_dpdma_device *xdev = chan->xdev;
1631        int ret;
1632        unsigned int i;
1633
1634        if (chan->video_group) {
1635                for (i = VIDEO0; i < GRAPHICS; i++) {
1636                        if (xdev->chan[i]->video_group &&
1637                            xdev->chan[i]->status == STREAMING) {
1638                                xilinx_dpdma_chan_pause(xdev->chan[i]);
1639                                xdev->chan[i]->video_group = false;
1640                        }
1641                }
1642        }
1643
1644        ret = xilinx_dpdma_chan_stop(chan, true);
1645        if (ret)
1646                return ret;
1647
1648        xilinx_dpdma_chan_free_all_desc(chan);
1649
1650        return 0;
1651}
1652
1653/**
1654 * xilinx_dpdma_chan_synchronize - Synchronize all outgoing transfer
1655 * @chan: DPDMA channel
1656 *
1657 * Stop the channel and free all associated descriptors. As this can't be
1658 * called in an atomic context, sleep-wait for no outstanding transaction
1659 * interrupt. Then kill all related tasklets.
1660 *
1661 * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
1662 */
1663static int xilinx_dpdma_chan_synchronize(struct xilinx_dpdma_chan *chan)
1664{
1665        struct xilinx_dpdma_device *xdev = chan->xdev;
1666        int ret;
1667        unsigned int i;
1668
1669        if (chan->video_group) {
1670                for (i = VIDEO0; i < GRAPHICS; i++) {
1671                        if (xdev->chan[i]->video_group &&
1672                            xdev->chan[i]->status == STREAMING) {
1673                                xilinx_dpdma_chan_pause(xdev->chan[i]);
1674                                xdev->chan[i]->video_group = false;
1675                        }
1676                }
1677        }
1678
1679        ret = xilinx_dpdma_chan_stop(chan, false);
1680        if (ret)
1681                return ret;
1682
1683        tasklet_kill(&chan->err_task);
1684        tasklet_kill(&chan->done_task);
1685        xilinx_dpdma_chan_free_all_desc(chan);
1686
1687        return 0;
1688}
1689
1690/**
1691 * xilinx_dpdma_chan_err - Detect any channel error
1692 * @chan: DPDMA channel
1693 * @isr: masked Interrupt Status Register
1694 * @eisr: Error Interrupt Status Register
1695 *
1696 * Return: true if any channel error occurs, or false otherwise.
1697 */
1698static bool
1699xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
1700{
1701        if (!chan)
1702                return false;
1703
1704        if (chan->status == STREAMING &&
1705            ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
1706            (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
1707                return true;
1708
1709        return false;
1710}
1711
1712/**
1713 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
1714 * @chan: DPDMA channel
1715 *
1716 * This function is called when any channel error or any global error occurs.
1717 * The function disables the channel that was paused due to the error, and
1718 * determines whether the current active descriptor can be rescheduled based
1719 * on its status.
1720 */
1721static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
1722{
1723        struct xilinx_dpdma_device *xdev = chan->xdev;
1724        struct device *dev = xdev->dev;
1725        unsigned long flags;
1726
1727        spin_lock_irqsave(&chan->lock, flags);
1728
1729        dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
1730                dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
1731                dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
1732        dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
1733                dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
1734                dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
1735
1736        xilinx_dpdma_chan_disable(chan);
1737        chan->status = IDLE;
1738
1739        if (!chan->active_desc)
1740                goto out_unlock;
1741
1742        xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
1743
1744        switch (chan->active_desc->status) {
1745        case ERRORED:
1746                dev_dbg(dev, "repeated error on desc\n");
                /* fall through */
1747        case ACTIVE:
1748        case PREPARED:
1749                /* Reschedule if there's no new descriptor */
1750                if (!chan->pending_desc && !chan->submitted_desc) {
1751                        chan->active_desc->status = ERRORED;
1752                        chan->submitted_desc = chan->active_desc;
1753                } else {
1754                        xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
1755                }
1756                break;
1757        }
1758        chan->active_desc = NULL;
1759
1760out_unlock:
1761        spin_unlock_irqrestore(&chan->lock, flags);
1762}
1763
1764/* DMA tx descriptor */
1765
1766static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
1767{
1768        struct xilinx_dpdma_chan *chan = to_xilinx_chan(tx->chan);
1769        struct xilinx_dpdma_tx_desc *tx_desc = to_dpdma_tx_desc(tx);
1770
1771        return xilinx_dpdma_chan_submit_tx_desc(chan, tx_desc);
1772}
1773
1774/* DMA channel operations */
1775
1776static struct dma_async_tx_descriptor *
1777xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
1778                           unsigned int sg_len,
1779                           enum dma_transfer_direction direction,
1780                           unsigned long flags, void *context)
1781{
1782        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1783        struct dma_async_tx_descriptor *async_tx;
1784
1785        if (direction != DMA_MEM_TO_DEV)
1786                return NULL;
1787
1788        if (!sgl || sg_len < 2)
1789                return NULL;
1790
1791        async_tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
1792        if (!async_tx)
1793                return NULL;
1794
1795        dma_async_tx_descriptor_init(async_tx, dchan);
1796        async_tx->tx_submit = xilinx_dpdma_tx_submit;
1797        async_tx->flags = flags;
1798        async_tx_ack(async_tx);
1799
1800        return async_tx;
1801}
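
/*
 * A minimal usage sketch for the slave_sg path above, using only generic
 * dmaengine client calls. The device pointer "dev", the channel name "vid0"
 * and the mapped scatter-gather table "sgt" are hypothetical placeholders;
 * the channel must already be wired to this controller in the device tree.
 * Note that the wrapper above rejects scatterlists shorter than two entries.
 *
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	chan = dma_request_chan(dev, "vid0");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgt->sgl, sgt->nents,
 *				       DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!desc) {
 *		dma_release_channel(chan);
 *		return -EINVAL;
 *	}
 *
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */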
1802
1803static struct dma_async_tx_descriptor *
1804xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
1805                             size_t buf_len, size_t period_len,
1806                             enum dma_transfer_direction direction,
1807                             unsigned long flags)
1808{
1809        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1810        struct dma_async_tx_descriptor *async_tx;
1811
1812        if (direction != DMA_MEM_TO_DEV)
1813                return NULL;
1814
1815        if (buf_len % period_len)
1816                return NULL;
1817
1818        async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
1819                                                 period_len);
1820        if (!async_tx)
1821                return NULL;
1822
1823        dma_async_tx_descriptor_init(async_tx, dchan);
1824        async_tx->tx_submit = xilinx_dpdma_tx_submit;
1825        async_tx->flags = flags;
1826        async_tx_ack(async_tx);
1827
1828        return async_tx;
1829}
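
/*
 * A matching sketch for the cyclic path above, as an audio-style ring buffer
 * split into equal periods. "chan", "buf_dma", "buf_len" and "period_len" are
 * hypothetical placeholders; buf_len must be a whole multiple of period_len
 * or the wrapper above returns NULL.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EINVAL;
 *
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */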
1830
1831static struct dma_async_tx_descriptor *
1832xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
1833                                  struct dma_interleaved_template *xt,
1834                                  unsigned long flags)
1835{
1836        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1837        struct dma_async_tx_descriptor *async_tx;
1838
1839        if (xt->dir != DMA_MEM_TO_DEV)
1840                return NULL;
1841
1842        if (!xt->numf || !xt->sgl[0].size)
1843                return NULL;
1844
1845        async_tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
1846        if (!async_tx)
1847                return NULL;
1848
1849        dma_async_tx_descriptor_init(async_tx, dchan);
1850        async_tx->tx_submit = xilinx_dpdma_tx_submit;
1851        async_tx->flags = flags;
1852        async_tx_ack(async_tx);
1853
1854        return async_tx;
1855}
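
/*
 * A minimal sketch of the interleaved case above for a display frame: one
 * data chunk per line (frame_size = 1), numf lines, and an inter-chunk gap
 * covering the stride padding. "chan", "frame_dma", "width", "height", "bpp"
 * and "stride" are hypothetical placeholders.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *desc;
 *
 *	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = frame_dma;
 *	xt->src_sgl = true;
 *	xt->frame_size = 1;
 *	xt->numf = height;
 *	xt->sgl[0].size = width * bpp;
 *	xt->sgl[0].icg = stride - xt->sgl[0].size;
 *
 *	desc = dmaengine_prep_interleaved_dma(chan, xt,
 *					      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	kfree(xt);
 *	if (!desc)
 *		return -EINVAL;
 *
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */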
1856
1857static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
1858{
1859        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1860
1861        dma_cookie_init(dchan);
1862
1863        return xilinx_dpdma_chan_alloc_resources(chan);
1864}
1865
1866static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
1867{
1868        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1869
1870        xilinx_dpdma_chan_free_resources(chan);
1871}
1872
1873static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
1874                                              dma_cookie_t cookie,
1875                                              struct dma_tx_state *txstate)
1876{
1877        return dma_cookie_status(dchan, cookie, txstate);
1878}
1879
1880static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
1881{
1882        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1883
1884        xilinx_dpdma_chan_start(chan);
1885        xilinx_dpdma_chan_issue_pending(chan);
1886}
1887
1888static int xilinx_dpdma_config(struct dma_chan *dchan,
1889                               struct dma_slave_config *config)
1890{
1891        if (config->direction != DMA_MEM_TO_DEV)
1892                return -EINVAL;
1893
1894        return 0;
1895}
1896
1897static int xilinx_dpdma_pause(struct dma_chan *dchan)
1898{
1899        xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
1900
1901        return 0;
1902}
1903
1904static int xilinx_dpdma_resume(struct dma_chan *dchan)
1905{
1906        xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
1907
1908        return 0;
1909}
1910
1911static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
1912{
1913        return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
1914}
1915
1916static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
1917{
1918        xilinx_dpdma_chan_synchronize(to_xilinx_chan(dchan));
1919}
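
/*
 * From a client's perspective the two wrappers above map onto the standard
 * teardown order: terminate first (atomic-safe, polls for the channel to
 * drain), then synchronize (may sleep, kills the tasklets) before any memory
 * still referenced by descriptors is freed. A hedged sketch, with "chan"
 * being the client's channel pointer; dmaengine_terminate_sync() combines
 * the first two steps:
 *
 *	dmaengine_terminate_async(chan);
 *	dmaengine_synchronize(chan);
 *	dma_release_channel(chan);
 */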
1920
1921/* Xilinx DPDMA device operations */
1922
1923/**
1924 * xilinx_dpdma_err - Detect any global error
1925 * @isr: Interrupt Status Register
1926 * @eisr: Error Interrupt Status Register
1927 *
1928 * Return: True if any global error occurs, or false otherwise.
1929 */
1930static bool xilinx_dpdma_err(u32 isr, u32 eisr)
1931{
1932        if ((isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
1933             eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR))
1934                return true;
1935
1936        return false;
1937}
1938
1939/**
1940 * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
1941 * @xdev: DPDMA device
1942 * @isr: masked Interrupt Status Register
1943 * @eisr: Error Interrupt Status Register
1944 *
1945 * Handle if any error occurs based on @isr and @eisr. This function disables
1946 * corresponding error interrupts, and those should be re-enabled once handling
1947 * is done.
1948 */
1949static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
1950                                         u32 isr, u32 eisr)
1951{
1952        bool err = xilinx_dpdma_err(isr, eisr);
1953        unsigned int i;
1954
1955        dev_dbg_ratelimited(xdev->dev,
1956                            "error intr: isr = 0x%08x, eisr = 0x%08x\n",
1957                            isr, eisr);
1958
1959        /* Disable channel error interrupts until errors are handled. */
1960        dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
1961                    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
1962        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
1963                    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
1964
1965        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
1966                if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
1967                        tasklet_schedule(&xdev->chan[i]->err_task);
1968}
1969
1970/**
1971 * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
1972 * @xdev: DPDMA device
1973 *
1974 * Handle the VSYNC event. At this point, the current frame becomes active,
1975 * which means the DPDMA actually starts fetching, and the next frame can be
1976 * scheduled.
1977 */
1978static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
1979{
1980        unsigned int i;
1981
1982        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
1983                if (xdev->chan[i] &&
1984                    xdev->chan[i]->status == STREAMING) {
1985                        xilinx_dpdma_chan_desc_active(xdev->chan[i]);
1986                        xilinx_dpdma_chan_issue_pending(xdev->chan[i]);
1987                }
1988        }
1989}
1990
1991/**
1992 * xilinx_dpdma_enable_intr - Enable interrupts
1993 * @xdev: DPDMA device
1994 *
1995 * Enable interrupts.
1996 */
1997static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
1998{
1999        dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
2000        dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
2001}
2002
2003/**
2004 * xilinx_dpdma_disable_intr - Disable interrupts
2005 * @xdev: DPDMA device
2006 *
2007 * Disable interrupts.
2008 */
2009static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
2010{
2011        dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
2012        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
2013}
2014
2015/* Interrupt handling operations */
2016
2017/**
2018 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
2019 * @data: tasklet data to be cast to the DPDMA channel structure
2020 *
2021 * Per channel error handling tasklet. This function waits for the outstanding
2022 * transaction to complete and triggers error handling. After error handling, it
2023 * re-enables the channel error interrupts and restarts the channel if needed.
2024 */
2025static void xilinx_dpdma_chan_err_task(unsigned long data)
2026{
2027        struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
2028        struct xilinx_dpdma_device *xdev = chan->xdev;
2029
2030        /* Proceed error handling even when polling fails. */
2031        xilinx_dpdma_chan_poll_no_ostand(chan);
2032
2033        xilinx_dpdma_chan_handle_err(chan);
2034
2035        dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
2036                    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
2037        dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
2038                    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
2039
2040        xilinx_dpdma_chan_start(chan);
2041        xilinx_dpdma_chan_issue_pending(chan);
2042}
2043
2044/**
2045 * xilinx_dpdma_chan_done_task - Per channel tasklet for done interrupt handling
2046 * @data: tasklet data to be cast to the DPDMA channel structure
2047 *
2048 * Per channel done interrupt handling tasklet.
2049 */
2050static void xilinx_dpdma_chan_done_task(unsigned long data)
2051{
2052        struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
2053
2054        xilinx_dpdma_chan_cleanup_desc(chan);
2055}
2056
2057static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
2058{
2059        struct xilinx_dpdma_device *xdev = data;
2060        u32 status, error, i;
2061        unsigned long masked;
2062
2063        status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
2064        error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
2065        if (!status && !error)
2066                return IRQ_NONE;
2067
2068        dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
2069        dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
2070
2071        if (status & XILINX_DPDMA_INTR_VSYNC)
2072                xilinx_dpdma_handle_vsync_intr(xdev);
2073
2074        masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
2075                 XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
2076        if (masked)
2077                for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
2078                        xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
2079
2080        masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
2081                 XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
2082        if (masked)
2083                for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
2084                        xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
2085
2086        masked = status & XILINX_DPDMA_INTR_ERR_ALL;
2087        if (masked || error)
2088                xilinx_dpdma_handle_err_intr(xdev, masked, error);
2089
2090        return IRQ_HANDLED;
2091}
2092
2093/* Initialization operations */
2094
2095static struct xilinx_dpdma_chan *
2096xilinx_dpdma_chan_probe(struct device_node *node,
2097                        struct xilinx_dpdma_device *xdev)
2098{
2099        struct xilinx_dpdma_chan *chan;
2100
2101        chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2102        if (!chan)
2103                return ERR_PTR(-ENOMEM);
2104
2105        if (of_device_is_compatible(node, "xlnx,video0")) {
2106                chan->id = VIDEO0;
2107        } else if (of_device_is_compatible(node, "xlnx,video1")) {
2108                chan->id = VIDEO1;
2109        } else if (of_device_is_compatible(node, "xlnx,video2")) {
2110                chan->id = VIDEO2;
2111        } else if (of_device_is_compatible(node, "xlnx,graphics")) {
2112                chan->id = GRAPHICS;
2113        } else if (of_device_is_compatible(node, "xlnx,audio0")) {
2114                chan->id = AUDIO0;
2115        } else if (of_device_is_compatible(node, "xlnx,audio1")) {
2116                chan->id = AUDIO1;
2117        } else {
2118                dev_err(xdev->dev, "invalid channel compatible string in DT\n");
2119                return ERR_PTR(-EINVAL);
2120        }
2121
2122        chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
2123                    chan->id;
2124        chan->status = IDLE;
2125
2126        spin_lock_init(&chan->lock);
2127        INIT_LIST_HEAD(&chan->done_list);
2128        init_waitqueue_head(&chan->wait_to_stop);
2129
2130        tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
2131                     (unsigned long)chan);
2132        tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
2133                     (unsigned long)chan);
2134
2135        chan->common.device = &xdev->common;
2136        chan->xdev = xdev;
2137
2138        list_add_tail(&chan->common.device_node, &xdev->common.channels);
2139        xdev->chan[chan->id] = chan;
2140
2141        return chan;
2142}
2143
2144static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
2145{
2146        tasklet_kill(&chan->err_task);
2147        tasklet_kill(&chan->done_task);
2148        list_del(&chan->common.device_node);
2149}
2150
2151static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2152                                            struct of_dma *ofdma)
2153{
2154        struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
2155        uint32_t chan_id = dma_spec->args[0];
2156
2157        if (chan_id >= XILINX_DPDMA_NUM_CHAN)
2158                return NULL;
2159
2160        if (!xdev->chan[chan_id])
2161                return NULL;
2162
2163        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2164}
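
/*
 * The translation above consumes a single DT cell holding the channel index,
 * so a consumer node references this controller as "dmas = <&dpdma N>" (with
 * a matching "dma-names" entry), where N corresponds to the channel IDs
 * assigned in xilinx_dpdma_chan_probe() above. On the client side the lookup
 * is then the usual named request; "dev" and the name "gfx" are illustrative
 * placeholders:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "gfx");
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */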
2165
2166static int xilinx_dpdma_probe(struct platform_device *pdev)
2167{
2168        struct xilinx_dpdma_device *xdev;
2169        struct xilinx_dpdma_chan *chan;
2170        struct dma_device *ddev;
2171        struct resource *res;
2172        struct device_node *node, *child;
2173        u32 i;
2174        int irq, ret;
2175
2176        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2177        if (!xdev)
2178                return -ENOMEM;
2179
2180        xdev->dev = &pdev->dev;
        platform_set_drvdata(pdev, xdev);
2181        ddev = &xdev->common;
2182        ddev->dev = &pdev->dev;
2183        node = xdev->dev->of_node;
2184
2185        xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
2186        if (IS_ERR(xdev->axi_clk))
2187                return PTR_ERR(xdev->axi_clk);
2188
2189        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2190        xdev->reg = devm_ioremap_resource(&pdev->dev, res);
2191        if (IS_ERR(xdev->reg))
2192                return PTR_ERR(xdev->reg);
2193
2194        irq = platform_get_irq(pdev, 0);
2195        if (irq < 0) {
2196                dev_err(xdev->dev, "failed to get platform irq\n");
2197                return irq;
2198        }
2199
2200        ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
2201                               IRQF_SHARED, dev_name(xdev->dev), xdev);
2202        if (ret) {
2203                dev_err(xdev->dev, "failed to request IRQ\n");
2204                return ret;
2205        }
2206
2207        INIT_LIST_HEAD(&xdev->common.channels);
2208        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
2209        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
2210        dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
2211        dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
2212        ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
2213
2214        ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
2215        ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
2216        ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
2217        ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
2218        ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
2219        ddev->device_tx_status = xilinx_dpdma_tx_status;
2220        ddev->device_issue_pending = xilinx_dpdma_issue_pending;
2221        ddev->device_config = xilinx_dpdma_config;
2222        ddev->device_pause = xilinx_dpdma_pause;
2223        ddev->device_resume = xilinx_dpdma_resume;
2224        ddev->device_terminate_all = xilinx_dpdma_terminate_all;
2225        ddev->device_synchronize = xilinx_dpdma_synchronize;
2226        ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
2227        ddev->directions = BIT(DMA_MEM_TO_DEV);
2228        ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2229
2230        for_each_child_of_node(node, child) {
2231                chan = xilinx_dpdma_chan_probe(child, xdev);
2232                if (IS_ERR(chan)) {
2233                        dev_err(xdev->dev, "failed to probe a channel\n");
2234                        ret = PTR_ERR(chan);
                        of_node_put(child);
2235                        goto error;
2236                }
2237        }
2238
2239        xdev->ext_addr = sizeof(dma_addr_t) > 4;
2240        if (xdev->ext_addr)
2241                xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
2242        else
2243                xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
2244
2245        ret = clk_prepare_enable(xdev->axi_clk);
2246        if (ret) {
2247                dev_err(xdev->dev, "failed to enable the axi clock\n");
2248                goto error;
2249        }
2250
2251        ret = dma_async_device_register(ddev);
2252        if (ret) {
2253                dev_err(xdev->dev, "failed to register the dma device\n");
2254                goto error_dma_async;
2255        }
2256
2257        ret = of_dma_controller_register(xdev->dev->of_node,
2258                                         of_dma_xilinx_xlate, ddev);
2259        if (ret) {
2260                dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
2261                goto error_of_dma;
2262        }
2263
2264        xilinx_dpdma_enable_intr(xdev);
2265
2266        xilinx_dpdma_debugfs_init(&pdev->dev);
2267
2268        dev_info(&pdev->dev, "Xilinx DPDMA engine probed\n");
2269
2270        return 0;
2271
2272error_of_dma:
2273        dma_async_device_unregister(ddev);
2274error_dma_async:
2275        clk_disable_unprepare(xdev->axi_clk);
2276error:
2277        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
2278                if (xdev->chan[i])
2279                        xilinx_dpdma_chan_remove(xdev->chan[i]);
2280
2281        return ret;
2282}
2283
2284static int xilinx_dpdma_remove(struct platform_device *pdev)
2285{
2286        struct xilinx_dpdma_device *xdev;
2287        unsigned int i;
2288
2289        xdev = platform_get_drvdata(pdev);
2290
2291        xilinx_dpdma_disable_intr(xdev);
2292        of_dma_controller_free(pdev->dev.of_node);
2293        dma_async_device_unregister(&xdev->common);
2294        clk_disable_unprepare(xdev->axi_clk);
2295
2296        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
2297                if (xdev->chan[i])
2298                        xilinx_dpdma_chan_remove(xdev->chan[i]);
2299
2300        return 0;
2301}
2302
2303static const struct of_device_id xilinx_dpdma_of_match[] = {
2304        { .compatible = "xlnx,dpdma",},
2305        { /* end of table */ },
2306};
2307MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
2308
2309static struct platform_driver xilinx_dpdma_driver = {
2310        .probe                  = xilinx_dpdma_probe,
2311        .remove                 = xilinx_dpdma_remove,
2312        .driver                 = {
2313                .name           = "xilinx-dpdma",
2314                .of_match_table = xilinx_dpdma_of_match,
2315        },
2316};
2317
2318module_platform_driver(xilinx_dpdma_driver);
2319
2320MODULE_AUTHOR("Xilinx, Inc.");
2321MODULE_DESCRIPTION("Xilinx DPDMA driver");
2322MODULE_LICENSE("GPL v2");
2323