linux/drivers/dma/xilinx/xilinx_dpdma.c
   1/*
   2 * Xilinx DPDMA Engine driver
   3 *
   4 *  Copyright (C) 2015 Xilinx, Inc.
   5 *
   6 *  Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
   7 *
   8 * This software is licensed under the terms of the GNU General Public
   9 * License version 2, as published by the Free Software Foundation, and
  10 * may be copied, distributed, and modified under those terms.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 */
  17
  18#include <linux/bitops.h>
  19#include <linux/clk.h>
  20#include <linux/delay.h>
  21#include <linux/device.h>
  22#include <linux/dmaengine.h>
  23#include <linux/dmapool.h>
  24#include <linux/gfp.h>
  25#include <linux/interrupt.h>
  26#include <linux/irqreturn.h>
  27#include <linux/module.h>
  28#include <linux/of.h>
  29#include <linux/of_dma.h>
  30#include <linux/platform_device.h>
  31#include <linux/sched.h>
  32#include <linux/slab.h>
  33#include <linux/spinlock.h>
  34#include <linux/types.h>
  35#include <linux/wait.h>
  36
  37#include "../dmaengine.h"
  38
  39/* DPDMA registers */
  40#define XILINX_DPDMA_ERR_CTRL                           0x0
  41#define XILINX_DPDMA_ISR                                0x4
  42#define XILINX_DPDMA_IMR                                0x8
  43#define XILINX_DPDMA_IEN                                0xc
  44#define XILINX_DPDMA_IDS                                0x10
  45#define XILINX_DPDMA_INTR_DESC_DONE_MASK                (0x3f << 0)
  46#define XILINX_DPDMA_INTR_DESC_DONE_SHIFT               0
  47#define XILINX_DPDMA_INTR_NO_OSTAND_MASK                (0x3f << 6)
  48#define XILINX_DPDMA_INTR_NO_OSTAND_SHIFT               6
  49#define XILINX_DPDMA_INTR_AXI_ERR_MASK                  (0x3f << 12)
  50#define XILINX_DPDMA_INTR_AXI_ERR_SHIFT                 12
  51#define XILINX_DPDMA_INTR_DESC_ERR_MASK                 (0x3f << 18)
  52#define XILINX_DPDMA_INTR_DESC_ERR_SHIFT                 18
  53#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL              BIT(24)
  54#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL             BIT(25)
  55#define XILINX_DPDMA_INTR_AXI_4K_CROSS                  BIT(26)
  56#define XILINX_DPDMA_INTR_VSYNC                         BIT(27)
  57#define XILINX_DPDMA_INTR_CHAN_ERR_MASK                 0x41000
  58#define XILINX_DPDMA_INTR_CHAN_ERR                      0xfff000
  59#define XILINX_DPDMA_INTR_GLOBAL_ERR                    0x7000000
  60#define XILINX_DPDMA_INTR_ERR_ALL                       0x7fff000
  61#define XILINX_DPDMA_INTR_CHAN_MASK                     0x41041
  62#define XILINX_DPDMA_INTR_GLOBAL_MASK                   0xf000000
  63#define XILINX_DPDMA_INTR_ALL                           0xfffffff
  64#define XILINX_DPDMA_EISR                               0x14
  65#define XILINX_DPDMA_EIMR                               0x18
  66#define XILINX_DPDMA_EIEN                               0x1c
  67#define XILINX_DPDMA_EIDS                               0x20
  68#define XILINX_DPDMA_EINTR_INV_APB                      BIT(0)
  69#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK              (0x3f << 1)
  70#define XILINX_DPDMA_EINTR_RD_AXI_ERR_SHIFT             1
  71#define XILINX_DPDMA_EINTR_PRE_ERR_MASK                 (0x3f << 7)
  72#define XILINX_DPDMA_EINTR_PRE_ERR_SHIFT                7
  73#define XILINX_DPDMA_EINTR_CRC_ERR_MASK                 (0x3f << 13)
  74#define XILINX_DPDMA_EINTR_CRC_ERR_SHIFT                13
  75#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK              (0x3f << 19)
  76#define XILINX_DPDMA_EINTR_WR_AXI_ERR_SHIFT             19
  77#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK           (0x3f << 25)
  78#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_SHIFT          25
  79#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL             BIT(31)
  80#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK                0x2082082
  81#define XILINX_DPDMA_EINTR_CHAN_ERR                     0x7ffffffe
  82#define XILINX_DPDMA_EINTR_GLOBAL_ERR                   0x80000001
  83#define XILINX_DPDMA_EINTR_ALL                          0xffffffff
  84#define XILINX_DPDMA_CNTL                               0x100
  85#define XILINX_DPDMA_GBL                                0x104
  86#define XILINX_DPDMA_GBL_TRIG_SHIFT                     0
  87#define XILINX_DPDMA_GBL_RETRIG_SHIFT                   6
  88#define XILINX_DPDMA_ALC0_CNTL                          0x108
  89#define XILINX_DPDMA_ALC0_STATUS                        0x10c
  90#define XILINX_DPDMA_ALC0_MAX                           0x110
  91#define XILINX_DPDMA_ALC0_MIN                           0x114
  92#define XILINX_DPDMA_ALC0_ACC                           0x118
  93#define XILINX_DPDMA_ALC0_ACC_TRAN                      0x11c
  94#define XILINX_DPDMA_ALC1_CNTL                          0x120
  95#define XILINX_DPDMA_ALC1_STATUS                        0x124
  96#define XILINX_DPDMA_ALC1_MAX                           0x128
  97#define XILINX_DPDMA_ALC1_MIN                           0x12c
  98#define XILINX_DPDMA_ALC1_ACC                           0x130
  99#define XILINX_DPDMA_ALC1_ACC_TRAN                      0x134
 100
 101/* Channel register */
 102#define XILINX_DPDMA_CH_BASE                            0x200
 103#define XILINX_DPDMA_CH_OFFSET                          0x100
 104#define XILINX_DPDMA_CH_DESC_START_ADDRE                0x0
 105#define XILINX_DPDMA_CH_DESC_START_ADDR                 0x4
 106#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE                 0x8
 107#define XILINX_DPDMA_CH_DESC_NEXT_ADDR                  0xc
 108#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE                  0x10
 109#define XILINX_DPDMA_CH_PYLD_CUR_ADDR                   0x14
 110#define XILINX_DPDMA_CH_CNTL                            0x18
 111#define XILINX_DPDMA_CH_CNTL_ENABLE                     BIT(0)
 112#define XILINX_DPDMA_CH_CNTL_PAUSE                      BIT(1)
 113#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT          2
 114#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT          6
 115#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT          10
 116#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS              11
 117#define XILINX_DPDMA_CH_STATUS                          0x1c
 118#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK           (0xf << 21)
 119#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT          21
 120#define XILINX_DPDMA_CH_VDO                             0x20
 121#define XILINX_DPDMA_CH_PYLD_SZ                         0x24
 122#define XILINX_DPDMA_CH_DESC_ID                         0x28
 123
 124/* DPDMA descriptor fields */
 125#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE              (0xa5)
 126#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR         BIT(8)
 127#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE           BIT(9)
 128#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE           BIT(10)
 129#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE             BIT(18)
 130#define XILINX_DPDMA_DESC_CONTROL_LAST                  BIT(19)
 131#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC            BIT(20)
 132#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME         BIT(21)
 133#define XILINX_DPDMA_DESC_ID_MASK                       (0xffff << 0)
 134#define XILINX_DPDMA_DESC_ID_SHIFT                      (0)
 135#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK       (0x3ffff << 0)
 136#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT      (0)
 137#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK      (0x3fff << 18)
 138#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT     (18)
 139#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK            (0xfff)
 140#define XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT           (16)
 141
 142#define XILINX_DPDMA_ALIGN_BYTES                        256
 143
 144#define XILINX_DPDMA_NUM_CHAN                           6
 145#define XILINX_DPDMA_PAGE_MASK                          ((1 << 12) - 1)
 146#define XILINX_DPDMA_PAGE_SHIFT                         12
 147
 148/**
 149 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 150 * @control: control configuration field
 151 * @desc_id: descriptor ID
 152 * @xfer_size: transfer size
 153 * @hsize_stride: horizontal size and stride
 154 * @timestamp_lsb: LSB of time stamp
 155 * @timestamp_msb: MSB of time stamp
 156 * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr)
 157 * @next_desc: next descriptor 32 bit address
 158 * @src_addr: payload source address (lower 32 bit of 1st 4KB page)
 159 * @addr_ext_23: upper 16 bit of 48 bit address (src_addr2 and src_addr3)
 160 * @addr_ext_45: upper 16 bit of 48 bit address (src_addr4 and src_addr5)
 161 * @src_addr2: payload source address (lower 32 bit of 2nd 4KB page)
 162 * @src_addr3: payload source address (lower 32 bit of 3rd 4KB page)
 163 * @src_addr4: payload source address (lower 32 bit of 4th 4KB page)
 164 * @src_addr5: payload source address (lower 32 bit of 5th 4KB page)
 165 * @crc: descriptor CRC
 166 */
 167struct xilinx_dpdma_hw_desc {
 168        u32 control;
 169        u32 desc_id;
 170        u32 xfer_size;
 171        u32 hsize_stride;
 172        u32 timestamp_lsb;
 173        u32 timestamp_msb;
 174        u32 addr_ext;
 175        u32 next_desc;
 176        u32 src_addr;
 177        u32 addr_ext_23;
 178        u32 addr_ext_45;
 179        u32 src_addr2;
 180        u32 src_addr3;
 181        u32 src_addr4;
 182        u32 src_addr5;
 183        u32 crc;
 184} __aligned(XILINX_DPDMA_ALIGN_BYTES);
 185
 186/**
 187 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 188 * @hw: DPDMA hardware descriptor
 189 * @node: list node for software descriptors
 190 * @phys: physical address of the software descriptor
 191 */
 192struct xilinx_dpdma_sw_desc {
 193        struct xilinx_dpdma_hw_desc hw;
 194        struct list_head node;
 195        dma_addr_t phys;
 196};
 197
 198/**
 199 * enum xilinx_dpdma_tx_desc_status - DPDMA tx descriptor status
 200 * @PREPARED: descriptor is prepared for transaction
  201 * @ACTIVE: transaction is active or has completed successfully
  202 * @ERRORED: transaction has encountered an error
 203 */
 204enum xilinx_dpdma_tx_desc_status {
 205        PREPARED,
 206        ACTIVE,
 207        ERRORED
 208};
 209
 210/**
 211 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 212 * @async_tx: DMA async transaction descriptor
 213 * @descriptors: list of software descriptors
 214 * @node: list node for transaction descriptors
 215 * @status: tx descriptor status
  216 * @done_cnt: number of completion notifications to deliver
 217 */
 218struct xilinx_dpdma_tx_desc {
 219        struct dma_async_tx_descriptor async_tx;
 220        struct list_head descriptors;
 221        struct list_head node;
 222        enum xilinx_dpdma_tx_desc_status status;
 223        unsigned int done_cnt;
 224};
 225
 226/**
 227 * enum xilinx_dpdma_chan_id - DPDMA channel ID
 228 * @VIDEO0: video 1st channel
 229 * @VIDEO1: video 2nd channel for multi plane yuv formats
 230 * @VIDEO2: video 3rd channel for multi plane yuv formats
 231 * @GRAPHICS: graphics channel
 232 * @AUDIO0: 1st audio channel
 233 * @AUDIO1: 2nd audio channel
 234 */
 235enum xilinx_dpdma_chan_id {
 236        VIDEO0,
 237        VIDEO1,
 238        VIDEO2,
 239        GRAPHICS,
 240        AUDIO0,
 241        AUDIO1
 242};
 243
 244/**
 245 * enum xilinx_dpdma_chan_status - DPDMA channel status
 246 * @IDLE: idle state
 247 * @STREAMING: actively streaming state
 248 */
 249enum xilinx_dpdma_chan_status {
 250        IDLE,
 251        STREAMING
 252};
 253
 254/*
 255 * DPDMA descriptor placement
 256 * --------------------------
  257 * The DPDMA descriptor lifetime is described with the following placements:
 258 *
 259 * allocated_desc -> submitted_desc -> pending_desc -> active_desc -> done_list
 260 *
  261 * Transitions are triggered as follows:
 262 *
 263 * -> allocated_desc : a descriptor allocation
  264 * allocated_desc -> submitted_desc: a descriptor submission
  265 * submitted_desc -> pending_desc: a request to issue the pending descriptor
 266 * pending_desc -> active_desc: VSYNC intr when a desc is scheduled to DPDMA
 267 * active_desc -> done_list: VSYNC intr when DPDMA switches to a new desc
 268 */
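
/*
 * For reference, a dmaengine client such as the DisplayPort subsystem driver
 * typically drives these transitions through the standard slave DMA API. The
 * sequence below is only an illustrative sketch; 'xt' is assumed to be a
 * filled dma_interleaved_template describing one frame:
 *
 *      tx = dmaengine_prep_interleaved_dma(dchan, xt, DMA_CTRL_ACK);
 *      cookie = dmaengine_submit(tx);     -> allocated_desc to submitted_desc
 *      dma_async_issue_pending(dchan);    -> submitted_desc to pending_desc
 *      (VSYNC interrupts then move pending_desc to active_desc to done_list)
 */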
 269
 270/**
 271 * struct xilinx_dpdma_chan - DPDMA channel
 272 * @common: generic dma channel structure
 273 * @reg: register base address
 274 * @id: channel ID
  275 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 276 * @status: channel status
 277 * @first_frame: flag for the first frame of stream
 278 * @video_group: flag if multi-channel operation is needed for video channels
 279 * @lock: lock to access struct xilinx_dpdma_chan
 280 * @desc_pool: descriptor allocation pool
 281 * @done_task: done IRQ bottom half handler
 282 * @err_task: error IRQ bottom half handler
 283 * @allocated_desc: allocated descriptor
 284 * @submitted_desc: submitted descriptor
 285 * @pending_desc: pending descriptor to be scheduled in next period
 286 * @active_desc: descriptor that the DPDMA channel is active on
 287 * @done_list: done descriptor list
 288 * @xdev: DPDMA device
 289 */
 290struct xilinx_dpdma_chan {
 291        struct dma_chan common;
 292        void __iomem *reg;
 293        enum xilinx_dpdma_chan_id id;
 294
 295        wait_queue_head_t wait_to_stop;
 296        enum xilinx_dpdma_chan_status status;
 297        bool first_frame;
 298        bool video_group;
 299
 300        spinlock_t lock;
 301        struct dma_pool *desc_pool;
 302        struct tasklet_struct done_task;
 303        struct tasklet_struct err_task;
 304
 305        struct xilinx_dpdma_tx_desc *allocated_desc;
 306        struct xilinx_dpdma_tx_desc *submitted_desc;
 307        struct xilinx_dpdma_tx_desc *pending_desc;
 308        struct xilinx_dpdma_tx_desc *active_desc;
 309        struct list_head done_list;
 310
 311        struct xilinx_dpdma_device *xdev;
 312};
 313
 314/**
 315 * struct xilinx_dpdma_device - DPDMA device
 316 * @common: generic dma device structure
 317 * @reg: register base address
 318 * @dev: generic device structure
 319 * @axi_clk: axi clock
 320 * @chan: DPDMA channels
 321 * @ext_addr: flag for 64 bit system (48 bit addressing)
 322 * @desc_addr: descriptor addressing callback (32 bit vs 64 bit)
 323 */
 324struct xilinx_dpdma_device {
 325        struct dma_device common;
 326        void __iomem *reg;
 327        struct device *dev;
 328
 329        struct clk *axi_clk;
 330        struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];
 331
 332        bool ext_addr;
 333        void (*desc_addr)(struct xilinx_dpdma_sw_desc *sw_desc,
 334                          struct xilinx_dpdma_sw_desc *prev,
 335                          dma_addr_t dma_addr[], unsigned int num_src_addr);
 336};
 337
 338#define to_dpdma_tx_desc(tx) \
 339        container_of(tx, struct xilinx_dpdma_tx_desc, async_tx)
 340
 341#define to_xilinx_chan(chan) \
 342        container_of(chan, struct xilinx_dpdma_chan, common)
 343
 344/* IO operations */
 345
 346static inline u32 dpdma_read(void __iomem *base, u32 offset)
 347{
 348        return ioread32(base + offset);
 349}
 350
 351static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
 352{
 353        iowrite32(val, base + offset);
 354}
 355
 356static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
 357{
 358        dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
 359}
 360
 361static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
 362{
 363        dpdma_write(base, offset, dpdma_read(base, offset) | set);
 364}
 365
 366/* Xilinx DPDMA descriptor operations */
 367
 368/**
 369 * xilinx_dpdma_sw_desc_next_32 - Set 32 bit address of a next sw descriptor
 370 * @sw_desc: current software descriptor
 371 * @next: next descriptor
 372 *
 373 * Update the current sw descriptor @sw_desc with 32 bit address of the next
 374 * descriptor @next.
 375 */
 376static inline void
 377xilinx_dpdma_sw_desc_next_32(struct xilinx_dpdma_sw_desc *sw_desc,
 378                             struct xilinx_dpdma_sw_desc *next)
 379{
 380        sw_desc->hw.next_desc = next->phys;
 381}
 382
 383/**
 384 * xilinx_dpdma_sw_desc_addr_32 - Update the sw descriptor with 32 bit address
 385 * @sw_desc: software descriptor
 386 * @prev: previous descriptor
 387 * @dma_addr: array of dma addresses
 388 * @num_src_addr: number of addresses in @dma_addr
 389 *
 390 * Update the descriptor @sw_desc with 32 bit address.
 391 */
 392static void xilinx_dpdma_sw_desc_addr_32(struct xilinx_dpdma_sw_desc *sw_desc,
 393                                         struct xilinx_dpdma_sw_desc *prev,
 394                                         dma_addr_t dma_addr[],
 395                                         unsigned int num_src_addr)
 396{
 397        struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
 398        unsigned int i;
 399
 400        hw_desc->src_addr = dma_addr[0];
 401
 402        if (prev)
 403                xilinx_dpdma_sw_desc_next_32(prev, sw_desc);
 404
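        /*
         * The remaining page addresses are stored in src_addr2..src_addr5;
         * &hw_desc->src_addr2 below is used as the base of that group of
         * fields.
         */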
 405        for (i = 1; i < num_src_addr; i++) {
 406                u32 *addr = &hw_desc->src_addr2;
 407                u32 frag_addr;
 408
 409                frag_addr = dma_addr[i];
 410                addr[i - 1] = frag_addr;
 411        }
 412}
 413
 414/**
 415 * xilinx_dpdma_sw_desc_next_64 - Set 64 bit address of a next sw descriptor
 416 * @sw_desc: current software descriptor
 417 * @next: next descriptor
 418 *
 419 * Update the current sw descriptor @sw_desc with 64 bit address of the next
 420 * descriptor @next.
 421 */
 422static inline void
 423xilinx_dpdma_sw_desc_next_64(struct xilinx_dpdma_sw_desc *sw_desc,
 424                             struct xilinx_dpdma_sw_desc *next)
 425{
 426        sw_desc->hw.next_desc = (u32)next->phys;
 427        sw_desc->hw.addr_ext |= ((u64)next->phys >> 32) &
 428                                XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
 429}
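
/*
 * For illustration (hypothetical address): with a next descriptor located at
 * 0x8_1234_5600, next_desc is programmed with 0x12345600 and the low bits of
 * addr_ext receive 0x8, the part of the 48 bit address above 32 bits.
 */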
 430
 431/**
 432 * xilinx_dpdma_sw_desc_addr_64 - Update the sw descriptor with 64 bit address
 433 * @sw_desc: software descriptor
 434 * @prev: previous descriptor
 435 * @dma_addr: array of dma addresses
 436 * @num_src_addr: number of addresses in @dma_addr
 437 *
 438 * Update the descriptor @sw_desc with 64 bit address.
 439 */
 440static void xilinx_dpdma_sw_desc_addr_64(struct xilinx_dpdma_sw_desc *sw_desc,
 441                                         struct xilinx_dpdma_sw_desc *prev,
 442                                         dma_addr_t dma_addr[],
 443                                         unsigned int num_src_addr)
 444{
 445        struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
 446        unsigned int i;
 447
 448        hw_desc->src_addr = (u32)dma_addr[0];
 449        hw_desc->addr_ext |=
 450                ((u64)dma_addr[0] >> 32) & XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
 451
 452        if (prev)
 453                xilinx_dpdma_sw_desc_next_64(prev, sw_desc);
 454
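        /*
         * The remaining page addresses go to src_addr2..src_addr5, with their
         * upper address bits packed two per word into addr_ext_23 and
         * addr_ext_45 (src_addr2/src_addr4 extensions in the low halfword,
         * src_addr3/src_addr5 extensions in the high halfword).
         */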
 455        for (i = 1; i < num_src_addr; i++) {
 456                u32 *addr = &hw_desc->src_addr2;
 457                u32 *addr_ext = &hw_desc->addr_ext_23;
 458                u64 frag_addr;
 459
 460                frag_addr = dma_addr[i];
  461                addr[i - 1] = (u32)frag_addr;
 462
 463                frag_addr >>= 32;
 464                frag_addr &= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK;
  465                frag_addr <<= XILINX_DPDMA_DESC_ADDR_EXT_ADDR_SHIFT * ((i - 1) % 2);
  466                addr_ext[(i - 1) / 2] |= frag_addr;
 467        }
 468}
 469
 470/* Xilinx DPDMA channel descriptor operations */
 471
 472/**
 473 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 474 * @chan: DPDMA channel
 475 *
 476 * Allocate a software descriptor from the channel's descriptor pool.
 477 *
 478 * Return: a software descriptor or NULL.
 479 */
 480static struct xilinx_dpdma_sw_desc *
 481xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
 482{
 483        struct xilinx_dpdma_sw_desc *sw_desc;
 484        dma_addr_t phys;
 485
 486        sw_desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
 487        if (!sw_desc)
 488                return NULL;
 489
 490        memset(sw_desc, 0, sizeof(*sw_desc));
 491        sw_desc->phys = phys;
 492
 493        return sw_desc;
 494}
 495
 496/**
 497 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 498 * @chan: DPDMA channel
 499 * @sw_desc: software descriptor to free
 500 *
 501 * Free a software descriptor from the channel's descriptor pool.
 502 */
 503static void
 504xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
 505                               struct xilinx_dpdma_sw_desc *sw_desc)
 506{
 507        dma_pool_free(chan->desc_pool, sw_desc, sw_desc->phys);
 508}
 509
 510/**
 511 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 512 * @chan: DPDMA channel
 513 * @tx_desc: tx descriptor to dump
 514 *
 515 * Dump contents of a tx descriptor
 516 */
 517static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
 518                                           struct xilinx_dpdma_tx_desc *tx_desc)
 519{
 520        struct xilinx_dpdma_sw_desc *sw_desc;
 521        struct device *dev = chan->xdev->dev;
 522        unsigned int i = 0;
 523
 524        dev_dbg(dev, "------- TX descriptor dump start -------\n");
 525        dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);
 526
 527        list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
 528                struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
 529
 530                dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
 531                dev_dbg(dev, "descriptor phys: %pad\n", &sw_desc->phys);
 532                dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
 533                dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
 534                dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
 535                dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
 536                dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
 537                dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
 538                dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
 539                dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
 540                dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
 541                dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
 542                dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
 543                dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
 544                dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
 545                dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
 546                dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
 547                dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
 548        }
 549
 550        dev_dbg(dev, "------- TX descriptor dump end -------\n");
 551}
 552
 553/**
 554 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 555 * @chan: DPDMA channel
 556 *
 557 * Allocate a tx descriptor.
 558 *
 559 * Return: a tx descriptor or NULL.
 560 */
 561static struct xilinx_dpdma_tx_desc *
 562xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
 563{
 564        struct xilinx_dpdma_tx_desc *tx_desc;
 565
 566        tx_desc = kzalloc(sizeof(*tx_desc), GFP_KERNEL);
 567        if (!tx_desc)
 568                return NULL;
 569
 570        INIT_LIST_HEAD(&tx_desc->descriptors);
 571        tx_desc->status = PREPARED;
 572
 573        return tx_desc;
 574}
 575
 576/**
 577 * xilinx_dpdma_chan_free_tx_desc - Free a transaction descriptor
 578 * @chan: DPDMA channel
 579 * @tx_desc: tx descriptor
 580 *
 581 * Free the tx descriptor @tx_desc including its software descriptors.
 582 */
 583static void
 584xilinx_dpdma_chan_free_tx_desc(struct xilinx_dpdma_chan *chan,
 585                               struct xilinx_dpdma_tx_desc *tx_desc)
 586{
 587        struct xilinx_dpdma_sw_desc *sw_desc, *next;
 588
 589        if (!tx_desc)
 590                return;
 591
 592        list_for_each_entry_safe(sw_desc, next, &tx_desc->descriptors, node) {
 593                list_del(&sw_desc->node);
 594                xilinx_dpdma_chan_free_sw_desc(chan, sw_desc);
 595        }
 596
 597        kfree(tx_desc);
 598}
 599
 600/**
 601 * xilinx_dpdma_chan_submit_tx_desc - Submit a transaction descriptor
 602 * @chan: DPDMA channel
 603 * @tx_desc: tx descriptor
 604 *
 605 * Submit the tx descriptor @tx_desc to the channel @chan.
 606 *
 607 * Return: a cookie assigned to the tx descriptor
 608 */
 609static dma_cookie_t
 610xilinx_dpdma_chan_submit_tx_desc(struct xilinx_dpdma_chan *chan,
 611                                 struct xilinx_dpdma_tx_desc *tx_desc)
 612{
 613        struct xilinx_dpdma_sw_desc *sw_desc;
 614        dma_cookie_t cookie;
 615        unsigned long flags;
 616
 617        spin_lock_irqsave(&chan->lock, flags);
 618
 619        if (chan->submitted_desc) {
 620                cookie = chan->submitted_desc->async_tx.cookie;
 621                goto out_unlock;
 622        }
 623
 624        cookie = dma_cookie_assign(&tx_desc->async_tx);
 625
 626        /* Assign the cookie to descriptors in this transaction */
 627        /* Only 16 bit will be used, but it should be enough */
 628        list_for_each_entry(sw_desc, &tx_desc->descriptors, node)
 629                sw_desc->hw.desc_id = cookie;
 630
 631        if (tx_desc != chan->allocated_desc)
 632                dev_err(chan->xdev->dev, "desc != allocated_desc\n");
 633        else
 634                chan->allocated_desc = NULL;
 635        chan->submitted_desc = tx_desc;
 636
 637        if (chan->id == VIDEO1 || chan->id == VIDEO2) {
 638                chan->video_group = true;
 639                chan->xdev->chan[VIDEO0]->video_group = true;
 640        }
 641
 642out_unlock:
 643        spin_unlock_irqrestore(&chan->lock, flags);
 644
 645        return cookie;
 646}
 647
 648/**
 649 * xilinx_dpdma_chan_free_desc_list - Free a descriptor list
 650 * @chan: DPDMA channel
 651 * @list: tx descriptor list
 652 *
 653 * Free tx descriptors in the list @list.
 654 */
 655static void xilinx_dpdma_chan_free_desc_list(struct xilinx_dpdma_chan *chan,
 656                                             struct list_head *list)
 657{
 658        struct xilinx_dpdma_tx_desc *tx_desc, *next;
 659
 660        list_for_each_entry_safe(tx_desc, next, list, node) {
 661                list_del(&tx_desc->node);
 662                xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
 663        }
 664}
 665
 666/**
 667 * xilinx_dpdma_chan_free_all_desc - Free all descriptors of the channel
 668 * @chan: DPDMA channel
 669 *
 670 * Free all descriptors associated with the channel. The channel should be
 671 * disabled before this function is called, otherwise, this function may
 672 * result in misbehavior of the system due to remaining outstanding
 673 * transactions.
 674 */
 675static void xilinx_dpdma_chan_free_all_desc(struct xilinx_dpdma_chan *chan)
 676{
 677        unsigned long flags;
 678
 679        spin_lock_irqsave(&chan->lock, flags);
 680
 681        dev_dbg(chan->xdev->dev, "chan->status = %s\n",
 682                chan->status == STREAMING ? "STREAMING" : "IDLE");
 683
 684        xilinx_dpdma_chan_free_tx_desc(chan, chan->allocated_desc);
 685        chan->allocated_desc = NULL;
 686        xilinx_dpdma_chan_free_tx_desc(chan, chan->submitted_desc);
 687        chan->submitted_desc = NULL;
 688        xilinx_dpdma_chan_free_tx_desc(chan, chan->pending_desc);
 689        chan->pending_desc = NULL;
 690        xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
 691        chan->active_desc = NULL;
 692        xilinx_dpdma_chan_free_desc_list(chan, &chan->done_list);
 693
 694        spin_unlock_irqrestore(&chan->lock, flags);
 695}
 696
 697/**
 698 * xilinx_dpdma_chan_cleanup_desc - Clean up descriptors
 699 * @chan: DPDMA channel
 700 *
  701 * Trigger the completion callbacks of descriptors with finished transactions.
 702 * Free descriptors which are no longer in use.
 703 */
 704static void xilinx_dpdma_chan_cleanup_desc(struct xilinx_dpdma_chan *chan)
 705{
 706        struct xilinx_dpdma_tx_desc *desc;
 707        dma_async_tx_callback callback;
 708        void *callback_param;
 709        unsigned long flags;
 710        unsigned int cnt, i;
 711
 712        spin_lock_irqsave(&chan->lock, flags);
 713
 714        while (!list_empty(&chan->done_list)) {
 715                desc = list_first_entry(&chan->done_list,
 716                                        struct xilinx_dpdma_tx_desc, node);
 717                list_del(&desc->node);
 718
 719                cnt = desc->done_cnt;
 720                desc->done_cnt = 0;
 721                callback = desc->async_tx.callback;
 722                callback_param = desc->async_tx.callback_param;
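                /*
                 * Drop the channel lock around the client callback so the
                 * callback can safely call back into the DMA engine, e.g. to
                 * prepare and submit the next frame.
                 */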
 723                if (callback) {
 724                        spin_unlock_irqrestore(&chan->lock, flags);
 725                        for (i = 0; i < cnt; i++)
 726                                callback(callback_param);
 727                        spin_lock_irqsave(&chan->lock, flags);
 728                }
 729
 730                xilinx_dpdma_chan_free_tx_desc(chan, desc);
 731        }
 732
 733        if (chan->active_desc) {
 734                cnt = chan->active_desc->done_cnt;
 735                chan->active_desc->done_cnt = 0;
 736                callback = chan->active_desc->async_tx.callback;
 737                callback_param = chan->active_desc->async_tx.callback_param;
 738                if (callback) {
 739                        spin_unlock_irqrestore(&chan->lock, flags);
 740                        for (i = 0; i < cnt; i++)
 741                                callback(callback_param);
 742                        spin_lock_irqsave(&chan->lock, flags);
 743                }
 744        }
 745
 746        spin_unlock_irqrestore(&chan->lock, flags);
 747}
 748
 749/**
 750 * xilinx_dpdma_chan_desc_active - Set the descriptor as active
 751 * @chan: DPDMA channel
 752 *
  753 * Make the pending descriptor @chan->pending_desc the active one. This function
 754 * should be called when the channel starts operating on the pending descriptor.
 755 */
 756static void xilinx_dpdma_chan_desc_active(struct xilinx_dpdma_chan *chan)
 757{
 758        unsigned long flags;
 759
 760        spin_lock_irqsave(&chan->lock, flags);
 761
 762        if (!chan->pending_desc)
 763                goto out_unlock;
 764
 765        if (chan->active_desc)
 766                list_add_tail(&chan->active_desc->node, &chan->done_list);
 767
 768        chan->active_desc = chan->pending_desc;
 769        chan->pending_desc = NULL;
 770
 771out_unlock:
 772        spin_unlock_irqrestore(&chan->lock, flags);
 773}
 774
 775/**
 776 * xilinx_dpdma_chan_desc_done_intr - Mark the current descriptor as 'done'
 777 * @chan: DPDMA channel
 778 *
 779 * Mark the current active descriptor @chan->active_desc as 'done'. This
 780 * function should be called to mark completion of the currently active
 781 * descriptor.
 782 */
 783static void xilinx_dpdma_chan_desc_done_intr(struct xilinx_dpdma_chan *chan)
 784{
 785        unsigned long flags;
 786
 787        spin_lock_irqsave(&chan->lock, flags);
 788
 789        if (!chan->active_desc) {
 790                dev_dbg(chan->xdev->dev, "done intr with no active desc\n");
 791                goto out_unlock;
 792        }
 793
 794        chan->active_desc->done_cnt++;
 795        if (chan->active_desc->status ==  PREPARED) {
 796                dma_cookie_complete(&chan->active_desc->async_tx);
 797                chan->active_desc->status = ACTIVE;
 798        }
 799
 800out_unlock:
 801        spin_unlock_irqrestore(&chan->lock, flags);
 802        tasklet_schedule(&chan->done_task);
 803}
 804
 805/**
 806 * xilinx_dpdma_chan_prep_slave_sg - Prepare a scatter-gather dma descriptor
 807 * @chan: DPDMA channel
 808 * @sgl: scatter-gather list
 809 *
  810 * Prepare a tx descriptor including internal software/hardware descriptors
 811 * for the given scatter-gather transaction.
 812 *
 813 * Return: A dma async tx descriptor on success, or NULL.
 814 */
 815static struct dma_async_tx_descriptor *
 816xilinx_dpdma_chan_prep_slave_sg(struct xilinx_dpdma_chan *chan,
 817                                struct scatterlist *sgl)
 818{
 819        struct xilinx_dpdma_tx_desc *tx_desc;
 820        struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
 821        struct scatterlist *iter = sgl;
 822        u32 line_size = 0;
 823
 824        if (chan->allocated_desc)
 825                return &chan->allocated_desc->async_tx;
 826
 827        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
 828        if (!tx_desc)
 829                return NULL;
 830
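        /*
         * Each chained sg table is assumed to describe one line, so summing
         * the first table gives the line size in bytes; one hardware
         * descriptor is then built per line below.
         */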
 831        while (!sg_is_chain(iter))
 832                line_size += sg_dma_len(iter++);
 833
 834        while (sgl) {
 835                struct xilinx_dpdma_hw_desc *hw_desc;
 836                dma_addr_t dma_addr[4];
 837                unsigned int num_pages = 0;
 838
 839                sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
 840                if (!sw_desc)
 841                        goto error;
 842
 843                while (!sg_is_chain(sgl) && !sg_is_last(sgl)) {
 844                        dma_addr[num_pages] = sg_dma_address(sgl++);
 845                        if (!IS_ALIGNED(dma_addr[num_pages++],
 846                                        XILINX_DPDMA_ALIGN_BYTES)) {
 847                                dev_err(chan->xdev->dev,
 848                                        "buffer should be aligned at %d B\n",
 849                                        XILINX_DPDMA_ALIGN_BYTES);
 850                                goto error;
 851                        }
 852                }
 853
 854                chan->xdev->desc_addr(sw_desc, last, dma_addr, num_pages);
 855                hw_desc = &sw_desc->hw;
 856                hw_desc->xfer_size = line_size;
 857                hw_desc->hsize_stride =
 858                        line_size << XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
 859                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
 860                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_FRAG_MODE;
 861                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
 862
 863                list_add_tail(&sw_desc->node, &tx_desc->descriptors);
 864                last = sw_desc;
 865                if (sg_is_last(sgl))
 866                        break;
 867                sgl = sg_chain_ptr(sgl);
 868        }
 869
 870        sw_desc = list_first_entry(&tx_desc->descriptors,
 871                                   struct xilinx_dpdma_sw_desc, node);
 872        if (chan->xdev->ext_addr)
 873                xilinx_dpdma_sw_desc_next_64(last, sw_desc);
 874        else
 875                xilinx_dpdma_sw_desc_next_32(last, sw_desc);
 876        last->hw.control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
 877        last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
 878
 879        chan->allocated_desc = tx_desc;
 880
 881        return &tx_desc->async_tx;
 882
 883error:
 884        xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
 885
 886        return NULL;
 887}
 888
 889/**
 890 * xilinx_dpdma_chan_prep_cyclic - Prepare a cyclic dma descriptor
 891 * @chan: DPDMA channel
 892 * @buf_addr: buffer address
 893 * @buf_len: buffer length
  894 * @period_len: length of one period in bytes
 895 *
  896 * Prepare a tx descriptor including internal software/hardware descriptors
 897 * for the given cyclic transaction.
 898 *
 899 * Return: A dma async tx descriptor on success, or NULL.
 900 */
 901static struct dma_async_tx_descriptor *
 902xilinx_dpdma_chan_prep_cyclic(struct xilinx_dpdma_chan *chan,
 903                              dma_addr_t buf_addr, size_t buf_len,
 904                              size_t period_len)
 905{
 906        struct xilinx_dpdma_tx_desc *tx_desc;
 907        struct xilinx_dpdma_sw_desc *sw_desc, *last = NULL;
 908        unsigned int periods = buf_len / period_len;
 909        unsigned int i;
 910
 911        if (chan->allocated_desc)
 912                return &chan->allocated_desc->async_tx;
 913
 914        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
 915        if (!tx_desc)
 916                return NULL;
 917
 918        for (i = 0; i < periods; i++) {
 919                struct xilinx_dpdma_hw_desc *hw_desc;
 920
 921                if (!IS_ALIGNED(buf_addr, XILINX_DPDMA_ALIGN_BYTES)) {
 922                        dev_err(chan->xdev->dev,
 923                                "buffer should be aligned at %d B\n",
 924                                XILINX_DPDMA_ALIGN_BYTES);
 925                        goto error;
 926                }
 927
 928                sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
 929                if (!sw_desc)
 930                        goto error;
 931
 932                chan->xdev->desc_addr(sw_desc, last, &buf_addr, 1);
 933                hw_desc = &sw_desc->hw;
 934                hw_desc->xfer_size = period_len;
 935                hw_desc->hsize_stride =
 936                        period_len <<
 937                        XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
 938                hw_desc->hsize_stride |=
 939                        period_len <<
 940                        XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
 941                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
 942                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
 943                hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
 944
 945                list_add_tail(&sw_desc->node, &tx_desc->descriptors);
 946
 947                buf_addr += period_len;
 948                last = sw_desc;
 949        }
 950
 951        sw_desc = list_first_entry(&tx_desc->descriptors,
 952                                   struct xilinx_dpdma_sw_desc, node);
 953        if (chan->xdev->ext_addr)
 954                xilinx_dpdma_sw_desc_next_64(last, sw_desc);
 955        else
 956                xilinx_dpdma_sw_desc_next_32(last, sw_desc);
 957        last->hw.control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
 958
 959        chan->allocated_desc = tx_desc;
 960
 961        return &tx_desc->async_tx;
 962
 963error:
 964        xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
 965
 966        return NULL;
 967}
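
/*
 * A client would typically reach this path through the standard cyclic API,
 * for instance for an audio buffer split into equal periods (illustrative
 * call only):
 *
 *      tx = dmaengine_prep_dma_cyclic(dchan, buf, buf_len, period_len,
 *                                     DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 */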
 968
 969/**
  970 * xilinx_dpdma_chan_prep_interleaved - Prepare an interleaved dma descriptor
 971 * @chan: DPDMA channel
 972 * @xt: dma interleaved template
 973 *
  974 * Prepare a tx descriptor including internal software/hardware descriptors
 975 * based on @xt.
 976 *
 977 * Return: A dma async tx descriptor on success, or NULL.
 978 */
 979static struct dma_async_tx_descriptor *
 980xilinx_dpdma_chan_prep_interleaved(struct xilinx_dpdma_chan *chan,
 981                                   struct dma_interleaved_template *xt)
 982{
 983        struct xilinx_dpdma_tx_desc *tx_desc;
 984        struct xilinx_dpdma_sw_desc *sw_desc;
 985        struct xilinx_dpdma_hw_desc *hw_desc;
 986        size_t hsize = xt->sgl[0].size;
 987        size_t stride = hsize + xt->sgl[0].icg;
 988
 989        if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
 990                dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
 991                        XILINX_DPDMA_ALIGN_BYTES);
 992                return NULL;
 993        }
 994
 995        if (chan->allocated_desc)
 996                return &chan->allocated_desc->async_tx;
 997
 998        tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
 999        if (!tx_desc)
1000                return NULL;
1001
1002        sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
1003        if (!sw_desc)
1004                goto error;
1005
1006        chan->xdev->desc_addr(sw_desc, sw_desc, &xt->src_start, 1);
1007        hw_desc = &sw_desc->hw;
1008        hw_desc->xfer_size = hsize * xt->numf;
1009        hw_desc->hsize_stride = hsize <<
1010                                XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_SHIFT;
1011        hw_desc->hsize_stride |= (stride / 16) <<
1012                                 XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_SHIFT;
1013        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
1014        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
1015        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
1016        hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;
1017
1018        list_add_tail(&sw_desc->node, &tx_desc->descriptors);
1019        chan->allocated_desc = tx_desc;
1020
1021        return &tx_desc->async_tx;
1022
1023error:
1024        xilinx_dpdma_chan_free_tx_desc(chan, tx_desc);
1025
1026        return NULL;
1027}
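
/*
 * An interleaved template for one frame would typically be filled as follows
 * (illustrative values only; fb_paddr, width_bytes, stride and height are
 * assumptions of this sketch):
 *
 *      xt->src_start = fb_paddr;
 *      xt->dir = DMA_MEM_TO_DEV;
 *      xt->numf = height;
 *      xt->frame_size = 1;
 *      xt->sgl[0].size = width_bytes;
 *      xt->sgl[0].icg = stride - width_bytes;
 */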
1028
1029/* Xilinx DPDMA channel operations */
1030
1031/**
1032 * xilinx_dpdma_chan_enable - Enable the channel
1033 * @chan: DPDMA channel
1034 *
1035 * Enable the channel and its interrupts. Set the QoS values for video class.
1036 */
1037static inline void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
1038{
1039        u32 reg;
1040
1041        reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
1042        reg |= XILINX_DPDMA_INTR_GLOBAL_MASK;
1043        dpdma_set(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
1044        reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
1045        reg |= XILINX_DPDMA_INTR_GLOBAL_ERR;
1046        dpdma_set(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
1047
1048        reg = XILINX_DPDMA_CH_CNTL_ENABLE;
1049        reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
1050               XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_SHIFT;
1051        reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
1052               XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_SHIFT;
1053        reg |= XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS <<
1054               XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_SHIFT;
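        /*
         * With the video class QoS value (11) in all three QoS fields, 'reg'
         * works out to 0x2eed here: the enable bit plus the descriptor
         * write/read and data read QoS fields.
         */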
1055        dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
1056}
1057
1058/**
1059 * xilinx_dpdma_chan_disable - Disable the channel
1060 * @chan: DPDMA channel
1061 *
1062 * Disable the channel and its interrupts.
1063 */
1064static inline void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
1065{
1066        u32 reg;
1067
1068        reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
1069        dpdma_clr(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
1070        reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
1071        dpdma_clr(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);
1072
1073        dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
1074}
1075
1076/**
1077 * xilinx_dpdma_chan_pause - Pause the channel
1078 * @chan: DPDMA channel
1079 *
1080 * Pause the channel.
1081 */
1082static inline void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
1083{
1084        dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
1085}
1086
1087/**
1088 * xilinx_dpdma_chan_unpause - Unpause the channel
1089 * @chan: DPDMA channel
1090 *
1091 * Unpause the channel.
1092 */
1093static inline void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
1094{
1095        dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
1096}
1097
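/**
 * xilinx_dpdma_chan_video_group_ready - Check if the video group is ready
 * @chan: DPDMA channel
 *
 * Return: a bitmask of the channels in the video group, usable as a trigger
 * mask, once all of them are streaming, or 0 if any of them is not ready yet.
 */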
1098static u32
1099xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
1100{
1101        struct xilinx_dpdma_device *xdev = chan->xdev;
1102        u32 i = 0, ret = 0;
1103
1104        for (i = VIDEO0; i < GRAPHICS; i++) {
1105                if (xdev->chan[i]->video_group &&
1106                    xdev->chan[i]->status != STREAMING)
1107                        return 0;
1108
1109                if (xdev->chan[i]->video_group)
1110                        ret |= BIT(i);
1111        }
1112
1113        return ret;
1114}
1115
1116/**
1117 * xilinx_dpdma_chan_issue_pending - Issue the pending descriptor
1118 * @chan: DPDMA channel
1119 *
 1120 * Move the submitted descriptor @chan->submitted_desc to pending and program its
 1121 * address. If the channel is already streaming, the channel is re-triggered with
 1122 * the pending descriptor.
1123 */
1124static void xilinx_dpdma_chan_issue_pending(struct xilinx_dpdma_chan *chan)
1125{
1126        struct xilinx_dpdma_device *xdev = chan->xdev;
1127        struct xilinx_dpdma_sw_desc *sw_desc;
1128        unsigned long flags;
1129        u32 reg, channels;
1130
1131        spin_lock_irqsave(&chan->lock, flags);
1132
1133        if (!chan->submitted_desc || chan->pending_desc)
1134                goto out_unlock;
1135
1136        chan->pending_desc = chan->submitted_desc;
1137        chan->submitted_desc = NULL;
1138
1139        sw_desc = list_first_entry(&chan->pending_desc->descriptors,
1140                                   struct xilinx_dpdma_sw_desc, node);
1141        dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
1142                    (u32)sw_desc->phys);
1143        if (xdev->ext_addr)
1144                dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
1145                            ((u64)sw_desc->phys >> 32) &
1146                            XILINX_DPDMA_DESC_ADDR_EXT_ADDR_MASK);
1147
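        /*
         * The first frame uses the global trigger bits, while later frames
         * use the retrigger bits so the newly programmed descriptor is picked
         * up for the next frame.
         */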
1148        if (chan->first_frame) {
1149                chan->first_frame = false;
1150                if (chan->video_group) {
1151                        channels = xilinx_dpdma_chan_video_group_ready(chan);
1152                        if (!channels)
1153                                goto out_unlock;
1154                        reg = channels << XILINX_DPDMA_GBL_TRIG_SHIFT;
1155                } else {
1156                        reg = 1 << (XILINX_DPDMA_GBL_TRIG_SHIFT + chan->id);
1157                }
1158        } else {
1159                if (chan->video_group) {
1160                        channels = xilinx_dpdma_chan_video_group_ready(chan);
1161                        if (!channels)
1162                                goto out_unlock;
1163                        reg = channels << XILINX_DPDMA_GBL_RETRIG_SHIFT;
1164                } else {
1165                        reg = 1 << (XILINX_DPDMA_GBL_RETRIG_SHIFT + chan->id);
1166                }
1167        }
1168
1169        dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
1170
1171out_unlock:
1172        spin_unlock_irqrestore(&chan->lock, flags);
1173}
1174
1175/**
1176 * xilinx_dpdma_chan_start - Start the channel
1177 * @chan: DPDMA channel
1178 *
1179 * Start the channel by enabling interrupts and triggering the channel.
 1180 * If the channel is already streaming or there is no submitted descriptor, this
 1181 * function won't do anything on the channel.
1182 */
1183static void xilinx_dpdma_chan_start(struct xilinx_dpdma_chan *chan)
1184{
1185        unsigned long flags;
1186
1187        spin_lock_irqsave(&chan->lock, flags);
1188
1189        if (!chan->submitted_desc || chan->status == STREAMING)
1190                goto out_unlock;
1191
1192        xilinx_dpdma_chan_unpause(chan);
1193        xilinx_dpdma_chan_enable(chan);
1194        chan->first_frame = true;
1195        chan->status = STREAMING;
1196
1197out_unlock:
1198        spin_unlock_irqrestore(&chan->lock, flags);
1199}
1200
1201/**
1202 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
1203 * @chan: DPDMA channel
1204 *
1205 * Read and return the number of outstanding transactions from register.
1206 *
1207 * Return: Number of outstanding transactions from the status register.
1208 */
1209static inline u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
1210{
 1211        return (dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS) &
 1212                XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK) >>
 1213               XILINX_DPDMA_CH_STATUS_OTRAN_CNT_SHIFT;
1214}
1215
1216/**
 1217 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
1218 * @chan: DPDMA channel
1219 *
1220 * Notify waiters for no outstanding event, so waiters can stop the channel
 1221 * safely. This function is supposed to be called when the 'no outstanding' interrupt
1222 * is generated. The 'no outstanding' interrupt is disabled and should be
1223 * re-enabled when this event is handled. If the channel status register still
1224 * shows some number of outstanding transactions, the interrupt remains enabled.
1225 *
1226 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
1227 * transaction(s).
1228 */
1229static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
1230{
1231        u32 cnt;
1232
1233        cnt = xilinx_dpdma_chan_ostand(chan);
1234        if (cnt) {
1235                dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
1236                return -EWOULDBLOCK;
1237        }
1238
 1239        /* Disable the 'no outstanding' interrupt */
1240        dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
1241                    1 << (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
1242        wake_up(&chan->wait_to_stop);
1243
1244        return 0;
1245}
1246
1247/**
 1248 * xilinx_dpdma_chan_wait_no_ostand - Wait for the 'no outstanding transaction' intr
1249 * @chan: DPDMA channel
1250 *
 1251 * Wait for the 'no outstanding transaction' interrupt. This function can sleep
 1252 * for up to 50 ms.
1253 *
 1254 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
 1255 * from wait_event_interruptible_timeout().
1256 */
1257static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
1258{
1259        int ret;
1260
 1261        /* Wait up to 50 ms for the 'no outstanding transaction' interrupt */
1262        ret = wait_event_interruptible_timeout(chan->wait_to_stop,
1263                                               !xilinx_dpdma_chan_ostand(chan),
1264                                               msecs_to_jiffies(50));
1265        if (ret > 0) {
1266                dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
1267                            1 <<
1268                            (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
1269                return 0;
1270        }
1271
1272        dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
1273                xilinx_dpdma_chan_ostand(chan));
1274
1275        if (ret == 0)
1276                return -ETIMEDOUT;
1277
1278        return ret;
1279}
1280
1281/**
 1282 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status reg
1283 * @chan: DPDMA channel
1284 *
1285 * Poll the outstanding transaction status, and return when there's no
 1286 * outstanding transaction. This function can be used in interrupt context or
 1287 * where atomicity is required. The calling thread may busy-wait for 50 ms or more.
1288 *
1289 * Return: 0 on success, or -ETIMEDOUT.
1290 */
1291static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
1292{
1293        u32 cnt, loop = 50000;
1294
1295        /* Poll at least for 50ms (20 fps). */
1296        do {
1297                cnt = xilinx_dpdma_chan_ostand(chan);
1298                udelay(1);
1299        } while (loop-- > 0 && cnt);
1300
 1301        if (!cnt) {
1302                dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
1303                            1 <<
1304                            (XILINX_DPDMA_INTR_NO_OSTAND_SHIFT + chan->id));
1305                return 0;
1306        }
1307
1308        dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
1309                xilinx_dpdma_chan_ostand(chan));
1310
1311        return -ETIMEDOUT;
1312}
1313
1314/**
1315 * xilinx_dpdma_chan_stop - Stop the channel
1316 * @chan: DPDMA channel
1317 *
1318 * Stop the channel with the following sequence: 1. Pause, 2. Wait (sleep) for
1319 * no outstanding transaction interrupt, 3. Disable the channel.
1320 *
1321 * Return: 0 on success, or error code from xilinx_dpdma_chan_wait_no_ostand().
1322 */
1323static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
1324{
1325        unsigned long flags;
 1326        int ret;
1327
1328        xilinx_dpdma_chan_pause(chan);
1329        ret = xilinx_dpdma_chan_wait_no_ostand(chan);
1330        if (ret)
1331                return ret;
1332
1333        spin_lock_irqsave(&chan->lock, flags);
1334        xilinx_dpdma_chan_disable(chan);
1335        chan->status = IDLE;
1336        spin_unlock_irqrestore(&chan->lock, flags);
1337
1338        return 0;
1339}
1340
1341/**
1342 * xilinx_dpdma_chan_alloc_resources - Allocate resources for the channel
1343 * @chan: DPDMA channel
1344 *
1345 * Allocate a descriptor pool for the channel.
1346 *
1347 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
1348 */
1349static int xilinx_dpdma_chan_alloc_resources(struct xilinx_dpdma_chan *chan)
1350{
1351        chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
1352                                chan->xdev->dev,
1353                                sizeof(struct xilinx_dpdma_sw_desc),
1354                                __alignof__(struct xilinx_dpdma_sw_desc), 0);
1355        if (!chan->desc_pool) {
1356                dev_err(chan->xdev->dev,
1357                        "failed to allocate a descriptor pool\n");
1358                return -ENOMEM;
1359        }
1360
1361        return 0;
1362}
1363
1364/**
1365 * xilinx_dpdma_chan_free_resources - Free all resources for the channel
1366 * @chan: DPDMA channel
1367 *
1368 * Free all descriptors and the descriptor pool for the channel.
1369 */
1370static void xilinx_dpdma_chan_free_resources(struct xilinx_dpdma_chan *chan)
1371{
1372        xilinx_dpdma_chan_free_all_desc(chan);
1373        dma_pool_destroy(chan->desc_pool);
1374        chan->desc_pool = NULL;
1375}
1376
1377/**
1378 * xilinx_dpdma_chan_terminate_all - Terminate the channel and descriptors
1379 * @chan: DPDMA channel
1380 *
1381 * Stop the channel and free all associated descriptors.
1382 *
1383 * Return: 0 on success, or the error code from xilinx_dpdma_chan_stop().
1384 */
1385static int xilinx_dpdma_chan_terminate_all(struct xilinx_dpdma_chan *chan)
1386{
1387        struct xilinx_dpdma_device *xdev = chan->xdev;
1388        int ret;
1389        unsigned int i;
1390
1391        if (chan->video_group) {
1392                for (i = VIDEO0; i < GRAPHICS; i++) {
1393                        if (xdev->chan[i]->video_group &&
1394                            xdev->chan[i]->status == STREAMING) {
1395                                xilinx_dpdma_chan_pause(xdev->chan[i]);
1396                                xdev->chan[i]->video_group = false;
1397                        }
1398                }
1399        }
1400
1401        ret = xilinx_dpdma_chan_stop(chan);
1402        if (ret)
1403                return ret;
1404
1405        xilinx_dpdma_chan_free_all_desc(chan);
1406
1407        return 0;
1408}
1409
1410/**
1411 * xilinx_dpdma_chan_err - Detect any channel error
1412 * @chan: DPDMA channel
1413 * @isr: masked Interrupt Status Register
1414 * @eisr: Error Interrupt Status Register
1415 *
1416 * Return: true if any channel error occurs, or false otherwise.
1417 */
1418static bool
1419xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
1420{
1421
1422        if (!chan)
1423                return false;
1424
1425        if (chan->status == STREAMING &&
1426            ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
1427            (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
1428                return true;
1429
1430        return false;
1431}
1432
1433/**
1434 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
1435 * @chan: DPDMA channel
1436 *
1437 * This function is called when any channel error or any global error occurs.
 1438 * The function disables the channel that errors have paused and determines
 1439 * whether the current active descriptor can be rescheduled, depending on
 1440 * the descriptor status.
1441 */
1442static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
1443{
1444        struct xilinx_dpdma_device *xdev = chan->xdev;
1445        struct device *dev = xdev->dev;
1446        unsigned long flags;
1447
1448        spin_lock_irqsave(&chan->lock, flags);
1449
1450        dev_dbg(dev, "cur desc addr = 0x%04x%08x\n",
1451                dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
1452                dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
1453        dev_dbg(dev, "cur payload addr = 0x%04x%08x\n",
1454                dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
1455                dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));
1456
1457        xilinx_dpdma_chan_disable(chan);
1458        chan->status = IDLE;
1459
1460        /* Decide if the current descriptor can be rescheduled */
1461        if (chan->active_desc) {
1462                switch (chan->active_desc->status) {
1463                case ACTIVE:
1464                case PREPARED:
1465                        xilinx_dpdma_chan_free_tx_desc(chan,
1466                                                       chan->submitted_desc);
1467                        chan->submitted_desc = NULL;
1468                        xilinx_dpdma_chan_free_tx_desc(chan,
1469                                                       chan->pending_desc);
1470                        chan->pending_desc = NULL;
1471                        chan->active_desc->status = ERRORED;
1472                        chan->submitted_desc = chan->active_desc;
1473                        break;
1474                case ERRORED:
1475                        dev_err(dev, "desc is dropped by unrecoverable err\n");
1476                        xilinx_dpdma_chan_dump_tx_desc(chan, chan->active_desc);
1477                        xilinx_dpdma_chan_free_tx_desc(chan, chan->active_desc);
1478                        break;
1479                default:
1480                        break;
1481                }
1482                chan->active_desc = NULL;
1483        }
1484
1485        spin_unlock_irqrestore(&chan->lock, flags);
1486}
1487
1488/* DMA tx descriptor */
1489
1490static dma_cookie_t xilinx_dpdma_tx_submit(struct dma_async_tx_descriptor *tx)
1491{
1492        struct xilinx_dpdma_chan *chan = to_xilinx_chan(tx->chan);
1493        struct xilinx_dpdma_tx_desc *tx_desc = to_dpdma_tx_desc(tx);
1494
1495        return xilinx_dpdma_chan_submit_tx_desc(chan, tx_desc);
1496}
1497
1498/* DMA channel operations */
1499
1500static struct dma_async_tx_descriptor *
1501xilinx_dpdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
1502                           unsigned int sg_len,
1503                           enum dma_transfer_direction direction,
1504                           unsigned long flags, void *context)
1505{
1506        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1507        struct dma_async_tx_descriptor *async_tx;
1508
1509        if (direction != DMA_MEM_TO_DEV)
1510                return NULL;
1511
1512        if (!sgl || sg_len < 2)
1513                return NULL;
1514
1515        async_tx = xilinx_dpdma_chan_prep_slave_sg(chan, sgl);
1516        if (!async_tx)
1517                return NULL;
1518
1519        dma_async_tx_descriptor_init(async_tx, dchan);
1520        async_tx->tx_submit = xilinx_dpdma_tx_submit;
1521        async_tx->flags = flags;
1522        async_tx_ack(async_tx);
1523
1524        return async_tx;
1525}
1526
1527static struct dma_async_tx_descriptor *
1528xilinx_dpdma_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t buf_addr,
1529                             size_t buf_len, size_t period_len,
1530                             enum dma_transfer_direction direction,
1531                             unsigned long flags)
1532{
1533        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1534        struct dma_async_tx_descriptor *async_tx;
1535
1536        if (direction != DMA_MEM_TO_DEV)
1537                return NULL;
1538
1539        if (buf_len % period_len)
1540                return NULL;
1541
1542        async_tx = xilinx_dpdma_chan_prep_cyclic(chan, buf_addr, buf_len,
1543                                                 period_len);
1544        if (!async_tx)
1545                return NULL;
1546
1547        dma_async_tx_descriptor_init(async_tx, dchan);
1548        async_tx->tx_submit = xilinx_dpdma_tx_submit;
1549        async_tx->flags = flags;
1550        async_tx_ack(async_tx);
1551
1552        return async_tx;
1553}
1554
1555static struct dma_async_tx_descriptor *
1556xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
1557                                  struct dma_interleaved_template *xt,
1558                                  unsigned long flags)
1559{
1560        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1561        struct dma_async_tx_descriptor *async_tx;
1562
1563        if (xt->dir != DMA_MEM_TO_DEV)
1564                return NULL;
1565
1566        if (!xt->numf || !xt->sgl[0].size)
1567                return NULL;
1568
1569        async_tx = xilinx_dpdma_chan_prep_interleaved(chan, xt);
1570        if (!async_tx)
1571                return NULL;
1572
1573        dma_async_tx_descriptor_init(async_tx, dchan);
1574        async_tx->tx_submit = xilinx_dpdma_tx_submit;
1575        async_tx->flags = flags;
1576        async_tx_ack(async_tx);
1577
1578        return async_tx;
1579}
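
/*
 * Illustrative client usage (a sketch, not part of this driver): a display
 * driver would typically describe one frame with a dma_interleaved_template
 * and submit it through the generic dmaengine API, e.g.:
 *
 *	tx = dmaengine_prep_interleaved_dma(dchan, xt, DMA_CTRL_ACK);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(dchan);
 *	}
 */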
1580
1581static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
1582{
1583        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1584
1585        dma_cookie_init(dchan);
1586
1587        return xilinx_dpdma_chan_alloc_resources(chan);
1588}
1589
1590static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
1591{
1592        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1593
1594        xilinx_dpdma_chan_free_resources(chan);
1595}
1596
1597static enum dma_status xilinx_dpdma_tx_status(struct dma_chan *dchan,
1598                                              dma_cookie_t cookie,
1599                                              struct dma_tx_state *txstate)
1600{
1601        return dma_cookie_status(dchan, cookie, txstate);
1602}
1603
1604static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
1605{
1606        struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
1607
1608        xilinx_dpdma_chan_start(chan);
1609        xilinx_dpdma_chan_issue_pending(chan);
1610}
1611
1612static int xilinx_dpdma_config(struct dma_chan *dchan,
1613                               struct dma_slave_config *config)
1614{
1615        if (config->direction != DMA_MEM_TO_DEV)
1616                return -EINVAL;
1617
1618        return 0;
1619}
1620
1621static int xilinx_dpdma_pause(struct dma_chan *dchan)
1622{
1623        xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));
1624
1625        return 0;
1626}
1627
1628static int xilinx_dpdma_resume(struct dma_chan *dchan)
1629{
1630        xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));
1631
1632        return 0;
1633}
1634
1635static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
1636{
1637        return xilinx_dpdma_chan_terminate_all(to_xilinx_chan(dchan));
1638}
1639
1640/* Xilinx DPDMA device operations */
1641
1642/**
1643 * xilinx_dpdma_err - Detect any global error
1644 * @isr: Interrupt Status Register
1645 * @eisr: Error Interrupt Status Register
1646 *
1647 * Return: true if any global error occurs, or false otherwise.
1648 */
1649static bool xilinx_dpdma_err(u32 isr, u32 eisr)
1650{
1651        if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
1652            eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
1653                return true;
1654
1655        return false;
1656}
1657
1658/**
1659 * xilinx_dpdma_handle_err_intr - Handle DPDMA error interrupt
1660 * @xdev: DPDMA device
1661 * @isr: masked Interrupt Status Register
1662 * @eisr: Error Interrupt Status Register
1663 *
1664 * Handle any errors indicated by @isr and @eisr. This function disables the
1665 * corresponding error interrupts; they should be re-enabled once error
1666 * handling is done.
1667 */
1668static void xilinx_dpdma_handle_err_intr(struct xilinx_dpdma_device *xdev,
1669                                         u32 isr, u32 eisr)
1670{
1671        bool err = xilinx_dpdma_err(isr, eisr);
1672        unsigned int i;
1673
1674        dev_err(xdev->dev, "error intr: isr = 0x%08x, eisr = 0x%08x\n",
1675                isr, eisr);
1676
1677        /* Disable channel error interrupts until errors are handled. */
1678        dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
1679                    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
1680        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
1681                    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);
1682
1683        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
1684                if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
1685                        tasklet_schedule(&xdev->chan[i]->err_task);
1686}
1687
1688/**
1689 * xilinx_dpdma_handle_vsync_intr - Handle the VSYNC interrupt
1690 * @xdev: DPDMA device
1691 *
1692 * Handle the VSYNC event. At this point, the current frame becomes active,
1693 * which means the DPDMA actually starts fetching, and the next frame can be
1694 * scheduled.
1695 */
1696static void xilinx_dpdma_handle_vsync_intr(struct xilinx_dpdma_device *xdev)
1697{
1698        unsigned int i;
1699
1700        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++) {
1701                if (xdev->chan[i] &&
1702                    xdev->chan[i]->status == STREAMING) {
1703                        xilinx_dpdma_chan_desc_active(xdev->chan[i]);
1704                        xilinx_dpdma_chan_issue_pending(xdev->chan[i]);
1705                }
1706        }
1707}
1708
1709/**
1710 * xilinx_dpdma_enable_intr - Enable interrupts
1711 * @xdev: DPDMA device
1712 *
1713 * Enable interrupts.
1714 */
1715static void xilinx_dpdma_enable_intr(struct xilinx_dpdma_device *xdev)
1716{
1717        dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
1718        dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
1719}
1720
1721/**
1722 * xilinx_dpdma_disable_intr - Disable interrupts
1723 * @xdev: DPDMA device
1724 *
1725 * Disable interrupts.
1726 */
1727static void xilinx_dpdma_disable_intr(struct xilinx_dpdma_device *xdev)
1728{
1729        dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
1730        dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
1731}
1732
1733/* Interrupt handling operations */
1734
1735/**
1736 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
1737 * @data: tasklet data to be cast to the DPDMA channel structure
1738 *
1739 * Per channel error handling tasklet. This function waits for the outstanding
1740 * transactions to complete and triggers error handling. After error handling,
1741 * it re-enables the channel error interrupts and restarts the channel if needed.
1742 */
1743static void xilinx_dpdma_chan_err_task(unsigned long data)
1744{
1745        struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
1746        struct xilinx_dpdma_device *xdev = chan->xdev;
1747
1748        /* Proceed error handling even when polling fails. */
1749        xilinx_dpdma_chan_poll_no_ostand(chan);
1750
1751        xilinx_dpdma_chan_handle_err(chan);
1752
1753        dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
1754                    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
1755        dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
1756                    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);
1757
1758        xilinx_dpdma_chan_start(chan);
1759        xilinx_dpdma_chan_issue_pending(chan);
1760}
1761
1762/**
1763 * xilinx_dpdma_chan_done_task - Per channel tasklet for done interrupt handling
1764 * @data: tasklet data to be cast to the DPDMA channel structure
1765 *
1766 * Per channel done interrupt handling tasklet.
1767 */
1768static void xilinx_dpdma_chan_done_task(unsigned long data)
1769{
1770        struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
1771
1772        xilinx_dpdma_chan_cleanup_desc(chan);
1773}
1774
1775static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
1776{
1777        struct xilinx_dpdma_device *xdev = data;
1778        u32 status, error, i;
1779        unsigned long masked;
1780
1781        status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
1782        error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
1783        if (!status && !error)
1784                return IRQ_NONE;
1785
1786        dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
1787        dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);
1788
1789        if (status & XILINX_DPDMA_INTR_VSYNC)
1790                xilinx_dpdma_handle_vsync_intr(xdev);
1791
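            /* Dispatch descriptor-done events to the corresponding channels. */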
1792        masked = (status & XILINX_DPDMA_INTR_DESC_DONE_MASK) >>
1793                 XILINX_DPDMA_INTR_DESC_DONE_SHIFT;
1794        if (masked)
1795                for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
1796                        xilinx_dpdma_chan_desc_done_intr(xdev->chan[i]);
1797
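            /* Notify channels that no longer have outstanding transactions. */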
1798        masked = (status & XILINX_DPDMA_INTR_NO_OSTAND_MASK) >>
1799                 XILINX_DPDMA_INTR_NO_OSTAND_SHIFT;
1800        if (masked)
1801                for_each_set_bit(i, &masked, XILINX_DPDMA_NUM_CHAN)
1802                        xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
1803
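            /* Hand any channel or global errors over to the error handler. */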
1804        masked = status & XILINX_DPDMA_INTR_ERR_ALL;
1805        if (masked || error)
1806                xilinx_dpdma_handle_err_intr(xdev, masked, error);
1807
1808        return IRQ_HANDLED;
1809}
1810
1811/* Initialization operations */
1812
1813static struct xilinx_dpdma_chan *
1814xilinx_dpdma_chan_probe(struct device_node *node,
1815                        struct xilinx_dpdma_device *xdev)
1816{
1817        struct xilinx_dpdma_chan *chan;
1818
1819        chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
1820        if (!chan)
1821                return ERR_PTR(-ENOMEM);
1822
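            /*
             * Map the child node's compatible string to the fixed hardware
             * channel ID.
             */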
1823        if (of_device_is_compatible(node, "xlnx,video0")) {
1824                chan->id = VIDEO0;
1825        } else if (of_device_is_compatible(node, "xlnx,video1")) {
1826                chan->id = VIDEO1;
1827        } else if (of_device_is_compatible(node, "xlnx,video2")) {
1828                chan->id = VIDEO2;
1829        } else if (of_device_is_compatible(node, "xlnx,graphics")) {
1830                chan->id = GRAPHICS;
1831        } else if (of_device_is_compatible(node, "xlnx,audio0")) {
1832                chan->id = AUDIO0;
1833        } else if (of_device_is_compatible(node, "xlnx,audio1")) {
1834                chan->id = AUDIO1;
1835        } else {
1836                dev_err(xdev->dev, "invalid channel compatible string in DT\n");
1837                return ERR_PTR(-EINVAL);
1838        }
1839
1840        chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE + XILINX_DPDMA_CH_OFFSET *
1841                    chan->id;
1842        chan->status = IDLE;
1843
1844        spin_lock_init(&chan->lock);
1845        INIT_LIST_HEAD(&chan->done_list);
1846        init_waitqueue_head(&chan->wait_to_stop);
1847
1848        tasklet_init(&chan->done_task, xilinx_dpdma_chan_done_task,
1849                     (unsigned long)chan);
1850        tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
1851                     (unsigned long)chan);
1852
1853        chan->common.device = &xdev->common;
1854        chan->xdev = xdev;
1855
1856        list_add_tail(&chan->common.device_node, &xdev->common.channels);
1857        xdev->chan[chan->id] = chan;
1858
1859        return chan;
1860}
1861
1862static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
1863{
1864        tasklet_kill(&chan->err_task);
1865        tasklet_kill(&chan->done_task);
1866        list_del(&chan->common.device_node);
1867}
1868
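/*
 * Translate a DT DMA specifier into a DPDMA channel. The single specifier
 * cell selects the channel index, so a client node would reference a channel
 * roughly as follows (illustrative snippet, assuming #dma-cells = <1>):
 *
 *	dmas = <&xlnx_dpdma 0>;
 *	dma-names = "vid0";
 */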
1869static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1870                                            struct of_dma *ofdma)
1871{
1872        struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
1873        uint32_t chan_id = dma_spec->args[0];
1874
1875        if (chan_id >= XILINX_DPDMA_NUM_CHAN)
1876                return NULL;
1877
1878        if (!xdev->chan[chan_id])
1879                return NULL;
1880
1881        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
1882}
1883
1884static int xilinx_dpdma_probe(struct platform_device *pdev)
1885{
1886        struct xilinx_dpdma_device *xdev;
1887        struct xilinx_dpdma_chan *chan;
1888        struct dma_device *ddev;
1889        struct resource *res;
1890        struct device_node *node, *child;
1891        u32 i, freq;
1892        int irq, ret;
1893
1894        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
1895        if (!xdev)
1896                return -ENOMEM;
1897
1898        xdev->dev = &pdev->dev;
1899        ddev = &xdev->common;
1900        ddev->dev = &pdev->dev;
1901        node = xdev->dev->of_node;
            platform_set_drvdata(pdev, xdev);
1902
1903        xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
1904        if (IS_ERR(xdev->axi_clk))
1905                return PTR_ERR(xdev->axi_clk);
1906
1907        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1908        xdev->reg = devm_ioremap_resource(&pdev->dev, res);
1909        if (IS_ERR(xdev->reg))
1910                return PTR_ERR(xdev->reg);
1911
1912        irq = platform_get_irq(pdev, 0);
1913        if (irq < 0) {
1914                dev_err(xdev->dev, "failed to get platform irq\n");
1915                return irq;
1916        }
1917
1918        ret = devm_request_irq(xdev->dev, irq, xilinx_dpdma_irq_handler,
1919                               IRQF_SHARED, dev_name(xdev->dev), xdev);
1920        if (ret) {
1921                dev_err(xdev->dev, "failed to request IRQ\n");
1922                return ret;
1923        }
1924
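            /*
             * Advertise the supported transfer types; DMA_PRIVATE keeps the
             * channels reserved for explicitly requesting clients.
             */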
1925        INIT_LIST_HEAD(&xdev->common.channels);
1926        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
1927        dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
1928        dma_cap_set(DMA_CYCLIC, ddev->cap_mask);
1929        dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
1930        ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);
1931
1932        ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
1933        ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
1934        ddev->device_prep_slave_sg = xilinx_dpdma_prep_slave_sg;
1935        ddev->device_prep_dma_cyclic = xilinx_dpdma_prep_dma_cyclic;
1936        ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
1937        ddev->device_tx_status = xilinx_dpdma_tx_status;
1938        ddev->device_issue_pending = xilinx_dpdma_issue_pending;
1939        ddev->device_config = xilinx_dpdma_config;
1940        ddev->device_pause = xilinx_dpdma_pause;
1941        ddev->device_resume = xilinx_dpdma_resume;
1942        ddev->device_terminate_all = xilinx_dpdma_terminate_all;
1943        ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
1944        ddev->directions = BIT(DMA_MEM_TO_DEV);
1945        ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1946
1947        for_each_child_of_node(node, child) {
1948                chan = xilinx_dpdma_chan_probe(child, xdev);
1949                if (IS_ERR(chan)) {
1950                        dev_err(xdev->dev, "failed to probe a channel\n");
1951                        ret = PTR_ERR(chan);
                            of_node_put(child);
1952                        goto error;
1953                }
1954        }
1955
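            /*
             * Select the 64-bit or 32-bit software descriptor address helpers
             * based on the size of dma_addr_t on this platform.
             */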
1956        xdev->ext_addr = sizeof(dma_addr_t) > 4;
1957        if (xdev->ext_addr)
1958                xdev->desc_addr = xilinx_dpdma_sw_desc_addr_64;
1959        else
1960                xdev->desc_addr = xilinx_dpdma_sw_desc_addr_32;
1961
1962        ret = clk_prepare_enable(xdev->axi_clk);
1963        if (ret) {
1964                dev_err(xdev->dev, "failed to enable the axi clock\n");
1965                goto error;
1966        }
1967
1968        ret = of_property_read_u32(node, "xlnx,axi-clock-freq", &freq);
1969        if (ret < 0) {
1970                dev_dbg(xdev->dev, "No axi clock freq in DT, defaulting to 533 MHz\n");
1971                freq = 533000000;
1972        }
1973
1974        ret = clk_set_rate(xdev->axi_clk, freq);
1975        if (ret) {
1976                dev_err(xdev->dev, "failed to set the axi clock\n");
1977                goto error_dma_async;
1978        }
1979
1980        dev_dbg(xdev->dev, "axi clock freq: req = %u act = %lu\n", freq,
1981                clk_get_rate(xdev->axi_clk));
1982
1983        ret = dma_async_device_register(ddev);
1984        if (ret) {
1985                dev_err(xdev->dev, "failed to register the DMA device\n");
1986                goto error_dma_async;
1987        }
1988
1989        ret = of_dma_controller_register(xdev->dev->of_node,
1990                                         of_dma_xilinx_xlate, ddev);
1991        if (ret) {
1992                dev_err(xdev->dev, "failed to register the DMA controller with the DT DMA helper\n");
1993                goto error_of_dma;
1994        }
1995
1996        xilinx_dpdma_enable_intr(xdev);
1997
1998        dev_info(&pdev->dev, "Xilinx DPDMA engine probed\n");
1999
2000        return 0;
2001
2002error_of_dma:
2003        dma_async_device_unregister(ddev);
2004error_dma_async:
2005        clk_disable_unprepare(xdev->axi_clk);
2006error:
2007        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
2008                if (xdev->chan[i])
2009                        xilinx_dpdma_chan_remove(xdev->chan[i]);
2010
2011        return ret;
2012}
2013
2014static int xilinx_dpdma_remove(struct platform_device *pdev)
2015{
2016        struct xilinx_dpdma_device *xdev;
2017        unsigned int i;
2018
2019        xdev = platform_get_drvdata(pdev);
2020
2021        xilinx_dpdma_disable_intr(xdev);
2022        of_dma_controller_free(pdev->dev.of_node);
2023        dma_async_device_unregister(&xdev->common);
2024        clk_disable_unprepare(xdev->axi_clk);
2025
2026        for (i = 0; i < XILINX_DPDMA_NUM_CHAN; i++)
2027                if (xdev->chan[i])
2028                        xilinx_dpdma_chan_remove(xdev->chan[i]);
2029
2030        return 0;
2031}
2032
2033static const struct of_device_id xilinx_dpdma_of_match[] = {
2034        { .compatible = "xlnx,dpdma",},
2035        { /* end of table */ },
2036};
2037MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);
2038
2039static struct platform_driver xilinx_dpdma_driver = {
2040        .probe                  = xilinx_dpdma_probe,
2041        .remove                 = xilinx_dpdma_remove,
2042        .driver                 = {
2043                .name           = "xilinx-dpdma",
2044                .of_match_table = xilinx_dpdma_of_match,
2045        },
2046};
2047
2048module_platform_driver(xilinx_dpdma_driver);
2049
2050MODULE_AUTHOR("Xilinx, Inc.");
2051MODULE_DESCRIPTION("Xilinx DPDMA driver");
2052MODULE_LICENSE("GPL v2");
2053