linux/drivers/dma/xilinx/xilinx_dma.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * DMA driver for Xilinx Video DMA Engine
   4 *
   5 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
   6 *
   7 * Based on the Freescale DMA driver.
   8 *
   9 * Description:
  10 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
  11 * core that provides high-bandwidth direct memory access between memory
  12 * and AXI4-Stream type video target peripherals. The core provides efficient
  13 * two-dimensional DMA operations with independent asynchronous read (S2MM)
  14 * and write (MM2S) channel operation. It can be configured to have either
  15 * one channel or two channels. If configured as two channels, one is to
  16 * transmit to the video device (MM2S) and another is to receive from the
  17 * video device (S2MM). Initialization, status, interrupt and management
  18 * registers are accessed through an AXI4-Lite slave interface.
  19 *
  20 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
  21 * provides high-bandwidth one-dimensional direct memory access between memory
  22 * and AXI4-Stream target peripherals. It supports one receive and one
  23 * transmit channel, both of them optional at synthesis time.
  24 *
  25 * The AXI CDMA is a soft IP core that provides high-bandwidth Direct Memory
  26 * Access (DMA) between a memory-mapped source address and a memory-mapped
  27 * destination address.
  28 *
  29 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
  30 * Xilinx IP that provides high-bandwidth direct memory access between
  31 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
  32 * (SG) interface with independent configuration support for multiple channels.
  33 *
  34 */
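
/*
 * Illustrative client usage (not part of this driver): a minimal dmaengine
 * sketch for a channel provided by one of these cores. The device pointer
 * "dev", the channel name "axidma0", the mapped buffer "buf"/"len" and the
 * callback "xfer_done" are made-up placeholders.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "axidma0");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	tx->callback = xfer_done;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * VDMA clients additionally describe the frame layout through
 * xilinx_vdma_channel_set_config(), declared in <linux/dma/xilinx_dma.h>.
 */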
  35
  36#include <linux/bitops.h>
  37#include <linux/dmapool.h>
  38#include <linux/dma/xilinx_dma.h>
  39#include <linux/init.h>
  40#include <linux/interrupt.h>
  41#include <linux/io.h>
  42#include <linux/iopoll.h>
  43#include <linux/module.h>
  44#include <linux/of_address.h>
  45#include <linux/of_dma.h>
  46#include <linux/of_platform.h>
  47#include <linux/of_irq.h>
  48#include <linux/slab.h>
  49#include <linux/clk.h>
  50#include <linux/io-64-nonatomic-lo-hi.h>
  51
  52#include "../dmaengine.h"
  53
  54/* Register/Descriptor Offsets */
  55#define XILINX_DMA_MM2S_CTRL_OFFSET             0x0000
  56#define XILINX_DMA_S2MM_CTRL_OFFSET             0x0030
  57#define XILINX_VDMA_MM2S_DESC_OFFSET            0x0050
  58#define XILINX_VDMA_S2MM_DESC_OFFSET            0x00a0
  59
  60/* Control Registers */
  61#define XILINX_DMA_REG_DMACR                    0x0000
  62#define XILINX_DMA_DMACR_DELAY_MAX              0xff
  63#define XILINX_DMA_DMACR_DELAY_SHIFT            24
  64#define XILINX_DMA_DMACR_FRAME_COUNT_MAX        0xff
  65#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT      16
  66#define XILINX_DMA_DMACR_ERR_IRQ                BIT(14)
  67#define XILINX_DMA_DMACR_DLY_CNT_IRQ            BIT(13)
  68#define XILINX_DMA_DMACR_FRM_CNT_IRQ            BIT(12)
  69#define XILINX_DMA_DMACR_MASTER_SHIFT           8
  70#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
  71#define XILINX_DMA_DMACR_FRAMECNT_EN            BIT(4)
  72#define XILINX_DMA_DMACR_GENLOCK_EN             BIT(3)
  73#define XILINX_DMA_DMACR_RESET                  BIT(2)
  74#define XILINX_DMA_DMACR_CIRC_EN                BIT(1)
  75#define XILINX_DMA_DMACR_RUNSTOP                BIT(0)
  76#define XILINX_DMA_DMACR_FSYNCSRC_MASK          GENMASK(6, 5)
  77#define XILINX_DMA_DMACR_DELAY_MASK             GENMASK(31, 24)
  78#define XILINX_DMA_DMACR_FRAME_COUNT_MASK       GENMASK(23, 16)
  79#define XILINX_DMA_DMACR_MASTER_MASK            GENMASK(11, 8)
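
/*
 * Illustrative only: how the interrupt-coalescing fields above pack into
 * DMACR. To raise an interrupt after 4 frames with the delay timer left at
 * zero (made-up values for this sketch):
 *
 *	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 *	reg &= ~(XILINX_DMA_DMACR_FRAME_COUNT_MASK |
 *		 XILINX_DMA_DMACR_DELAY_MASK);
 *	reg |= 4 << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 *	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 */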
  80
  81#define XILINX_DMA_REG_DMASR                    0x0004
  82#define XILINX_DMA_DMASR_EOL_LATE_ERR           BIT(15)
  83#define XILINX_DMA_DMASR_ERR_IRQ                BIT(14)
  84#define XILINX_DMA_DMASR_DLY_CNT_IRQ            BIT(13)
  85#define XILINX_DMA_DMASR_FRM_CNT_IRQ            BIT(12)
  86#define XILINX_DMA_DMASR_SOF_LATE_ERR           BIT(11)
  87#define XILINX_DMA_DMASR_SG_DEC_ERR             BIT(10)
  88#define XILINX_DMA_DMASR_SG_SLV_ERR             BIT(9)
  89#define XILINX_DMA_DMASR_EOF_EARLY_ERR          BIT(8)
  90#define XILINX_DMA_DMASR_SOF_EARLY_ERR          BIT(7)
  91#define XILINX_DMA_DMASR_DMA_DEC_ERR            BIT(6)
  92#define XILINX_DMA_DMASR_DMA_SLAVE_ERR          BIT(5)
  93#define XILINX_DMA_DMASR_DMA_INT_ERR            BIT(4)
  94#define XILINX_DMA_DMASR_SG_MASK                BIT(3)
  95#define XILINX_DMA_DMASR_IDLE                   BIT(1)
  96#define XILINX_DMA_DMASR_HALTED         BIT(0)
  97#define XILINX_DMA_DMASR_DELAY_MASK             GENMASK(31, 24)
  98#define XILINX_DMA_DMASR_FRAME_COUNT_MASK       GENMASK(23, 16)
  99
 100#define XILINX_DMA_REG_CURDESC                  0x0008
 101#define XILINX_DMA_REG_TAILDESC         0x0010
 102#define XILINX_DMA_REG_REG_INDEX                0x0014
 103#define XILINX_DMA_REG_FRMSTORE         0x0018
 104#define XILINX_DMA_REG_THRESHOLD                0x001c
 105#define XILINX_DMA_REG_FRMPTR_STS               0x0024
 106#define XILINX_DMA_REG_PARK_PTR         0x0028
 107#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT        8
 108#define XILINX_DMA_PARK_PTR_WR_REF_MASK         GENMASK(12, 8)
 109#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT        0
 110#define XILINX_DMA_PARK_PTR_RD_REF_MASK         GENMASK(4, 0)
 111#define XILINX_DMA_REG_VDMA_VERSION             0x002c
 112
 113/* Register Direct Mode Registers */
 114#define XILINX_DMA_REG_VSIZE                    0x0000
 115#define XILINX_DMA_REG_HSIZE                    0x0004
 116
 117#define XILINX_DMA_REG_FRMDLY_STRIDE            0x0008
 118#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT   24
 119#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT   0
 120
 121#define XILINX_VDMA_REG_START_ADDRESS(n)        (0x000c + 4 * (n))
 122#define XILINX_VDMA_REG_START_ADDRESS_64(n)     (0x000c + 8 * (n))
 123
 124#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP    0x00ec
 125#define XILINX_VDMA_ENABLE_VERTICAL_FLIP        BIT(0)
 126
 127/* HW specific definitions */
 128#define XILINX_MCDMA_MAX_CHANS_PER_DEVICE       0x20
 129#define XILINX_DMA_MAX_CHANS_PER_DEVICE         0x2
 130#define XILINX_CDMA_MAX_CHANS_PER_DEVICE        0x1
 131
 132#define XILINX_DMA_DMAXR_ALL_IRQ_MASK   \
 133                (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
 134                 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
 135                 XILINX_DMA_DMASR_ERR_IRQ)
 136
 137#define XILINX_DMA_DMASR_ALL_ERR_MASK   \
 138                (XILINX_DMA_DMASR_EOL_LATE_ERR | \
 139                 XILINX_DMA_DMASR_SOF_LATE_ERR | \
 140                 XILINX_DMA_DMASR_SG_DEC_ERR | \
 141                 XILINX_DMA_DMASR_SG_SLV_ERR | \
 142                 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
 143                 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
 144                 XILINX_DMA_DMASR_DMA_DEC_ERR | \
 145                 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
 146                 XILINX_DMA_DMASR_DMA_INT_ERR)
 147
 148/*
 149 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 150 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 151 * is enabled in the h/w system.
 152 */
 153#define XILINX_DMA_DMASR_ERR_RECOVER_MASK       \
 154                (XILINX_DMA_DMASR_SOF_LATE_ERR | \
 155                 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
 156                 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
 157                 XILINX_DMA_DMASR_DMA_INT_ERR)
 158
 159/* Axi VDMA Flush on Fsync bits */
 160#define XILINX_DMA_FLUSH_S2MM           3
 161#define XILINX_DMA_FLUSH_MM2S           2
 162#define XILINX_DMA_FLUSH_BOTH           1
 163
 164/* Poll loop count bound so a hardware failure cannot hang the driver */
 165#define XILINX_DMA_LOOP_COUNT           1000000
 166
 167/* AXI DMA Specific Registers/Offsets */
 168#define XILINX_DMA_REG_SRCDSTADDR       0x18
 169#define XILINX_DMA_REG_BTT              0x28
 170
 171/* AXI DMA Specific Masks/Bit fields */
 172#define XILINX_DMA_MAX_TRANS_LEN_MIN    8
 173#define XILINX_DMA_MAX_TRANS_LEN_MAX    23
 174#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
 175#define XILINX_DMA_CR_COALESCE_MAX      GENMASK(23, 16)
 176#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
 177#define XILINX_DMA_CR_COALESCE_SHIFT    16
 178#define XILINX_DMA_BD_SOP               BIT(27)
 179#define XILINX_DMA_BD_EOP               BIT(26)
 180#define XILINX_DMA_COALESCE_MAX         255
 181#define XILINX_DMA_NUM_DESCS            255
 182#define XILINX_DMA_NUM_APP_WORDS        5
 183
 184/* AXI CDMA Specific Registers/Offsets */
 185#define XILINX_CDMA_REG_SRCADDR         0x18
 186#define XILINX_CDMA_REG_DSTADDR         0x20
 187
 188/* AXI CDMA Specific Masks */
 189#define XILINX_CDMA_CR_SGMODE          BIT(3)
 190
 191#define xilinx_prep_dma_addr_t(addr)    \
 192        ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
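
/*
 * xilinx_prep_dma_addr_t() relies on token pasting, so its argument must be
 * an lvalue with a matching "_msb" sibling field. For example, the CDMA path
 * below calls it as
 *
 *	xilinx_prep_dma_addr_t(hw->src_addr)
 *
 * which expands to
 *
 *	((dma_addr_t)((u64)hw->src_addr_msb << 32 | (hw->src_addr)))
 */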
 193
 194/* AXI MCDMA Specific Registers/Offsets */
 195#define XILINX_MCDMA_MM2S_CTRL_OFFSET           0x0000
 196#define XILINX_MCDMA_S2MM_CTRL_OFFSET           0x0500
 197#define XILINX_MCDMA_CHEN_OFFSET                0x0008
 198#define XILINX_MCDMA_CH_ERR_OFFSET              0x0010
 199#define XILINX_MCDMA_RXINT_SER_OFFSET           0x0020
 200#define XILINX_MCDMA_TXINT_SER_OFFSET           0x0028
 201#define XILINX_MCDMA_CHAN_CR_OFFSET(x)          (0x40 + (x) * 0x40)
 202#define XILINX_MCDMA_CHAN_SR_OFFSET(x)          (0x44 + (x) * 0x40)
 203#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x)       (0x48 + (x) * 0x40)
 204#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x)       (0x50 + (x) * 0x40)
 205
 206/* AXI MCDMA Specific Masks/Shifts */
 207#define XILINX_MCDMA_COALESCE_SHIFT             16
 208#define XILINX_MCDMA_COALESCE_MAX               24
 209#define XILINX_MCDMA_IRQ_ALL_MASK               GENMASK(7, 5)
 210#define XILINX_MCDMA_COALESCE_MASK              GENMASK(23, 16)
 211#define XILINX_MCDMA_CR_RUNSTOP_MASK            BIT(0)
 212#define XILINX_MCDMA_IRQ_IOC_MASK               BIT(5)
 213#define XILINX_MCDMA_IRQ_DELAY_MASK             BIT(6)
 214#define XILINX_MCDMA_IRQ_ERR_MASK               BIT(7)
 215#define XILINX_MCDMA_BD_EOP                     BIT(30)
 216#define XILINX_MCDMA_BD_SOP                     BIT(31)
 217
 218/**
 219 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 220 * @next_desc: Next Descriptor Pointer @0x00
 221 * @pad1: Reserved @0x04
 222 * @buf_addr: Buffer address @0x08
 223 * @buf_addr_msb: MSB of Buffer address @0x0C
 224 * @vsize: Vertical Size @0x10
 225 * @hsize: Horizontal Size @0x14
 226 * @stride: Number of bytes between the first
 227 *          pixels of each horizontal line @0x18
 228 */
 229struct xilinx_vdma_desc_hw {
 230        u32 next_desc;
 231        u32 pad1;
 232        u32 buf_addr;
 233        u32 buf_addr_msb;
 234        u32 vsize;
 235        u32 hsize;
 236        u32 stride;
 237} __aligned(64);
 238
 239/**
 240 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 241 * @next_desc: Next Descriptor Pointer @0x00
 242 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 243 * @buf_addr: Buffer address @0x08
 244 * @buf_addr_msb: MSB of Buffer address @0x0C
 245 * @reserved1: Reserved @0x10
 246 * @reserved2: Reserved @0x14
 247 * @control: Control field @0x18
 248 * @status: Status field @0x1C
 249 * @app: APP Fields @0x20 - 0x30
 250 */
 251struct xilinx_axidma_desc_hw {
 252        u32 next_desc;
 253        u32 next_desc_msb;
 254        u32 buf_addr;
 255        u32 buf_addr_msb;
 256        u32 reserved1;
 257        u32 reserved2;
 258        u32 control;
 259        u32 status;
 260        u32 app[XILINX_DMA_NUM_APP_WORDS];
 261} __aligned(64);
 262
 263/**
 264 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 265 * @next_desc: Next Descriptor Pointer @0x00
 266 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 267 * @buf_addr: Buffer address @0x08
 268 * @buf_addr_msb: MSB of Buffer address @0x0C
 269 * @rsvd: Reserved field @0x10
 270 * @control: Control Information field @0x14
 271 * @status: Status field @0x18
 272 * @sideband_status: Status of sideband signals @0x1C
 273 * @app: APP Fields @0x20 - 0x30
 274 */
 275struct xilinx_aximcdma_desc_hw {
 276        u32 next_desc;
 277        u32 next_desc_msb;
 278        u32 buf_addr;
 279        u32 buf_addr_msb;
 280        u32 rsvd;
 281        u32 control;
 282        u32 status;
 283        u32 sideband_status;
 284        u32 app[XILINX_DMA_NUM_APP_WORDS];
 285} __aligned(64);
 286
 287/**
 288 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 289 * @next_desc: Next Descriptor Pointer @0x00
 290 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 291 * @src_addr: Source address @0x08
 292 * @src_addr_msb: Source address MSB @0x0C
 293 * @dest_addr: Destination address @0x10
 294 * @dest_addr_msb: Destination address MSB @0x14
 295 * @control: Control field @0x18
 296 * @status: Status field @0x1C
 297 */
 298struct xilinx_cdma_desc_hw {
 299        u32 next_desc;
 300        u32 next_desc_msb;
 301        u32 src_addr;
 302        u32 src_addr_msb;
 303        u32 dest_addr;
 304        u32 dest_addr_msb;
 305        u32 control;
 306        u32 status;
 307} __aligned(64);
 308
 309/**
 310 * struct xilinx_vdma_tx_segment - Descriptor segment
 311 * @hw: Hardware descriptor
 312 * @node: Node in the descriptor segments list
 313 * @phys: Physical address of segment
 314 */
 315struct xilinx_vdma_tx_segment {
 316        struct xilinx_vdma_desc_hw hw;
 317        struct list_head node;
 318        dma_addr_t phys;
 319} __aligned(64);
 320
 321/**
 322 * struct xilinx_axidma_tx_segment - Descriptor segment
 323 * @hw: Hardware descriptor
 324 * @node: Node in the descriptor segments list
 325 * @phys: Physical address of segment
 326 */
 327struct xilinx_axidma_tx_segment {
 328        struct xilinx_axidma_desc_hw hw;
 329        struct list_head node;
 330        dma_addr_t phys;
 331} __aligned(64);
 332
 333/**
 334 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 335 * @hw: Hardware descriptor
 336 * @node: Node in the descriptor segments list
 337 * @phys: Physical address of segment
 338 */
 339struct xilinx_aximcdma_tx_segment {
 340        struct xilinx_aximcdma_desc_hw hw;
 341        struct list_head node;
 342        dma_addr_t phys;
 343} __aligned(64);
 344
 345/**
 346 * struct xilinx_cdma_tx_segment - Descriptor segment
 347 * @hw: Hardware descriptor
 348 * @node: Node in the descriptor segments list
 349 * @phys: Physical address of segment
 350 */
 351struct xilinx_cdma_tx_segment {
 352        struct xilinx_cdma_desc_hw hw;
 353        struct list_head node;
 354        dma_addr_t phys;
 355} __aligned(64);
 356
 357/**
 358 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 359 * @async_tx: Async transaction descriptor
 360 * @segments: TX segments list
 361 * @node: Node in the channel descriptors list
 362 * @cyclic: Check for cyclic transfers.
 363 * @err: Whether the descriptor has an error.
 364 * @residue: Residue of the completed descriptor
 365 */
 366struct xilinx_dma_tx_descriptor {
 367        struct dma_async_tx_descriptor async_tx;
 368        struct list_head segments;
 369        struct list_head node;
 370        bool cyclic;
 371        bool err;
 372        u32 residue;
 373};
 374
 375/**
 376 * struct xilinx_dma_chan - Driver specific DMA channel structure
 377 * @xdev: Driver specific device structure
 378 * @ctrl_offset: Control registers offset
 379 * @desc_offset: TX descriptor registers offset
 380 * @lock: Descriptor operation lock
 381 * @pending_list: Descriptors waiting
 382 * @active_list: Descriptors ready to submit
 383 * @done_list: Complete descriptors
 384 * @free_seg_list: Free descriptors
 385 * @common: DMA common channel
 386 * @desc_pool: Descriptors pool
 387 * @dev: The dma device
 388 * @irq: Channel IRQ
 389 * @id: Channel ID
 390 * @direction: Transfer direction
 391 * @num_frms: Number of frames
 392 * @has_sg: Support scatter transfers
 393 * @cyclic: Check for cyclic transfers.
 394 * @genlock: Support genlock mode
 395 * @err: Channel has errors
 396 * @idle: Check for channel idle
 397 * @tasklet: Cleanup work after irq
 398 * @config: Device configuration info
 399 * @flush_on_fsync: Flush on Frame sync
 400 * @desc_pendingcount: Descriptor pending count
 401 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 402 * @desc_submitcount: Descriptor h/w submitted count
 403 * @seg_v: Statically allocated segments base
 404 * @seg_mv: Statically allocated segments base for MCDMA
 405 * @seg_p: Physical allocated segments base
 406 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 407 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 408 * @start_transfer: Per-IP callback that starts a transfer
 409 * @stop_transfer: Per-IP callback that quiesces the channel
 410 * @tdest: TDEST value for mcdma
 411 * @has_vflip: S2MM vertical flip
 412 */
 413struct xilinx_dma_chan {
 414        struct xilinx_dma_device *xdev;
 415        u32 ctrl_offset;
 416        u32 desc_offset;
 417        spinlock_t lock;
 418        struct list_head pending_list;
 419        struct list_head active_list;
 420        struct list_head done_list;
 421        struct list_head free_seg_list;
 422        struct dma_chan common;
 423        struct dma_pool *desc_pool;
 424        struct device *dev;
 425        int irq;
 426        int id;
 427        enum dma_transfer_direction direction;
 428        int num_frms;
 429        bool has_sg;
 430        bool cyclic;
 431        bool genlock;
 432        bool err;
 433        bool idle;
 434        struct tasklet_struct tasklet;
 435        struct xilinx_vdma_config config;
 436        bool flush_on_fsync;
 437        u32 desc_pendingcount;
 438        bool ext_addr;
 439        u32 desc_submitcount;
 440        struct xilinx_axidma_tx_segment *seg_v;
 441        struct xilinx_aximcdma_tx_segment *seg_mv;
 442        dma_addr_t seg_p;
 443        struct xilinx_axidma_tx_segment *cyclic_seg_v;
 444        dma_addr_t cyclic_seg_p;
 445        void (*start_transfer)(struct xilinx_dma_chan *chan);
 446        int (*stop_transfer)(struct xilinx_dma_chan *chan);
 447        u16 tdest;
 448        bool has_vflip;
 449};
 450
 451/**
 452 * enum xdma_ip_type - DMA IP type.
 453 *
 454 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 455 * @XDMA_TYPE_CDMA: Axi cdma ip.
 456 * @XDMA_TYPE_VDMA: Axi vdma ip.
 457 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 458 *
 459 */
 460enum xdma_ip_type {
 461        XDMA_TYPE_AXIDMA = 0,
 462        XDMA_TYPE_CDMA,
 463        XDMA_TYPE_VDMA,
 464        XDMA_TYPE_AXIMCDMA
 465};
 466
 467struct xilinx_dma_config {
 468        enum xdma_ip_type dmatype;
 469        int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
 470                        struct clk **tx_clk, struct clk **txs_clk,
 471                        struct clk **rx_clk, struct clk **rxs_clk);
 472        irqreturn_t (*irq_handler)(int irq, void *data);
 473        const int max_channels;
 474};
 475
 476/**
 477 * struct xilinx_dma_device - DMA device structure
 478 * @regs: I/O mapped base address
 479 * @dev: Device Structure
 480 * @common: DMA device structure
 481 * @chan: Driver specific DMA channel
 482 * @flush_on_fsync: Flush on frame sync
 483 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 484 * @pdev: Platform device structure pointer
 485 * @dma_config: DMA config structure
 486 * @axi_clk: DMA AXI4-Lite interface clock
 487 * @tx_clk: DMA mm2s clock
 488 * @txs_clk: DMA mm2s stream clock
 489 * @rx_clk: DMA s2mm clock
 490 * @rxs_clk: DMA s2mm stream clock
 491 * @s2mm_chan_id: DMA s2mm channel identifier
 492 * @mm2s_chan_id: DMA mm2s channel identifier
 493 * @max_buffer_len: Max buffer length
 494 */
 495struct xilinx_dma_device {
 496        void __iomem *regs;
 497        struct device *dev;
 498        struct dma_device common;
 499        struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
 500        u32 flush_on_fsync;
 501        bool ext_addr;
 502        struct platform_device  *pdev;
 503        const struct xilinx_dma_config *dma_config;
 504        struct clk *axi_clk;
 505        struct clk *tx_clk;
 506        struct clk *txs_clk;
 507        struct clk *rx_clk;
 508        struct clk *rxs_clk;
 509        u32 s2mm_chan_id;
 510        u32 mm2s_chan_id;
 511        u32 max_buffer_len;
 512};
 513
 514/* Macros */
 515#define to_xilinx_chan(chan) \
 516        container_of(chan, struct xilinx_dma_chan, common)
 517#define to_dma_tx_descriptor(tx) \
 518        container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
 519#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
 520        readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
 521                                  val, cond, delay_us, timeout_us)
 522
 523/* IO accessors */
 524static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
 525{
 526        return ioread32(chan->xdev->regs + reg);
 527}
 528
 529static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
 530{
 531        iowrite32(value, chan->xdev->regs + reg);
 532}
 533
 534static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
 535                                   u32 value)
 536{
 537        dma_write(chan, chan->desc_offset + reg, value);
 538}
 539
 540static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
 541{
 542        return dma_read(chan, chan->ctrl_offset + reg);
 543}
 544
 545static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
 546                                   u32 value)
 547{
 548        dma_write(chan, chan->ctrl_offset + reg, value);
 549}
 550
 551static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
 552                                 u32 clr)
 553{
 554        dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
 555}
 556
 557static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
 558                                 u32 set)
 559{
 560        dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
 561}
 562
 563/**
 564 * vdma_desc_write_64 - 64-bit descriptor write
 565 * @chan: Driver specific VDMA channel
 566 * @reg: Register to write
 567 * @value_lsb: lower address of the descriptor.
 568 * @value_msb: upper address of the descriptor.
 569 *
 570 * Since the VDMA driver writes to a register offset that is not 64-bit
 571 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
 572 * instead of a single 64-bit register write.
 573 */
 574static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
 575                                      u32 value_lsb, u32 value_msb)
 576{
 577        /* Write the lsb 32 bits */
 578        writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
 579
 580        /* Write the msb 32 bits */
 581        writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
 582}
 583
 584static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
 585{
 586        lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
 587}
 588
 589static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
 590                                dma_addr_t addr)
 591{
 592        if (chan->ext_addr)
 593                dma_writeq(chan, reg, addr);
 594        else
 595                dma_ctrl_write(chan, reg, addr);
 596}
 597
 598static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
 599                                     struct xilinx_axidma_desc_hw *hw,
 600                                     dma_addr_t buf_addr, size_t sg_used,
 601                                     size_t period_len)
 602{
 603        if (chan->ext_addr) {
 604                hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
 605                hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
 606                                                 period_len);
 607        } else {
 608                hw->buf_addr = buf_addr + sg_used + period_len;
 609        }
 610}
 611
 612static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
 613                                       struct xilinx_aximcdma_desc_hw *hw,
 614                                       dma_addr_t buf_addr, size_t sg_used)
 615{
 616        if (chan->ext_addr) {
 617                hw->buf_addr = lower_32_bits(buf_addr + sg_used);
 618                hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
 619        } else {
 620                hw->buf_addr = buf_addr + sg_used;
 621        }
 622}
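
/*
 * Illustrative only: with ext_addr set, the helpers above split a 64-bit
 * buffer address across the two descriptor words. Assuming the effective
 * address works out to 0x123456000 (a made-up value):
 *
 *	hw->buf_addr     = lower_32_bits(0x123456000ULL);   becomes 0x23456000
 *	hw->buf_addr_msb = upper_32_bits(0x123456000ULL);   becomes 0x00000001
 */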
 623
 624/* -----------------------------------------------------------------------------
 625 * Descriptors and segments alloc and free
 626 */
 627
 628/**
 629 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 630 * @chan: Driver specific DMA channel
 631 *
 632 * Return: The allocated segment on success and NULL on failure.
 633 */
 634static struct xilinx_vdma_tx_segment *
 635xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 636{
 637        struct xilinx_vdma_tx_segment *segment;
 638        dma_addr_t phys;
 639
 640        segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 641        if (!segment)
 642                return NULL;
 643
 644        segment->phys = phys;
 645
 646        return segment;
 647}
 648
 649/**
 650 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 651 * @chan: Driver specific DMA channel
 652 *
 653 * Return: The allocated segment on success and NULL on failure.
 654 */
 655static struct xilinx_cdma_tx_segment *
 656xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 657{
 658        struct xilinx_cdma_tx_segment *segment;
 659        dma_addr_t phys;
 660
 661        segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 662        if (!segment)
 663                return NULL;
 664
 665        segment->phys = phys;
 666
 667        return segment;
 668}
 669
 670/**
 671 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 672 * @chan: Driver specific DMA channel
 673 *
 674 * Return: The allocated segment on success and NULL on failure.
 675 */
 676static struct xilinx_axidma_tx_segment *
 677xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 678{
 679        struct xilinx_axidma_tx_segment *segment = NULL;
 680        unsigned long flags;
 681
 682        spin_lock_irqsave(&chan->lock, flags);
 683        if (!list_empty(&chan->free_seg_list)) {
 684                segment = list_first_entry(&chan->free_seg_list,
 685                                           struct xilinx_axidma_tx_segment,
 686                                           node);
 687                list_del(&segment->node);
 688        }
 689        spin_unlock_irqrestore(&chan->lock, flags);
 690
 691        if (!segment)
 692                dev_dbg(chan->dev, "Could not find free tx segment\n");
 693
 694        return segment;
 695}
 696
 697/**
 698 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
 699 * @chan: Driver specific DMA channel
 700 *
 701 * Return: The allocated segment on success and NULL on failure.
 702 */
 703static struct xilinx_aximcdma_tx_segment *
 704xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 705{
 706        struct xilinx_aximcdma_tx_segment *segment = NULL;
 707        unsigned long flags;
 708
 709        spin_lock_irqsave(&chan->lock, flags);
 710        if (!list_empty(&chan->free_seg_list)) {
 711                segment = list_first_entry(&chan->free_seg_list,
 712                                           struct xilinx_aximcdma_tx_segment,
 713                                           node);
 714                list_del(&segment->node);
 715        }
 716        spin_unlock_irqrestore(&chan->lock, flags);
 717
 718        return segment;
 719}
 720
 721static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
 722{
 723        u32 next_desc = hw->next_desc;
 724        u32 next_desc_msb = hw->next_desc_msb;
 725
 726        memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
 727
 728        hw->next_desc = next_desc;
 729        hw->next_desc_msb = next_desc_msb;
 730}
 731
 732static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
 733{
 734        u32 next_desc = hw->next_desc;
 735        u32 next_desc_msb = hw->next_desc_msb;
 736
 737        memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
 738
 739        hw->next_desc = next_desc;
 740        hw->next_desc_msb = next_desc_msb;
 741}
 742
 743/**
 744 * xilinx_dma_free_tx_segment - Free transaction segment
 745 * @chan: Driver specific DMA channel
 746 * @segment: DMA transaction segment
 747 */
 748static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
 749                                struct xilinx_axidma_tx_segment *segment)
 750{
 751        xilinx_dma_clean_hw_desc(&segment->hw);
 752
 753        list_add_tail(&segment->node, &chan->free_seg_list);
 754}
 755
 756/**
 757 * xilinx_mcdma_free_tx_segment - Free transaction segment
 758 * @chan: Driver specific DMA channel
 759 * @segment: DMA transaction segment
 760 */
 761static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
 762                                         struct xilinx_aximcdma_tx_segment *
 763                                         segment)
 764{
 765        xilinx_mcdma_clean_hw_desc(&segment->hw);
 766
 767        list_add_tail(&segment->node, &chan->free_seg_list);
 768}
 769
 770/**
 771 * xilinx_cdma_free_tx_segment - Free transaction segment
 772 * @chan: Driver specific DMA channel
 773 * @segment: DMA transaction segment
 774 */
 775static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
 776                                struct xilinx_cdma_tx_segment *segment)
 777{
 778        dma_pool_free(chan->desc_pool, segment, segment->phys);
 779}
 780
 781/**
 782 * xilinx_vdma_free_tx_segment - Free transaction segment
 783 * @chan: Driver specific DMA channel
 784 * @segment: DMA transaction segment
 785 */
 786static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
 787                                        struct xilinx_vdma_tx_segment *segment)
 788{
 789        dma_pool_free(chan->desc_pool, segment, segment->phys);
 790}
 791
 792/**
 793 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 794 * @chan: Driver specific DMA channel
 795 *
 796 * Return: The allocated descriptor on success and NULL on failure.
 797 */
 798static struct xilinx_dma_tx_descriptor *
 799xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
 800{
 801        struct xilinx_dma_tx_descriptor *desc;
 802
 803        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 804        if (!desc)
 805                return NULL;
 806
 807        INIT_LIST_HEAD(&desc->segments);
 808
 809        return desc;
 810}
 811
 812/**
 813 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 814 * @chan: Driver specific DMA channel
 815 * @desc: DMA transaction descriptor
 816 */
 817static void
 818xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
 819                               struct xilinx_dma_tx_descriptor *desc)
 820{
 821        struct xilinx_vdma_tx_segment *segment, *next;
 822        struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
 823        struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
 824        struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
 825
 826        if (!desc)
 827                return;
 828
 829        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 830                list_for_each_entry_safe(segment, next, &desc->segments, node) {
 831                        list_del(&segment->node);
 832                        xilinx_vdma_free_tx_segment(chan, segment);
 833                }
 834        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 835                list_for_each_entry_safe(cdma_segment, cdma_next,
 836                                         &desc->segments, node) {
 837                        list_del(&cdma_segment->node);
 838                        xilinx_cdma_free_tx_segment(chan, cdma_segment);
 839                }
 840        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 841                list_for_each_entry_safe(axidma_segment, axidma_next,
 842                                         &desc->segments, node) {
 843                        list_del(&axidma_segment->node);
 844                        xilinx_dma_free_tx_segment(chan, axidma_segment);
 845                }
 846        } else {
 847                list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
 848                                         &desc->segments, node) {
 849                        list_del(&aximcdma_segment->node);
 850                        xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
 851                }
 852        }
 853
 854        kfree(desc);
 855}
 856
 857/* Required functions */
 858
 859/**
 860 * xilinx_dma_free_desc_list - Free descriptors list
 861 * @chan: Driver specific DMA channel
 862 * @list: List to parse and delete the descriptor
 863 */
 864static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
 865                                        struct list_head *list)
 866{
 867        struct xilinx_dma_tx_descriptor *desc, *next;
 868
 869        list_for_each_entry_safe(desc, next, list, node) {
 870                list_del(&desc->node);
 871                xilinx_dma_free_tx_descriptor(chan, desc);
 872        }
 873}
 874
 875/**
 876 * xilinx_dma_free_descriptors - Free channel descriptors
 877 * @chan: Driver specific DMA channel
 878 */
 879static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
 880{
 881        unsigned long flags;
 882
 883        spin_lock_irqsave(&chan->lock, flags);
 884
 885        xilinx_dma_free_desc_list(chan, &chan->pending_list);
 886        xilinx_dma_free_desc_list(chan, &chan->done_list);
 887        xilinx_dma_free_desc_list(chan, &chan->active_list);
 888
 889        spin_unlock_irqrestore(&chan->lock, flags);
 890}
 891
 892/**
 893 * xilinx_dma_free_chan_resources - Free channel resources
 894 * @dchan: DMA channel
 895 */
 896static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 897{
 898        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
 899        unsigned long flags;
 900
 901        dev_dbg(chan->dev, "Free all channel resources.\n");
 902
 903        xilinx_dma_free_descriptors(chan);
 904
 905        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 906                spin_lock_irqsave(&chan->lock, flags);
 907                INIT_LIST_HEAD(&chan->free_seg_list);
 908                spin_unlock_irqrestore(&chan->lock, flags);
 909
 910                /* Free memory that is allocated for BD */
 911                dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
 912                                  XILINX_DMA_NUM_DESCS, chan->seg_v,
 913                                  chan->seg_p);
 914
 915                /* Free Memory that is allocated for cyclic DMA Mode */
 916                dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
 917                                  chan->cyclic_seg_v, chan->cyclic_seg_p);
 918        }
 919
 920        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
 921                spin_lock_irqsave(&chan->lock, flags);
 922                INIT_LIST_HEAD(&chan->free_seg_list);
 923                spin_unlock_irqrestore(&chan->lock, flags);
 924
 925                /* Free memory that is allocated for BD */
 926                dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
 927                                  XILINX_DMA_NUM_DESCS, chan->seg_mv,
 928                                  chan->seg_p);
 929        }
 930
 931        if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
 932            chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
 933                dma_pool_destroy(chan->desc_pool);
 934                chan->desc_pool = NULL;
 935        }
 936
 937}
 938
 939/**
 940 * xilinx_dma_get_residue - Compute residue for a given descriptor
 941 * @chan: Driver specific dma channel
 942 * @desc: dma transaction descriptor
 943 *
 944 * Return: The number of residue bytes for the descriptor.
 945 */
 946static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
 947                                  struct xilinx_dma_tx_descriptor *desc)
 948{
 949        struct xilinx_cdma_tx_segment *cdma_seg;
 950        struct xilinx_axidma_tx_segment *axidma_seg;
 951        struct xilinx_aximcdma_tx_segment *aximcdma_seg;
 952        struct xilinx_cdma_desc_hw *cdma_hw;
 953        struct xilinx_axidma_desc_hw *axidma_hw;
 954        struct xilinx_aximcdma_desc_hw *aximcdma_hw;
 955        struct list_head *entry;
 956        u32 residue = 0;
 957
 958        list_for_each(entry, &desc->segments) {
 959                if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 960                        cdma_seg = list_entry(entry,
 961                                              struct xilinx_cdma_tx_segment,
 962                                              node);
 963                        cdma_hw = &cdma_seg->hw;
 964                        residue += (cdma_hw->control - cdma_hw->status) &
 965                                   chan->xdev->max_buffer_len;
 966                } else if (chan->xdev->dma_config->dmatype ==
 967                           XDMA_TYPE_AXIDMA) {
 968                        axidma_seg = list_entry(entry,
 969                                                struct xilinx_axidma_tx_segment,
 970                                                node);
 971                        axidma_hw = &axidma_seg->hw;
 972                        residue += (axidma_hw->control - axidma_hw->status) &
 973                                   chan->xdev->max_buffer_len;
 974                } else {
 975                        aximcdma_seg =
 976                                list_entry(entry,
 977                                           struct xilinx_aximcdma_tx_segment,
 978                                           node);
 979                        aximcdma_hw = &aximcdma_seg->hw;
 980                        residue +=
 981                                (aximcdma_hw->control - aximcdma_hw->status) &
 982                                chan->xdev->max_buffer_len;
 983                }
 984        }
 985
 986        return residue;
 987}
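
/*
 * Illustrative only: the computation above treats the low bits of "control"
 * as the programmed transfer length and the low bits of "status" as the
 * completed byte count. For a segment programmed for 0x1000 bytes of which
 * 0xc00 completed (made-up numbers):
 *
 *	residue += (0x1000 - 0xc00) & chan->xdev->max_buffer_len;   adds 0x400
 */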
 988
 989/**
 990 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 991 * @chan: Driver specific dma channel
 992 * @desc: dma transaction descriptor
 993 * @flags: flags for spin lock
 994 */
 995static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
 996                                          struct xilinx_dma_tx_descriptor *desc,
 997                                          unsigned long *flags)
 998{
 999        dma_async_tx_callback callback;
1000        void *callback_param;
1001
1002        callback = desc->async_tx.callback;
1003        callback_param = desc->async_tx.callback_param;
1004        if (callback) {
1005                spin_unlock_irqrestore(&chan->lock, *flags);
1006                callback(callback_param);
1007                spin_lock_irqsave(&chan->lock, *flags);
1008        }
1009}
1010
1011/**
1012 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
1013 * @chan: Driver specific DMA channel
1014 */
1015static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1016{
1017        struct xilinx_dma_tx_descriptor *desc, *next;
1018        unsigned long flags;
1019
1020        spin_lock_irqsave(&chan->lock, flags);
1021
1022        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1023                struct dmaengine_result result;
1024
1025                if (desc->cyclic) {
1026                        xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1027                        break;
1028                }
1029
1030                /* Remove from the list of running transactions */
1031                list_del(&desc->node);
1032
1033                if (unlikely(desc->err)) {
1034                        if (chan->direction == DMA_DEV_TO_MEM)
1035                                result.result = DMA_TRANS_READ_FAILED;
1036                        else
1037                                result.result = DMA_TRANS_WRITE_FAILED;
1038                } else {
1039                        result.result = DMA_TRANS_NOERROR;
1040                }
1041
1042                result.residue = desc->residue;
1043
1044                /* Run the link descriptor callback function */
1045                spin_unlock_irqrestore(&chan->lock, flags);
1046                dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1047                spin_lock_irqsave(&chan->lock, flags);
1048
1049                /* Run any dependencies, then free the descriptor */
1050                dma_run_dependencies(&desc->async_tx);
1051                xilinx_dma_free_tx_descriptor(chan, desc);
1052        }
1053
1054        spin_unlock_irqrestore(&chan->lock, flags);
1055}
1056
1057/**
1058 * xilinx_dma_do_tasklet - Schedule completion tasklet
1059 * @t: Pointer to the Xilinx DMA channel structure
1060 */
1061static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1062{
1063        struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1064
1065        xilinx_dma_chan_desc_cleanup(chan);
1066}
1067
1068/**
1069 * xilinx_dma_alloc_chan_resources - Allocate channel resources
1070 * @dchan: DMA channel
1071 *
1072 * Return: '0' on success and failure value on error
1073 */
1074static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1075{
1076        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1077        int i;
1078
1079        /* Has this channel already been allocated? */
1080        if (chan->desc_pool)
1081                return 0;
1082
1083        /*
1084         * We need the descriptor to be aligned to 64 bytes
1085         * to meet the Xilinx VDMA specification requirement.
1086         */
1087        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1088                /* Allocate the buffer descriptors. */
1089                chan->seg_v = dma_alloc_coherent(chan->dev,
1090                                                 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1091                                                 &chan->seg_p, GFP_KERNEL);
1092                if (!chan->seg_v) {
1093                        dev_err(chan->dev,
1094                                "unable to allocate channel %d descriptors\n",
1095                                chan->id);
1096                        return -ENOMEM;
1097                }
1098                /*
1099                 * For cyclic DMA mode the tail descriptor register must be
1100                 * programmed with a value that is not part of the BD chain,
1101                 * so allocate a spare descriptor segment during channel
1102                 * allocation for programming the tail descriptor.
1103                 */
1104                chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1105                                                        sizeof(*chan->cyclic_seg_v),
1106                                                        &chan->cyclic_seg_p,
1107                                                        GFP_KERNEL);
1108                if (!chan->cyclic_seg_v) {
1109                        dev_err(chan->dev,
1110                                "unable to allocate desc segment for cyclic DMA\n");
1111                        dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1112                                XILINX_DMA_NUM_DESCS, chan->seg_v,
1113                                chan->seg_p);
1114                        return -ENOMEM;
1115                }
1116                chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
1117
1118                for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1119                        chan->seg_v[i].hw.next_desc =
1120                        lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1121                                ((i + 1) % XILINX_DMA_NUM_DESCS));
1122                        chan->seg_v[i].hw.next_desc_msb =
1123                        upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1124                                ((i + 1) % XILINX_DMA_NUM_DESCS));
1125                        chan->seg_v[i].phys = chan->seg_p +
1126                                sizeof(*chan->seg_v) * i;
1127                        list_add_tail(&chan->seg_v[i].node,
1128                                      &chan->free_seg_list);
1129                }
1130        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1131                /* Allocate the buffer descriptors. */
1132                chan->seg_mv = dma_alloc_coherent(chan->dev,
1133                                                  sizeof(*chan->seg_mv) *
1134                                                  XILINX_DMA_NUM_DESCS,
1135                                                  &chan->seg_p, GFP_KERNEL);
1136                if (!chan->seg_mv) {
1137                        dev_err(chan->dev,
1138                                "unable to allocate channel %d descriptors\n",
1139                                chan->id);
1140                        return -ENOMEM;
1141                }
1142                for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1143                        chan->seg_mv[i].hw.next_desc =
1144                        lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1145                                ((i + 1) % XILINX_DMA_NUM_DESCS));
1146                        chan->seg_mv[i].hw.next_desc_msb =
1147                        upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1148                                ((i + 1) % XILINX_DMA_NUM_DESCS));
1149                        chan->seg_mv[i].phys = chan->seg_p +
1150                                sizeof(*chan->seg_mv) * i;
1151                        list_add_tail(&chan->seg_mv[i].node,
1152                                      &chan->free_seg_list);
1153                }
1154        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1155                chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1156                                   chan->dev,
1157                                   sizeof(struct xilinx_cdma_tx_segment),
1158                                   __alignof__(struct xilinx_cdma_tx_segment),
1159                                   0);
1160        } else {
1161                chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1162                                     chan->dev,
1163                                     sizeof(struct xilinx_vdma_tx_segment),
1164                                     __alignof__(struct xilinx_vdma_tx_segment),
1165                                     0);
1166        }
1167
1168        if (!chan->desc_pool &&
1169            ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1170                chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1171                dev_err(chan->dev,
1172                        "unable to allocate channel %d descriptor pool\n",
1173                        chan->id);
1174                return -ENOMEM;
1175        }
1176
1177        dma_cookie_init(dchan);
1178
1179        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1180                /* For AXI DMA, resetting one channel resets the other
1181                 * channel as well, so enable the interrupts here.
1182                 */
1183                dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1184                              XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1185        }
1186
1187        if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1188                dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1189                             XILINX_CDMA_CR_SGMODE);
1190
1191        return 0;
1192}
1193
1194/**
1195 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1196 * @chan: Driver specific DMA channel
1197 * @size: Total data that needs to be copied
1198 * @done: Amount of data that has been already copied
1199 *
1200 * Return: Amount of data that has to be copied
1201 */
1202static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
1203                                    int size, int done)
1204{
1205        size_t copy;
1206
1207        copy = min_t(size_t, size - done,
1208                     chan->xdev->max_buffer_len);
1209
1210        if ((copy + done < size) &&
1211            chan->xdev->common.copy_align) {
1212                /*
1213                 * If this is not the last descriptor, make sure
1214                 * the next one will be properly aligned
1215                 */
1216                copy = rounddown(copy,
1217                                 (1 << chan->xdev->common.copy_align));
1218        }
1219        return copy;
1220}
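
/*
 * Illustrative only: a worked example of the rounding above, assuming
 * max_buffer_len = SZ_64K - 1 and copy_align = 3 (8-byte alignment), both
 * made-up values. For size = 100000 and done = 0:
 *
 *	copy = min_t(size_t, 100000 - 0, 65535);         evaluates to 65535
 *	copy = rounddown(copy, 1 << 3);                  evaluates to 65528
 *
 * so the next descriptor starts 8-byte aligned, and the final chunk
 * (done = 65528) copies the remaining 34472 bytes without rounding.
 */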
1221
1222/**
1223 * xilinx_dma_tx_status - Get DMA transaction status
1224 * @dchan: DMA channel
1225 * @cookie: Transaction identifier
1226 * @txstate: Transaction state
1227 *
1228 * Return: DMA transaction status
1229 */
1230static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1231                                        dma_cookie_t cookie,
1232                                        struct dma_tx_state *txstate)
1233{
1234        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1235        struct xilinx_dma_tx_descriptor *desc;
1236        enum dma_status ret;
1237        unsigned long flags;
1238        u32 residue = 0;
1239
1240        ret = dma_cookie_status(dchan, cookie, txstate);
1241        if (ret == DMA_COMPLETE || !txstate)
1242                return ret;
1243
1244        spin_lock_irqsave(&chan->lock, flags);
1245        if (!list_empty(&chan->active_list)) {
1246                desc = list_last_entry(&chan->active_list,
1247                                       struct xilinx_dma_tx_descriptor, node);
1248                /*
1249                 * VDMA and simple mode do not support residue reporting, so the
1250                 * residue field will always be 0.
1251                 */
1252                if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1253                        residue = xilinx_dma_get_residue(chan, desc);
1254        }
1255        spin_unlock_irqrestore(&chan->lock, flags);
1256
1257        dma_set_residue(txstate, residue);
1258
1259        return ret;
1260}
1261
1262/**
1263 * xilinx_dma_stop_transfer - Halt DMA channel
1264 * @chan: Driver specific DMA channel
1265 *
1266 * Return: '0' on success and failure value on error
1267 */
1268static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1269{
1270        u32 val;
1271
1272        dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1273
1274        /* Wait for the hardware to halt */
1275        return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1276                                       val & XILINX_DMA_DMASR_HALTED, 0,
1277                                       XILINX_DMA_LOOP_COUNT);
1278}
1279
1280/**
1281 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1282 * @chan: Driver specific DMA channel
1283 *
1284 * Return: '0' on success and failure value on error
1285 */
1286static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1287{
1288        u32 val;
1289
1290        return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1291                                       val & XILINX_DMA_DMASR_IDLE, 0,
1292                                       XILINX_DMA_LOOP_COUNT);
1293}
1294
1295/**
1296 * xilinx_dma_start - Start DMA channel
1297 * @chan: Driver specific DMA channel
1298 */
1299static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1300{
1301        int err;
1302        u32 val;
1303
1304        dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1305
1306        /* Wait for the hardware to start */
1307        err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1308                                      !(val & XILINX_DMA_DMASR_HALTED), 0,
1309                                      XILINX_DMA_LOOP_COUNT);
1310
1311        if (err) {
1312                dev_err(chan->dev, "Cannot start channel %p: %x\n",
1313                        chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1314
1315                chan->err = true;
1316        }
1317}
1318
1319/**
1320 * xilinx_vdma_start_transfer - Starts VDMA transfer
1321 * @chan: Driver specific channel struct pointer
1322 */
1323static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1324{
1325        struct xilinx_vdma_config *config = &chan->config;
1326        struct xilinx_dma_tx_descriptor *desc;
1327        u32 reg, j;
1328        struct xilinx_vdma_tx_segment *segment, *last = NULL;
1329        int i = 0;
1330
1331        /* This function was invoked with lock held */
1332        if (chan->err)
1333                return;
1334
1335        if (!chan->idle)
1336                return;
1337
1338        if (list_empty(&chan->pending_list))
1339                return;
1340
1341        desc = list_first_entry(&chan->pending_list,
1342                                struct xilinx_dma_tx_descriptor, node);
1343
1344        /* Configure the hardware using info in the config structure */
1345        if (chan->has_vflip) {
1346                reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1347                reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1348                reg |= config->vflip_en;
1349                dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1350                          reg);
1351        }
1352
1353        reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1354
1355        if (config->frm_cnt_en)
1356                reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1357        else
1358                reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1359
1360        /* If not parking, enable circular mode */
1361        if (config->park)
1362                reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1363        else
1364                reg |= XILINX_DMA_DMACR_CIRC_EN;
1365
1366        dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1367
1368        j = chan->desc_submitcount;
1369        reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1370        if (chan->direction == DMA_MEM_TO_DEV) {
1371                reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1372                reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1373        } else {
1374                reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1375                reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1376        }
1377        dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1378
1379        /* Start the hardware */
1380        xilinx_dma_start(chan);
1381
1382        if (chan->err)
1383                return;
1384
1385        /* Start the transfer */
1386        if (chan->desc_submitcount < chan->num_frms)
1387                i = chan->desc_submitcount;
1388
1389        list_for_each_entry(segment, &desc->segments, node) {
1390                if (chan->ext_addr)
1391                        vdma_desc_write_64(chan,
1392                                   XILINX_VDMA_REG_START_ADDRESS_64(i++),
1393                                   segment->hw.buf_addr,
1394                                   segment->hw.buf_addr_msb);
1395                else
1396                        vdma_desc_write(chan,
1397                                        XILINX_VDMA_REG_START_ADDRESS(i++),
1398                                        segment->hw.buf_addr);
1399
1400                last = segment;
1401        }
1402
1403        if (!last)
1404                return;
1405
1406        /* HW expects these parameters to be the same for one transaction */
1407        vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1408        vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1409                        last->hw.stride);
1410        vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1411
1412        chan->desc_submitcount++;
1413        chan->desc_pendingcount--;
1414        list_del(&desc->node);
1415        list_add_tail(&desc->node, &chan->active_list);
1416        if (chan->desc_submitcount == chan->num_frms)
1417                chan->desc_submitcount = 0;
1418
1419        chan->idle = false;
1420}
1421
1422/**
1423 * xilinx_cdma_start_transfer - Starts CDMA transfer
1424 * @chan: Driver specific channel struct pointer
1425 */
1426static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1427{
1428        struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1429        struct xilinx_cdma_tx_segment *tail_segment;
1430        u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1431
1432        if (chan->err)
1433                return;
1434
1435        if (!chan->idle)
1436                return;
1437
1438        if (list_empty(&chan->pending_list))
1439                return;
1440
1441        head_desc = list_first_entry(&chan->pending_list,
1442                                     struct xilinx_dma_tx_descriptor, node);
1443        tail_desc = list_last_entry(&chan->pending_list,
1444                                    struct xilinx_dma_tx_descriptor, node);
1445        tail_segment = list_last_entry(&tail_desc->segments,
1446                                       struct xilinx_cdma_tx_segment, node);
1447
1448        if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1449                ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1450                ctrl_reg |= chan->desc_pendingcount <<
1451                                XILINX_DMA_CR_COALESCE_SHIFT;
1452                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1453        }
1454
1455        if (chan->has_sg) {
1456                dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1457                             XILINX_CDMA_CR_SGMODE);
1458
1459                dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1460                             XILINX_CDMA_CR_SGMODE);
1461
1462                xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1463                             head_desc->async_tx.phys);
1464
1465                /* Update tail ptr register which will start the transfer */
1466                xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1467                             tail_segment->phys);
1468        } else {
1469                /* In simple mode */
1470                struct xilinx_cdma_tx_segment *segment;
1471                struct xilinx_cdma_desc_hw *hw;
1472
1473                segment = list_first_entry(&head_desc->segments,
1474                                           struct xilinx_cdma_tx_segment,
1475                                           node);
1476
1477                hw = &segment->hw;
1478
1479                xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1480                             xilinx_prep_dma_addr_t(hw->src_addr));
1481                xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1482                             xilinx_prep_dma_addr_t(hw->dest_addr));
1483
1484                /* Start the transfer */
1485                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1486                                hw->control & chan->xdev->max_buffer_len);
1487        }
1488
1489        list_splice_tail_init(&chan->pending_list, &chan->active_list);
1490        chan->desc_pendingcount = 0;
1491        chan->idle = false;
1492}
1493
1494/**
1495 * xilinx_dma_start_transfer - Starts DMA transfer
1496 * @chan: Driver specific channel struct pointer
1497 */
1498static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1499{
1500        struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1501        struct xilinx_axidma_tx_segment *tail_segment;
1502        u32 reg;
1503
1504        if (chan->err)
1505                return;
1506
1507        if (list_empty(&chan->pending_list))
1508                return;
1509
1510        if (!chan->idle)
1511                return;
1512
1513        head_desc = list_first_entry(&chan->pending_list,
1514                                     struct xilinx_dma_tx_descriptor, node);
1515        tail_desc = list_last_entry(&chan->pending_list,
1516                                    struct xilinx_dma_tx_descriptor, node);
1517        tail_segment = list_last_entry(&tail_desc->segments,
1518                                       struct xilinx_axidma_tx_segment, node);
1519
1520        reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1521
1522        if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1523                reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1524                reg |= chan->desc_pendingcount <<
1525                                  XILINX_DMA_CR_COALESCE_SHIFT;
1526                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1527        }
1528
1529        if (chan->has_sg)
1530                xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1531                             head_desc->async_tx.phys);
1532
1533        xilinx_dma_start(chan);
1534
1535        if (chan->err)
1536                return;
1537
1538        /* Start the transfer */
1539        if (chan->has_sg) {
1540                if (chan->cyclic)
1541                        xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1542                                     chan->cyclic_seg_v->phys);
1543                else
1544                        xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1545                                     tail_segment->phys);
1546        } else {
1547                struct xilinx_axidma_tx_segment *segment;
1548                struct xilinx_axidma_desc_hw *hw;
1549
1550                segment = list_first_entry(&head_desc->segments,
1551                                           struct xilinx_axidma_tx_segment,
1552                                           node);
1553                hw = &segment->hw;
1554
1555                xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1556                             xilinx_prep_dma_addr_t(hw->buf_addr));
1557
1558                /* Start the transfer */
1559                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1560                               hw->control & chan->xdev->max_buffer_len);
1561        }
1562
1563        list_splice_tail_init(&chan->pending_list, &chan->active_list);
1564        chan->desc_pendingcount = 0;
1565        chan->idle = false;
1566}
1567
1568/**
1569 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1570 * @chan: Driver specific channel struct pointer
1571 */
1572static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1573{
1574        struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1575        struct xilinx_aximcdma_tx_segment *tail_segment;
1576        u32 reg;
1577
1578        /*
1579         * The lock is already held by the calling function, so there is
1580         * no need to take it here again.
1581         */
1582
1583        if (chan->err)
1584                return;
1585
1586        if (!chan->idle)
1587                return;
1588
1589        if (list_empty(&chan->pending_list))
1590                return;
1591
1592        head_desc = list_first_entry(&chan->pending_list,
1593                                     struct xilinx_dma_tx_descriptor, node);
1594        tail_desc = list_last_entry(&chan->pending_list,
1595                                    struct xilinx_dma_tx_descriptor, node);
1596        tail_segment = list_last_entry(&tail_desc->segments,
1597                                       struct xilinx_aximcdma_tx_segment, node);
1598
1599        reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1600
1601        if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1602                reg &= ~XILINX_MCDMA_COALESCE_MASK;
1603                reg |= chan->desc_pendingcount <<
1604                        XILINX_MCDMA_COALESCE_SHIFT;
1605        }
1606
1607        reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1608        dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1609
1610        /* Program current descriptor */
1611        xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1612                     head_desc->async_tx.phys);
1613
1614        /* Program channel enable register */
1615        reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1616        reg |= BIT(chan->tdest);
1617        dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1618
1619        /* Start the fetch of BDs for the channel */
1620        reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1621        reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1622        dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1623
1624        xilinx_dma_start(chan);
1625
1626        if (chan->err)
1627                return;
1628
1629        /* Start the transfer */
1630        xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1631                     tail_segment->phys);
1632
1633        list_splice_tail_init(&chan->pending_list, &chan->active_list);
1634        chan->desc_pendingcount = 0;
1635        chan->idle = false;
1636}
1637
1638/**
1639 * xilinx_dma_issue_pending - Issue pending transactions
1640 * @dchan: DMA channel
1641 */
1642static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1643{
1644        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1645        unsigned long flags;
1646
1647        spin_lock_irqsave(&chan->lock, flags);
1648        chan->start_transfer(chan);
1649        spin_unlock_irqrestore(&chan->lock, flags);
1650}
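
/*
 * Client-side usage sketch (illustrative only, not part of this driver):
 * a dmaengine consumer queues prepared descriptors with dmaengine_submit()
 * and nothing is started until dma_async_issue_pending() is called, which
 * lands here and kicks chan->start_transfer(). The callback and context
 * names below are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_xfer_done;		(hypothetical completion hook)
 *	txd->callback_param = my_ctx;
 *	cookie = dmaengine_submit(txd);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	dma_async_issue_pending(chan);		(transfer starts only now)
 */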
1651
1652/**
1653 * xilinx_dma_complete_descriptor - Mark active descriptors as complete
1654 * @chan: xilinx DMA channel
1655 *
1656 * CONTEXT: hardirq
1657 */
1658static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1659{
1660        struct xilinx_dma_tx_descriptor *desc, *next;
1661
1662        /* This function is called with the channel lock held */
1663        if (list_empty(&chan->active_list))
1664                return;
1665
1666        list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1667                if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1668                    XDMA_TYPE_VDMA)
1669                        desc->residue = xilinx_dma_get_residue(chan, desc);
1670                else
1671                        desc->residue = 0;
1672                desc->err = chan->err;
1673
1674                list_del(&desc->node);
1675                if (!desc->cyclic)
1676                        dma_cookie_complete(&desc->async_tx);
1677                list_add_tail(&desc->node, &chan->done_list);
1678        }
1679}
1680
1681/**
1682 * xilinx_dma_reset - Reset DMA channel
1683 * @chan: Driver specific DMA channel
1684 *
1685 * Return: '0' on success and failure value on error
1686 */
1687static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1688{
1689        int err;
1690        u32 tmp;
1691
1692        dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1693
1694        /* Wait for the hardware to finish reset */
1695        err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1696                                      !(tmp & XILINX_DMA_DMACR_RESET), 0,
1697                                      XILINX_DMA_LOOP_COUNT);
1698
1699        if (err) {
1700                dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1701                        dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1702                        dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1703                return -ETIMEDOUT;
1704        }
1705
1706        chan->err = false;
1707        chan->idle = true;
1708        chan->desc_pendingcount = 0;
1709        chan->desc_submitcount = 0;
1710
1711        return err;
1712}
1713
1714/**
1715 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1716 * @chan: Driver specific DMA channel
1717 *
1718 * Return: '0' on success and failure value on error
1719 */
1720static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1721{
1722        int err;
1723
1724        /* Reset VDMA */
1725        err = xilinx_dma_reset(chan);
1726        if (err)
1727                return err;
1728
1729        /* Enable interrupts */
1730        dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1731                      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1732
1733        return 0;
1734}
1735
1736/**
1737 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1738 * @irq: IRQ number
1739 * @data: Pointer to the Xilinx MCDMA channel structure
1740 *
1741 * Return: IRQ_HANDLED/IRQ_NONE
1742 */
1743static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1744{
1745        struct xilinx_dma_chan *chan = data;
1746        u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1747
1748        if (chan->direction == DMA_DEV_TO_MEM)
1749                ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1750        else
1751                ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
1752
1753        /* Read the channel id raising the interrupt */
1754        chan_sermask = dma_ctrl_read(chan, ser_offset);
1755        chan_id = ffs(chan_sermask);
1756
1757        if (!chan_id)
1758                return IRQ_NONE;
1759
1760        if (chan->direction == DMA_DEV_TO_MEM)
1761                chan_offset = chan->xdev->dma_config->max_channels / 2;
1762
1763        chan_offset = chan_offset + (chan_id - 1);
1764        chan = chan->xdev->chan[chan_offset];
1765        /* Read the status and ack the interrupts. */
1766        status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1767        if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1768                return IRQ_NONE;
1769
1770        dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1771                       status & XILINX_MCDMA_IRQ_ALL_MASK);
1772
1773        if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1774                dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1775                        chan,
1776                        dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1777                        dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1778                                      (chan->tdest)),
1779                        dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
1780                                      (chan->tdest)));
1781                chan->err = true;
1782        }
1783
1784        if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
1785                /*
1786                 * The device is taking too long to complete the transfer
1787                 * while the user requires responsiveness.
1788                 */
1789                dev_dbg(chan->dev, "Inter-packet latency too long\n");
1790        }
1791
1792        if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1793                spin_lock(&chan->lock);
1794                xilinx_dma_complete_descriptor(chan);
1795                chan->idle = true;
1796                chan->start_transfer(chan);
1797                spin_unlock(&chan->lock);
1798        }
1799
1800        tasklet_schedule(&chan->tasklet);
1801        return IRQ_HANDLED;
1802}
1803
1804/**
1805 * xilinx_dma_irq_handler - DMA Interrupt handler
1806 * @irq: IRQ number
1807 * @data: Pointer to the Xilinx DMA channel structure
1808 *
1809 * Return: IRQ_HANDLED/IRQ_NONE
1810 */
1811static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1812{
1813        struct xilinx_dma_chan *chan = data;
1814        u32 status;
1815
1816        /* Read the status and ack the interrupts. */
1817        status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1818        if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1819                return IRQ_NONE;
1820
1821        dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1822                        status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1823
1824        if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1825                /*
1826                 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1827                 * error is recoverable, ignore it. Otherwise flag the error.
1828                 *
1829                 * Only recoverable errors can be cleared in the DMASR register,
1830                 * so make sure not to set any other error bits to 1.
1831                 */
1832                u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1833
1834                dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1835                                errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1836
1837                if (!chan->flush_on_fsync ||
1838                    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1839                        dev_err(chan->dev,
1840                                "Channel %p has errors %x, cdr %x tdr %x\n",
1841                                chan, errors,
1842                                dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1843                                dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1844                        chan->err = true;
1845                }
1846        }
1847
1848        if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1849                /*
1850                 * The device is taking too long to complete the transfer
1851                 * while the user requires responsiveness.
1852                 */
1853                dev_dbg(chan->dev, "Inter-packet latency too long\n");
1854        }
1855
1856        if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1857                spin_lock(&chan->lock);
1858                xilinx_dma_complete_descriptor(chan);
1859                chan->idle = true;
1860                chan->start_transfer(chan);
1861                spin_unlock(&chan->lock);
1862        }
1863
1864        tasklet_schedule(&chan->tasklet);
1865        return IRQ_HANDLED;
1866}
1867
1868/**
1869 * append_desc_queue - Queue a descriptor on the pending list
1870 * @chan: Driver specific dma channel
1871 * @desc: dma transaction descriptor
1872 */
1873static void append_desc_queue(struct xilinx_dma_chan *chan,
1874                              struct xilinx_dma_tx_descriptor *desc)
1875{
1876        struct xilinx_vdma_tx_segment *tail_segment;
1877        struct xilinx_dma_tx_descriptor *tail_desc;
1878        struct xilinx_axidma_tx_segment *axidma_tail_segment;
1879        struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1880        struct xilinx_cdma_tx_segment *cdma_tail_segment;
1881
1882        if (list_empty(&chan->pending_list))
1883                goto append;
1884
1885        /*
1886         * Add the hardware descriptor to the chain of hardware descriptors
1887         * that already exists in memory.
1888         */
1889        tail_desc = list_last_entry(&chan->pending_list,
1890                                    struct xilinx_dma_tx_descriptor, node);
1891        if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1892                tail_segment = list_last_entry(&tail_desc->segments,
1893                                               struct xilinx_vdma_tx_segment,
1894                                               node);
1895                tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1896        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1897                cdma_tail_segment = list_last_entry(&tail_desc->segments,
1898                                                struct xilinx_cdma_tx_segment,
1899                                                node);
1900                cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1901        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1902                axidma_tail_segment = list_last_entry(&tail_desc->segments,
1903                                               struct xilinx_axidma_tx_segment,
1904                                               node);
1905                axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1906        } else {
1907                aximcdma_tail_segment =
1908                        list_last_entry(&tail_desc->segments,
1909                                        struct xilinx_aximcdma_tx_segment,
1910                                        node);
1911                aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1912        }
1913
1914        /*
1915         * Add the software descriptor and all children to the list
1916         * of pending transactions
1917         */
1918append:
1919        list_add_tail(&desc->node, &chan->pending_list);
1920        chan->desc_pendingcount++;
1921
1922        if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1923            && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1924                dev_dbg(chan->dev, "desc pendingcount is too high\n");
1925                chan->desc_pendingcount = chan->num_frms;
1926        }
1927}
1928
1929/**
1930 * xilinx_dma_tx_submit - Submit DMA transaction
1931 * @tx: Async transaction descriptor
1932 *
1933 * Return: cookie value on success and failure value on error
1934 */
1935static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1936{
1937        struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1938        struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1939        dma_cookie_t cookie;
1940        unsigned long flags;
1941        int err;
1942
1943        if (chan->cyclic) {
1944                xilinx_dma_free_tx_descriptor(chan, desc);
1945                return -EBUSY;
1946        }
1947
1948        if (chan->err) {
1949                /*
1950                 * If the reset fails, the channel is no longer functional
1951                 * and the system needs a hard reset.
1952                 */
1953                err = xilinx_dma_chan_reset(chan);
1954                if (err < 0)
1955                        return err;
1956        }
1957
1958        spin_lock_irqsave(&chan->lock, flags);
1959
1960        cookie = dma_cookie_assign(tx);
1961
1962        /* Put this transaction onto the tail of the pending queue */
1963        append_desc_queue(chan, desc);
1964
1965        if (desc->cyclic)
1966                chan->cyclic = true;
1967
1968        spin_unlock_irqrestore(&chan->lock, flags);
1969
1970        return cookie;
1971}
1972
1973/**
1974 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1975 *      DMA_SLAVE transaction
1976 * @dchan: DMA channel
1977 * @xt: Interleaved template pointer
1978 * @flags: transfer ack flags
1979 *
1980 * Return: Async transaction descriptor on success and NULL on failure
1981 */
1982static struct dma_async_tx_descriptor *
1983xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1984                                 struct dma_interleaved_template *xt,
1985                                 unsigned long flags)
1986{
1987        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1988        struct xilinx_dma_tx_descriptor *desc;
1989        struct xilinx_vdma_tx_segment *segment;
1990        struct xilinx_vdma_desc_hw *hw;
1991
1992        if (!is_slave_direction(xt->dir))
1993                return NULL;
1994
1995        if (!xt->numf || !xt->sgl[0].size)
1996                return NULL;
1997
1998        if (xt->frame_size != 1)
1999                return NULL;
2000
2001        /* Allocate a transaction descriptor. */
2002        desc = xilinx_dma_alloc_tx_descriptor(chan);
2003        if (!desc)
2004                return NULL;
2005
2006        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2007        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2008        async_tx_ack(&desc->async_tx);
2009
2010        /* Allocate the link descriptor from DMA pool */
2011        segment = xilinx_vdma_alloc_tx_segment(chan);
2012        if (!segment)
2013                goto error;
2014
2015        /* Fill in the hardware descriptor */
2016        hw = &segment->hw;
2017        hw->vsize = xt->numf;
2018        hw->hsize = xt->sgl[0].size;
2019        hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2020                        XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2021        hw->stride |= chan->config.frm_dly <<
2022                        XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
2023
2024        if (xt->dir != DMA_MEM_TO_DEV) {
2025                if (chan->ext_addr) {
2026                        hw->buf_addr = lower_32_bits(xt->dst_start);
2027                        hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2028                } else {
2029                        hw->buf_addr = xt->dst_start;
2030                }
2031        } else {
2032                if (chan->ext_addr) {
2033                        hw->buf_addr = lower_32_bits(xt->src_start);
2034                        hw->buf_addr_msb = upper_32_bits(xt->src_start);
2035                } else {
2036                        hw->buf_addr = xt->src_start;
2037                }
2038        }
2039
2040        /* Insert the segment into the descriptor segments list. */
2041        list_add_tail(&segment->node, &desc->segments);
2042
2043        /* Link the last hardware descriptor with the first. */
2044        segment = list_first_entry(&desc->segments,
2045                                   struct xilinx_vdma_tx_segment, node);
2046        desc->async_tx.phys = segment->phys;
2047
2048        return &desc->async_tx;
2049
2050error:
2051        xilinx_dma_free_tx_descriptor(chan, desc);
2052        return NULL;
2053}
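
/*
 * Usage sketch (illustrative only, assumptions noted): a VDMA client could
 * describe one video frame with a dma_interleaved_template and hand it to
 * the generic dmaengine_prep_interleaved_dma() helper, which ends up in the
 * routine above. The geometry variables (height, width, bpp, stride) and
 * frame_dma are hypothetical; this driver accepts exactly one chunk per
 * frame (frame_size == 1), and the template is consumed during prep.
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	if (!xt)
 *		return -ENOMEM;
 *	xt->dir = DMA_DEV_TO_MEM;		(S2MM: capture into memory)
 *	xt->dst_start = frame_dma;		(bus address of the frame buffer)
 *	xt->numf = height;			(vsize: number of lines)
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width * bpp;		(hsize: active bytes per line)
 *	xt->sgl[0].icg = stride - xt->sgl[0].size;
 *	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
 *	kfree(xt);
 */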
2054
2055/**
2056 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
2057 * @dchan: DMA channel
2058 * @dma_dst: destination address
2059 * @dma_src: source address
2060 * @len: transfer length
2061 * @flags: transfer ack flags
2062 *
2063 * Return: Async transaction descriptor on success and NULL on failure
2064 */
2065static struct dma_async_tx_descriptor *
2066xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2067                        dma_addr_t dma_src, size_t len, unsigned long flags)
2068{
2069        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2070        struct xilinx_dma_tx_descriptor *desc;
2071        struct xilinx_cdma_tx_segment *segment;
2072        struct xilinx_cdma_desc_hw *hw;
2073
2074        if (!len || len > chan->xdev->max_buffer_len)
2075                return NULL;
2076
2077        desc = xilinx_dma_alloc_tx_descriptor(chan);
2078        if (!desc)
2079                return NULL;
2080
2081        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2082        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2083
2084        /* Allocate the link descriptor from DMA pool */
2085        segment = xilinx_cdma_alloc_tx_segment(chan);
2086        if (!segment)
2087                goto error;
2088
2089        hw = &segment->hw;
2090        hw->control = len;
2091        hw->src_addr = dma_src;
2092        hw->dest_addr = dma_dst;
2093        if (chan->ext_addr) {
2094                hw->src_addr_msb = upper_32_bits(dma_src);
2095                hw->dest_addr_msb = upper_32_bits(dma_dst);
2096        }
2097
2098        /* Insert the segment into the descriptor segments list. */
2099        list_add_tail(&segment->node, &desc->segments);
2100
2101        desc->async_tx.phys = segment->phys;
2102        hw->next_desc = segment->phys;
2103
2104        return &desc->async_tx;
2105
2106error:
2107        xilinx_dma_free_tx_descriptor(chan, desc);
2108        return NULL;
2109}
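
/*
 * Usage sketch (illustrative only): a memcpy offload client would normally
 * reach the routine above through the generic dmaengine memcpy helper. The
 * dst_dma/src_dma/len variables are hypothetical and must already be
 * DMA-mapped bus addresses.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */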
2110
2111/**
2112 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2113 * @dchan: DMA channel
2114 * @sgl: scatterlist to transfer to/from
2115 * @sg_len: number of entries in @sgl
2116 * @direction: DMA direction
2117 * @flags: transfer ack flags
2118 * @context: APP words of the descriptor
2119 *
2120 * Return: Async transaction descriptor on success and NULL on failure
2121 */
2122static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2123        struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
2124        enum dma_transfer_direction direction, unsigned long flags,
2125        void *context)
2126{
2127        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2128        struct xilinx_dma_tx_descriptor *desc;
2129        struct xilinx_axidma_tx_segment *segment = NULL;
2130        u32 *app_w = (u32 *)context;
2131        struct scatterlist *sg;
2132        size_t copy;
2133        size_t sg_used;
2134        unsigned int i;
2135
2136        if (!is_slave_direction(direction))
2137                return NULL;
2138
2139        /* Allocate a transaction descriptor. */
2140        desc = xilinx_dma_alloc_tx_descriptor(chan);
2141        if (!desc)
2142                return NULL;
2143
2144        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2145        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2146
2147        /* Build transactions using information in the scatter gather list */
2148        for_each_sg(sgl, sg, sg_len, i) {
2149                sg_used = 0;
2150
2151                /* Loop until the entire scatterlist entry is used */
2152                while (sg_used < sg_dma_len(sg)) {
2153                        struct xilinx_axidma_desc_hw *hw;
2154
2155                        /* Get a free segment */
2156                        segment = xilinx_axidma_alloc_tx_segment(chan);
2157                        if (!segment)
2158                                goto error;
2159
2160                        /*
2161                         * Calculate the maximum number of bytes to transfer,
2162                         * making sure it is less than the hw limit
2163                         */
2164                        copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
2165                                                        sg_used);
2166                        hw = &segment->hw;
2167
2168                        /* Fill in the descriptor */
2169                        xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
2170                                          sg_used, 0);
2171
2172                        hw->control = copy;
2173
2174                        if (chan->direction == DMA_MEM_TO_DEV) {
2175                                if (app_w)
2176                                        memcpy(hw->app, app_w, sizeof(u32) *
2177                                               XILINX_DMA_NUM_APP_WORDS);
2178                        }
2179
2180                        sg_used += copy;
2181
2182                        /*
2183                         * Insert the segment into the descriptor segments
2184                         * list.
2185                         */
2186                        list_add_tail(&segment->node, &desc->segments);
2187                }
2188        }
2189
2190        segment = list_first_entry(&desc->segments,
2191                                   struct xilinx_axidma_tx_segment, node);
2192        desc->async_tx.phys = segment->phys;
2193
2194        /* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
2195        if (chan->direction == DMA_MEM_TO_DEV) {
2196                segment->hw.control |= XILINX_DMA_BD_SOP;
2197                segment = list_last_entry(&desc->segments,
2198                                          struct xilinx_axidma_tx_segment,
2199                                          node);
2200                segment->hw.control |= XILINX_DMA_BD_EOP;
2201        }
2202
2203        return &desc->async_tx;
2204
2205error:
2206        xilinx_dma_free_tx_descriptor(chan, desc);
2207        return NULL;
2208}
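
/*
 * Usage sketch (illustrative only, names hypothetical): a slave client maps
 * its scatterlist and passes it through the generic dmaengine helper, which
 * calls into the routine above. Unmapping belongs after the completion
 * callback has run.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	int nents;
 *
 *	nents = dma_map_sg(dma_dev, my_sgl, my_sg_count, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	txd = dmaengine_prep_slave_sg(chan, my_sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	if (!txd) {
 *		dma_unmap_sg(dma_dev, my_sgl, my_sg_count, DMA_TO_DEVICE);
 *		return -ENOMEM;
 *	}
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */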
2209
2210/**
2211 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
2212 * @dchan: DMA channel
2213 * @buf_addr: Physical address of the buffer
2214 * @buf_len: Total length of the cyclic buffers
2215 * @period_len: length of individual cyclic buffer
2216 * @direction: DMA direction
2217 * @flags: transfer ack flags
2218 *
2219 * Return: Async transaction descriptor on success and NULL on failure
2220 */
2221static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2222        struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2223        size_t period_len, enum dma_transfer_direction direction,
2224        unsigned long flags)
2225{
2226        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2227        struct xilinx_dma_tx_descriptor *desc;
2228        struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2229        size_t copy, sg_used;
2230        unsigned int num_periods;
2231        int i;
2232        u32 reg;
2233
2234        if (!period_len)
2235                return NULL;
2236
2237        num_periods = buf_len / period_len;
2238
2239        if (!num_periods)
2240                return NULL;
2241
2242        if (!is_slave_direction(direction))
2243                return NULL;
2244
2245        /* Allocate a transaction descriptor. */
2246        desc = xilinx_dma_alloc_tx_descriptor(chan);
2247        if (!desc)
2248                return NULL;
2249
2250        chan->direction = direction;
2251        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2252        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2253
2254        for (i = 0; i < num_periods; ++i) {
2255                sg_used = 0;
2256
2257                while (sg_used < period_len) {
2258                        struct xilinx_axidma_desc_hw *hw;
2259
2260                        /* Get a free segment */
2261                        segment = xilinx_axidma_alloc_tx_segment(chan);
2262                        if (!segment)
2263                                goto error;
2264
2265                        /*
2266                         * Calculate the maximum number of bytes to transfer,
2267                         * making sure it is less than the hw limit
2268                         */
2269                        copy = xilinx_dma_calc_copysize(chan, period_len,
2270                                                        sg_used);
2271                        hw = &segment->hw;
2272                        xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
2273                                          period_len * i);
2274                        hw->control = copy;
2275
2276                        if (prev)
2277                                prev->hw.next_desc = segment->phys;
2278
2279                        prev = segment;
2280                        sg_used += copy;
2281
2282                        /*
2283                         * Insert the segment into the descriptor segments
2284                         * list.
2285                         */
2286                        list_add_tail(&segment->node, &desc->segments);
2287                }
2288        }
2289
2290        head_segment = list_first_entry(&desc->segments,
2291                                   struct xilinx_axidma_tx_segment, node);
2292        desc->async_tx.phys = head_segment->phys;
2293
2294        desc->cyclic = true;
2295        reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2296        reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2297        dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2298
2299        segment = list_last_entry(&desc->segments,
2300                                  struct xilinx_axidma_tx_segment,
2301                                  node);
2302        segment->hw.next_desc = (u32) head_segment->phys;
2303
2304        /* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
2305        if (direction == DMA_MEM_TO_DEV) {
2306                head_segment->hw.control |= XILINX_DMA_BD_SOP;
2307                segment->hw.control |= XILINX_DMA_BD_EOP;
2308        }
2309
2310        return &desc->async_tx;
2311
2312error:
2313        xilinx_dma_free_tx_descriptor(chan, desc);
2314        return NULL;
2315}
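
/*
 * Usage sketch (illustrative only): cyclic transfers cover the streaming use
 * case where a ring buffer is split into equal periods and the completion
 * callback fires once per period until the channel is terminated. ring_dma,
 * ring_bytes, period_bytes and my_period_elapsed are hypothetical.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_bytes,
 *					period_bytes, DMA_DEV_TO_MEM,
 *					DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *	txd->callback = my_period_elapsed;
 *	txd->callback_param = my_ctx;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */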
2316
2317/**
2318 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
2319 * @dchan: DMA channel
2320 * @sgl: scatterlist to transfer to/from
2321 * @sg_len: number of entries in @sgl
2322 * @direction: DMA direction
2323 * @flags: transfer ack flags
2324 * @context: APP words of the descriptor
2325 *
2326 * Return: Async transaction descriptor on success and NULL on failure
2327 */
2328static struct dma_async_tx_descriptor *
2329xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2330                           unsigned int sg_len,
2331                           enum dma_transfer_direction direction,
2332                           unsigned long flags, void *context)
2333{
2334        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2335        struct xilinx_dma_tx_descriptor *desc;
2336        struct xilinx_aximcdma_tx_segment *segment = NULL;
2337        u32 *app_w = (u32 *)context;
2338        struct scatterlist *sg;
2339        size_t copy;
2340        size_t sg_used;
2341        unsigned int i;
2342
2343        if (!is_slave_direction(direction))
2344                return NULL;
2345
2346        /* Allocate a transaction descriptor. */
2347        desc = xilinx_dma_alloc_tx_descriptor(chan);
2348        if (!desc)
2349                return NULL;
2350
2351        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2352        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2353
2354        /* Build transactions using information in the scatter gather list */
2355        for_each_sg(sgl, sg, sg_len, i) {
2356                sg_used = 0;
2357
2358                /* Loop until the entire scatterlist entry is used */
2359                while (sg_used < sg_dma_len(sg)) {
2360                        struct xilinx_aximcdma_desc_hw *hw;
2361
2362                        /* Get a free segment */
2363                        segment = xilinx_aximcdma_alloc_tx_segment(chan);
2364                        if (!segment)
2365                                goto error;
2366
2367                        /*
2368                         * Calculate the maximum number of bytes to transfer,
2369                         * making sure it is less than the hw limit
2370                         */
2371                        copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2372                                     chan->xdev->max_buffer_len);
2373                        hw = &segment->hw;
2374
2375                        /* Fill in the descriptor */
2376                        xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2377                                            sg_used);
2378                        hw->control = copy;
2379
2380                        if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2381                                memcpy(hw->app, app_w, sizeof(u32) *
2382                                       XILINX_DMA_NUM_APP_WORDS);
2383                        }
2384
2385                        sg_used += copy;
2386                        /*
2387                         * Insert the segment into the descriptor segments
2388                         * list.
2389                         */
2390                        list_add_tail(&segment->node, &desc->segments);
2391                }
2392        }
2393
2394        segment = list_first_entry(&desc->segments,
2395                                   struct xilinx_aximcdma_tx_segment, node);
2396        desc->async_tx.phys = segment->phys;
2397
2398        /* For DMA_MEM_TO_DEV, set SOP on the first segment and EOP on the last */
2399        if (chan->direction == DMA_MEM_TO_DEV) {
2400                segment->hw.control |= XILINX_MCDMA_BD_SOP;
2401                segment = list_last_entry(&desc->segments,
2402                                          struct xilinx_aximcdma_tx_segment,
2403                                          node);
2404                segment->hw.control |= XILINX_MCDMA_BD_EOP;
2405        }
2406
2407        return &desc->async_tx;
2408
2409error:
2410        xilinx_dma_free_tx_descriptor(chan, desc);
2411
2412        return NULL;
2413}
2414
2415/**
2416 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2417 * @dchan: Driver specific DMA Channel pointer
2418 *
2419 * Return: '0' always.
2420 */
2421static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2422{
2423        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2424        u32 reg;
2425        int err;
2426
2427        if (!chan->cyclic) {
2428                err = chan->stop_transfer(chan);
2429                if (err) {
2430                        dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2431                                chan, dma_ctrl_read(chan,
2432                                XILINX_DMA_REG_DMASR));
2433                        chan->err = true;
2434                }
2435        }
2436
2437        xilinx_dma_chan_reset(chan);
2438        /* Remove and free all of the descriptors in the lists */
2439        xilinx_dma_free_descriptors(chan);
2440        chan->idle = true;
2441
2442        if (chan->cyclic) {
2443                reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2444                reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2445                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2446                chan->cyclic = false;
2447        }
2448
2449        if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2450                dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2451                             XILINX_CDMA_CR_SGMODE);
2452
2453        return 0;
2454}
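
/*
 * Usage sketch (illustrative only): clients normally reach the routine above
 * through the dmaengine wrappers when tearing a channel down or aborting a
 * cyclic transfer.
 *
 *	dmaengine_terminate_sync(chan);		(halt and free queued work)
 *	dma_release_channel(chan);
 */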
2455
2456/**
2457 * xilinx_vdma_channel_set_config - Configure VDMA channel
2458 * Run-time configuration for AXI VDMA, supports:
2459 * . halt the channel
2460 * . configure interrupt coalescing and inter-packet delay threshold
2461 * . start/stop parking
2462 * . enable genlock
2463 *
2464 * @dchan: DMA channel
2465 * @cfg: VDMA device configuration pointer
2466 *
2467 * Return: '0' on success and failure value on error
2468 */
2469int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2470                                        struct xilinx_vdma_config *cfg)
2471{
2472        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2473        u32 dmacr;
2474
2475        if (cfg->reset)
2476                return xilinx_dma_chan_reset(chan);
2477
2478        dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2479
2480        chan->config.frm_dly = cfg->frm_dly;
2481        chan->config.park = cfg->park;
2482
2483        /* genlock settings */
2484        chan->config.gen_lock = cfg->gen_lock;
2485        chan->config.master = cfg->master;
2486
2487        dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2488        if (cfg->gen_lock && chan->genlock) {
2489                dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2490                dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2491                dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2492        }
2493
2494        chan->config.frm_cnt_en = cfg->frm_cnt_en;
2495        chan->config.vflip_en = cfg->vflip_en;
2496
2497        if (cfg->park)
2498                chan->config.park_frm = cfg->park_frm;
2499        else
2500                chan->config.park_frm = -1;
2501
2502        chan->config.coalesc = cfg->coalesc;
2503        chan->config.delay = cfg->delay;
2504
2505        if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2506                dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2507                dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2508                chan->config.coalesc = cfg->coalesc;
2509        }
2510
2511        if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2512                dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2513                dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2514                chan->config.delay = cfg->delay;
2515        }
2516
2517        /* FSync Source selection */
2518        dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2519        dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2520
2521        dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2522
2523        return 0;
2524}
2525EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
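
/*
 * Usage sketch (illustrative only): a video client holding a VDMA channel can
 * tune it at run time through the exported helper above, using the
 * xilinx_vdma_config structure from <linux/dma/xilinx_dma.h>. The values
 * below are arbitrary examples.
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,	(interrupt after 'coalesc' frames)
 *		.coalesc = 4,
 *		.park = 0,		(circular mode, cycle through frames)
 *	};
 *	int err = xilinx_vdma_channel_set_config(chan, &cfg);
 */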
2526
2527/* -----------------------------------------------------------------------------
2528 * Probe and remove
2529 */
2530
2531/**
2532 * xilinx_dma_chan_remove - Per Channel remove function
2533 * @chan: Driver specific DMA channel
2534 */
2535static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
2536{
2537        /* Disable all interrupts */
2538        dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2539                      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
2540
2541        if (chan->irq > 0)
2542                free_irq(chan->irq, chan);
2543
2544        tasklet_kill(&chan->tasklet);
2545
2546        list_del(&chan->common.device_node);
2547}
2548
2549static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2550                            struct clk **tx_clk, struct clk **rx_clk,
2551                            struct clk **sg_clk, struct clk **tmp_clk)
2552{
2553        int err;
2554
2555        *tmp_clk = NULL;
2556
2557        *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2558        if (IS_ERR(*axi_clk))
2559                return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2560
2561        *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2562        if (IS_ERR(*tx_clk))
2563                *tx_clk = NULL;
2564
2565        *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2566        if (IS_ERR(*rx_clk))
2567                *rx_clk = NULL;
2568
2569        *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2570        if (IS_ERR(*sg_clk))
2571                *sg_clk = NULL;
2572
2573        err = clk_prepare_enable(*axi_clk);
2574        if (err) {
2575                dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2576                return err;
2577        }
2578
2579        err = clk_prepare_enable(*tx_clk);
2580        if (err) {
2581                dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2582                goto err_disable_axiclk;
2583        }
2584
2585        err = clk_prepare_enable(*rx_clk);
2586        if (err) {
2587                dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2588                goto err_disable_txclk;
2589        }
2590
2591        err = clk_prepare_enable(*sg_clk);
2592        if (err) {
2593                dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2594                goto err_disable_rxclk;
2595        }
2596
2597        return 0;
2598
2599err_disable_rxclk:
2600        clk_disable_unprepare(*rx_clk);
2601err_disable_txclk:
2602        clk_disable_unprepare(*tx_clk);
2603err_disable_axiclk:
2604        clk_disable_unprepare(*axi_clk);
2605
2606        return err;
2607}
2608
2609static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2610                            struct clk **dev_clk, struct clk **tmp_clk,
2611                            struct clk **tmp1_clk, struct clk **tmp2_clk)
2612{
2613        int err;
2614
2615        *tmp_clk = NULL;
2616        *tmp1_clk = NULL;
2617        *tmp2_clk = NULL;
2618
2619        *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2620        if (IS_ERR(*axi_clk))
2621                return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2622
2623        *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2624        if (IS_ERR(*dev_clk))
2625                return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2626
2627        err = clk_prepare_enable(*axi_clk);
2628        if (err) {
2629                dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2630                return err;
2631        }
2632
2633        err = clk_prepare_enable(*dev_clk);
2634        if (err) {
2635                dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2636                goto err_disable_axiclk;
2637        }
2638
2639        return 0;
2640
2641err_disable_axiclk:
2642        clk_disable_unprepare(*axi_clk);
2643
2644        return err;
2645}
2646
2647static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2648                            struct clk **tx_clk, struct clk **txs_clk,
2649                            struct clk **rx_clk, struct clk **rxs_clk)
2650{
2651        int err;
2652
2653        *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2654        if (IS_ERR(*axi_clk))
2655                return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2656
2657        *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2658        if (IS_ERR(*tx_clk))
2659                *tx_clk = NULL;
2660
2661        *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2662        if (IS_ERR(*txs_clk))
2663                *txs_clk = NULL;
2664
2665        *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2666        if (IS_ERR(*rx_clk))
2667                *rx_clk = NULL;
2668
2669        *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2670        if (IS_ERR(*rxs_clk))
2671                *rxs_clk = NULL;
2672
2673        err = clk_prepare_enable(*axi_clk);
2674        if (err) {
2675                dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2676                        err);
2677                return err;
2678        }
2679
2680        err = clk_prepare_enable(*tx_clk);
2681        if (err) {
2682                dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2683                goto err_disable_axiclk;
2684        }
2685
2686        err = clk_prepare_enable(*txs_clk);
2687        if (err) {
2688                dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2689                goto err_disable_txclk;
2690        }
2691
2692        err = clk_prepare_enable(*rx_clk);
2693        if (err) {
2694                dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2695                goto err_disable_txsclk;
2696        }
2697
2698        err = clk_prepare_enable(*rxs_clk);
2699        if (err) {
2700                dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2701                goto err_disable_rxclk;
2702        }
2703
2704        return 0;
2705
2706err_disable_rxclk:
2707        clk_disable_unprepare(*rx_clk);
2708err_disable_txsclk:
2709        clk_disable_unprepare(*txs_clk);
2710err_disable_txclk:
2711        clk_disable_unprepare(*tx_clk);
2712err_disable_axiclk:
2713        clk_disable_unprepare(*axi_clk);
2714
2715        return err;
2716}
2717
2718static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2719{
2720        clk_disable_unprepare(xdev->rxs_clk);
2721        clk_disable_unprepare(xdev->rx_clk);
2722        clk_disable_unprepare(xdev->txs_clk);
2723        clk_disable_unprepare(xdev->tx_clk);
2724        clk_disable_unprepare(xdev->axi_clk);
2725}
2726
2727/**
2728 * xilinx_dma_chan_probe - Per Channel Probing
2729 * It gets the channel features from the device tree entry and
2730 * initializes the channel-specific handling routines.
2731 *
2732 * @xdev: Driver specific device structure
2733 * @node: Device node
2734 *
2735 * Return: '0' on success and failure value on error
2736 */
2737static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2738                                  struct device_node *node)
2739{
2740        struct xilinx_dma_chan *chan;
2741        bool has_dre = false;
2742        u32 value, width;
2743        int err;
2744
2745        /* Allocate and initialize the channel structure */
2746        chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
2747        if (!chan)
2748                return -ENOMEM;
2749
2750        chan->dev = xdev->dev;
2751        chan->xdev = xdev;
2752        chan->desc_pendingcount = 0x0;
2753        chan->ext_addr = xdev->ext_addr;
2754        /*
2755         * The 'idle' flag ensures that descriptors are not submitted while
2756         * the DMA engine is still busy. It avoids polling a bit in the
2757         * status register to learn the DMA state in the driver hot path.
2758         */
2759        chan->idle = true;
2760
2761        spin_lock_init(&chan->lock);
2762        INIT_LIST_HEAD(&chan->pending_list);
2763        INIT_LIST_HEAD(&chan->done_list);
2764        INIT_LIST_HEAD(&chan->active_list);
2765        INIT_LIST_HEAD(&chan->free_seg_list);
2766
2767        /* Retrieve the channel properties from the device tree */
2768        has_dre = of_property_read_bool(node, "xlnx,include-dre");
2769
2770        chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
2771
2772        err = of_property_read_u32(node, "xlnx,datawidth", &value);
2773        if (err) {
2774                dev_err(xdev->dev, "missing xlnx,datawidth property\n");
2775                return err;
2776        }
2777        width = value >> 3; /* Convert bits to bytes */
2778
2779        /* If data width is greater than 8 bytes, DRE is not in hw */
2780        if (width > 8)
2781                has_dre = false;
2782
2783        if (!has_dre)
2784                xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2785
2786        if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2787            of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2788            of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2789                chan->direction = DMA_MEM_TO_DEV;
2790                chan->id = xdev->mm2s_chan_id++;
2791                chan->tdest = chan->id;
2792
2793                chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2794                if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2795                        chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2796                        chan->config.park = 1;
2797
2798                        if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2799                            xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
2800                                chan->flush_on_fsync = true;
2801                }
2802        } else if (of_device_is_compatible(node,
2803                                           "xlnx,axi-vdma-s2mm-channel") ||
2804                   of_device_is_compatible(node,
2805                                           "xlnx,axi-dma-s2mm-channel")) {
2806                chan->direction = DMA_DEV_TO_MEM;
2807                chan->id = xdev->s2mm_chan_id++;
2808                chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2809                chan->has_vflip = of_property_read_bool(node,
2810                                        "xlnx,enable-vert-flip");
2811                if (chan->has_vflip) {
2812                        chan->config.vflip_en = dma_read(chan,
2813                                XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
2814                                XILINX_VDMA_ENABLE_VERTICAL_FLIP;
2815                }
2816
2817                if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2818                        chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
2819                else
2820                        chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2821
2822                if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2823                        chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2824                        chan->config.park = 1;
2825
2826                        if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2827                            xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
2828                                chan->flush_on_fsync = true;
2829                }
2830        } else {
2831                dev_err(xdev->dev, "Invalid channel compatible node\n");
2832                return -EINVAL;
2833        }
2834
2835        /* Request the interrupt */
2836        chan->irq = irq_of_parse_and_map(node, chan->tdest);
2837        err = request_irq(chan->irq, xdev->dma_config->irq_handler,
2838                          IRQF_SHARED, "xilinx-dma-controller", chan);
2839        if (err) {
2840                dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
2841                return err;
2842        }
2843
2844        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2845                chan->start_transfer = xilinx_dma_start_transfer;
2846                chan->stop_transfer = xilinx_dma_stop_transfer;
2847        } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2848                chan->start_transfer = xilinx_mcdma_start_transfer;
2849                chan->stop_transfer = xilinx_dma_stop_transfer;
2850        } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2851                chan->start_transfer = xilinx_cdma_start_transfer;
2852                chan->stop_transfer = xilinx_cdma_stop_transfer;
2853        } else {
2854                chan->start_transfer = xilinx_vdma_start_transfer;
2855                chan->stop_transfer = xilinx_dma_stop_transfer;
2856        }
2857
2858        /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
2859        if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
2860                if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
2861                    dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
2862                            XILINX_DMA_DMASR_SG_MASK)
2863                        chan->has_sg = true;
2864                dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
2865                        chan->has_sg ? "enabled" : "disabled");
2866        }
2867
2868        /* Initialize the tasklet that runs completed-descriptor callbacks */
2869        tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
2870
2871        /*
2872         * Initialize the DMA channel and add it to the DMA engine channels
2873         * list.
2874         */
2875        chan->common.device = &xdev->common;
2876
2877        list_add_tail(&chan->common.device_node, &xdev->common.channels);
2878        xdev->chan[chan->id] = chan;
2879
2880        /* Reset the channel */
2881        err = xilinx_dma_chan_reset(chan);
2882        if (err < 0) {
2883                dev_err(xdev->dev, "Reset channel failed\n");
2884                return err;
2885        }
2886
2887        return 0;
2888}
2889
2890/**
2891 * xilinx_dma_child_probe - Per child node probe
2892 * It gets the number of dma-channels per child node from the
2893 * device-tree and initializes all the channels.
2894 *
2895 * @xdev: Driver specific device structure
2896 * @node: Device node
2897 *
2898 * Return: 0 always.
2899 */
2900static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2901                                    struct device_node *node)
2902{
2903        int ret, i;
2904        u32 nr_channels = 1;
2905
2906        ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2907        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
2908                dev_warn(xdev->dev, "missing dma-channels property\n");
2909
2910        for (i = 0; i < nr_channels; i++)
2911                xilinx_dma_chan_probe(xdev, node);
2912
2913        return 0;
2914}
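
/*
 * Illustrative sketch (not part of the driver): the "dma-channels" value read
 * above lets one child node describe several hardware channels, which matters
 * for AXI MCDMA; the loop then calls xilinx_dma_chan_probe() once per channel.
 * The node name and property values below are hypothetical.
 *
 *	dma-channel@0 {
 *		compatible = "xlnx,axi-dma-mm2s-channel";
 *		interrupts = <0 29 4>, <0 30 4>;
 *		dma-channels = <2>;
 *	};
 */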
2915
2916/**
2917 * of_dma_xilinx_xlate - Translation function
2918 * @dma_spec: Pointer to DMA specifier as found in the device tree
2919 * @ofdma: Pointer to DMA controller data
2920 *
2921 * Return: DMA channel pointer on success and NULL on error
2922 */
2923static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2924                                                struct of_dma *ofdma)
2925{
2926        struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2927        int chan_id = dma_spec->args[0];
2928
2929        if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
2930                return NULL;
2931
2932        return dma_get_slave_channel(&xdev->chan[chan_id]->common);
2933}
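
/*
 * Illustrative note (not part of the driver): the single cell a consumer
 * passes in its "dmas" property is used directly as an index into
 * xdev->chan[], so MM2S channels occupy the low IDs while S2MM IDs start at
 * max_channels / 2 (see xilinx_dma_chan_probe() and xilinx_dma_probe()).
 * A hedged consumer-side sketch with hypothetical labels and names:
 *
 *	client {
 *		dmas = <&axi_dma_0 0>;
 *		dma-names = "tx";
 *	};
 *
 * after which the client driver obtains the MM2S channel with
 * dma_request_chan(dev, "tx"), resolved through this translation function.
 */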
2934
2935static const struct xilinx_dma_config axidma_config = {
2936        .dmatype = XDMA_TYPE_AXIDMA,
2937        .clk_init = axidma_clk_init,
2938        .irq_handler = xilinx_dma_irq_handler,
2939        .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2940};
2941
2942static const struct xilinx_dma_config aximcdma_config = {
2943        .dmatype = XDMA_TYPE_AXIMCDMA,
2944        .clk_init = axidma_clk_init,
2945        .irq_handler = xilinx_mcdma_irq_handler,
2946        .max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
2947};

2948static const struct xilinx_dma_config axicdma_config = {
2949        .dmatype = XDMA_TYPE_CDMA,
2950        .clk_init = axicdma_clk_init,
2951        .irq_handler = xilinx_dma_irq_handler,
2952        .max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
2953};
2954
2955static const struct xilinx_dma_config axivdma_config = {
2956        .dmatype = XDMA_TYPE_VDMA,
2957        .clk_init = axivdma_clk_init,
2958        .irq_handler = xilinx_dma_irq_handler,
2959        .max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
2960};
2961
2962static const struct of_device_id xilinx_dma_of_ids[] = {
2963        { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2964        { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2965        { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2966        { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
2967        {}
2968};
2969MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
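
/*
 * Illustrative sketch (not part of the driver): a minimal controller node for
 * the first compatible above, with the per-direction channel child nodes that
 * xilinx_dma_chan_probe() matches. Addresses, interrupt specifiers and the
 * label are hypothetical; the dt-bindings remain the authoritative reference.
 *
 *	axi_dma_0: dma@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		#dma-cells = <1>;
 *		xlnx,addrwidth = <32>;
 *
 *		dma-channel@40400000 {
 *			compatible = "xlnx,axi-dma-mm2s-channel";
 *			interrupts = <0 29 4>;
 *		};
 *		dma-channel@40400030 {
 *			compatible = "xlnx,axi-dma-s2mm-channel";
 *			interrupts = <0 30 4>;
 *		};
 *	};
 */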
2970
2971/**
2972 * xilinx_dma_probe - Driver probe function
2973 * @pdev: Pointer to the platform_device structure
2974 *
2975 * Return: '0' on success and failure value on error
2976 */
2977static int xilinx_dma_probe(struct platform_device *pdev)
2978{
2979        int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2980                        struct clk **, struct clk **, struct clk **)
2981                                        = axivdma_clk_init;
2982        struct device_node *node = pdev->dev.of_node;
2983        struct xilinx_dma_device *xdev;
2984        struct device_node *child, *np = pdev->dev.of_node;
2985        u32 num_frames, addr_width = 32, len_width;
2986        int i, err;
2987
2988        /* Allocate and initialize the DMA engine structure */
2989        xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
2990        if (!xdev)
2991                return -ENOMEM;
2992
2993        xdev->dev = &pdev->dev;
2994        if (np) {
2995                const struct of_device_id *match;
2996
2997                match = of_match_node(xilinx_dma_of_ids, np);
2998                if (match && match->data) {
2999                        xdev->dma_config = match->data;
3000                        clk_init = xdev->dma_config->clk_init;
3001                }
3002        }
3003
3004        err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
3005                       &xdev->rx_clk, &xdev->rxs_clk);
3006        if (err)
3007                return err;
3008
3009        /* Request and map I/O memory */
3010        xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3011        if (IS_ERR(xdev->regs))
3012                return PTR_ERR(xdev->regs);
3013
3014        /* Retrieve the DMA engine properties from the device tree */
3015        xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3016        xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3017
3018        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3019            xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3020                if (!of_property_read_u32(node, "xlnx,sg-length-width",
3021                                          &len_width)) {
3022                        if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
3023                            len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
3024                                dev_warn(xdev->dev,
3025                                         "invalid xlnx,sg-length-width property value. Using default width\n");
3026                        } else {
3027                                if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
3028                                        dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
3029                                xdev->max_buffer_len =
3030                                        GENMASK(len_width - 1, 0);
3031                        }
3032                }
3033        }
3034
3035        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3036                err = of_property_read_u32(node, "xlnx,num-fstores",
3037                                           &num_frames);
3038                if (err < 0) {
3039                        dev_err(xdev->dev,
3040                                "missing xlnx,num-fstores property\n");
3041                        return err;
3042                }
3043
3044                err = of_property_read_u32(node, "xlnx,flush-fsync",
3045                                           &xdev->flush_on_fsync);
3046                if (err < 0)
3047                        dev_warn(xdev->dev,
3048                                 "missing xlnx,flush-fsync property\n");
3049        }
3050
3051        err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
3052        if (err < 0)
3053                dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
3054
3055        if (addr_width > 32)
3056                xdev->ext_addr = true;
3057        else
3058                xdev->ext_addr = false;
3059
3060        /* Set the dma mask bits */
3061        dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
3062
3063        /* Initialize the DMA engine */
3064        xdev->common.dev = &pdev->dev;
3065
3066        INIT_LIST_HEAD(&xdev->common.channels);
3067        if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
3068                dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3069                dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3070        }
3071
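        /* dmaengine callbacks common to every supported DMA IP variant */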
3072        xdev->common.device_alloc_chan_resources =
3073                                xilinx_dma_alloc_chan_resources;
3074        xdev->common.device_free_chan_resources =
3075                                xilinx_dma_free_chan_resources;
3076        xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3077        xdev->common.device_tx_status = xilinx_dma_tx_status;
3078        xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3079        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3080                dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3081                xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3082                xdev->common.device_prep_dma_cyclic =
3083                                          xilinx_dma_prep_dma_cyclic;
3084                /* Residue calculation is supported only by AXI DMA and CDMA */
3085                xdev->common.residue_granularity =
3086                                          DMA_RESIDUE_GRANULARITY_SEGMENT;
3087        } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3088                dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3089                xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
3090                /* Residue calculation is supported only by AXI DMA and CDMA */
3091                xdev->common.residue_granularity =
3092                                          DMA_RESIDUE_GRANULARITY_SEGMENT;
3093        } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
3094                xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
3095        } else {
3096                xdev->common.device_prep_interleaved_dma =
3097                                xilinx_vdma_dma_prep_interleaved;
3098        }
3099
3100        platform_set_drvdata(pdev, xdev);
3101
3102        /* Initialize the channels */
3103        for_each_child_of_node(node, child) {
3104                err = xilinx_dma_child_probe(xdev, child);
3105                if (err < 0)
3106                        goto disable_clks;
3107        }
3108
3109        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
3110                for (i = 0; i < xdev->dma_config->max_channels; i++)
3111                        if (xdev->chan[i])
3112                                xdev->chan[i]->num_frms = num_frames;
3113        }
3114
3115        /* Register the DMA engine with the core */
3116        err = dma_async_device_register(&xdev->common);
3117        if (err) {
3118                dev_err(xdev->dev, "failed to register the dma device\n");
3119                goto error;
3120        }
3121
3122        err = of_dma_controller_register(node, of_dma_xilinx_xlate,
3123                                         xdev);
3124        if (err < 0) {
3125                dev_err(&pdev->dev, "Unable to register DMA to DT\n");
3126                dma_async_device_unregister(&xdev->common);
3127                goto error;
3128        }
3129
3130        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3131                dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3132        else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3133                dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3134        else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3135                dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
3136        else
3137                dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
3138
3139        return 0;
3140
3141disable_clks:
3142        xdma_disable_allclks(xdev);
3143error:
3144        for (i = 0; i < xdev->dma_config->max_channels; i++)
3145                if (xdev->chan[i])
3146                        xilinx_dma_chan_remove(xdev->chan[i]);
3147
3148        return err;
3149}
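
/*
 * Illustrative consumer-side sketch (not part of this file): with the
 * capabilities registered above, a client drives transfers through the
 * generic dmaengine API. "rx", dev, dma_handle and my_done_callback are
 * hypothetical, dma_handle is assumed to be an already mapped dma_addr_t,
 * and error handling is trimmed for brevity.
 *
 *	#include <linux/dmaengine.h>
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_single(chan, dma_handle, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = my_done_callback;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * The completion callback is invoked from the tasklet initialized in
 * xilinx_dma_chan_probe(), and dma_async_issue_pending() is what ends up
 * calling chan->start_transfer(). For AXI CDMA the equivalent preparation
 * call is dmaengine_prep_dma_memcpy().
 */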
3150
3151/**
3152 * xilinx_dma_remove - Driver remove function
3153 * @pdev: Pointer to the platform_device structure
3154 *
3155 * Return: Always '0'
3156 */
3157static int xilinx_dma_remove(struct platform_device *pdev)
3158{
3159        struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
3160        int i;
3161
3162        of_dma_controller_free(pdev->dev.of_node);
3163
3164        dma_async_device_unregister(&xdev->common);
3165
3166        for (i = 0; i < xdev->dma_config->max_channels; i++)
3167                if (xdev->chan[i])
3168                        xilinx_dma_chan_remove(xdev->chan[i]);
3169
3170        xdma_disable_allclks(xdev);
3171
3172        return 0;
3173}
3174
3175static struct platform_driver xilinx_vdma_driver = {
3176        .driver = {
3177                .name = "xilinx-vdma",
3178                .of_match_table = xilinx_dma_of_ids,
3179        },
3180        .probe = xilinx_dma_probe,
3181        .remove = xilinx_dma_remove,
3182};
3183
3184module_platform_driver(xilinx_vdma_driver);
3185
3186MODULE_AUTHOR("Xilinx, Inc.");
3187MODULE_DESCRIPTION("Xilinx VDMA driver");
3188MODULE_LICENSE("GPL v2");
3189