linux/drivers/dma/xgene-dma.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Applied Micro X-Gene SoC DMA engine Driver
   4 *
   5 * Copyright (c) 2015, Applied Micro Circuits Corporation
   6 * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
   7 *          Loc Ho <lho@apm.com>
   8 *
   9 * NOTE: PM support is currently not available.
  10 */
  11
  12#include <linux/acpi.h>
  13#include <linux/clk.h>
  14#include <linux/delay.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/dmaengine.h>
  17#include <linux/dmapool.h>
  18#include <linux/interrupt.h>
  19#include <linux/io.h>
  20#include <linux/irq.h>
  21#include <linux/module.h>
  22#include <linux/of_device.h>
  23
  24#include "dmaengine.h"
  25
   26/* X-Gene DMA ring csr registers and bit definitions */
  27#define XGENE_DMA_RING_CONFIG                   0x04
  28#define XGENE_DMA_RING_ENABLE                   BIT(31)
  29#define XGENE_DMA_RING_ID                       0x08
  30#define XGENE_DMA_RING_ID_SETUP(v)              ((v) | BIT(31))
  31#define XGENE_DMA_RING_ID_BUF                   0x0C
  32#define XGENE_DMA_RING_ID_BUF_SETUP(v)          (((v) << 9) | BIT(21))
  33#define XGENE_DMA_RING_THRESLD0_SET1            0x30
   34#define XGENE_DMA_RING_THRESLD0_SET1_VAL        0x64
  35#define XGENE_DMA_RING_THRESLD1_SET1            0x34
  36#define XGENE_DMA_RING_THRESLD1_SET1_VAL        0xC8
  37#define XGENE_DMA_RING_HYSTERESIS               0x68
  38#define XGENE_DMA_RING_HYSTERESIS_VAL           0xFFFFFFFF
  39#define XGENE_DMA_RING_STATE                    0x6C
  40#define XGENE_DMA_RING_STATE_WR_BASE            0x70
  41#define XGENE_DMA_RING_NE_INT_MODE              0x017C
  42#define XGENE_DMA_RING_NE_INT_MODE_SET(m, v)    \
  43        ((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
  44#define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v)  \
  45        ((m) &= (~BIT(31 - (v))))
  46#define XGENE_DMA_RING_CLKEN                    0xC208
  47#define XGENE_DMA_RING_SRST                     0xC200
  48#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN         0xD070
  49#define XGENE_DMA_RING_BLK_MEM_RDY              0xD074
  50#define XGENE_DMA_RING_BLK_MEM_RDY_VAL          0xFFFFFFFF
  51#define XGENE_DMA_RING_ID_GET(owner, num)       (((owner) << 6) | (num))
  52#define XGENE_DMA_RING_DST_ID(v)                ((1 << 10) | (v))
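     /*
      * Example: a DMA-owned ring (owner 0x03) with buf_num 0x00 gets ring
      * id XGENE_DMA_RING_ID_GET(0x03, 0x00) = (0x03 << 6) | 0x00 = 0xC0,
      * while XGENE_DMA_RING_DST_ID() simply sets bit 10 on top of the
      * destination ring number.
      */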
  53#define XGENE_DMA_RING_CMD_OFFSET               0x2C
  54#define XGENE_DMA_RING_CMD_BASE_OFFSET(v)       ((v) << 6)
  55#define XGENE_DMA_RING_COHERENT_SET(m)          \
  56        (((u32 *)(m))[2] |= BIT(4))
  57#define XGENE_DMA_RING_ADDRL_SET(m, v)          \
  58        (((u32 *)(m))[2] |= (((v) >> 8) << 5))
  59#define XGENE_DMA_RING_ADDRH_SET(m, v)          \
  60        (((u32 *)(m))[3] |= ((v) >> 35))
  61#define XGENE_DMA_RING_ACCEPTLERR_SET(m)        \
  62        (((u32 *)(m))[3] |= BIT(19))
  63#define XGENE_DMA_RING_SIZE_SET(m, v)           \
  64        (((u32 *)(m))[3] |= ((v) << 23))
  65#define XGENE_DMA_RING_RECOMBBUF_SET(m)         \
  66        (((u32 *)(m))[3] |= BIT(27))
  67#define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m)     \
  68        (((u32 *)(m))[3] |= (0x7 << 28))
  69#define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m)     \
  70        (((u32 *)(m))[4] |= 0x3)
  71#define XGENE_DMA_RING_SELTHRSH_SET(m)          \
  72        (((u32 *)(m))[4] |= BIT(3))
  73#define XGENE_DMA_RING_TYPE_SET(m, v)           \
  74        (((u32 *)(m))[4] |= ((v) << 19))
  75
  76/* X-Gene DMA device csr registers and bit definitions */
  77#define XGENE_DMA_IPBRR                         0x0
  78#define XGENE_DMA_DEV_ID_RD(v)                  ((v) & 0x00000FFF)
  79#define XGENE_DMA_BUS_ID_RD(v)                  (((v) >> 12) & 3)
  80#define XGENE_DMA_REV_NO_RD(v)                  (((v) >> 14) & 3)
  81#define XGENE_DMA_GCR                           0x10
  82#define XGENE_DMA_CH_SETUP(v)                   \
  83        ((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
  84#define XGENE_DMA_ENABLE(v)                     ((v) |= BIT(31))
  85#define XGENE_DMA_DISABLE(v)                    ((v) &= ~BIT(31))
  86#define XGENE_DMA_RAID6_CONT                    0x14
  87#define XGENE_DMA_RAID6_MULTI_CTRL(v)           ((v) << 24)
  88#define XGENE_DMA_INT                           0x70
  89#define XGENE_DMA_INT_MASK                      0x74
  90#define XGENE_DMA_INT_ALL_MASK                  0xFFFFFFFF
  91#define XGENE_DMA_INT_ALL_UNMASK                0x0
  92#define XGENE_DMA_INT_MASK_SHIFT                0x14
  93#define XGENE_DMA_RING_INT0_MASK                0x90A0
  94#define XGENE_DMA_RING_INT1_MASK                0x90A8
  95#define XGENE_DMA_RING_INT2_MASK                0x90B0
  96#define XGENE_DMA_RING_INT3_MASK                0x90B8
  97#define XGENE_DMA_RING_INT4_MASK                0x90C0
  98#define XGENE_DMA_CFG_RING_WQ_ASSOC             0x90E0
  99#define XGENE_DMA_ASSOC_RING_MNGR1              0xFFFFFFFF
 100#define XGENE_DMA_MEM_RAM_SHUTDOWN              0xD070
 101#define XGENE_DMA_BLK_MEM_RDY                   0xD074
 102#define XGENE_DMA_BLK_MEM_RDY_VAL               0xFFFFFFFF
 103#define XGENE_DMA_RING_CMD_SM_OFFSET            0x8000
 104
  105/* X-Gene SoC EFUSE csr register and bit definition */
 106#define XGENE_SOC_JTAG1_SHADOW                  0x18
 107#define XGENE_DMA_PQ_DISABLE_MASK               BIT(13)
 108
 109/* X-Gene DMA Descriptor format */
 110#define XGENE_DMA_DESC_NV_BIT                   BIT_ULL(50)
 111#define XGENE_DMA_DESC_IN_BIT                   BIT_ULL(55)
 112#define XGENE_DMA_DESC_C_BIT                    BIT_ULL(63)
 113#define XGENE_DMA_DESC_DR_BIT                   BIT_ULL(61)
 114#define XGENE_DMA_DESC_ELERR_POS                46
 115#define XGENE_DMA_DESC_RTYPE_POS                56
 116#define XGENE_DMA_DESC_LERR_POS                 60
 117#define XGENE_DMA_DESC_BUFLEN_POS               48
 118#define XGENE_DMA_DESC_HOENQ_NUM_POS            48
 119#define XGENE_DMA_DESC_ELERR_RD(m)              \
 120        (((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
 121#define XGENE_DMA_DESC_LERR_RD(m)               \
 122        (((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
 123#define XGENE_DMA_DESC_STATUS(elerr, lerr)      \
 124        (((elerr) << 4) | (lerr))
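     /*
      * The combined status places the 2-bit extended link error code in
      * bits [5:4] and the 3-bit link error code in bits [2:0]; e.g.
      * ELERR = 0x0 and LERR = 0x6 yield status 0x06 ("ECC double bit error").
      */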
 125
 126/* X-Gene DMA descriptor empty s/w signature */
 127#define XGENE_DMA_DESC_EMPTY_SIGNATURE          ~0ULL
 128
 129/* X-Gene DMA configurable parameters defines */
 130#define XGENE_DMA_RING_NUM              512
 131#define XGENE_DMA_BUFNUM                0x0
 132#define XGENE_DMA_CPU_BUFNUM            0x18
 133#define XGENE_DMA_RING_OWNER_DMA        0x03
 134#define XGENE_DMA_RING_OWNER_CPU        0x0F
 135#define XGENE_DMA_RING_TYPE_REGULAR     0x01
 136#define XGENE_DMA_RING_WQ_DESC_SIZE     32      /* 32 Bytes */
 137#define XGENE_DMA_RING_NUM_CONFIG       5
 138#define XGENE_DMA_MAX_CHANNEL           4
 139#define XGENE_DMA_XOR_CHANNEL           0
 140#define XGENE_DMA_PQ_CHANNEL            1
 141#define XGENE_DMA_MAX_BYTE_CNT          0x4000  /* 16 KB */
 142#define XGENE_DMA_MAX_64B_DESC_BYTE_CNT 0x14000 /* 80 KB */
 143#define XGENE_DMA_MAX_XOR_SRC           5
 144#define XGENE_DMA_16K_BUFFER_LEN_CODE   0x0
 145#define XGENE_DMA_INVALID_LEN_CODE      0x7800000000000000ULL
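     /*
      * A basic 32B descriptor covers at most 16 KB (XGENE_DMA_MAX_BYTE_CNT);
      * a 64B descriptor with up to five source buffers covers
      * 5 * 16 KB = 80 KB (XGENE_DMA_MAX_64B_DESC_BYTE_CNT).
      */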
 146
 147/* X-Gene DMA descriptor error codes */
 148#define ERR_DESC_AXI                    0x01
 149#define ERR_BAD_DESC                    0x02
 150#define ERR_READ_DATA_AXI               0x03
 151#define ERR_WRITE_DATA_AXI              0x04
 152#define ERR_FBP_TIMEOUT                 0x05
 153#define ERR_ECC                         0x06
 154#define ERR_DIFF_SIZE                   0x08
 155#define ERR_SCT_GAT_LEN                 0x09
 156#define ERR_CRC_ERR                     0x11
 157#define ERR_CHKSUM                      0x12
 158#define ERR_DIF                         0x13
 159
 160/* X-Gene DMA error interrupt codes */
 161#define ERR_DIF_SIZE_INT                0x0
 162#define ERR_GS_ERR_INT                  0x1
 163#define ERR_FPB_TIMEO_INT               0x2
 164#define ERR_WFIFO_OVF_INT               0x3
 165#define ERR_RFIFO_OVF_INT               0x4
 166#define ERR_WR_TIMEO_INT                0x5
 167#define ERR_RD_TIMEO_INT                0x6
 168#define ERR_WR_ERR_INT                  0x7
 169#define ERR_RD_ERR_INT                  0x8
 170#define ERR_BAD_DESC_INT                0x9
 171#define ERR_DESC_DST_INT                0xA
 172#define ERR_DESC_SRC_INT                0xB
 173
 174/* X-Gene DMA flyby operation code */
 175#define FLYBY_2SRC_XOR                  0x80
 176#define FLYBY_3SRC_XOR                  0x90
 177#define FLYBY_4SRC_XOR                  0xA0
 178#define FLYBY_5SRC_XOR                  0xB0
 179
 180/* X-Gene DMA SW descriptor flags */
 181#define XGENE_DMA_FLAG_64B_DESC         BIT(0)
 182
 183/* Define to dump X-Gene DMA descriptor */
 184#define XGENE_DMA_DESC_DUMP(desc, m)    \
 185        print_hex_dump(KERN_ERR, (m),   \
 186                        DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
 187
 188#define to_dma_desc_sw(tx)              \
 189        container_of(tx, struct xgene_dma_desc_sw, tx)
 190#define to_dma_chan(dchan)              \
 191        container_of(dchan, struct xgene_dma_chan, dma_chan)
 192
 193#define chan_dbg(chan, fmt, arg...)     \
 194        dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
 195#define chan_err(chan, fmt, arg...)     \
 196        dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
 197
 198struct xgene_dma_desc_hw {
 199        __le64 m0;
 200        __le64 m1;
 201        __le64 m2;
 202        __le64 m3;
 203};
 204
 205enum xgene_dma_ring_cfgsize {
 206        XGENE_DMA_RING_CFG_SIZE_512B,
 207        XGENE_DMA_RING_CFG_SIZE_2KB,
 208        XGENE_DMA_RING_CFG_SIZE_16KB,
 209        XGENE_DMA_RING_CFG_SIZE_64KB,
 210        XGENE_DMA_RING_CFG_SIZE_512KB,
 211        XGENE_DMA_RING_CFG_SIZE_INVALID
 212};
 213
 214struct xgene_dma_ring {
 215        struct xgene_dma *pdma;
 216        u8 buf_num;
 217        u16 id;
 218        u16 num;
 219        u16 head;
 220        u16 owner;
 221        u16 slots;
 222        u16 dst_ring_num;
 223        u32 size;
 224        void __iomem *cmd;
 225        void __iomem *cmd_base;
 226        dma_addr_t desc_paddr;
 227        u32 state[XGENE_DMA_RING_NUM_CONFIG];
 228        enum xgene_dma_ring_cfgsize cfgsize;
 229        union {
 230                void *desc_vaddr;
 231                struct xgene_dma_desc_hw *desc_hw;
 232        };
 233};
 234
 235struct xgene_dma_desc_sw {
 236        struct xgene_dma_desc_hw desc1;
 237        struct xgene_dma_desc_hw desc2;
 238        u32 flags;
 239        struct list_head node;
 240        struct list_head tx_list;
 241        struct dma_async_tx_descriptor tx;
 242};
 243
 244/**
 245 * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
 246 * @dma_chan: dmaengine channel object member
 247 * @pdma: X-Gene DMA device structure reference
 248 * @dev: struct device reference for dma mapping api
 249 * @id: raw id of this channel
 250 * @rx_irq: channel IRQ
 251 * @name: name of X-Gene DMA channel
 252 * @lock: serializes enqueue/dequeue operations to the descriptor pool
  253 * @pending: number of transaction requests pushed to the DMA controller for
  254 *      execution, but still waiting for completion
  255 * @max_outstanding: max number of outstanding requests we can push to the channel
 256 * @ld_pending: descriptors which are queued to run, but have not yet been
 257 *      submitted to the hardware for execution
  258 * @ld_running: descriptors which are currently being executed by the hardware
 259 * @ld_completed: descriptors which have finished execution by the hardware.
 260 *      These descriptors have already had their cleanup actions run. They
 261 *      are waiting for the ACK bit to be set by the async tx API.
 262 * @desc_pool: descriptor pool for DMA operations
  263 * @tasklet: bottom half where all completed descriptors are cleaned up
  264 * @tx_ring: transmit ring descriptor that we use to prepare actual
  265 *      descriptors for further execution
 266 * @rx_ring: receive ring descriptor that we use to get completed DMA
 267 *      descriptors during cleanup time
 268 */
 269struct xgene_dma_chan {
 270        struct dma_chan dma_chan;
 271        struct xgene_dma *pdma;
 272        struct device *dev;
 273        int id;
 274        int rx_irq;
 275        char name[10];
 276        spinlock_t lock;
 277        int pending;
 278        int max_outstanding;
 279        struct list_head ld_pending;
 280        struct list_head ld_running;
 281        struct list_head ld_completed;
 282        struct dma_pool *desc_pool;
 283        struct tasklet_struct tasklet;
 284        struct xgene_dma_ring tx_ring;
 285        struct xgene_dma_ring rx_ring;
 286};
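
     /*
      * Software descriptors move through the per-channel lists as
      * ld_pending -> ld_running -> ld_completed; see
      * xgene_chan_xfer_ld_pending() and xgene_dma_cleanup_descriptors().
      */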
 287
 288/**
 289 * struct xgene_dma - internal representation of an X-Gene DMA device
      * @dev: struct device reference for dma mapping api
      * @clk: DMA engine clock
  290 * @err_irq: DMA error irq number
 291 * @ring_num: start id number for DMA ring
 292 * @csr_dma: base for DMA register access
 293 * @csr_ring: base for DMA ring register access
 294 * @csr_ring_cmd: base for DMA ring command register access
 295 * @csr_efuse: base for efuse register access
 296 * @dma_dev: embedded struct dma_device
 297 * @chan: reference to X-Gene DMA channels
 298 */
 299struct xgene_dma {
 300        struct device *dev;
 301        struct clk *clk;
 302        int err_irq;
 303        int ring_num;
 304        void __iomem *csr_dma;
 305        void __iomem *csr_ring;
 306        void __iomem *csr_ring_cmd;
 307        void __iomem *csr_efuse;
 308        struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
 309        struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
 310};
 311
 312static const char * const xgene_dma_desc_err[] = {
 313        [ERR_DESC_AXI] = "AXI error when reading src/dst link list",
 314        [ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
 315        [ERR_READ_DATA_AXI] = "AXI error when reading data",
 316        [ERR_WRITE_DATA_AXI] = "AXI error when writing data",
 317        [ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
 318        [ERR_ECC] = "ECC double bit error",
 319        [ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
 320        [ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
 321        [ERR_CRC_ERR] = "CRC error",
 322        [ERR_CHKSUM] = "Checksum error",
 323        [ERR_DIF] = "DIF error",
 324};
 325
 326static const char * const xgene_dma_err[] = {
 327        [ERR_DIF_SIZE_INT] = "DIF size error",
 328        [ERR_GS_ERR_INT] = "Gather scatter not same size error",
  329        [ERR_FPB_TIMEO_INT] = "Free pool timeout error",
  330        [ERR_WFIFO_OVF_INT] = "Write FIFO overflow error",
  331        [ERR_RFIFO_OVF_INT] = "Read FIFO overflow error",
  332        [ERR_WR_TIMEO_INT] = "Write timeout error",
  333        [ERR_RD_TIMEO_INT] = "Read timeout error",
 334        [ERR_WR_ERR_INT] = "HBF bus write error",
 335        [ERR_RD_ERR_INT] = "HBF bus read error",
 336        [ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
 337        [ERR_DESC_DST_INT] = "HFB reading dst link address error",
 338        [ERR_DESC_SRC_INT] = "HFB reading src link address error",
 339};
 340
 341static bool is_pq_enabled(struct xgene_dma *pdma)
 342{
 343        u32 val;
 344
 345        val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
 346        return !(val & XGENE_DMA_PQ_DISABLE_MASK);
 347}
 348
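     /*
      * Encode the buffer length for a descriptor: lengths below the 16 KB
      * per-buffer limit go into the BUFLEN field, while a full 16 KB buffer
      * is indicated by the special code XGENE_DMA_16K_BUFFER_LEN_CODE (0x0).
      */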
 349static u64 xgene_dma_encode_len(size_t len)
 350{
 351        return (len < XGENE_DMA_MAX_BYTE_CNT) ?
 352                ((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
 353                XGENE_DMA_16K_BUFFER_LEN_CODE;
 354}
 355
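     /*
      * Map the number of XOR sources (2..5) to the hw flyby opcode;
      * entries 0 and 1 of the table are placeholders.
      */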
 356static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
 357{
 358        static u8 flyby_type[] = {
 359                FLYBY_2SRC_XOR, /* Dummy */
 360                FLYBY_2SRC_XOR, /* Dummy */
 361                FLYBY_2SRC_XOR,
 362                FLYBY_3SRC_XOR,
 363                FLYBY_4SRC_XOR,
 364                FLYBY_5SRC_XOR
 365        };
 366
 367        return flyby_type[src_cnt];
 368}
 369
 370static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
 371                                     dma_addr_t *paddr)
 372{
 373        size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
 374                        *len : XGENE_DMA_MAX_BYTE_CNT;
 375
 376        *ext8 |= cpu_to_le64(*paddr);
 377        *ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
 378        *len -= nbytes;
 379        *paddr += nbytes;
 380}
 381
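     /*
      * Return the 64-bit word of the second hw descriptor used for the
      * idx-th extended source buffer; note the words are selected in the
      * order m1, m0, m3, m2.
      */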
 382static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
 383{
 384        switch (idx) {
 385        case 0:
 386                return &desc->m1;
 387        case 1:
 388                return &desc->m0;
 389        case 2:
 390                return &desc->m3;
 391        case 3:
 392                return &desc->m2;
 393        default:
 394                pr_err("Invalid dma descriptor index\n");
 395        }
 396
 397        return NULL;
 398}
 399
 400static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
 401                                u16 dst_ring_num)
 402{
 403        desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
 404        desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
 405                                XGENE_DMA_DESC_RTYPE_POS);
 406        desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
 407        desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
 408                                XGENE_DMA_DESC_HOENQ_NUM_POS);
 409}
 410
 411static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
 412                                    struct xgene_dma_desc_sw *desc_sw,
 413                                    dma_addr_t *dst, dma_addr_t *src,
 414                                    u32 src_cnt, size_t *nbytes,
 415                                    const u8 *scf)
 416{
 417        struct xgene_dma_desc_hw *desc1, *desc2;
 418        size_t len = *nbytes;
 419        int i;
 420
 421        desc1 = &desc_sw->desc1;
 422        desc2 = &desc_sw->desc2;
 423
 424        /* Initialize DMA descriptor */
 425        xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
 426
 427        /* Set destination address */
 428        desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
 429        desc1->m3 |= cpu_to_le64(*dst);
 430
  431        /* We have multiple source addresses, so we need to set the NV bit */
 432        desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
 433
 434        /* Set flyby opcode */
 435        desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));
 436
 437        /* Set 1st to 5th source addresses */
 438        for (i = 0; i < src_cnt; i++) {
 439                len = *nbytes;
 440                xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
 441                                         xgene_dma_lookup_ext8(desc2, i - 1),
 442                                         &len, &src[i]);
 443                desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
 444        }
 445
 446        /* Update meta data */
 447        *nbytes = len;
 448        *dst += XGENE_DMA_MAX_BYTE_CNT;
 449
  450        /* We always need a 64B descriptor to perform xor or pq operations */
 451        desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
 452}
 453
 454static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 455{
 456        struct xgene_dma_desc_sw *desc;
 457        struct xgene_dma_chan *chan;
 458        dma_cookie_t cookie;
 459
 460        if (unlikely(!tx))
 461                return -EINVAL;
 462
 463        chan = to_dma_chan(tx->chan);
 464        desc = to_dma_desc_sw(tx);
 465
 466        spin_lock_bh(&chan->lock);
 467
 468        cookie = dma_cookie_assign(tx);
 469
 470        /* Add this transaction list onto the tail of the pending queue */
 471        list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
 472
 473        spin_unlock_bh(&chan->lock);
 474
 475        return cookie;
 476}
 477
 478static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
 479                                       struct xgene_dma_desc_sw *desc)
 480{
 481        list_del(&desc->node);
 482        chan_dbg(chan, "LD %p free\n", desc);
 483        dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 484}
 485
 486static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
 487                                 struct xgene_dma_chan *chan)
 488{
 489        struct xgene_dma_desc_sw *desc;
 490        dma_addr_t phys;
 491
 492        desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
 493        if (!desc) {
 494                chan_err(chan, "Failed to allocate LDs\n");
 495                return NULL;
 496        }
 497
 498        INIT_LIST_HEAD(&desc->tx_list);
 499        desc->tx.phys = phys;
 500        desc->tx.tx_submit = xgene_dma_tx_submit;
 501        dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
 502
 503        chan_dbg(chan, "LD %p allocated\n", desc);
 504
 505        return desc;
 506}
 507
 508/**
 509 * xgene_dma_clean_completed_descriptor - free all descriptors which
  510 * have been completed and acked
 511 * @chan: X-Gene DMA channel
 512 *
 513 * This function is used on all completed and acked descriptors.
 514 */
 515static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
 516{
 517        struct xgene_dma_desc_sw *desc, *_desc;
 518
  519        /* Free each completed and acked descriptor, in order */
 520        list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
 521                if (async_tx_test_ack(&desc->tx))
 522                        xgene_dma_clean_descriptor(chan, desc);
 523        }
 524}
 525
 526/**
 527 * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
 528 * @chan: X-Gene DMA channel
 529 * @desc: descriptor to cleanup and free
 530 *
 531 * This function is used on a descriptor which has been executed by the DMA
 532 * controller. It will run any callbacks, submit any dependencies.
 533 */
 534static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
 535                                              struct xgene_dma_desc_sw *desc)
 536{
 537        struct dma_async_tx_descriptor *tx = &desc->tx;
 538
 539        /*
  540         * If this is not the last transaction in the group,
  541         * then there is no need to complete the cookie or run any
  542         * callback, as this is not the tx descriptor which was handed
  543         * back to the caller of this DMA request
 544         */
 545
 546        if (tx->cookie == 0)
 547                return;
 548
 549        dma_cookie_complete(tx);
 550        dma_descriptor_unmap(tx);
 551
 552        /* Run the link descriptor callback function */
 553        dmaengine_desc_get_callback_invoke(tx, NULL);
 554
 555        /* Run any dependencies */
 556        dma_run_dependencies(tx);
 557}
 558
 559/**
 560 * xgene_dma_clean_running_descriptor - move the completed descriptor from
 561 * ld_running to ld_completed
 562 * @chan: X-Gene DMA channel
 563 * @desc: the descriptor which is completed
 564 *
 565 * Free the descriptor directly if acked by async_tx api,
 566 * else move it to queue ld_completed.
 567 */
 568static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
 569                                               struct xgene_dma_desc_sw *desc)
 570{
 571        /* Remove from the list of running transactions */
 572        list_del(&desc->node);
 573
 574        /*
 575         * the client is allowed to attach dependent operations
 576         * until 'ack' is set
 577         */
 578        if (!async_tx_test_ack(&desc->tx)) {
 579                /*
 580                 * Move this descriptor to the list of descriptors which is
 581                 * completed, but still awaiting the 'ack' bit to be set.
 582                 */
 583                list_add_tail(&desc->node, &chan->ld_completed);
 584                return;
 585        }
 586
 587        chan_dbg(chan, "LD %p free\n", desc);
 588        dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 589}
 590
 591static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
 592                                    struct xgene_dma_desc_sw *desc_sw)
 593{
 594        struct xgene_dma_ring *ring = &chan->tx_ring;
 595        struct xgene_dma_desc_hw *desc_hw;
 596
 597        /* Get hw descriptor from DMA tx ring */
 598        desc_hw = &ring->desc_hw[ring->head];
 599
 600        /*
  601         * Increment the ring head so that it points to the
  602         * next descriptor for the next submission
 603         */
 604        if (++ring->head == ring->slots)
 605                ring->head = 0;
 606
 607        /* Copy prepared sw descriptor data to hw descriptor */
 608        memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
 609
 610        /*
  611         * Check if we have prepared a 64B descriptor;
  612         * in that case we need one more hw descriptor
 613         */
 614        if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
 615                desc_hw = &ring->desc_hw[ring->head];
 616
 617                if (++ring->head == ring->slots)
 618                        ring->head = 0;
 619
 620                memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
 621        }
 622
 623        /* Increment the pending transaction count */
 624        chan->pending += ((desc_sw->flags &
 625                          XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 626
 627        /* Notify the hw that we have descriptor ready for execution */
 628        iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
 629                  2 : 1, ring->cmd);
 630}
 631
 632/**
 633 * xgene_chan_xfer_ld_pending - push any pending transactions to hw
  634 * @chan: X-Gene DMA channel
 635 *
 636 * LOCKING: must hold chan->lock
 637 */
 638static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
 639{
 640        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
 641
 642        /*
 643         * If the list of pending descriptors is empty, then we
 644         * don't need to do any work at all
 645         */
 646        if (list_empty(&chan->ld_pending)) {
 647                chan_dbg(chan, "No pending LDs\n");
 648                return;
 649        }
 650
 651        /*
 652         * Move elements from the queue of pending transactions onto the list
  653         * of running transactions and push them to hw for execution
 654         */
 655        list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
 656                /*
  657                 * If we have already pushed as many transactions to the
  658                 * hw as it can handle, stop here; the remaining elements
  659                 * of the pending ld queue will be pushed once some of the
  660                 * descriptors we have already submitted complete
 661                 */
 662                if (chan->pending >= chan->max_outstanding)
 663                        return;
 664
 665                xgene_chan_xfer_request(chan, desc_sw);
 666
 667                /*
 668                 * Delete this element from ld pending queue and append it to
 669                 * ld running queue
 670                 */
 671                list_move_tail(&desc_sw->node, &chan->ld_running);
 672        }
 673}
 674
 675/**
  676 * xgene_dma_cleanup_descriptors - cleanup completed link descriptors and move
  677 * them to ld_completed, where they are freed once the 'ack' flag is set
 678 * @chan: X-Gene DMA channel
 679 *
 680 * This function is used on descriptors which have been executed by the DMA
 681 * controller. It will run any callbacks, submit any dependencies, then
 682 * free these descriptors if flag 'ack' is set.
 683 */
 684static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 685{
 686        struct xgene_dma_ring *ring = &chan->rx_ring;
 687        struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
 688        struct xgene_dma_desc_hw *desc_hw;
 689        struct list_head ld_completed;
 690        u8 status;
 691
 692        INIT_LIST_HEAD(&ld_completed);
 693
 694        spin_lock(&chan->lock);
 695
 696        /* Clean already completed and acked descriptors */
 697        xgene_dma_clean_completed_descriptor(chan);
 698
 699        /* Move all completed descriptors to ld completed queue, in order */
 700        list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
 701                /* Get subsequent hw descriptor from DMA rx ring */
 702                desc_hw = &ring->desc_hw[ring->head];
 703
 704                /* Check if this descriptor has been completed */
 705                if (unlikely(le64_to_cpu(desc_hw->m0) ==
 706                             XGENE_DMA_DESC_EMPTY_SIGNATURE))
 707                        break;
 708
 709                if (++ring->head == ring->slots)
 710                        ring->head = 0;
 711
 712                /* Check if we have any error with DMA transactions */
 713                status = XGENE_DMA_DESC_STATUS(
 714                                XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
 715                                                        desc_hw->m0)),
 716                                XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
 717                                                       desc_hw->m0)));
 718                if (status) {
 719                        /* Print the DMA error type */
 720                        chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
 721
 722                        /*
  723                         * We have a DMA transaction error here. Dump the
  724                         * DMA Tx and Rx descriptors for this request.
                              */
 725                        XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
 726                                            "X-Gene DMA TX DESC1: ");
 727
 728                        if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
 729                                XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
 730                                                    "X-Gene DMA TX DESC2: ");
 731
 732                        XGENE_DMA_DESC_DUMP(desc_hw,
 733                                            "X-Gene DMA RX ERR DESC: ");
 734                }
 735
 736                /* Notify the hw about this completed descriptor */
 737                iowrite32(-1, ring->cmd);
 738
 739                /* Mark this hw descriptor as processed */
 740                desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
 741
 742                /*
 743                 * Decrement the pending transaction count
 744                 * as we have processed one
 745                 */
 746                chan->pending -= ((desc_sw->flags &
 747                                  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
 748
 749                /*
 750                 * Delete this node from ld running queue and append it to
 751                 * ld completed queue for further processing
 752                 */
 753                list_move_tail(&desc_sw->node, &ld_completed);
 754        }
 755
 756        /*
  757         * Start any pending transactions automatically.
 758         * In the ideal case, we keep the DMA controller busy while we go
 759         * ahead and free the descriptors below.
 760         */
 761        xgene_chan_xfer_ld_pending(chan);
 762
 763        spin_unlock(&chan->lock);
 764
 765        /* Run the callback for each descriptor, in order */
 766        list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
 767                xgene_dma_run_tx_complete_actions(chan, desc_sw);
 768                xgene_dma_clean_running_descriptor(chan, desc_sw);
 769        }
 770}
 771
 772static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
 773{
 774        struct xgene_dma_chan *chan = to_dma_chan(dchan);
 775
 776        /* Has this channel already been allocated? */
 777        if (chan->desc_pool)
 778                return 1;
 779
 780        chan->desc_pool = dma_pool_create(chan->name, chan->dev,
 781                                          sizeof(struct xgene_dma_desc_sw),
 782                                          0, 0);
 783        if (!chan->desc_pool) {
 784                chan_err(chan, "Failed to allocate descriptor pool\n");
 785                return -ENOMEM;
 786        }
 787
 788        chan_dbg(chan, "Allocate descriptor pool\n");
 789
 790        return 1;
 791}
 792
 793/**
 794 * xgene_dma_free_desc_list - Free all descriptors in a queue
 795 * @chan: X-Gene DMA channel
 796 * @list: the list to free
 797 *
 798 * LOCKING: must hold chan->lock
 799 */
 800static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
 801                                     struct list_head *list)
 802{
 803        struct xgene_dma_desc_sw *desc, *_desc;
 804
 805        list_for_each_entry_safe(desc, _desc, list, node)
 806                xgene_dma_clean_descriptor(chan, desc);
 807}
 808
 809static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
 810{
 811        struct xgene_dma_chan *chan = to_dma_chan(dchan);
 812
 813        chan_dbg(chan, "Free all resources\n");
 814
 815        if (!chan->desc_pool)
 816                return;
 817
  818        /* Process all the running descriptors */
 819        xgene_dma_cleanup_descriptors(chan);
 820
 821        spin_lock_bh(&chan->lock);
 822
 823        /* Clean all link descriptor queues */
 824        xgene_dma_free_desc_list(chan, &chan->ld_pending);
 825        xgene_dma_free_desc_list(chan, &chan->ld_running);
 826        xgene_dma_free_desc_list(chan, &chan->ld_completed);
 827
 828        spin_unlock_bh(&chan->lock);
 829
 830        /* Delete this channel DMA pool */
 831        dma_pool_destroy(chan->desc_pool);
 832        chan->desc_pool = NULL;
 833}
 834
 835static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
 836        struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
 837        u32 src_cnt, size_t len, unsigned long flags)
 838{
 839        struct xgene_dma_desc_sw *first = NULL, *new;
 840        struct xgene_dma_chan *chan;
 841        static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
 842                                0x01, 0x01, 0x01, 0x01, 0x01};
 843
 844        if (unlikely(!dchan || !len))
 845                return NULL;
 846
 847        chan = to_dma_chan(dchan);
 848
 849        do {
 850                /* Allocate the link descriptor from DMA pool */
 851                new = xgene_dma_alloc_descriptor(chan);
 852                if (!new)
 853                        goto fail;
 854
 855                /* Prepare xor DMA descriptor */
 856                xgene_dma_prep_xor_desc(chan, new, &dst, src,
 857                                        src_cnt, &len, multi);
 858
 859                if (!first)
 860                        first = new;
 861
 862                new->tx.cookie = 0;
 863                async_tx_ack(&new->tx);
 864
 865                /* Insert the link descriptor to the LD ring */
 866                list_add_tail(&new->node, &first->tx_list);
 867        } while (len);
 868
 869        new->tx.flags = flags; /* client is in control of this ack */
 870        new->tx.cookie = -EBUSY;
 871        list_splice(&first->tx_list, &new->tx_list);
 872
 873        return &new->tx;
 874
 875fail:
 876        if (!first)
 877                return NULL;
 878
 879        xgene_dma_free_desc_list(chan, &first->tx_list);
 880        return NULL;
 881}
 882
 883static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
 884        struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
 885        u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
 886{
 887        struct xgene_dma_desc_sw *first = NULL, *new;
 888        struct xgene_dma_chan *chan;
 889        size_t _len = len;
 890        dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
 891        static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
 892
 893        if (unlikely(!dchan || !len))
 894                return NULL;
 895
 896        chan = to_dma_chan(dchan);
 897
 898        /*
  899         * Save the source addresses in a local variable; we may have to
  900         * prepare two descriptors to generate P and Q if both are enabled
  901         * in the flags by the client
 902         */
 903        memcpy(_src, src, sizeof(*src) * src_cnt);
 904
 905        if (flags & DMA_PREP_PQ_DISABLE_P)
 906                len = 0;
 907
 908        if (flags & DMA_PREP_PQ_DISABLE_Q)
 909                _len = 0;
 910
 911        do {
 912                /* Allocate the link descriptor from DMA pool */
 913                new = xgene_dma_alloc_descriptor(chan);
 914                if (!new)
 915                        goto fail;
 916
 917                if (!first)
 918                        first = new;
 919
 920                new->tx.cookie = 0;
 921                async_tx_ack(&new->tx);
 922
 923                /* Insert the link descriptor to the LD ring */
 924                list_add_tail(&new->node, &first->tx_list);
 925
 926                /*
 927                 * Prepare DMA descriptor to generate P,
 928                 * if DMA_PREP_PQ_DISABLE_P flag is not set
 929                 */
 930                if (len) {
 931                        xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
 932                                                src_cnt, &len, multi);
 933                        continue;
 934                }
 935
 936                /*
 937                 * Prepare DMA descriptor to generate Q,
 938                 * if DMA_PREP_PQ_DISABLE_Q flag is not set
 939                 */
 940                if (_len) {
 941                        xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
 942                                                src_cnt, &_len, scf);
 943                }
 944        } while (len || _len);
 945
 946        new->tx.flags = flags; /* client is in control of this ack */
 947        new->tx.cookie = -EBUSY;
 948        list_splice(&first->tx_list, &new->tx_list);
 949
 950        return &new->tx;
 951
 952fail:
 953        if (!first)
 954                return NULL;
 955
 956        xgene_dma_free_desc_list(chan, &first->tx_list);
 957        return NULL;
 958}
 959
 960static void xgene_dma_issue_pending(struct dma_chan *dchan)
 961{
 962        struct xgene_dma_chan *chan = to_dma_chan(dchan);
 963
 964        spin_lock_bh(&chan->lock);
 965        xgene_chan_xfer_ld_pending(chan);
 966        spin_unlock_bh(&chan->lock);
 967}
 968
 969static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
 970                                           dma_cookie_t cookie,
 971                                           struct dma_tx_state *txstate)
 972{
 973        return dma_cookie_status(dchan, cookie, txstate);
 974}
 975
 976static void xgene_dma_tasklet_cb(unsigned long data)
 977{
 978        struct xgene_dma_chan *chan = (struct xgene_dma_chan *)data;
 979
 980        /* Run all cleanup for descriptors which have been completed */
 981        xgene_dma_cleanup_descriptors(chan);
 982
 983        /* Re-enable DMA channel IRQ */
 984        enable_irq(chan->rx_irq);
 985}
 986
 987static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
 988{
 989        struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
 990
 991        BUG_ON(!chan);
 992
 993        /*
 994         * Disable DMA channel IRQ until we process completed
 995         * descriptors
 996         */
 997        disable_irq_nosync(chan->rx_irq);
 998
 999        /*
1000         * Schedule the tasklet to handle all cleanup of the current
1001         * transaction. It will start a new transaction if there is
1002         * one pending.
1003         */
1004        tasklet_schedule(&chan->tasklet);
1005
1006        return IRQ_HANDLED;
1007}
1008
1009static irqreturn_t xgene_dma_err_isr(int irq, void *id)
1010{
1011        struct xgene_dma *pdma = (struct xgene_dma *)id;
1012        unsigned long int_mask;
1013        u32 val, i;
1014
1015        val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
1016
1017        /* Clear DMA interrupts */
1018        iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
1019
1020        /* Print DMA error info */
1021        int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
1022        for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
1023                dev_err(pdma->dev,
1024                        "Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
1025
1026        return IRQ_HANDLED;
1027}
1028
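     /*
      * Write the cached ring state (ring->state[]) to the ring CSRs: the
      * ring number goes to XGENE_DMA_RING_STATE, followed by the
      * XGENE_DMA_RING_NUM_CONFIG state words at XGENE_DMA_RING_STATE_WR_BASE.
      */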
1029static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
1030{
1031        int i;
1032
1033        iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
1034
1035        for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
1036                iowrite32(ring->state[i], ring->pdma->csr_ring +
1037                          XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
1038}
1039
1040static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
1041{
1042        memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
1043        xgene_dma_wr_ring_state(ring);
1044}
1045
1046static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
1047{
1048        void *ring_cfg = ring->state;
1049        u64 addr = ring->desc_paddr;
1050        u32 i, val;
1051
1052        ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
1053
1054        /* Clear DMA ring state */
1055        xgene_dma_clr_ring_state(ring);
1056
1057        /* Set DMA ring type */
1058        XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
1059
1060        if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
1061                /* Set recombination buffer and timeout */
1062                XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
1063                XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
1064                XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
1065        }
1066
1067        /* Initialize DMA ring state */
1068        XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
1069        XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
1070        XGENE_DMA_RING_COHERENT_SET(ring_cfg);
1071        XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
1072        XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
1073        XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
1074
1075        /* Write DMA ring configurations */
1076        xgene_dma_wr_ring_state(ring);
1077
1078        /* Set DMA ring id */
1079        iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
1080                  ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1081
1082        /* Set DMA ring buffer */
1083        iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
1084                  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1085
1086        if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
1087                return;
1088
1089        /* Set empty signature to DMA Rx ring descriptors */
1090        for (i = 0; i < ring->slots; i++) {
1091                struct xgene_dma_desc_hw *desc;
1092
1093                desc = &ring->desc_hw[i];
1094                desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
1095        }
1096
1097        /* Enable DMA Rx ring interrupt */
1098        val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1099        XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
1100        iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
1101}
1102
1103static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
1104{
1105        u32 ring_id, val;
1106
1107        if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
1108                /* Disable DMA Rx ring interrupt */
1109                val = ioread32(ring->pdma->csr_ring +
1110                               XGENE_DMA_RING_NE_INT_MODE);
1111                XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
1112                iowrite32(val, ring->pdma->csr_ring +
1113                          XGENE_DMA_RING_NE_INT_MODE);
1114        }
1115
1116        /* Clear DMA ring state */
1117        ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
1118        iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
1119
1120        iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
1121        xgene_dma_clr_ring_state(ring);
1122}
1123
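     /*
      * Each ring owns a 0x40-byte command region in the ring-command CSR
      * space, selected by its ring number relative to XGENE_DMA_RING_NUM;
      * ring->cmd points at XGENE_DMA_RING_CMD_OFFSET within that region.
      */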
1124static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
1125{
1126        ring->cmd_base = ring->pdma->csr_ring_cmd +
1127                                XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
1128                                                          XGENE_DMA_RING_NUM));
1129
1130        ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
1131}
1132
1133static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
1134                                   enum xgene_dma_ring_cfgsize cfgsize)
1135{
1136        int size;
1137
1138        switch (cfgsize) {
1139        case XGENE_DMA_RING_CFG_SIZE_512B:
1140                size = 0x200;
1141                break;
1142        case XGENE_DMA_RING_CFG_SIZE_2KB:
1143                size = 0x800;
1144                break;
1145        case XGENE_DMA_RING_CFG_SIZE_16KB:
1146                size = 0x4000;
1147                break;
1148        case XGENE_DMA_RING_CFG_SIZE_64KB:
1149                size = 0x10000;
1150                break;
1151        case XGENE_DMA_RING_CFG_SIZE_512KB:
1152                size = 0x80000;
1153                break;
1154        default:
1155                chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
1156                return -EINVAL;
1157        }
1158
1159        return size;
1160}
1161
1162static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
1163{
1164        /* Clear DMA ring configurations */
1165        xgene_dma_clear_ring(ring);
1166
1167        /* De-allocate DMA ring descriptor */
1168        if (ring->desc_vaddr) {
1169                dma_free_coherent(ring->pdma->dev, ring->size,
1170                                  ring->desc_vaddr, ring->desc_paddr);
1171                ring->desc_vaddr = NULL;
1172        }
1173}
1174
1175static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
1176{
1177        xgene_dma_delete_ring_one(&chan->rx_ring);
1178        xgene_dma_delete_ring_one(&chan->tx_ring);
1179}
1180
1181static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
1182                                     struct xgene_dma_ring *ring,
1183                                     enum xgene_dma_ring_cfgsize cfgsize)
1184{
1185        int ret;
1186
1187        /* Setup DMA ring descriptor variables */
1188        ring->pdma = chan->pdma;
1189        ring->cfgsize = cfgsize;
1190        ring->num = chan->pdma->ring_num++;
1191        ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
1192
1193        ret = xgene_dma_get_ring_size(chan, cfgsize);
1194        if (ret <= 0)
1195                return ret;
1196        ring->size = ret;
1197
1198        /* Allocate memory for DMA ring descriptor */
1199        ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
1200                                              &ring->desc_paddr, GFP_KERNEL);
1201        if (!ring->desc_vaddr) {
1202                chan_err(chan, "Failed to allocate ring desc\n");
1203                return -ENOMEM;
1204        }
1205
1206        /* Configure and enable DMA ring */
1207        xgene_dma_set_ring_cmd(ring);
1208        xgene_dma_setup_ring(ring);
1209
1210        return 0;
1211}
1212
1213static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
1214{
1215        struct xgene_dma_ring *rx_ring = &chan->rx_ring;
1216        struct xgene_dma_ring *tx_ring = &chan->tx_ring;
1217        int ret;
1218
1219        /* Create DMA Rx ring descriptor */
1220        rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
1221        rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
1222
1223        ret = xgene_dma_create_ring_one(chan, rx_ring,
1224                                        XGENE_DMA_RING_CFG_SIZE_64KB);
1225        if (ret)
1226                return ret;
1227
1228        chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
1229                 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
1230
1231        /* Create DMA Tx ring descriptor */
1232        tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
1233        tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
1234
1235        ret = xgene_dma_create_ring_one(chan, tx_ring,
1236                                        XGENE_DMA_RING_CFG_SIZE_64KB);
1237        if (ret) {
1238                xgene_dma_delete_ring_one(rx_ring);
1239                return ret;
1240        }
1241
1242        tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
1243
1244        chan_dbg(chan,
1245                 "Tx ring id 0x%X num %d desc 0x%p\n",
1246                 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
1247
 1248        /* Set the max number of outstanding requests possible for this channel */
1249        chan->max_outstanding = tx_ring->slots;
1250
1251        return ret;
1252}
1253
1254static int xgene_dma_init_rings(struct xgene_dma *pdma)
1255{
1256        int ret, i, j;
1257
1258        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1259                ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
1260                if (ret) {
1261                        for (j = 0; j < i; j++)
1262                                xgene_dma_delete_chan_rings(&pdma->chan[j]);
1263                        return ret;
1264                }
1265        }
1266
1267        return ret;
1268}
1269
1270static void xgene_dma_enable(struct xgene_dma *pdma)
1271{
1272        u32 val;
1273
1274        /* Configure and enable DMA engine */
1275        val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1276        XGENE_DMA_CH_SETUP(val);
1277        XGENE_DMA_ENABLE(val);
1278        iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1279}
1280
1281static void xgene_dma_disable(struct xgene_dma *pdma)
1282{
1283        u32 val;
1284
1285        val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
1286        XGENE_DMA_DISABLE(val);
1287        iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
1288}
1289
1290static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
1291{
1292        /*
1293         * Mask DMA ring overflow, underflow and
1294         * AXI write/read error interrupts
1295         */
1296        iowrite32(XGENE_DMA_INT_ALL_MASK,
1297                  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1298        iowrite32(XGENE_DMA_INT_ALL_MASK,
1299                  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1300        iowrite32(XGENE_DMA_INT_ALL_MASK,
1301                  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1302        iowrite32(XGENE_DMA_INT_ALL_MASK,
1303                  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1304        iowrite32(XGENE_DMA_INT_ALL_MASK,
1305                  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1306
1307        /* Mask DMA error interrupts */
1308        iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
1309}
1310
1311static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
1312{
1313        /*
1314         * Unmask DMA ring overflow, underflow and
1315         * AXI write/read error interrupts
1316         */
1317        iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1318                  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
1319        iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1320                  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
1321        iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1322                  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
1323        iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1324                  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
1325        iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1326                  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
1327
1328        /* Unmask DMA error interrupts */
1329        iowrite32(XGENE_DMA_INT_ALL_UNMASK,
1330                  pdma->csr_dma + XGENE_DMA_INT_MASK);
1331}
1332
1333static void xgene_dma_init_hw(struct xgene_dma *pdma)
1334{
1335        u32 val;
1336
1337        /* Associate DMA ring to corresponding ring HW */
1338        iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
1339                  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
1340
1341        /* Configure RAID6 polynomial control setting */
1342        if (is_pq_enabled(pdma))
1343                iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
1344                          pdma->csr_dma + XGENE_DMA_RAID6_CONT);
1345        else
1346                dev_info(pdma->dev, "PQ is disabled in HW\n");
1347
1348        xgene_dma_enable(pdma);
1349        xgene_dma_unmask_interrupts(pdma);
1350
1351        /* Get DMA id and version info */
1352        val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
1353
1354        /* DMA device info */
1355        dev_info(pdma->dev,
 1356                 "X-Gene DMA v%d.%02d.%02d driver registered %d channels\n",
1357                 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
1358                 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
1359}
1360
1361static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
1362{
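             /* Ring manager is already clocked and out of reset; skip re-init */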
1363        if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
1364            (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
1365                return 0;
1366
1367        iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
1368        iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
1369
1370        /* Bring up memory */
1371        iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1372
1373        /* Force a barrier */
1374        ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
1375
1376        /* reset may take up to 1ms */
1377        usleep_range(1000, 1100);
1378
1379        if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
1380                != XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
1381                dev_err(pdma->dev,
1382                        "Failed to release ring mngr memory from shutdown\n");
1383                return -ENODEV;
1384        }
1385
1386        /* program threshold set 1 and all hysteresis */
1387        iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
1388                  pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
1389        iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
1390                  pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
1391        iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
1392                  pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
1393
1394        /* Enable QPcore and assign error queue */
1395        iowrite32(XGENE_DMA_RING_ENABLE,
1396                  pdma->csr_ring + XGENE_DMA_RING_CONFIG);
1397
1398        return 0;
1399}
1400
1401static int xgene_dma_init_mem(struct xgene_dma *pdma)
1402{
1403        int ret;
1404
1405        ret = xgene_dma_init_ring_mngr(pdma);
1406        if (ret)
1407                return ret;
1408
1409        /* Bring up memory */
1410        iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1411
1412        /* Force a barrier */
1413        ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
1414
1415        /* reset may take up to 1ms */
1416        usleep_range(1000, 1100);
1417
1418        if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
1419                != XGENE_DMA_BLK_MEM_RDY_VAL) {
1420                dev_err(pdma->dev,
1421                        "Failed to release DMA memory from shutdown\n");
1422                return -ENODEV;
1423        }
1424
1425        return 0;
1426}
1427
1428static int xgene_dma_request_irqs(struct xgene_dma *pdma)
1429{
1430        struct xgene_dma_chan *chan;
1431        int ret, i, j;
1432
1433        /* Register DMA error irq */
1434        ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
1435                               0, "dma_error", pdma);
1436        if (ret) {
1437                dev_err(pdma->dev,
1438                        "Failed to register error IRQ %d\n", pdma->err_irq);
1439                return ret;
1440        }
1441
1442        /* Register DMA channel rx irq */
1443        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1444                chan = &pdma->chan[i];
1445                irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
1446                ret = devm_request_irq(chan->dev, chan->rx_irq,
1447                                       xgene_dma_chan_ring_isr,
1448                                       0, chan->name, chan);
1449                if (ret) {
1450                        chan_err(chan, "Failed to register Rx IRQ %d\n",
1451                                 chan->rx_irq);
1452                        devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1453
1454                        for (j = 0; j < i; j++) {
 1455                                chan = &pdma->chan[j];
1456                                irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
1457                                devm_free_irq(chan->dev, chan->rx_irq, chan);
1458                        }
1459
1460                        return ret;
1461                }
1462        }
1463
1464        return 0;
1465}
1466
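/* Release the error IRQ and every per-channel Rx IRQ, clearing the
 * IRQ_DISABLE_UNLAZY flag that was set when the Rx IRQs were requested.
 */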
1467static void xgene_dma_free_irqs(struct xgene_dma *pdma)
1468{
1469        struct xgene_dma_chan *chan;
1470        int i;
1471
1472        /* Free DMA device error irq */
1473        devm_free_irq(pdma->dev, pdma->err_irq, pdma);
1474
1475        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1476                chan = &pdma->chan[i];
1477                irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
1478                devm_free_irq(chan->dev, chan->rx_irq, chan);
1479        }
1480}
1481
1482static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
1483                               struct dma_device *dma_dev)
1484{
1485        /* Initialize DMA device capability mask */
1486        dma_cap_zero(dma_dev->cap_mask);
1487
1488        /* Set DMA device capability */
1489
1490        /* Channel 0 of the X-Gene SoC DMA engine supports XOR only, while
1491         * channel 1 supports both XOR and PQ. XOR/PQ support on channel 1
1492         * can be disabled in hardware; the SoC efuse register (checked via
1493         * is_pq_enabled()) reports whether it is available.
1494         * A hardware erratum causes the engine to hang when channel 0 and
1495         * channel 1 run XOR and PQ requests simultaneously, so XOR is
1496         * enabled on channel 0 only if XOR/PQ support on channel 1 is
1497         * disabled.
1498         */
1499        if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
1500            is_pq_enabled(chan->pdma)) {
1501                dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1502                dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1503        } else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
1504                   !is_pq_enabled(chan->pdma)) {
1505                dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1506        }
1507
1508        /* Set base and prep routines */
1509        dma_dev->dev = chan->dev;
1510        dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
1511        dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
1512        dma_dev->device_issue_pending = xgene_dma_issue_pending;
1513        dma_dev->device_tx_status = xgene_dma_tx_status;
1514
1515        if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1516                dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
1517                dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
1518                dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
1519        }
1520
1521        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1522                dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
1523                dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
1524                dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
1525        }
1526}
1527
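/* Initialize the software state of one channel (lock, descriptor lists,
 * tasklet, cookie) and register it as a single-channel dma_device with
 * the dmaengine core.
 */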
1528static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
1529{
1530        struct xgene_dma_chan *chan = &pdma->chan[id];
1531        struct dma_device *dma_dev = &pdma->dma_dev[id];
1532        int ret;
1533
1534        chan->dma_chan.device = dma_dev;
1535
1536        spin_lock_init(&chan->lock);
1537        INIT_LIST_HEAD(&chan->ld_pending);
1538        INIT_LIST_HEAD(&chan->ld_running);
1539        INIT_LIST_HEAD(&chan->ld_completed);
1540        tasklet_init(&chan->tasklet, xgene_dma_tasklet_cb,
1541                     (unsigned long)chan);
1542
1543        chan->pending = 0;
1544        chan->desc_pool = NULL;
1545        dma_cookie_init(&chan->dma_chan);
1546
1547        /* Set up DMA device capabilities and prep routines */
1548        xgene_dma_set_caps(chan, dma_dev);
1549
1550        /* Initialize DMA device list head */
1551        INIT_LIST_HEAD(&dma_dev->channels);
1552        list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
1553
1554        /* Register with Linux async DMA framework */
1555        ret = dma_async_device_register(dma_dev);
1556        if (ret) {
1557                chan_err(chan, "Failed to register async device %d\n", ret);
1558                tasklet_kill(&chan->tasklet);
1559
1560                return ret;
1561        }
1562
1563        /* DMA capability info */
1564        dev_info(pdma->dev,
1565                 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
1566                 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
1567                 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
1568
1569        return 0;
1570}
1571
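/* Register every channel with the dmaengine core, unwinding the channels
 * that were already registered if a later registration fails.
 */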
1572static int xgene_dma_init_async(struct xgene_dma *pdma)
1573{
1574        int ret, i, j;
1575
1576        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1577                ret = xgene_dma_async_register(pdma, i);
1578                if (ret) {
1579                        for (j = 0; j < i; j++) {
1580                                dma_async_device_unregister(&pdma->dma_dev[j]);
1581                                tasklet_kill(&pdma->chan[j].tasklet);
1582                        }
1583
1584                        return ret;
1585                }
1586        }
1587
1588        return 0;
1589}
1590
1591static void xgene_dma_async_unregister(struct xgene_dma *pdma)
1592{
1593        int i;
1594
1595        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1596                dma_async_device_unregister(&pdma->dma_dev[i]);
1597}
1598
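/* Initialize pdma->ring_num and the per-channel software defaults
 * (owning device, parent pdma, channel id and name).
 */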
1599static void xgene_dma_init_channels(struct xgene_dma *pdma)
1600{
1601        struct xgene_dma_chan *chan;
1602        int i;
1603
1604        pdma->ring_num = XGENE_DMA_RING_NUM;
1605
1606        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1607                chan = &pdma->chan[i];
1608                chan->dev = pdma->dev;
1609                chan->pdma = pdma;
1610                chan->id = i;
1611                snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
1612        }
1613}
1614
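/* Map the four CSR regions (DMA, ring, ring command and efuse, passed as
 * platform MEM resources 0-3) and look up the error IRQ (platform IRQ 0)
 * plus one Rx IRQ per channel (platform IRQs 1..XGENE_DMA_MAX_CHANNEL).
 */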
1615static int xgene_dma_get_resources(struct platform_device *pdev,
1616                                   struct xgene_dma *pdma)
1617{
1618        struct resource *res;
1619        int irq, i;
1620
1621        /* Get DMA csr region */
1622        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1623        if (!res) {
1624                dev_err(&pdev->dev, "Failed to get csr region\n");
1625                return -ENXIO;
1626        }
1627
1628        pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
1629                                     resource_size(res));
1630        if (!pdma->csr_dma) {
1631                dev_err(&pdev->dev, "Failed to ioremap csr region");
1632                return -ENOMEM;
1633        }
1634
1635        /* Get DMA ring csr region */
1636        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1637        if (!res) {
1638                dev_err(&pdev->dev, "Failed to get ring csr region\n");
1639                return -ENXIO;
1640        }
1641
1642        pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
1643                                      resource_size(res));
1644        if (!pdma->csr_ring) {
1645                dev_err(&pdev->dev, "Failed to ioremap ring csr region");
1646                return -ENOMEM;
1647        }
1648
1649        /* Get DMA ring cmd csr region */
1650        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1651        if (!res) {
1652                dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
1653                return -ENXIO;
1654        }
1655
1656        pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
1657                                          resource_size(res));
1658        if (!pdma->csr_ring_cmd) {
1659                dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
1660                return -ENOMEM;
1661        }
1662
1663        pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1664
1665        /* Get efuse csr region */
1666        res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1667        if (!res) {
1668                dev_err(&pdev->dev, "Failed to get efuse csr region\n");
1669                return -ENXIO;
1670        }
1671
1672        pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
1673                                       resource_size(res));
1674        if (!pdma->csr_efuse) {
1675                dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
1676                return -ENOMEM;
1677        }
1678
1679        /* Get DMA error interrupt */
1680        irq = platform_get_irq(pdev, 0);
1681        if (irq <= 0) {
1682                dev_err(&pdev->dev, "Failed to get Error IRQ\n");
1683                return -ENXIO;
1684        }
1685
1686        pdma->err_irq = irq;
1687
1688        /* Get DMA Rx ring descriptor interrupts for all DMA channels */
1689        for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
1690                irq = platform_get_irq(pdev, i);
1691                if (irq <= 0) {
1692                        dev_err(&pdev->dev, "Failed to get Rx IRQ\n");
1693                        return -ENXIO;
1694                }
1695
1696                pdma->chan[i - 1].rx_irq = irq;
1697        }
1698
1699        return 0;
1700}
1701
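/* Probe: map CSR regions and IRQs, enable the (optional) clock, bring the
 * ring manager and DMA memories out of shutdown, set the DMA mask,
 * configure the rings, request IRQs, enable the engine and finally
 * register the channels with the dmaengine core.
 */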
1702static int xgene_dma_probe(struct platform_device *pdev)
1703{
1704        struct xgene_dma *pdma;
1705        int ret, i;
1706
1707        pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
1708        if (!pdma)
1709                return -ENOMEM;
1710
1711        pdma->dev = &pdev->dev;
1712        platform_set_drvdata(pdev, pdma);
1713
1714        ret = xgene_dma_get_resources(pdev, pdma);
1715        if (ret)
1716                return ret;
1717
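        /* The clock is mandatory only when booting from DT; on ACPI systems
         * it is expected to be managed by firmware, so a missing clock is
         * not treated as an error there.
         */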
1718        pdma->clk = devm_clk_get(&pdev->dev, NULL);
1719        if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
1720                dev_err(&pdev->dev, "Failed to get clk\n");
1721                return PTR_ERR(pdma->clk);
1722        }
1723
1724        /* Enable clk before accessing registers */
1725        if (!IS_ERR(pdma->clk)) {
1726                ret = clk_prepare_enable(pdma->clk);
1727                if (ret) {
1728                        dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
1729                        return ret;
1730                }
1731        }
1732
1733        /* Bring DMA RAM out of shutdown */
1734        ret = xgene_dma_init_mem(pdma);
1735        if (ret)
1736                goto err_clk_enable;
1737
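        /* The engine is limited to 42-bit DMA addresses */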
1738        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
1739        if (ret) {
1740                dev_err(&pdev->dev, "No usable DMA configuration\n");
1741                goto err_dma_mask;
1742        }
1743
1744        /* Initialize DMA channels' software state */
1745        xgene_dma_init_channels(pdma);
1746
1747        /* Configure DMA rings */
1748        ret = xgene_dma_init_rings(pdma);
1749        if (ret)
1750                goto err_clk_enable;
1751
1752        ret = xgene_dma_request_irqs(pdma);
1753        if (ret)
1754                goto err_request_irq;
1755
1756        /* Configure and enable DMA engine */
1757        xgene_dma_init_hw(pdma);
1758
1759        /* Register DMA device with Linux async framework */
1760        ret = xgene_dma_init_async(pdma);
1761        if (ret)
1762                goto err_async_init;
1763
1764        return 0;
1765
1766err_async_init:
1767        xgene_dma_free_irqs(pdma);
1768
1769err_request_irq:
1770        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
1771                xgene_dma_delete_chan_rings(&pdma->chan[i]);
1772
1773err_dma_mask:
1774err_clk_enable:
1775        if (!IS_ERR(pdma->clk))
1776                clk_disable_unprepare(pdma->clk);
1777
1778        return ret;
1779}
1780
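/* Remove: tear down in roughly the reverse order of probe - unregister the
 * channels, mask interrupts and disable the engine, free the IRQs, kill
 * the tasklets, delete the channel rings and disable the clock.
 */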
1781static int xgene_dma_remove(struct platform_device *pdev)
1782{
1783        struct xgene_dma *pdma = platform_get_drvdata(pdev);
1784        struct xgene_dma_chan *chan;
1785        int i;
1786
1787        xgene_dma_async_unregister(pdma);
1788
1789        /* Mask interrupts and disable DMA engine */
1790        xgene_dma_mask_interrupts(pdma);
1791        xgene_dma_disable(pdma);
1792        xgene_dma_free_irqs(pdma);
1793
1794        for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
1795                chan = &pdma->chan[i];
1796                tasklet_kill(&chan->tasklet);
1797                xgene_dma_delete_chan_rings(chan);
1798        }
1799
1800        if (!IS_ERR(pdma->clk))
1801                clk_disable_unprepare(pdma->clk);
1802
1803        return 0;
1804}
1805
1806#ifdef CONFIG_ACPI
1807static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
1808        {"APMC0D43", 0},
1809        {},
1810};
1811MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
1812#endif
1813
1814static const struct of_device_id xgene_dma_of_match_ptr[] = {
1815        {.compatible = "apm,xgene-storm-dma",},
1816        {},
1817};
1818MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
1819
1820static struct platform_driver xgene_dma_driver = {
1821        .probe = xgene_dma_probe,
1822        .remove = xgene_dma_remove,
1823        .driver = {
1824                .name = "X-Gene-DMA",
1825                .of_match_table = xgene_dma_of_match_ptr,
1826                .acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
1827        },
1828};
1829
1830module_platform_driver(xgene_dma_driver);
1831
1832MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
1833MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
1834MODULE_AUTHOR("Loc Ho <lho@apm.com>");
1835MODULE_LICENSE("GPL");
1836MODULE_VERSION("1.0");
1837