linux/drivers/dma/at_xdmac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE          0x00    /* Global Type Register */
#define         AT_XDMAC_NB_CH(i)       (((i) & 0x1F) + 1)              /* Number of Channels Minus One */
#define         AT_XDMAC_FIFO_SZ(i)     (((i) >> 5) & 0x7FF)            /* Number of Bytes */
#define         AT_XDMAC_NB_REQ(i)      ((((i) >> 16) & 0x3F) + 1)      /* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG           0x04    /* Global Configuration Register */
#define         AT_XDMAC_WRHP(i)                (((i) & 0xF) << 4)
#define         AT_XDMAC_WRMP(i)                (((i) & 0xF) << 8)
#define         AT_XDMAC_WRLP(i)                (((i) & 0xF) << 12)
#define         AT_XDMAC_RDHP(i)                (((i) & 0xF) << 16)
#define         AT_XDMAC_RDMP(i)                (((i) & 0xF) << 20)
#define         AT_XDMAC_RDLP(i)                (((i) & 0xF) << 24)
#define         AT_XDMAC_RDSG(i)                (((i) & 0xF) << 28)
#define AT_XDMAC_GCFG_M2M       (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
#define AT_XDMAC_GCFG_P2M       (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
                                AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC           0x08    /* Global Weighted Arbiter Configuration Register */
#define         AT_XDMAC_PW0(i)         (((i) & 0xF) << 0)
#define         AT_XDMAC_PW1(i)         (((i) & 0xF) << 4)
#define         AT_XDMAC_PW2(i)         (((i) & 0xF) << 8)
#define         AT_XDMAC_PW3(i)         (((i) & 0xF) << 12)
#define AT_XDMAC_GWAC_M2M       0
#define AT_XDMAC_GWAC_P2M       (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))

#define AT_XDMAC_GIE            0x0C    /* Global Interrupt Enable Register */
#define AT_XDMAC_GID            0x10    /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM            0x14    /* Global Interrupt Mask Register */
#define AT_XDMAC_GIS            0x18    /* Global Interrupt Status Register */
#define AT_XDMAC_GE             0x1C    /* Global Channel Enable Register */
#define AT_XDMAC_GD             0x20    /* Global Channel Disable Register */
#define AT_XDMAC_GS             0x24    /* Global Channel Status Register */
#define AT_XDMAC_VERSION        0xFFC   /* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE            0x00    /* Channel Interrupt Enable Register */
#define         AT_XDMAC_CIE_BIE        BIT(0)  /* End of Block Interrupt Enable Bit */
#define         AT_XDMAC_CIE_LIE        BIT(1)  /* End of Linked List Interrupt Enable Bit */
#define         AT_XDMAC_CIE_DIE        BIT(2)  /* End of Disable Interrupt Enable Bit */
#define         AT_XDMAC_CIE_FIE        BIT(3)  /* End of Flush Interrupt Enable Bit */
#define         AT_XDMAC_CIE_RBEIE      BIT(4)  /* Read Bus Error Interrupt Enable Bit */
#define         AT_XDMAC_CIE_WBEIE      BIT(5)  /* Write Bus Error Interrupt Enable Bit */
#define         AT_XDMAC_CIE_ROIE       BIT(6)  /* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID            0x04    /* Channel Interrupt Disable Register */
#define         AT_XDMAC_CID_BID        BIT(0)  /* End of Block Interrupt Disable Bit */
#define         AT_XDMAC_CID_LID        BIT(1)  /* End of Linked List Interrupt Disable Bit */
#define         AT_XDMAC_CID_DID        BIT(2)  /* End of Disable Interrupt Disable Bit */
#define         AT_XDMAC_CID_FID        BIT(3)  /* End of Flush Interrupt Disable Bit */
#define         AT_XDMAC_CID_RBEID      BIT(4)  /* Read Bus Error Interrupt Disable Bit */
#define         AT_XDMAC_CID_WBEID      BIT(5)  /* Write Bus Error Interrupt Disable Bit */
#define         AT_XDMAC_CID_ROID       BIT(6)  /* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM            0x08    /* Channel Interrupt Mask Register */
#define         AT_XDMAC_CIM_BIM        BIT(0)  /* End of Block Interrupt Mask Bit */
#define         AT_XDMAC_CIM_LIM        BIT(1)  /* End of Linked List Interrupt Mask Bit */
#define         AT_XDMAC_CIM_DIM        BIT(2)  /* End of Disable Interrupt Mask Bit */
#define         AT_XDMAC_CIM_FIM        BIT(3)  /* End of Flush Interrupt Mask Bit */
#define         AT_XDMAC_CIM_RBEIM      BIT(4)  /* Read Bus Error Interrupt Mask Bit */
#define         AT_XDMAC_CIM_WBEIM      BIT(5)  /* Write Bus Error Interrupt Mask Bit */
#define         AT_XDMAC_CIM_ROIM       BIT(6)  /* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS            0x0C    /* Channel Interrupt Status Register */
#define         AT_XDMAC_CIS_BIS        BIT(0)  /* End of Block Interrupt Status Bit */
#define         AT_XDMAC_CIS_LIS        BIT(1)  /* End of Linked List Interrupt Status Bit */
#define         AT_XDMAC_CIS_DIS        BIT(2)  /* End of Disable Interrupt Status Bit */
#define         AT_XDMAC_CIS_FIS        BIT(3)  /* End of Flush Interrupt Status Bit */
#define         AT_XDMAC_CIS_RBEIS      BIT(4)  /* Read Bus Error Interrupt Status Bit */
#define         AT_XDMAC_CIS_WBEIS      BIT(5)  /* Write Bus Error Interrupt Status Bit */
#define         AT_XDMAC_CIS_ROIS       BIT(6)  /* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA            0x10    /* Channel Source Address Register */
#define AT_XDMAC_CDA            0x14    /* Channel Destination Address Register */
#define AT_XDMAC_CNDA           0x18    /* Channel Next Descriptor Address Register */
#define         AT_XDMAC_CNDA_NDAIF(i)  ((i) & 0x1)                     /* Channel x Next Descriptor Interface */
#define         AT_XDMAC_CNDA_NDA(i)    ((i) & 0xfffffffc)              /* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC           0x1C    /* Channel Next Descriptor Control Register */
#define         AT_XDMAC_CNDC_NDE               (0x1 << 0)              /* Channel x Next Descriptor Enable */
#define         AT_XDMAC_CNDC_NDSUP             (0x1 << 1)              /* Channel x Next Descriptor Source Update */
#define         AT_XDMAC_CNDC_NDDUP             (0x1 << 2)              /* Channel x Next Descriptor Destination Update */
#define         AT_XDMAC_CNDC_NDVIEW_NDV0       (0x0 << 3)              /* Channel x Next Descriptor View 0 */
#define         AT_XDMAC_CNDC_NDVIEW_NDV1       (0x1 << 3)              /* Channel x Next Descriptor View 1 */
#define         AT_XDMAC_CNDC_NDVIEW_NDV2       (0x2 << 3)              /* Channel x Next Descriptor View 2 */
#define         AT_XDMAC_CNDC_NDVIEW_NDV3       (0x3 << 3)              /* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC           0x20    /* Channel Microblock Control Register */
#define AT_XDMAC_CBC            0x24    /* Channel Block Control Register */
#define AT_XDMAC_CC             0x28    /* Channel Configuration Register */
#define         AT_XDMAC_CC_TYPE        (0x1 << 0)      /* Channel Transfer Type */
#define                 AT_XDMAC_CC_TYPE_MEM_TRAN       (0x0 << 0)      /* Memory to Memory Transfer */
#define                 AT_XDMAC_CC_TYPE_PER_TRAN       (0x1 << 0)      /* Peripheral to Memory or Memory to Peripheral Transfer */
#define         AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
#define                 AT_XDMAC_CC_MBSIZE_SINGLE       (0x0 << 1)
#define                 AT_XDMAC_CC_MBSIZE_FOUR         (0x1 << 1)
#define                 AT_XDMAC_CC_MBSIZE_EIGHT        (0x2 << 1)
#define                 AT_XDMAC_CC_MBSIZE_SIXTEEN      (0x3 << 1)
#define         AT_XDMAC_CC_DSYNC       (0x1 << 4)      /* Channel Synchronization */
#define                 AT_XDMAC_CC_DSYNC_PER2MEM       (0x0 << 4)
#define                 AT_XDMAC_CC_DSYNC_MEM2PER       (0x1 << 4)
#define         AT_XDMAC_CC_PROT        (0x1 << 5)      /* Channel Protection */
#define                 AT_XDMAC_CC_PROT_SEC            (0x0 << 5)
#define                 AT_XDMAC_CC_PROT_UNSEC          (0x1 << 5)
#define         AT_XDMAC_CC_SWREQ       (0x1 << 6)      /* Channel Software Request Trigger */
#define                 AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
#define                 AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
#define         AT_XDMAC_CC_MEMSET      (0x1 << 7)      /* Channel Fill Block of memory */
#define                 AT_XDMAC_CC_MEMSET_NORMAL_MODE  (0x0 << 7)
#define                 AT_XDMAC_CC_MEMSET_HW_MODE      (0x1 << 7)
#define         AT_XDMAC_CC_CSIZE(i)    ((0x7 & (i)) << 8)      /* Channel Chunk Size */
#define         AT_XDMAC_CC_DWIDTH_OFFSET       11
#define         AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define         AT_XDMAC_CC_DWIDTH(i)   ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)      /* Channel Data Width */
#define                 AT_XDMAC_CC_DWIDTH_BYTE         0x0
#define                 AT_XDMAC_CC_DWIDTH_HALFWORD     0x1
#define                 AT_XDMAC_CC_DWIDTH_WORD         0x2
#define                 AT_XDMAC_CC_DWIDTH_DWORD        0x3
#define         AT_XDMAC_CC_SIF(i)      ((0x1 & (i)) << 13)     /* Channel Source Interface Identifier */
#define         AT_XDMAC_CC_DIF(i)      ((0x1 & (i)) << 14)     /* Channel Destination Interface Identifier */
#define         AT_XDMAC_CC_SAM_MASK    (0x3 << 16)     /* Channel Source Addressing Mode */
#define                 AT_XDMAC_CC_SAM_FIXED_AM        (0x0 << 16)
#define                 AT_XDMAC_CC_SAM_INCREMENTED_AM  (0x1 << 16)
#define                 AT_XDMAC_CC_SAM_UBS_AM          (0x2 << 16)
#define                 AT_XDMAC_CC_SAM_UBS_DS_AM       (0x3 << 16)
#define         AT_XDMAC_CC_DAM_MASK    (0x3 << 18)     /* Channel Destination Addressing Mode */
#define                 AT_XDMAC_CC_DAM_FIXED_AM        (0x0 << 18)
#define                 AT_XDMAC_CC_DAM_INCREMENTED_AM  (0x1 << 18)
#define                 AT_XDMAC_CC_DAM_UBS_AM          (0x2 << 18)
#define                 AT_XDMAC_CC_DAM_UBS_DS_AM       (0x3 << 18)
#define         AT_XDMAC_CC_INITD       (0x1 << 21)     /* Channel Initialization Terminated (read only) */
#define                 AT_XDMAC_CC_INITD_TERMINATED    (0x0 << 21)
#define                 AT_XDMAC_CC_INITD_IN_PROGRESS   (0x1 << 21)
#define         AT_XDMAC_CC_RDIP        (0x1 << 22)     /* Read in Progress (read only) */
#define                 AT_XDMAC_CC_RDIP_DONE           (0x0 << 22)
#define                 AT_XDMAC_CC_RDIP_IN_PROGRESS    (0x1 << 22)
#define         AT_XDMAC_CC_WRIP        (0x1 << 23)     /* Write in Progress (read only) */
#define                 AT_XDMAC_CC_WRIP_DONE           (0x0 << 23)
#define                 AT_XDMAC_CC_WRIP_IN_PROGRESS    (0x1 << 23)
#define         AT_XDMAC_CC_PERID(i)    ((0x7f & (i)) << 24)    /* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP        0x2C    /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS           0x30    /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS           0x34    /* Channel Destination Microblock Stride */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX      0xFFFFFFUL      /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE            (0x1 << 24)     /* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN           (0x1 << 25)     /* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN           (0x1 << 26)     /* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0           (0x0 << 27)     /* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1           (0x1 << 27)     /* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2           (0x2 << 27)     /* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3           (0x3 << 27)     /* Next Descriptor View 3 */

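/*
 * Layout of the microblock control word built from the defines above:
 * bits 0-23 hold UBLEN (the microblock length in data units), bit 24
 * (NDE) enables fetching of the next descriptor, bits 25/26 request
 * source/destination parameter updates and bits 27-28 select the next
 * descriptor view. Purely as an illustration, a microblock of 1024
 * words chained with view 2 would be encoded as
 * AT_XDMAC_MBR_UBC_NDV2 | NDEN | NSEN | NDE | 1024.
 */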
#define AT_XDMAC_MAX_CHAN       0x20
#define AT_XDMAC_MAX_CSIZE      16      /* 16 data */
#define AT_XDMAC_MAX_DWIDTH     8       /* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES    5

#define AT_XDMAC_DMA_BUSWIDTHS\
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
        AT_XDMAC_CHAN_IS_CYCLIC = 0,
        AT_XDMAC_CHAN_IS_PAUSED,
};

struct at_xdmac_layout {
        /* Global Channel Read Suspend Register */
        u8                              grs;
        /* Global Write Suspend Register */
        u8                              gws;
        /* Global Channel Read Write Suspend Register */
        u8                              grws;
        /* Global Channel Read Write Resume Register */
        u8                              grwr;
        /* Global Channel Software Request Register */
        u8                              gswr;
        /* Global channel Software Request Status Register */
        u8                              gsws;
        /* Global Channel Software Flush Request Register */
        u8                              gswf;
        /* Channel reg base */
        u8                              chan_cc_reg_base;
        /* Source/Destination Interface must be specified or not */
        bool                            sdif;
        /* AXI queue priority configuration supported */
        bool                            axi_config;
};

/* ----- Channels ----- */
struct at_xdmac_chan {
        struct dma_chan                 chan;
        void __iomem                    *ch_regs;
        u32                             mask;           /* Channel Mask */
        u32                             cfg;            /* Channel Configuration Register */
        u8                              perid;          /* Peripheral ID */
        u8                              perif;          /* Peripheral Interface */
        u8                              memif;          /* Memory Interface */
        u32                             save_cc;
        u32                             save_cim;
        u32                             save_cnda;
        u32                             save_cndc;
        u32                             irq_status;
        unsigned long                   status;
        struct tasklet_struct           tasklet;
        struct dma_slave_config         sconfig;

        spinlock_t                      lock;

        struct list_head                xfers_list;
        struct list_head                free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
        struct dma_device       dma;
        void __iomem            *regs;
        int                     irq;
        struct clk              *clk;
        u32                     save_gim;
        struct dma_pool         *at_xdmac_desc_pool;
        const struct at_xdmac_layout    *layout;
        struct at_xdmac_chan    chan[];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
        dma_addr_t      mbr_nda;        /* Next Descriptor Member */
        u32             mbr_ubc;        /* Microblock Control Member */
        dma_addr_t      mbr_sa;         /* Source Address Member */
        dma_addr_t      mbr_da;         /* Destination Address Member */
        u32             mbr_cfg;        /* Configuration Register */
        u32             mbr_bc;         /* Block Control Register */
        u32             mbr_ds;         /* Data Stride Register */
        u32             mbr_sus;        /* Source Microblock Stride Register */
        u32             mbr_dus;        /* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
        struct at_xdmac_lld             lld;
        enum dma_transfer_direction     direction;
        struct dma_async_tx_descriptor  tx_dma_desc;
        struct list_head                desc_node;
        /* Following members are only used by the first descriptor */
        bool                            active_xfer;
        unsigned int                    xfer_size;
        struct list_head                descs_list;
        struct list_head                xfer_node;
} __aligned(sizeof(u64));

static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
        .grs = 0x28,
        .gws = 0x2C,
        .grws = 0x30,
        .grwr = 0x34,
        .gswr = 0x38,
        .gsws = 0x3C,
        .gswf = 0x40,
        .chan_cc_reg_base = 0x50,
        .sdif = true,
        .axi_config = false,
};

static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
        .grs = 0x30,
        .gws = 0x38,
        .grws = 0x40,
        .grwr = 0x44,
        .gswr = 0x48,
        .gsws = 0x4C,
        .gswf = 0x50,
        .chan_cc_reg_base = 0x60,
        .sdif = false,
        .axi_config = true,
};

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
        return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}

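/*
 * MMIO accessors. Relaxed variants are used throughout; an explicit
 * wmb() is issued in at_xdmac_start_xfer() where ordering against the
 * in-memory descriptors matters.
 */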
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
        writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
        return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
        return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
        return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
        return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
        return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
        return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

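/*
 * Map a power-of-two maxburst value (1, 2, 4, 8 or 16 data) to the
 * 3-bit chunk size encoding consumed by AT_XDMAC_CC_CSIZE(): ffs()
 * returns the 1-based index of the lowest set bit, so e.g.
 * maxburst = 16 yields csize = 4, and anything beyond 16 data is
 * rejected.
 */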
static inline int at_xdmac_csize(u32 maxburst)
{
        int csize;

        csize = ffs(maxburst) - 1;
        if (csize > 4)
                csize = -EINVAL;

        return csize;
}

static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
        return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
        return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0444);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
        struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);

        /* AT_XDMAC_GS is a global register; read it from the controller. */
        return at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask;
}

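/*
 * Disable all channels and wait until the hardware reports them all
 * idle before masking every global interrupt source.
 */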
static void at_xdmac_off(struct at_xdmac *atxdmac)
{
        at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

        /* Wait until all channels are disabled. */
        while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
                cpu_relax();

        at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Must be called with the lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
                                struct at_xdmac_desc *first)
{
        struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
        u32             reg;

        dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

        if (at_xdmac_chan_is_enabled(atchan))
                return;

        /* Set transfer as active to not try to start it again. */
        first->active_xfer = true;

        /* Tell xdmac where to get the first descriptor. */
        reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
        if (atxdmac->layout->sdif)
                reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);

        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

        /*
         * When doing a non-cyclic transfer, we need to use next descriptor
         * view 2, since some fields of the configuration register depend on
         * the transfer size and the src/dest addresses.
         */
        if (at_xdmac_chan_is_cyclic(atchan))
                reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
        else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
                reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
        else
                reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
        /*
         * Even if the register will be updated from the configuration in the
         * descriptor when using view 2 or higher, the PROT bit won't be set
         * properly. This bit can be modified only by using the channel
         * configuration register.
         */
        at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

        reg |= AT_XDMAC_CNDC_NDDUP
               | AT_XDMAC_CNDC_NDSUP
               | AT_XDMAC_CNDC_NDE;
        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

        dev_vdbg(chan2dev(&atchan->chan),
                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
                 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

        at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
        reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
        /*
         * The Request Overflow Error is only relevant for peripheral
         * synchronized transfers.
         */
        if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
                reg |= AT_XDMAC_CIE_ROIE;

        /*
         * There is no end of list when doing cyclic DMA, so we need to get
         * an interrupt after each period.
         */
        if (at_xdmac_chan_is_cyclic(atchan))
                at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
                                    reg | AT_XDMAC_CIE_BIE);
        else
                at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
                                    reg | AT_XDMAC_CIE_LIE);
        at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
        dev_vdbg(chan2dev(&atchan->chan),
                 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
        wmb();
        at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

        dev_vdbg(chan2dev(&atchan->chan),
                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
                 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

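/*
 * Assign a cookie and queue the descriptor. The transfer is started
 * immediately only when it is the sole element of xfers_list;
 * otherwise it is started later, once the transfers queued before it
 * have completed.
 */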
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_xdmac_desc    *desc = txd_to_at_desc(tx);
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(tx->chan);
        dma_cookie_t            cookie;
        unsigned long           irqflags;

        spin_lock_irqsave(&atchan->lock, irqflags);
        cookie = dma_cookie_assign(tx);

        dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
                 __func__, atchan, desc);
        list_add_tail(&desc->xfer_node, &atchan->xfers_list);
        if (list_is_singular(&atchan->xfers_list))
                at_xdmac_start_xfer(atchan, desc);

        spin_unlock_irqrestore(&atchan->lock, irqflags);
        return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
                                                 gfp_t gfp_flags)
{
        struct at_xdmac_desc    *desc;
        struct at_xdmac         *atxdmac = to_at_xdmac(chan->device);
        dma_addr_t              phys;

        desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
        if (desc) {
                INIT_LIST_HEAD(&desc->descs_list);
                dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
                desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
                desc->tx_dma_desc.phys = phys;
        }

        return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
        memset(&desc->lld, 0, sizeof(desc->lld));
        INIT_LIST_HEAD(&desc->descs_list);
        desc->direction = DMA_TRANS_NONE;
        desc->xfer_size = 0;
        desc->active_xfer = false;
}

/* Must be called with the lock held. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
        struct at_xdmac_desc *desc;

        if (list_empty(&atchan->free_descs_list)) {
                desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
        } else {
                desc = list_first_entry(&atchan->free_descs_list,
                                        struct at_xdmac_desc, desc_node);
                list_del(&desc->desc_node);
                at_xdmac_init_used_desc(desc);
        }

        return desc;
}

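/*
 * Chain two linked list descriptors: point the previous one at the
 * physical address of the new one and set its Next Descriptor Enable
 * bit so the controller fetches it automatically.
 */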
static void at_xdmac_queue_desc(struct dma_chan *chan,
                                struct at_xdmac_desc *prev,
                                struct at_xdmac_desc *desc)
{
        if (!prev || !desc)
                return;

        prev->lld.mbr_nda = desc->tx_dma_desc.phys;
        prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

        dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
                __func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
                                                  struct at_xdmac_desc *desc)
{
        if (!desc)
                return;

        desc->lld.mbr_bc++;

        dev_dbg(chan2dev(chan),
                "%s: incrementing the block count of the desc 0x%p\n",
                __func__, desc);
}

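/*
 * Device tree translation callback. The single phandle argument packs
 * the memory interface, peripheral interface and peripheral ID, which
 * are unpacked here with the AT91_XDMAC_DT_GET_* accessors.
 */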
static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
                                       struct of_dma *of_dma)
{
        struct at_xdmac         *atxdmac = of_dma->of_dma_data;
        struct at_xdmac_chan    *atchan;
        struct dma_chan         *chan;
        struct device           *dev = atxdmac->dma.dev;

        if (dma_spec->args_count != 1) {
                dev_err(dev, "dma phandle args: bad number of args\n");
                return NULL;
        }

        chan = dma_get_any_slave_channel(&atxdmac->dma);
        if (!chan) {
                dev_err(dev, "can't get a dma channel\n");
                return NULL;
        }

        atchan = to_at_xdmac_chan(chan);
        atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
        atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
        atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
        dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
                atchan->memif, atchan->perif, atchan->perid);

        return chan;
}

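/*
 * Build the channel configuration register value for a slave transfer.
 * Addressing modes and synchronization depend on the direction; chunk
 * size and data width are derived from the maxburst and address width
 * values provided through dmaengine_slave_config().
 */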
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
                                      enum dma_transfer_direction direction)
{
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
        int                     csize, dwidth;

        if (direction == DMA_DEV_TO_MEM) {
                atchan->cfg =
                        AT91_XDMAC_DT_PERID(atchan->perid)
                        | AT_XDMAC_CC_DAM_INCREMENTED_AM
                        | AT_XDMAC_CC_SAM_FIXED_AM
                        | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
                        | AT_XDMAC_CC_DSYNC_PER2MEM
                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                        | AT_XDMAC_CC_TYPE_PER_TRAN;
                if (atxdmac->layout->sdif)
                        atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
                                       AT_XDMAC_CC_SIF(atchan->perif);

                csize = ffs(atchan->sconfig.src_maxburst) - 1;
                if (csize < 0) {
                        dev_err(chan2dev(chan), "invalid src maxburst value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
                dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
                if (dwidth < 0) {
                        dev_err(chan2dev(chan), "invalid src addr width value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
        } else if (direction == DMA_MEM_TO_DEV) {
                atchan->cfg =
                        AT91_XDMAC_DT_PERID(atchan->perid)
                        | AT_XDMAC_CC_DAM_FIXED_AM
                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
                        | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
                        | AT_XDMAC_CC_DSYNC_MEM2PER
                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                        | AT_XDMAC_CC_TYPE_PER_TRAN;
                if (atxdmac->layout->sdif)
                        atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
                                       AT_XDMAC_CC_SIF(atchan->memif);

                csize = ffs(atchan->sconfig.dst_maxburst) - 1;
                if (csize < 0) {
                        dev_err(chan2dev(chan), "invalid dst maxburst value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
                dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
                if (dwidth < 0) {
                        dev_err(chan2dev(chan), "invalid dst addr width value\n");
                        return -EINVAL;
                }
                atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
        }

        dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

        return 0;
}

/*
 * Only check that the maxburst and addr width values are supported by
 * the controller, not that the configuration is suitable for the
 * transfer, since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
        if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
            || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
                return -EINVAL;

        if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
            || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
                return -EINVAL;

        return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
                                     struct dma_slave_config *sconfig)
{
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);

        if (at_xdmac_check_slave_config(sconfig)) {
                dev_err(chan2dev(chan), "invalid slave configuration\n");
                return -EINVAL;
        }

        memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

        return 0;
}

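/*
 * Prepare a slave scatter-gather transfer: one linked list descriptor
 * is built per scatterlist entry, using descriptor view 2 so that the
 * source/destination addresses and microblock length are reloaded from
 * each descriptor. The data width falls back to byte accesses whenever
 * an entry length is not a multiple of the configured width.
 */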
static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, enum dma_transfer_direction direction,
                       unsigned long flags, void *context)
{
        struct at_xdmac_chan            *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc            *first = NULL, *prev = NULL;
        struct scatterlist              *sg;
        int                             i;
        unsigned int                    xfer_size = 0;
        unsigned long                   irqflags;
        struct dma_async_tx_descriptor  *ret = NULL;

        if (!sgl)
                return NULL;

        if (!is_slave_direction(direction)) {
                dev_err(chan2dev(chan), "invalid DMA direction\n");
                return NULL;
        }

        dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
                __func__, sg_len,
                direction == DMA_MEM_TO_DEV ? "to device" : "from device",
                flags);

        /* Protect dma_sconfig field that can be modified by set_slave_conf. */
        spin_lock_irqsave(&atchan->lock, irqflags);

        if (at_xdmac_compute_chan_conf(chan, direction))
                goto spin_unlock;

        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
                struct at_xdmac_desc    *desc = NULL;
                u32                     len, mem, dwidth, fixed_dwidth;

                len = sg_dma_len(sg);
                mem = sg_dma_address(sg);
                if (unlikely(!len)) {
                        dev_err(chan2dev(chan), "sg data length is zero\n");
                        goto spin_unlock;
                }
                dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
                        __func__, i, len, mem);

                desc = at_xdmac_get_desc(atchan);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
                        goto spin_unlock;
                }

                /* Linked list descriptor setup. */
                if (direction == DMA_DEV_TO_MEM) {
                        desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = mem;
                } else {
                        desc->lld.mbr_sa = mem;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
                dwidth = at_xdmac_get_dwidth(atchan->cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
                               ? dwidth
                               : AT_XDMAC_CC_DWIDTH_BYTE;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2                       /* next descriptor view */
                        | AT_XDMAC_MBR_UBC_NDEN                                 /* next descriptor dst parameter update */
                        | AT_XDMAC_MBR_UBC_NSEN                                 /* next descriptor src parameter update */
                        | (len >> fixed_dwidth);                                /* microblock length */
                desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
                                    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
                dev_dbg(chan2dev(chan),
                        "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                        __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

                /* Chain lld. */
                if (prev)
                        at_xdmac_queue_desc(chan, prev, desc);

                prev = desc;
                if (!first)
                        first = desc;

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
                list_add_tail(&desc->desc_node, &first->descs_list);
                xfer_size += len;
        }

        first->tx_dma_desc.flags = flags;
        first->xfer_size = xfer_size;
        first->direction = direction;
        ret = &first->tx_dma_desc;

spin_unlock:
        spin_unlock_irqrestore(&atchan->lock, irqflags);
        return ret;
}

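/*
 * Prepare a cyclic transfer: one descriptor is built per period and
 * the last one is chained back to the first, so the transfer loops
 * until it is terminated. The client is notified through the end of
 * block interrupt after each period rather than an end of linked list
 * interrupt.
 */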
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                         size_t buf_len, size_t period_len,
                         enum dma_transfer_direction direction,
                         unsigned long flags)
{
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc    *first = NULL, *prev = NULL;
        unsigned int            periods = buf_len / period_len;
        int                     i;
        unsigned long           irqflags;

        dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
                __func__, &buf_addr, buf_len, period_len,
                direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

        if (!is_slave_direction(direction)) {
                dev_err(chan2dev(chan), "invalid DMA direction\n");
                return NULL;
        }

        if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
                dev_err(chan2dev(chan), "channel currently used\n");
                return NULL;
        }

        if (at_xdmac_compute_chan_conf(chan, direction))
                return NULL;

        for (i = 0; i < periods; i++) {
                struct at_xdmac_desc    *desc = NULL;

                spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
                        spin_unlock_irqrestore(&atchan->lock, irqflags);
                        return NULL;
                }
                spin_unlock_irqrestore(&atchan->lock, irqflags);
                dev_dbg(chan2dev(chan),
                        "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
                        __func__, desc, &desc->tx_dma_desc.phys);

                if (direction == DMA_DEV_TO_MEM) {
                        desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = buf_addr + i * period_len;
                } else {
                        desc->lld.mbr_sa = buf_addr + i * period_len;
                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
                desc->lld.mbr_cfg = atchan->cfg;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
                        | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

                dev_dbg(chan2dev(chan),
                        "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
                        __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

                /* Chain lld. */
                if (prev)
                        at_xdmac_queue_desc(chan, prev, desc);

                prev = desc;
                if (!first)
                        first = desc;

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, desc, first);
                list_add_tail(&desc->desc_node, &first->descs_list);
        }

        at_xdmac_queue_desc(chan, prev, first);
        first->tx_dma_desc.flags = flags;
        first->xfer_size = buf_len;
        first->direction = direction;

        return &first->tx_dma_desc;
}

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
        u32 width;

        /*
         * Check address alignment to select the widest data width we
         * can use.
         *
         * Some XDMAC implementations don't provide dword transfers; in
         * this case selecting dword has the same behavior as selecting
         * word transfers.
         */
        if (!(addr & 7)) {
                width = AT_XDMAC_CC_DWIDTH_DWORD;
                dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
        } else if (!(addr & 3)) {
                width = AT_XDMAC_CC_DWIDTH_WORD;
                dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
        } else if (!(addr & 1)) {
                width = AT_XDMAC_CC_DWIDTH_HALFWORD;
                dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
        } else {
                width = AT_XDMAC_CC_DWIDTH_BYTE;
                dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
        }

        return width;
}

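/*
 * Build one linked list descriptor for an interleaved transfer chunk,
 * using descriptor view 3 so that the configuration and the microblock
 * strides are reloaded from each descriptor.
 */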
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
                                struct at_xdmac_chan *atchan,
                                struct at_xdmac_desc *prev,
                                dma_addr_t src, dma_addr_t dst,
                                struct dma_interleaved_template *xt,
                                struct data_chunk *chunk)
{
        struct at_xdmac_desc    *desc;
        u32                     dwidth;
        unsigned long           flags;
        size_t                  ublen;
        /*
         * WARNING: The channel configuration is set here since there is no
         * dmaengine_slave_config call in this case. Moreover, we don't know
         * the direction, which means we can't dynamically set the source and
         * dest interfaces, so we have to use the same one. Only interface 0
         * allows EBI access. Hopefully we can access DDR through both ports
         * (at least on SAMA5D4x), so we can use the same interface for
         * source and dest, which works around the fact that we don't know
         * the direction.
         * ERRATA: Even if useless for memory transfers, the PERID must not
         * match that of another channel. If it does, it could lead to
         * spurious flag status.
         * For the SAMA7G5x case, the SIF and DIF fields are no longer used,
         * so there is no need to set the SIF/DIF interfaces here.
         * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured
         * as zero.
         */
        u32                     chan_cc = AT_XDMAC_CC_PERID(0x7f)
                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;

        dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
        if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
                dev_dbg(chan2dev(chan),
                        "%s: chunk too big (%zu, max size %lu)...\n",
                        __func__, chunk->size,
                        AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
                return NULL;
        }

        if (prev)
                dev_dbg(chan2dev(chan),
                        "Adding items at the end of desc 0x%p\n", prev);

        if (xt->src_inc) {
                if (xt->src_sgl)
                        chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
                else
                        chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
        }

        if (xt->dst_inc) {
                if (xt->dst_sgl)
                        chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
                else
                        chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
        }

        spin_lock_irqsave(&atchan->lock, flags);
        desc = at_xdmac_get_desc(atchan);
        spin_unlock_irqrestore(&atchan->lock, flags);
        if (!desc) {
                dev_err(chan2dev(chan), "can't get descriptor\n");
                return NULL;
        }

        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

        ublen = chunk->size >> dwidth;

        desc->lld.mbr_sa = src;
        desc->lld.mbr_da = dst;
        desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
        desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
                | AT_XDMAC_MBR_UBC_NDEN
                | AT_XDMAC_MBR_UBC_NSEN
                | ublen;
        desc->lld.mbr_cfg = chan_cc;

        dev_dbg(chan2dev(chan),
                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
                desc->lld.mbr_ubc, desc->lld.mbr_cfg);

        /* Chain lld. */
        if (prev)
                at_xdmac_queue_desc(chan, prev, desc);

        return desc;
}

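/*
 * Prepare an interleaved memory-to-memory transfer. Two shapes are
 * supported: a single chunk repeated numf times (one descriptor whose
 * block count is incremented) or a single frame made of several chunks
 * (one descriptor per chunk). Repeating a whole chain of descriptors
 * is not handled yet.
 */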
static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
                          struct dma_interleaved_template *xt,
                          unsigned long flags)
{
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc    *prev = NULL, *first = NULL;
        dma_addr_t              dst_addr, src_addr;
        size_t                  src_skip = 0, dst_skip = 0, len = 0;
        struct data_chunk       *chunk;
        int                     i;

        if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
                return NULL;

        /*
         * TODO: Handle the case where we have to repeat a chain of
         * descriptors...
         */
        if ((xt->numf > 1) && (xt->frame_size > 1))
                return NULL;

        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
                __func__, &xt->src_start, &xt->dst_start, xt->numf,
                xt->frame_size, flags);

        src_addr = xt->src_start;
        dst_addr = xt->dst_start;

        if (xt->numf > 1) {
                first = at_xdmac_interleaved_queue_desc(chan, atchan,
                                                        NULL,
                                                        src_addr, dst_addr,
                                                        xt, xt->sgl);

                /* Length of the block is (BLEN+1) microblocks. */
                for (i = 0; i < xt->numf - 1; i++)
                        at_xdmac_increment_block_count(chan, first);

                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                        __func__, first, first);
                list_add_tail(&first->desc_node, &first->descs_list);
        } else {
                for (i = 0; i < xt->frame_size; i++) {
                        size_t src_icg = 0, dst_icg = 0;
                        struct at_xdmac_desc *desc;

                        chunk = xt->sgl + i;

                        dst_icg = dmaengine_get_dst_icg(xt, chunk);
                        src_icg = dmaengine_get_src_icg(xt, chunk);

                        src_skip = chunk->size + src_icg;
                        dst_skip = chunk->size + dst_icg;

                        dev_dbg(chan2dev(chan),
                                "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
                                __func__, chunk->size, src_icg, dst_icg);

                        desc = at_xdmac_interleaved_queue_desc(chan, atchan,
                                                               prev,
                                                               src_addr, dst_addr,
                                                               xt, chunk);
                        if (!desc) {
                                list_splice_init(&first->descs_list,
                                                 &atchan->free_descs_list);
                                return NULL;
                        }

                        if (!first)
                                first = desc;

                        dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
                                __func__, desc, first);
                        list_add_tail(&desc->desc_node, &first->descs_list);

                        if (xt->src_sgl)
                                src_addr += src_skip;

                        if (xt->dst_sgl)
                                dst_addr += dst_skip;

                        len += chunk->size;
                        prev = desc;
                }
        }

        first->tx_dma_desc.cookie = -EBUSY;
        first->tx_dma_desc.flags = flags;
        first->xfer_size = len;

        return &first->tx_dma_desc;
}

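/*
 * Prepare a memory-to-memory copy. The length is split into
 * microblocks of at most AT_XDMAC_MBR_UBC_UBLEN_MAX data units, and
 * the data width is re-evaluated for each microblock from the
 * alignment of the current addresses and size.
 */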
1099static struct dma_async_tx_descriptor *
1100at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1101                         size_t len, unsigned long flags)
1102{
1103        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1104        struct at_xdmac_desc    *first = NULL, *prev = NULL;
1105        size_t                  remaining_size = len, xfer_size = 0, ublen;
1106        dma_addr_t              src_addr = src, dst_addr = dest;
1107        u32                     dwidth;
1108        /*
1109         * WARNING: We don't know the direction, it involves we can't
1110         * dynamically set the source and dest interface so we have to use the
1111         * same one. Only interface 0 allows EBI access. Hopefully we can
1112         * access DDR through both ports (at least on SAMA5D4x), so we can use
1113         * the same interface for source and dest, that solves the fact we
1114         * don't know the direction.
1115         * ERRATA: Even if useless for memory transfers, the PERID has to not
1116         * match the one of another channel. If not, it could lead to spurious
1117         * flag status.
1118         * For SAMA7G5x case, the SIF and DIF fields are no longer used.
1119         * Thus, no need to have the SIF/DIF interfaces here.
1120         * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1121         * zero.
1122         */
1123        u32                     chan_cc = AT_XDMAC_CC_PERID(0x7f)
1124                                        | AT_XDMAC_CC_DAM_INCREMENTED_AM
1125                                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
1126                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
1127                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
1128        unsigned long           irqflags;
1129
1130        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
1131                __func__, &src, &dest, len, flags);
1132
1133        if (unlikely(!len))
1134                return NULL;
1135
1136        dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
1137
1138        /* Prepare descriptors. */
1139        while (remaining_size) {
1140                struct at_xdmac_desc    *desc = NULL;
1141
1142                dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
1143
1144                spin_lock_irqsave(&atchan->lock, irqflags);
1145                desc = at_xdmac_get_desc(atchan);
1146                spin_unlock_irqrestore(&atchan->lock, irqflags);
1147                if (!desc) {
1148                        dev_err(chan2dev(chan), "can't get descriptor\n");
1149                        if (first)
1150                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
1151                        return NULL;
1152                }
1153
1154                /* Update src and dest addresses. */
1155                src_addr += xfer_size;
1156                dst_addr += xfer_size;
1157
1158                if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
1159                        xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
1160                else
1161                        xfer_size = remaining_size;
1162
1163                dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
1164
1165                /* Check remaining length and change data width if needed. */
1166                dwidth = at_xdmac_align_width(chan,
1167                                              src_addr | dst_addr | xfer_size);
1168                chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1169                chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1170
1171                ublen = xfer_size >> dwidth;
1172                remaining_size -= xfer_size;
1173
1174                desc->lld.mbr_sa = src_addr;
1175                desc->lld.mbr_da = dst_addr;
1176                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
1177                        | AT_XDMAC_MBR_UBC_NDEN
1178                        | AT_XDMAC_MBR_UBC_NSEN
1179                        | ublen;
1180                desc->lld.mbr_cfg = chan_cc;
1181
1182                dev_dbg(chan2dev(chan),
1183                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1184                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1185
1186                /* Chain lld. */
1187                if (prev)
1188                        at_xdmac_queue_desc(chan, prev, desc);
1189
1190                prev = desc;
1191                if (!first)
1192                        first = desc;
1193
1194                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1195                         __func__, desc, first);
1196                list_add_tail(&desc->desc_node, &first->descs_list);
1197        }
1198
1199        first->tx_dma_desc.flags = flags;
1200        first->xfer_size = len;
1201
1202        return &first->tx_dma_desc;
1203}
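
/*
 * Illustrative sketch, not part of the driver: how the memcpy prep above
 * chops a transfer into microblocks. The data width is the widest unit
 * that divides the source/dest addresses and the chunk size, and each
 * microblock carries at most UBLEN_MAX units of that width. The guard,
 * constants and helper below are hypothetical stand-ins for
 * at_xdmac_align_width() and AT_XDMAC_MBR_UBC_UBLEN_MAX; drop the #if 0
 * guard to build it as a standalone userspace program.
 */
#if 0
#include <stdio.h>

#define EX_UBLEN_MAX	0xFFFFFFull	/* hypothetical 24-bit UBLEN field */

static unsigned int ex_align_width(unsigned long long v)
{
	if (!(v & 3))
		return 2;	/* 4-byte words */
	if (!(v & 1))
		return 1;	/* 2-byte halfwords */
	return 0;		/* single bytes */
}

int main(void)
{
	unsigned long long src = 0x20000000ull, dst = 0x20100000ull;
	unsigned long long remaining = 0x4000000ull;	/* 64 MiB */

	while (remaining) {
		/* widest width both addresses allow, as the driver does */
		unsigned int dwidth = ex_align_width(src | dst);
		unsigned long long max = EX_UBLEN_MAX << dwidth;
		unsigned long long xfer = remaining < max ? remaining : max;

		/* re-check with the chunk size; it may narrow the width */
		dwidth = ex_align_width(src | dst | xfer);
		printf("microblock: %llu bytes = %llu units of %u bytes\n",
		       xfer, xfer >> dwidth, 1u << dwidth);
		src += xfer;
		dst += xfer;
		remaining -= xfer;
	}
	return 0;
}
#endif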
1204
1205static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1206                                                         struct at_xdmac_chan *atchan,
1207                                                         dma_addr_t dst_addr,
1208                                                         size_t len,
1209                                                         int value)
1210{
1211        struct at_xdmac_desc    *desc;
1212        unsigned long           flags;
1213        size_t                  ublen;
1214        u32                     dwidth;
1215        /*
1216         * WARNING: The channel configuration is set here since there is no
1217         * dmaengine_slave_config call in this case. Moreover, we don't know
1218         * the direction, which means we can't dynamically set the source and
1219         * dest interface, so we have to use the same one. Only interface 0
1220         * allows EBI access. Fortunately, DDR is reachable through both ports
1221         * (at least on SAMA5D4x), so we can use the same interface for source
1222         * and dest, which works around the fact that we don't know the
1223         * direction.
1224         * ERRATA: Even though it is unused for memory transfers, the PERID
1225         * must not match that of another channel, otherwise it can lead to
1226         * spurious flag status.
1227         * On SAMA7G5x, the SIF and DIF fields are no longer used, so there is
1228         * no need to set the SIF/DIF interfaces here. On SAMA5D4x and
1229         * SAMA5D2x, SIF and DIF are already configured as zero.
1230         */
1231        u32                     chan_cc = AT_XDMAC_CC_PERID(0x7f)
1232                                        | AT_XDMAC_CC_DAM_UBS_AM
1233                                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
1234                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
1235                                        | AT_XDMAC_CC_MEMSET_HW_MODE
1236                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
1237
1238        dwidth = at_xdmac_align_width(chan, dst_addr);
1239
1240        if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1241                dev_err(chan2dev(chan),
1242                        "%s: Transfer too large, aborting...\n",
1243                        __func__);
1244                return NULL;
1245        }
1246
1247        spin_lock_irqsave(&atchan->lock, flags);
1248        desc = at_xdmac_get_desc(atchan);
1249        spin_unlock_irqrestore(&atchan->lock, flags);
1250        if (!desc) {
1251                dev_err(chan2dev(chan), "can't get descriptor\n");
1252                return NULL;
1253        }
1254
1255        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1256
1257        ublen = len >> dwidth;
1258
1259        desc->lld.mbr_da = dst_addr;
1260        desc->lld.mbr_ds = value;
1261        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1262                | AT_XDMAC_MBR_UBC_NDEN
1263                | AT_XDMAC_MBR_UBC_NSEN
1264                | ublen;
1265        desc->lld.mbr_cfg = chan_cc;
1266
1267        dev_dbg(chan2dev(chan),
1268                "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1269                __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1270                desc->lld.mbr_cfg);
1271
1272        return desc;
1273}
1274
1275static struct dma_async_tx_descriptor *
1276at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1277                         size_t len, unsigned long flags)
1278{
1279        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1280        struct at_xdmac_desc    *desc;
1281
1282        dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
1283                __func__, &dest, len, value, flags);
1284
1285        if (unlikely(!len))
1286                return NULL;
1287
1288        desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
            if (!desc)
                    return NULL;
1289        list_add_tail(&desc->desc_node, &desc->descs_list);
1290
1291        desc->tx_dma_desc.cookie = -EBUSY;
1292        desc->tx_dma_desc.flags = flags;
1293        desc->xfer_size = len;
1294
1295        return &desc->tx_dma_desc;
1296}
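
/*
 * Consumer-side sketch, an assumption rather than part of this driver:
 * how a client reaches the memset capability registered below through
 * the generic dmaengine API. The channel name "memset" and the helper
 * names are hypothetical; the dmaengine calls themselves are real.
 */
#if 0
static void ex_memset_done(void *arg)
{
	complete(arg);	/* arg is the struct completion below */
}

static int ex_dma_memset(struct device *dev, dma_addr_t dst, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct completion done;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "memset");	/* hypothetical name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* zero len bytes at dst; ends up in at_xdmac_prep_dma_memset() */
	txd = dmaengine_prep_dma_memset(chan, dst, 0, len, DMA_PREP_INTERRUPT);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	init_completion(&done);
	txd->callback = ex_memset_done;
	txd->callback_param = &done;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	wait_for_completion(&done);
	dma_release_channel(chan);
	return 0;
}
#endif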
1297
1298static struct dma_async_tx_descriptor *
1299at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1300                            unsigned int sg_len, int value,
1301                            unsigned long flags)
1302{
1303        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1304        struct at_xdmac_desc    *desc, *pdesc = NULL,
1305                                *ppdesc = NULL, *first = NULL;
1306        struct scatterlist      *sg, *psg = NULL, *ppsg = NULL;
1307        size_t                  stride = 0, pstride = 0, len = 0;
1308        int                     i;
1309
1310        if (!sgl)
1311                return NULL;
1312
1313        dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1314                __func__, sg_len, value, flags);
1315
1316        /* Prepare descriptors. */
1317        for_each_sg(sgl, sg, sg_len, i) {
1318                dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1319                        __func__, &sg_dma_address(sg), sg_dma_len(sg),
1320                        value, flags);
1321                desc = at_xdmac_memset_create_desc(chan, atchan,
1322                                                   sg_dma_address(sg),
1323                                                   sg_dma_len(sg),
1324                                                   value);
1325                if (!desc) {
1326                        if (first)
1327                                list_splice_init(&first->descs_list,
                                                         &atchan->free_descs_list);
                            return NULL;
                    }
1328
1329                if (!first)
1330                        first = desc;
1331
1332                /* Update our strides */
1333                pstride = stride;
1334                if (psg)
1335                        stride = sg_dma_address(sg) -
1336                                (sg_dma_address(psg) + sg_dma_len(psg));
1337
1338                /*
1339                 * The scatterlist API gives us only the address and
1340                 * length of each element.
1341                 *
1342                 * Unfortunately, we don't have the stride, which we
1343                 * will need to compute.
1344                 *
1345                 * That leaves us in a situation like this one:
1346                 *    len    stride    len    stride    len
1347                 * +-------+        +-------+        +-------+
1348                 * |  N-2  |        |  N-1  |        |   N   |
1349                 * +-------+        +-------+        +-------+
1350                 *
1351                 * We need all three of these elements (N-2, N-1 and N)
1352                 * to actually decide whether we need to queue N-1 or
1353                 * reuse N-2.
1354                 *
1355                 * We will only consider N if it is the last element.
1356                 */
1357                if (ppdesc && pdesc) {
1358                        if ((stride == pstride) &&
1359                            (sg_dma_len(ppsg) == sg_dma_len(psg))) {
1360                                dev_dbg(chan2dev(chan),
1361                                        "%s: desc 0x%p can be merged with desc 0x%p\n",
1362                                        __func__, pdesc, ppdesc);
1363
1364                                /*
1365                                 * Increment the block count of the
1366                                 * N-2 descriptor
1367                                 */
1368                                at_xdmac_increment_block_count(chan, ppdesc);
1369                                ppdesc->lld.mbr_dus = stride;
1370
1371                                /*
1372                                 * Put back the N-1 descriptor in the
1373                                 * free descriptor list
1374                                 */
1375                                list_add_tail(&pdesc->desc_node,
1376                                              &atchan->free_descs_list);
1377
1378                                /*
1379                                 * Make our N-1 descriptor pointer
1380                                 * point to the N-2 since they were
1381                                 * actually merged.
1382                                 */
1383                                pdesc = ppdesc;
1384
1385                        /*
1386                         * Rule out the case where we don't have
1387                         * pstride computed yet (our second sg
1388                         * element).
1389                         *
1390                         * We also want to catch the case where there
1391                         * would be a negative stride.
1392                         */
1393                        } else if (pstride ||
1394                                   sg_dma_address(sg) < sg_dma_address(psg)) {
1395                                /*
1396                                 * Queue the N-1 descriptor after the
1397                                 * N-2
1398                                 */
1399                                at_xdmac_queue_desc(chan, ppdesc, pdesc);
1400
1401                                /*
1402                                 * Add the N-1 descriptor to the list
1403                                 * of the descriptors used for this
1404                                 * transfer
1405                                 */
1406                                list_add_tail(&desc->desc_node,
1407                                              &first->descs_list);
1408                                dev_dbg(chan2dev(chan),
1409                                        "%s: add desc 0x%p to descs_list 0x%p\n",
1410                                        __func__, desc, first);
1411                        }
1412                }
1413
1414                /*
1415                 * If we are the last element, just check whether we have
1416                 * the same size as the previous element.
1417                 *
1418                 * If so, we can merge it with the previous descriptor
1419                 * since we don't care about the stride anymore.
1420                 */
1421                if ((i == (sg_len - 1)) &&
1422                    sg_dma_len(psg) == sg_dma_len(sg)) {
1423                        dev_dbg(chan2dev(chan),
1424                                "%s: desc 0x%p can be merged with desc 0x%p\n",
1425                                __func__, desc, pdesc);
1426
1427                        /*
1428                         * Increment the block count of the N-1
1429                         * descriptor
1430                         */
1431                        at_xdmac_increment_block_count(chan, pdesc);
1432                        pdesc->lld.mbr_dus = stride;
1433
1434                        /*
1435                         * Put back the N descriptor in the free
1436                         * descriptor list
1437                         */
1438                        list_add_tail(&desc->desc_node,
1439                                      &atchan->free_descs_list);
1440                }
1441
1442                /* Update our descriptors */
1443                ppdesc = pdesc;
1444                pdesc = desc;
1445
1446                /* Update our scatter pointers */
1447                ppsg = psg;
1448                psg = sg;
1449
1450                len += sg_dma_len(sg);
1451        }
1452
1453        first->tx_dma_desc.cookie = -EBUSY;
1454        first->tx_dma_desc.flags = flags;
1455        first->xfer_size = len;
1456
1457        return &first->tx_dma_desc;
1458}
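
/*
 * Illustrative sketch, not part of the driver: the merge rule applied by
 * at_xdmac_prep_dma_memset_sg() above. Segments of equal length whose
 * gaps (strides) are also equal collapse into a single descriptor whose
 * block count is bumped, instead of queueing a new descriptor per
 * segment. Types and addresses below are hypothetical; drop the #if 0
 * guard to build it as a userspace program.
 */
#if 0
#include <stdio.h>

struct ex_seg { unsigned long addr; unsigned long len; };

int main(void)
{
	/* three 256-byte segments, each 512 bytes from start to start */
	struct ex_seg sg[] = {
		{ 0x1000, 256 }, { 0x1200, 256 }, { 0x1400, 256 },
	};
	unsigned int n = sizeof(sg) / sizeof(sg[0]), i;
	unsigned long stride = sg[1].addr - (sg[0].addr + sg[0].len);
	int mergeable = 1;

	for (i = 1; i < n; i++) {
		unsigned long s = sg[i].addr - (sg[i - 1].addr + sg[i - 1].len);

		if (sg[i].len != sg[0].len || s != stride)
			mergeable = 0;
	}
	if (mergeable)
		printf("one descriptor, %u blocks, stride %lu\n", n, stride);
	else
		printf("segments need separate descriptors\n");
	return 0;
}
#endif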
1459
1460static enum dma_status
1461at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1462                struct dma_tx_state *txstate)
1463{
1464        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1465        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1466        struct at_xdmac_desc    *desc, *_desc;
1467        struct list_head        *descs_list;
1468        enum dma_status         ret;
1469        int                     residue, retry;
1470        u32                     cur_nda, check_nda, cur_ubc, mask, value;
1471        u8                      dwidth = 0;
1472        unsigned long           flags;
1473        bool                    initd;
1474
1475        ret = dma_cookie_status(chan, cookie, txstate);
1476        if (ret == DMA_COMPLETE)
1477                return ret;
1478
1479        if (!txstate)
1480                return ret;
1481
1482        spin_lock_irqsave(&atchan->lock, flags);
1483
1484        desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1485
1486        /*
1487         * If the transfer has not been started yet, there is no need to
1488         * compute the residue: it is simply the transfer length.
1489         */
1490        if (!desc->active_xfer) {
1491                dma_set_residue(txstate, desc->xfer_size);
1492                goto spin_unlock;
1493        }
1494
1495        residue = desc->xfer_size;
1496        /*
1497         * Flush FIFO: only relevant when the transfer is source peripheral
1498         * synchronized. Flush is needed before reading CUBC because data in
1499         * the FIFO are not reported by CUBC. Reporting a residue of the
1500         * transfer length while there are data in the FIFO can cause issues.
1501         * Use case: the Atmel USART has a timeout which fires when characters
1502         * have been received but no new character has arrived for a while. On
1503         * timeout, it requests the residue. If the data are in the DMA FIFO,
1504         * we will return a residue equal to the transfer length, meaning that
1505         * no data were received. If an application is waiting for these data,
1506         * it will hang since we won't get another USART timeout without
1507         * receiving new data.
1508         */
1509        mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1510        value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
1511        if ((desc->lld.mbr_cfg & mask) == value) {
1512                at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1513                while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1514                        cpu_relax();
1515        }
1516
1517        /*
1518         * The easiest way to compute the residue would be to pause the DMA,
1519         * but doing so can lead to data loss, as some devices don't have a
1520         * FIFO.
1521         * We need to read several registers because:
1522         * - the DMA is running, so a descriptor change is possible while
1523         * reading these registers;
1524         * - when a block transfer is done, the value of the CUBC register
1525         * is set to its initial value until the fetch of the next descriptor;
1526         * this stale value would corrupt the residue calculation, so we have
1527         * to skip it.
1528         *
1529         * INITD --------                    ------------
1530         *              |____________________|
1531         *       _______________________  _______________
1532         * NDA       @desc2             \/   @desc3
1533         *       _______________________/\_______________
1534         *       __________  ___________  _______________
1535         * CUBC       0    \/ MAX desc1 \/  MAX desc2
1536         *       __________/\___________/\_______________
1537         *
1538         * Since descriptors are aligned on 64 bits, we can assume that
1539         * the update of NDA and CUBC is atomic.
1540         * Memory barriers are used to ensure the read order of the registers.
1541         * A max number of retries is set since the loop could otherwise never end.
1542         */
1543        for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1544                check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1545                rmb();
1546                cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1547                rmb();
1548                initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1549                rmb();
1550                cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1551                rmb();
1552
1553                if ((check_nda == cur_nda) && initd)
1554                        break;
1555        }
1556
1557        if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1558                ret = DMA_ERROR;
1559                goto spin_unlock;
1560        }
1561
1562        /*
1563         * Flush FIFO: only relevant when the transfer is source peripheral
1564         * synchronized. Another flush is needed here because CUBC is updated
1565         * when the controller sends the data write command. It can lead to
1566         * report data that are not written in the memory or the device. The
1567         * FIFO flush ensures that data are really written.
1568         */
1569        if ((desc->lld.mbr_cfg & mask) == value) {
1570                at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1571                while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1572                        cpu_relax();
1573        }
1574
1575        /*
1576         * Remove size of all microblocks already transferred and the current
1577         * one. Then add the remaining size to transfer of the current
1578         * microblock.
1579         */
1580        descs_list = &desc->descs_list;
1581        list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
1582                dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
1583                residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
1584                if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
1585                        break;
1586        }
1587        residue += cur_ubc << dwidth;
1588
1589        dma_set_residue(txstate, residue);
1590
1591        dev_dbg(chan2dev(chan),
1592                 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1593                 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1594
1595spin_unlock:
1596        spin_unlock_irqrestore(&atchan->lock, flags);
1597        return ret;
1598}
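
/*
 * Illustrative sketch, not part of the driver: the residue arithmetic
 * done at the end of at_xdmac_tx_status(). Starting from the total
 * transfer size, subtract every microblock up to and including the one
 * the controller is executing (found by matching CNDA against each
 * descriptor's next-descriptor address), then add back what is left of
 * the current microblock (CUBC). All values are hypothetical; drop the
 * #if 0 guard to build it as a userspace program.
 */
#if 0
#include <stdio.h>

struct ex_lld { unsigned long nda; unsigned int ublen, dwidth; };

int main(void)
{
	/* three chained microblocks of 0x100 words (4 bytes each) */
	struct ex_lld lld[] = {
		{ .nda = 0x200, .ublen = 0x100, .dwidth = 2 },	/* desc0 */
		{ .nda = 0x300, .ublen = 0x100, .dwidth = 2 },	/* desc1 */
		{ .nda = 0x000, .ublen = 0x100, .dwidth = 2 },	/* desc2 */
	};
	unsigned long cur_nda = 0x300;	/* CNDA: desc1 is being executed */
	unsigned int cur_ubc = 0x40;	/* 0x40 words of desc1 still to go */
	long residue = 3 * (0x100 << 2);	/* total transfer size */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		residue -= (long)lld[i].ublen << lld[i].dwidth;
		if (lld[i].nda == cur_nda)
			break;	/* guaranteed to hit with the data above */
	}
	residue += (long)cur_ubc << lld[i].dwidth;

	/* 0x400 bytes of desc2 plus 0x100 bytes left of desc1 = 0x500 */
	printf("residue = %ld bytes\n", residue);
	return 0;
}
#endif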
1599
1600/* The caller must hold atchan->lock. */
1601static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
1602                                    struct at_xdmac_desc *desc)
1603{
1604        dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1605
1606        /*
1607         * Remove the transfer from the transfer list then move the transfer
1608         * descriptors into the free descriptors list.
1609         */
1610        list_del(&desc->xfer_node);
1611        list_splice_init(&desc->descs_list, &atchan->free_descs_list);
1612}
1613
1614static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1615{
1616        struct at_xdmac_desc    *desc;
1617
1618        /*
1619         * If the channel is enabled, do nothing: advance_work will be
1620         * triggered after the interrupt.
1621         */
1622        if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
1623                desc = list_first_entry(&atchan->xfers_list,
1624                                        struct at_xdmac_desc,
1625                                        xfer_node);
1626                dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1627                if (!desc->active_xfer)
1628                        at_xdmac_start_xfer(atchan, desc);
1629        }
1630}
1631
1632static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1633{
1634        struct at_xdmac_desc            *desc;
1635        struct dma_async_tx_descriptor  *txd;
1636
1637        if (!list_empty(&atchan->xfers_list)) {
1638                desc = list_first_entry(&atchan->xfers_list,
1639                                        struct at_xdmac_desc, xfer_node);
1640                txd = &desc->tx_dma_desc;
1641
1642                if (txd->flags & DMA_PREP_INTERRUPT)
1643                        dmaengine_desc_get_callback_invoke(txd, NULL);
1644        }
1645}
1646
1647static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
1648{
1649        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1650        struct at_xdmac_desc    *bad_desc;
1651
1652        /*
1653         * The descriptor currently at the head of the active list is
1654         * broken. Since we don't have any way to report errors, we'll
1655         * just have to scream loudly and try to continue with other
1656         * descriptors queued (if any).
1657         */
1658        if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1659                dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1660        if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1661                dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1662        if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1663                dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1664
1665        spin_lock_irq(&atchan->lock);
1666
1667        /* Channel must be disabled first as it's not done automatically */
1668        at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1669        while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1670                cpu_relax();
1671
1672        bad_desc = list_first_entry(&atchan->xfers_list,
1673                                    struct at_xdmac_desc,
1674                                    xfer_node);
1675
1676        spin_unlock_irq(&atchan->lock);
1677
1678        /* Print the bad descriptor's details if needed. */
1679        dev_dbg(chan2dev(&atchan->chan),
1680                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
1681                __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
1682                bad_desc->lld.mbr_ubc);
1683
1684        /* Then continue with the usual descriptor management. */
1685}
1686
1687static void at_xdmac_tasklet(struct tasklet_struct *t)
1688{
1689        struct at_xdmac_chan    *atchan = from_tasklet(atchan, t, tasklet);
1690        struct at_xdmac_desc    *desc;
1691        u32                     error_mask;
1692
1693        dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1694                __func__, atchan->irq_status);
1695
1696        error_mask = AT_XDMAC_CIS_RBEIS
1697                     | AT_XDMAC_CIS_WBEIS
1698                     | AT_XDMAC_CIS_ROIS;
1699
1700        if (at_xdmac_chan_is_cyclic(atchan)) {
1701                at_xdmac_handle_cyclic(atchan);
1702        } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1703                   || (atchan->irq_status & error_mask)) {
1704                struct dma_async_tx_descriptor  *txd;
1705
1706                if (atchan->irq_status & error_mask)
1707                        at_xdmac_handle_error(atchan);
1708
1709                spin_lock_irq(&atchan->lock);
1710                desc = list_first_entry(&atchan->xfers_list,
1711                                        struct at_xdmac_desc,
1712                                        xfer_node);
1713                dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1714                if (!desc->active_xfer) {
1715                        dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
1716                        spin_unlock_irq(&atchan->lock);
1717                        return;
1718                }
1719
1720                txd = &desc->tx_dma_desc;
1721
1722                at_xdmac_remove_xfer(atchan, desc);
1723                spin_unlock_irq(&atchan->lock);
1724
1725                dma_cookie_complete(txd);
1726                if (txd->flags & DMA_PREP_INTERRUPT)
1727                        dmaengine_desc_get_callback_invoke(txd, NULL);
1728
1729                dma_run_dependencies(txd);
1730
1731                spin_lock_irq(&atchan->lock);
1732                at_xdmac_advance_work(atchan);
1733                spin_unlock_irq(&atchan->lock);
1734        }
1735}
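
/*
 * Consumer-side sketch, an assumption rather than part of this driver: a
 * client that installed no callback can poll for the completion that the
 * tasklet above signals via dma_cookie_complete(). The helper name is
 * hypothetical; the dmaengine calls are real.
 */
#if 0
static int ex_wait_for_xfer(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	dma_async_issue_pending(chan);
	do {
		status = dmaengine_tx_status(chan, cookie, &state);
		if (status == DMA_ERROR)
			return -EIO;
		cpu_relax();
	} while (status != DMA_COMPLETE);

	/* state.residue reads 0 once the transfer has fully completed */
	return 0;
}
#endif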
1736
1737static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1738{
1739        struct at_xdmac         *atxdmac = (struct at_xdmac *)dev_id;
1740        struct at_xdmac_chan    *atchan;
1741        u32                     imr, status, pending;
1742        u32                     chan_imr, chan_status;
1743        int                     i, ret = IRQ_NONE;
1744
1745        do {
1746                imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1747                status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1748                pending = status & imr;
1749
1750                dev_vdbg(atxdmac->dma.dev,
1751                         "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1752                         __func__, status, imr, pending);
1753
1754                if (!pending)
1755                        break;
1756
1757                /* We have to find which channel generated the interrupt. */
1758                for (i = 0; i < atxdmac->dma.chancnt; i++) {
1759                        if (!((1 << i) & pending))
1760                                continue;
1761
1762                        atchan = &atxdmac->chan[i];
1763                        chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1764                        chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1765                        atchan->irq_status = chan_status & chan_imr;
1766                        dev_vdbg(atxdmac->dma.dev,
1767                                 "%s: chan%d: imr=0x%x, status=0x%x\n",
1768                                 __func__, i, chan_imr, chan_status);
1769                        dev_vdbg(chan2dev(&atchan->chan),
1770                                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1771                                 __func__,
1772                                 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1773                                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1774                                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1775                                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1776                                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1777                                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1778
1779                        if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1780                                at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1781
1782                        tasklet_schedule(&atchan->tasklet);
1783                        ret = IRQ_HANDLED;
1784                }
1785
1786        } while (pending);
1787
1788        return ret;
1789}
1790
1791static void at_xdmac_issue_pending(struct dma_chan *chan)
1792{
1793        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1794        unsigned long flags;
1795
1796        dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1797
1798        if (!at_xdmac_chan_is_cyclic(atchan)) {
1799                spin_lock_irqsave(&atchan->lock, flags);
1800                at_xdmac_advance_work(atchan);
1801                spin_unlock_irqrestore(&atchan->lock, flags);
1802        }
1805}
1806
1807static int at_xdmac_device_config(struct dma_chan *chan,
1808                                  struct dma_slave_config *config)
1809{
1810        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1811        int ret;
1812        unsigned long           flags;
1813
1814        dev_dbg(chan2dev(chan), "%s\n", __func__);
1815
1816        spin_lock_irqsave(&atchan->lock, flags);
1817        ret = at_xdmac_set_slave_config(chan, config);
1818        spin_unlock_irqrestore(&atchan->lock, flags);
1819
1820        return ret;
1821}
1822
1823static int at_xdmac_device_pause(struct dma_chan *chan)
1824{
1825        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1826        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1827        unsigned long           flags;
1828
1829        dev_dbg(chan2dev(chan), "%s\n", __func__);
1830
1831        if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1832                return 0;
1833
1834        spin_lock_irqsave(&atchan->lock, flags);
1835        at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
1836        while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1837               & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1838                cpu_relax();
1839        spin_unlock_irqrestore(&atchan->lock, flags);
1840
1841        return 0;
1842}
1843
1844static int at_xdmac_device_resume(struct dma_chan *chan)
1845{
1846        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1847        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1848        unsigned long           flags;
1849
1850        dev_dbg(chan2dev(chan), "%s\n", __func__);
1851
1852        spin_lock_irqsave(&atchan->lock, flags);
1853        if (!at_xdmac_chan_is_paused(atchan)) {
1854                spin_unlock_irqrestore(&atchan->lock, flags);
1855                return 0;
1856        }
1857
1858        at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1859        clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1860        spin_unlock_irqrestore(&atchan->lock, flags);
1861
1862        return 0;
1863}
1864
1865static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1866{
1867        struct at_xdmac_desc    *desc, *_desc;
1868        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1869        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1870        unsigned long           flags;
1871
1872        dev_dbg(chan2dev(chan), "%s\n", __func__);
1873
1874        spin_lock_irqsave(&atchan->lock, flags);
1875        at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1876        while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1877                cpu_relax();
1878
1879        /* Cancel all pending transfers. */
1880        list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1881                at_xdmac_remove_xfer(atchan, desc);
1882
1883        clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1884        clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1885        spin_unlock_irqrestore(&atchan->lock, flags);
1886
1887        return 0;
1888}
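
/*
 * Consumer-side sketch, an assumption rather than part of this driver:
 * how the pause/resume/terminate hooks above are reached through the
 * generic dmaengine wrappers. The helper name is hypothetical; error
 * handling is elided.
 */
#if 0
static void ex_pause_then_kill(struct dma_chan *chan)
{
	dmaengine_pause(chan);		/* -> at_xdmac_device_pause() */
	/* ...inspect residue, drain the peripheral, etc... */
	dmaengine_resume(chan);		/* -> at_xdmac_device_resume() */

	/* abort everything still queued on the channel */
	dmaengine_terminate_sync(chan);	/* -> at_xdmac_device_terminate_all() */
}
#endif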
1889
1890static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1891{
1892        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1893        struct at_xdmac_desc    *desc;
1894        int                     i;
1895
1896        if (at_xdmac_chan_is_enabled(atchan)) {
1897                dev_err(chan2dev(chan),
1898                        "can't allocate channel resources (channel enabled)\n");
1899                return -EIO;
1900        }
1901
1902        if (!list_empty(&atchan->free_descs_list)) {
1903                dev_err(chan2dev(chan),
1904                        "can't allocate channel resources (channel not free from a previous use)\n");
1905                return -EIO;
1906        }
1907
1908        for (i = 0; i < init_nr_desc_per_channel; i++) {
1909                desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
1910                if (!desc) {
1911                        dev_warn(chan2dev(chan),
1912                                "only %d descriptors have been allocated\n", i);
1913                        break;
1914                }
1915                list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1916        }
1917
1918        dma_cookie_init(chan);
1919
1920        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1921
1922        return i;
1923}
1924
1925static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1926{
1927        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1928        struct at_xdmac         *atxdmac = to_at_xdmac(chan->device);
1929        struct at_xdmac_desc    *desc, *_desc;
1930
1931        list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1932                dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1933                list_del(&desc->desc_node);
1934                dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1935        }
1938}
1939
1940#ifdef CONFIG_PM
1941static int atmel_xdmac_prepare(struct device *dev)
1942{
1943        struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
1944        struct dma_chan         *chan, *_chan;
1945
1946        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1947                struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1948
1949                /* Wait for transfer completion, except in the cyclic case. */
1950                if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1951                        return -EAGAIN;
1952        }
1953        return 0;
1954}
1955#else
1956#       define atmel_xdmac_prepare NULL
1957#endif
1958
1959#ifdef CONFIG_PM_SLEEP
1960static int atmel_xdmac_suspend(struct device *dev)
1961{
1962        struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
1963        struct dma_chan         *chan, *_chan;
1964
1965        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1966                struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1967
1968                atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
1969                if (at_xdmac_chan_is_cyclic(atchan)) {
1970                        if (!at_xdmac_chan_is_paused(atchan))
1971                                at_xdmac_device_pause(chan);
1972                        atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1973                        atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1974                        atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1975                }
1976        }
1977        atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1978
1979        at_xdmac_off(atxdmac);
1980        clk_disable_unprepare(atxdmac->clk);
1981        return 0;
1982}
1983
1984static int atmel_xdmac_resume(struct device *dev)
1985{
1986        struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
1987        struct at_xdmac_chan    *atchan;
1988        struct dma_chan         *chan, *_chan;
1989        int                     i;
1990        int ret;
1991
1992        ret = clk_prepare_enable(atxdmac->clk);
1993        if (ret)
1994                return ret;
1995
1996        /* Clear pending interrupts. */
1997        for (i = 0; i < atxdmac->dma.chancnt; i++) {
1998                atchan = &atxdmac->chan[i];
1999                while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2000                        cpu_relax();
2001        }
2002
2003        at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
2004        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2005                atchan = to_at_xdmac_chan(chan);
2006                at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
2007                if (at_xdmac_chan_is_cyclic(atchan)) {
2008                        if (at_xdmac_chan_is_paused(atchan))
2009                                at_xdmac_device_resume(chan);
2010                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2011                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2012                        at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
2013                        wmb();
2014                        at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
2015                }
2016        }
2017        return 0;
2018}
2019#endif /* CONFIG_PM_SLEEP */
2020
2021static void at_xdmac_axi_config(struct platform_device *pdev)
2022{
2023        struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2024        bool dev_m2m = false;
2025        u32 dma_requests;
2026
2027        if (!atxdmac->layout->axi_config)
2028                return; /* Not supported */
2029
2030        if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
2031                                  &dma_requests)) {
2032                dev_info(&pdev->dev, "controller in mem2mem mode.\n");
2033                dev_m2m = true;
2034        }
2035
2036        if (dev_m2m) {
2037                at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
2038                at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
2039        } else {
2040                at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
2041                at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
2042        }
2043}
2044
2045static int at_xdmac_probe(struct platform_device *pdev)
2046{
2047        struct at_xdmac *atxdmac;
2048        int             irq, size, nr_channels, i, ret;
2049        void __iomem    *base;
2050        u32             reg;
2051
2052        irq = platform_get_irq(pdev, 0);
2053        if (irq < 0)
2054                return irq;
2055
2056        base = devm_platform_ioremap_resource(pdev, 0);
2057        if (IS_ERR(base))
2058                return PTR_ERR(base);
2059
2060        /*
2061         * Read the number of xdmac channels; the read helper can't be used
2062         * since atxdmac is not yet allocated and we need to know the number
2063         * of channels to size the allocation.
2064         */
2065        reg = readl_relaxed(base + AT_XDMAC_GTYPE);
2066        nr_channels = AT_XDMAC_NB_CH(reg);
2067        if (nr_channels > AT_XDMAC_MAX_CHAN) {
2068                dev_err(&pdev->dev, "invalid number of channels (%u)\n",
2069                        nr_channels);
2070                return -EINVAL;
2071        }
2072
2073        size = sizeof(*atxdmac);
2074        size += nr_channels * sizeof(struct at_xdmac_chan);
2075        atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2076        if (!atxdmac) {
2077                dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
2078                return -ENOMEM;
2079        }
2080
2081        atxdmac->regs = base;
2082        atxdmac->irq = irq;
2083
2084        atxdmac->layout = of_device_get_match_data(&pdev->dev);
2085        if (!atxdmac->layout)
2086                return -ENODEV;
2087
2088        atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
2089        if (IS_ERR(atxdmac->clk)) {
2090                dev_err(&pdev->dev, "can't get dma_clk\n");
2091                return PTR_ERR(atxdmac->clk);
2092        }
2093
2094        /* Do not use devres here to prevent races with the tasklet. */
2095        ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
2096        if (ret) {
2097                dev_err(&pdev->dev, "can't request irq\n");
2098                return ret;
2099        }
2100
2101        ret = clk_prepare_enable(atxdmac->clk);
2102        if (ret) {
2103                dev_err(&pdev->dev, "can't prepare or enable clock\n");
2104                goto err_free_irq;
2105        }
2106
2107        atxdmac->at_xdmac_desc_pool =
2108                dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
2109                                sizeof(struct at_xdmac_desc), 4, 0);
2110        if (!atxdmac->at_xdmac_desc_pool) {
2111                dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
2112                ret = -ENOMEM;
2113                goto err_clk_disable;
2114        }
2115
2116        dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
2117        dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
2118        dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
2119        dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
2120        dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
2121        dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
2122        /*
2123         * Without DMA_PRIVATE the driver is not able to allocate more than
2124         * one channel; the second allocation fails in private_candidate.
2125         */
2126        dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
2127        atxdmac->dma.dev                                = &pdev->dev;
2128        atxdmac->dma.device_alloc_chan_resources        = at_xdmac_alloc_chan_resources;
2129        atxdmac->dma.device_free_chan_resources         = at_xdmac_free_chan_resources;
2130        atxdmac->dma.device_tx_status                   = at_xdmac_tx_status;
2131        atxdmac->dma.device_issue_pending               = at_xdmac_issue_pending;
2132        atxdmac->dma.device_prep_dma_cyclic             = at_xdmac_prep_dma_cyclic;
2133        atxdmac->dma.device_prep_interleaved_dma        = at_xdmac_prep_interleaved;
2134        atxdmac->dma.device_prep_dma_memcpy             = at_xdmac_prep_dma_memcpy;
2135        atxdmac->dma.device_prep_dma_memset             = at_xdmac_prep_dma_memset;
2136        atxdmac->dma.device_prep_dma_memset_sg          = at_xdmac_prep_dma_memset_sg;
2137        atxdmac->dma.device_prep_slave_sg               = at_xdmac_prep_slave_sg;
2138        atxdmac->dma.device_config                      = at_xdmac_device_config;
2139        atxdmac->dma.device_pause                       = at_xdmac_device_pause;
2140        atxdmac->dma.device_resume                      = at_xdmac_device_resume;
2141        atxdmac->dma.device_terminate_all               = at_xdmac_device_terminate_all;
2142        atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2143        atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2144        atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2145        atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2146
2147        /* Disable all channels and interrupts. */
2148        at_xdmac_off(atxdmac);
2149
2150        /* Init channels. */
2151        INIT_LIST_HEAD(&atxdmac->dma.channels);
2152        for (i = 0; i < nr_channels; i++) {
2153                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2154
2155                atchan->chan.device = &atxdmac->dma;
2156                list_add_tail(&atchan->chan.device_node,
2157                              &atxdmac->dma.channels);
2158
2159                atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2160                atchan->mask = 1 << i;
2161
2162                spin_lock_init(&atchan->lock);
2163                INIT_LIST_HEAD(&atchan->xfers_list);
2164                INIT_LIST_HEAD(&atchan->free_descs_list);
2165                tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
2166
2167                /* Clear pending interrupts. */
2168                while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2169                        cpu_relax();
2170        }
2171        platform_set_drvdata(pdev, atxdmac);
2172
2173        ret = dma_async_device_register(&atxdmac->dma);
2174        if (ret) {
2175                dev_err(&pdev->dev, "failed to register DMA engine device\n");
2176                goto err_clk_disable;
2177        }
2178
2179        ret = of_dma_controller_register(pdev->dev.of_node,
2180                                         at_xdmac_xlate, atxdmac);
2181        if (ret) {
2182                dev_err(&pdev->dev, "could not register of dma controller\n");
2183                goto err_dma_unregister;
2184        }
2185
2186        dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2187                 nr_channels, atxdmac->regs);
2188
2189        at_xdmac_axi_config(pdev);
2190
2191        return 0;
2192
2193err_dma_unregister:
2194        dma_async_device_unregister(&atxdmac->dma);
2195err_clk_disable:
2196        clk_disable_unprepare(atxdmac->clk);
2197err_free_irq:
2198        free_irq(atxdmac->irq, atxdmac);
2199        return ret;
2200}
2201
2202static int at_xdmac_remove(struct platform_device *pdev)
2203{
2204        struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2205        int             i;
2206
2207        at_xdmac_off(atxdmac);
2208        of_dma_controller_free(pdev->dev.of_node);
2209        dma_async_device_unregister(&atxdmac->dma);
2210        clk_disable_unprepare(atxdmac->clk);
2211
2212        free_irq(atxdmac->irq, atxdmac);
2213
2214        for (i = 0; i < atxdmac->dma.chancnt; i++) {
2215                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2216
2217                tasklet_kill(&atchan->tasklet);
2218                at_xdmac_free_chan_resources(&atchan->chan);
2219        }
2220
2221        return 0;
2222}
2223
2224static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
2225        .prepare        = atmel_xdmac_prepare,
2226        SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2227};
2228
2229static const struct of_device_id atmel_xdmac_dt_ids[] = {
2230        {
2231                .compatible = "atmel,sama5d4-dma",
2232                .data = &at_xdmac_sama5d4_layout,
2233        }, {
2234                .compatible = "microchip,sama7g5-dma",
2235                .data = &at_xdmac_sama7g5_layout,
2236        }, {
2237                /* sentinel */
2238        }
2239};
2240MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
2241
2242static struct platform_driver at_xdmac_driver = {
2243        .probe          = at_xdmac_probe,
2244        .remove         = at_xdmac_remove,
2245        .driver = {
2246                .name           = "at_xdmac",
2247                .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2248                .pm             = &atmel_xdmac_dev_pm_ops,
2249        }
2250};
2251
2252static int __init at_xdmac_init(void)
2253{
2254        return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
2255}
2256subsys_initcall(at_xdmac_init);
2257
2258MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
2259MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
2260MODULE_LICENSE("GPL");
2261