linux/drivers/dma/at_xdmac.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
   4 *
   5 * Copyright (C) 2014 Atmel Corporation
   6 *
   7 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
   8 */
   9
  10#include <asm/barrier.h>
  11#include <dt-bindings/dma/at91.h>
  12#include <linux/clk.h>
  13#include <linux/dmaengine.h>
  14#include <linux/dmapool.h>
  15#include <linux/interrupt.h>
  16#include <linux/irq.h>
  17#include <linux/kernel.h>
  18#include <linux/list.h>
  19#include <linux/module.h>
  20#include <linux/of_dma.h>
  21#include <linux/of_platform.h>
  22#include <linux/platform_device.h>
  23#include <linux/pm.h>
  24
  25#include "dmaengine.h"
  26
  27/* Global registers */
  28#define AT_XDMAC_GTYPE          0x00    /* Global Type Register */
  29#define         AT_XDMAC_NB_CH(i)       (((i) & 0x1F) + 1)              /* Number of Channels Minus One */
  30#define         AT_XDMAC_FIFO_SZ(i)     (((i) >> 5) & 0x7FF)            /* Number of Bytes */
  31#define         AT_XDMAC_NB_REQ(i)      ((((i) >> 16) & 0x3F) + 1)      /* Number of Peripheral Requests Minus One */
  32#define AT_XDMAC_GCFG           0x04    /* Global Configuration Register */
  33#define         AT_XDMAC_WRHP(i)                (((i) & 0xF) << 4)
  34#define         AT_XDMAC_WRMP(i)                (((i) & 0xF) << 8)
  35#define         AT_XDMAC_WRLP(i)                (((i) & 0xF) << 12)
  36#define         AT_XDMAC_RDHP(i)                (((i) & 0xF) << 16)
  37#define         AT_XDMAC_RDMP(i)                (((i) & 0xF) << 20)
  38#define         AT_XDMAC_RDLP(i)                (((i) & 0xF) << 24)
  39#define         AT_XDMAC_RDSG(i)                (((i) & 0xF) << 28)
  40#define AT_XDMAC_GCFG_M2M       (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
  41#define AT_XDMAC_GCFG_P2M       (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
  42                                AT_XDMAC_WRHP(0x5))
  43#define AT_XDMAC_GWAC           0x08    /* Global Weighted Arbiter Configuration Register */
  44#define         AT_XDMAC_PW0(i)         (((i) & 0xF) << 0)
  45#define         AT_XDMAC_PW1(i)         (((i) & 0xF) << 4)
  46#define         AT_XDMAC_PW2(i)         (((i) & 0xF) << 8)
  47#define         AT_XDMAC_PW3(i)         (((i) & 0xF) << 12)
  48#define AT_XDMAC_GWAC_M2M       0
  49#define AT_XDMAC_GWAC_P2M       (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
  50
  51#define AT_XDMAC_GIE            0x0C    /* Global Interrupt Enable Register */
  52#define AT_XDMAC_GID            0x10    /* Global Interrupt Disable Register */
  53#define AT_XDMAC_GIM            0x14    /* Global Interrupt Mask Register */
  54#define AT_XDMAC_GIS            0x18    /* Global Interrupt Status Register */
  55#define AT_XDMAC_GE             0x1C    /* Global Channel Enable Register */
  56#define AT_XDMAC_GD             0x20    /* Global Channel Disable Register */
  57#define AT_XDMAC_GS             0x24    /* Global Channel Status Register */
  58#define AT_XDMAC_VERSION        0xFFC   /* XDMAC Version Register */
  59
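/*
 * Illustrative sketch (not part of the original file): how the GTYPE fields
 * above are typically decoded at probe time. The helper name and the
 * pr_info() call are hypothetical, for demonstration only.
 */
static inline void at_xdmac_example_decode_gtype(u32 gtype)
{
        u32 nr_channels = AT_XDMAC_NB_CH(gtype);   /* bits 4:0, stored minus one */
        u32 fifo_size   = AT_XDMAC_FIFO_SZ(gtype); /* bits 15:5, in bytes */
        u32 nr_requests = AT_XDMAC_NB_REQ(gtype);  /* bits 21:16, stored minus one */

        pr_info("xdmac: %u channels, %u byte FIFO, %u peripheral requests\n",
                nr_channels, fifo_size, nr_requests);
}
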
  60/* Channel relative registers offsets */
  61#define AT_XDMAC_CIE            0x00    /* Channel Interrupt Enable Register */
  62#define         AT_XDMAC_CIE_BIE        BIT(0)  /* End of Block Interrupt Enable Bit */
  63#define         AT_XDMAC_CIE_LIE        BIT(1)  /* End of Linked List Interrupt Enable Bit */
  64#define         AT_XDMAC_CIE_DIE        BIT(2)  /* End of Disable Interrupt Enable Bit */
  65#define         AT_XDMAC_CIE_FIE        BIT(3)  /* End of Flush Interrupt Enable Bit */
  66#define         AT_XDMAC_CIE_RBEIE      BIT(4)  /* Read Bus Error Interrupt Enable Bit */
  67#define         AT_XDMAC_CIE_WBEIE      BIT(5)  /* Write Bus Error Interrupt Enable Bit */
  68#define         AT_XDMAC_CIE_ROIE       BIT(6)  /* Request Overflow Interrupt Enable Bit */
  69#define AT_XDMAC_CID            0x04    /* Channel Interrupt Disable Register */
  70#define         AT_XDMAC_CID_BID        BIT(0)  /* End of Block Interrupt Disable Bit */
  71#define         AT_XDMAC_CID_LID        BIT(1)  /* End of Linked List Interrupt Disable Bit */
  72#define         AT_XDMAC_CID_DID        BIT(2)  /* End of Disable Interrupt Disable Bit */
  73#define         AT_XDMAC_CID_FID        BIT(3)  /* End of Flush Interrupt Disable Bit */
  74#define         AT_XDMAC_CID_RBEID      BIT(4)  /* Read Bus Error Interrupt Disable Bit */
  75#define         AT_XDMAC_CID_WBEID      BIT(5)  /* Write Bus Error Interrupt Disable Bit */
  76#define         AT_XDMAC_CID_ROID       BIT(6)  /* Request Overflow Interrupt Disable Bit */
  77#define AT_XDMAC_CIM            0x08    /* Channel Interrupt Mask Register */
  78#define         AT_XDMAC_CIM_BIM        BIT(0)  /* End of Block Interrupt Mask Bit */
  79#define         AT_XDMAC_CIM_LIM        BIT(1)  /* End of Linked List Interrupt Mask Bit */
  80#define         AT_XDMAC_CIM_DIM        BIT(2)  /* End of Disable Interrupt Mask Bit */
  81#define         AT_XDMAC_CIM_FIM        BIT(3)  /* End of Flush Interrupt Mask Bit */
  82#define         AT_XDMAC_CIM_RBEIM      BIT(4)  /* Read Bus Error Interrupt Mask Bit */
  83#define         AT_XDMAC_CIM_WBEIM      BIT(5)  /* Write Bus Error Interrupt Mask Bit */
  84#define         AT_XDMAC_CIM_ROIM       BIT(6)  /* Request Overflow Interrupt Mask Bit */
  85#define AT_XDMAC_CIS            0x0C    /* Channel Interrupt Status Register */
  86#define         AT_XDMAC_CIS_BIS        BIT(0)  /* End of Block Interrupt Status Bit */
  87#define         AT_XDMAC_CIS_LIS        BIT(1)  /* End of Linked List Interrupt Status Bit */
  88#define         AT_XDMAC_CIS_DIS        BIT(2)  /* End of Disable Interrupt Status Bit */
  89#define         AT_XDMAC_CIS_FIS        BIT(3)  /* End of Flush Interrupt Status Bit */
  90#define         AT_XDMAC_CIS_RBEIS      BIT(4)  /* Read Bus Error Interrupt Status Bit */
  91#define         AT_XDMAC_CIS_WBEIS      BIT(5)  /* Write Bus Error Interrupt Status Bit */
  92#define         AT_XDMAC_CIS_ROIS       BIT(6)  /* Request Overflow Interrupt Status Bit */
  93#define AT_XDMAC_CSA            0x10    /* Channel Source Address Register */
  94#define AT_XDMAC_CDA            0x14    /* Channel Destination Address Register */
  95#define AT_XDMAC_CNDA           0x18    /* Channel Next Descriptor Address Register */
  96#define         AT_XDMAC_CNDA_NDAIF(i)  ((i) & 0x1)                     /* Channel x Next Descriptor Interface */
  97#define         AT_XDMAC_CNDA_NDA(i)    ((i) & 0xfffffffc)              /* Channel x Next Descriptor Address */
  98#define AT_XDMAC_CNDC           0x1C    /* Channel Next Descriptor Control Register */
  99#define         AT_XDMAC_CNDC_NDE               (0x1 << 0)              /* Channel x Next Descriptor Enable */
 100#define         AT_XDMAC_CNDC_NDSUP             (0x1 << 1)              /* Channel x Next Descriptor Source Update */
 101#define         AT_XDMAC_CNDC_NDDUP             (0x1 << 2)              /* Channel x Next Descriptor Destination Update */
 102#define         AT_XDMAC_CNDC_NDVIEW_NDV0       (0x0 << 3)              /* Channel x Next Descriptor View 0 */
 103#define         AT_XDMAC_CNDC_NDVIEW_NDV1       (0x1 << 3)              /* Channel x Next Descriptor View 1 */
 104#define         AT_XDMAC_CNDC_NDVIEW_NDV2       (0x2 << 3)              /* Channel x Next Descriptor View 2 */
 105#define         AT_XDMAC_CNDC_NDVIEW_NDV3       (0x3 << 3)              /* Channel x Next Descriptor View 3 */
 106#define AT_XDMAC_CUBC           0x20    /* Channel Microblock Control Register */
 107#define AT_XDMAC_CBC            0x24    /* Channel Block Control Register */
 108#define AT_XDMAC_CC             0x28    /* Channel Configuration Register */
 109#define         AT_XDMAC_CC_TYPE        (0x1 << 0)      /* Channel Transfer Type */
 110#define                 AT_XDMAC_CC_TYPE_MEM_TRAN       (0x0 << 0)      /* Memory to Memory Transfer */
 111#define                 AT_XDMAC_CC_TYPE_PER_TRAN       (0x1 << 0)      /* Peripheral to Memory or Memory to Peripheral Transfer */
 112#define         AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
 113#define                 AT_XDMAC_CC_MBSIZE_SINGLE       (0x0 << 1)
 114#define                 AT_XDMAC_CC_MBSIZE_FOUR         (0x1 << 1)
 115#define                 AT_XDMAC_CC_MBSIZE_EIGHT        (0x2 << 1)
 116#define                 AT_XDMAC_CC_MBSIZE_SIXTEEN      (0x3 << 1)
 117#define         AT_XDMAC_CC_DSYNC       (0x1 << 4)      /* Channel Synchronization */
 118#define                 AT_XDMAC_CC_DSYNC_PER2MEM       (0x0 << 4)
 119#define                 AT_XDMAC_CC_DSYNC_MEM2PER       (0x1 << 4)
 120#define         AT_XDMAC_CC_PROT        (0x1 << 5)      /* Channel Protection */
 121#define                 AT_XDMAC_CC_PROT_SEC            (0x0 << 5)
 122#define                 AT_XDMAC_CC_PROT_UNSEC          (0x1 << 5)
 123#define         AT_XDMAC_CC_SWREQ       (0x1 << 6)      /* Channel Software Request Trigger */
 124#define                 AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
 125#define                 AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
 126#define         AT_XDMAC_CC_MEMSET      (0x1 << 7)      /* Channel Fill Block of memory */
 127#define                 AT_XDMAC_CC_MEMSET_NORMAL_MODE  (0x0 << 7)
 128#define                 AT_XDMAC_CC_MEMSET_HW_MODE      (0x1 << 7)
 129#define         AT_XDMAC_CC_CSIZE(i)    ((0x7 & (i)) << 8)      /* Channel Chunk Size */
 130#define         AT_XDMAC_CC_DWIDTH_OFFSET       11
 131#define         AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
 132#define         AT_XDMAC_CC_DWIDTH(i)   ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)      /* Channel Data Width */
 133#define                 AT_XDMAC_CC_DWIDTH_BYTE         0x0
 134#define                 AT_XDMAC_CC_DWIDTH_HALFWORD     0x1
 135#define                 AT_XDMAC_CC_DWIDTH_WORD         0x2
 136#define                 AT_XDMAC_CC_DWIDTH_DWORD        0x3
 137#define         AT_XDMAC_CC_SIF(i)      ((0x1 & (i)) << 13)     /* Channel Source Interface Identifier */
 138#define         AT_XDMAC_CC_DIF(i)      ((0x1 & (i)) << 14)     /* Channel Destination Interface Identifier */
 139#define         AT_XDMAC_CC_SAM_MASK    (0x3 << 16)     /* Channel Source Addressing Mode */
 140#define                 AT_XDMAC_CC_SAM_FIXED_AM        (0x0 << 16)
 141#define                 AT_XDMAC_CC_SAM_INCREMENTED_AM  (0x1 << 16)
 142#define                 AT_XDMAC_CC_SAM_UBS_AM          (0x2 << 16)
 143#define                 AT_XDMAC_CC_SAM_UBS_DS_AM       (0x3 << 16)
 144#define         AT_XDMAC_CC_DAM_MASK    (0x3 << 18)     /* Channel Destination Addressing Mode */
 145#define                 AT_XDMAC_CC_DAM_FIXED_AM        (0x0 << 18)
 146#define                 AT_XDMAC_CC_DAM_INCREMENTED_AM  (0x1 << 18)
 147#define                 AT_XDMAC_CC_DAM_UBS_AM          (0x2 << 18)
 148#define                 AT_XDMAC_CC_DAM_UBS_DS_AM       (0x3 << 18)
 149#define         AT_XDMAC_CC_INITD       (0x1 << 21)     /* Channel Initialization Terminated (read only) */
 150#define                 AT_XDMAC_CC_INITD_TERMINATED    (0x0 << 21)
 151#define                 AT_XDMAC_CC_INITD_IN_PROGRESS   (0x1 << 21)
 152#define         AT_XDMAC_CC_RDIP        (0x1 << 22)     /* Read in Progress (read only) */
 153#define                 AT_XDMAC_CC_RDIP_DONE           (0x0 << 22)
 154#define                 AT_XDMAC_CC_RDIP_IN_PROGRESS    (0x1 << 22)
 155#define         AT_XDMAC_CC_WRIP        (0x1 << 23)     /* Write in Progress (read only) */
 156#define                 AT_XDMAC_CC_WRIP_DONE           (0x0 << 23)
 157#define                 AT_XDMAC_CC_WRIP_IN_PROGRESS    (0x1 << 23)
 158#define         AT_XDMAC_CC_PERID(i)    ((0x7f & (i)) << 24)    /* Channel Peripheral Identifier */
 159#define AT_XDMAC_CDS_MSP        0x2C    /* Channel Data Stride Memory Set Pattern */
 160#define AT_XDMAC_CSUS           0x30    /* Channel Source Microblock Stride */
 161#define AT_XDMAC_CDUS           0x34    /* Channel Destination Microblock Stride */
 162
 163/* Microblock control members */
 164#define AT_XDMAC_MBR_UBC_UBLEN_MAX      0xFFFFFFUL      /* Maximum Microblock Length */
 165#define AT_XDMAC_MBR_UBC_NDE            (0x1 << 24)     /* Next Descriptor Enable */
 166#define AT_XDMAC_MBR_UBC_NSEN           (0x1 << 25)     /* Next Descriptor Source Update */
 167#define AT_XDMAC_MBR_UBC_NDEN           (0x1 << 26)     /* Next Descriptor Destination Update */
 168#define AT_XDMAC_MBR_UBC_NDV0           (0x0 << 27)     /* Next Descriptor View 0 */
 169#define AT_XDMAC_MBR_UBC_NDV1           (0x1 << 27)     /* Next Descriptor View 1 */
 170#define AT_XDMAC_MBR_UBC_NDV2           (0x2 << 27)     /* Next Descriptor View 2 */
 171#define AT_XDMAC_MBR_UBC_NDV3           (0x3 << 27)     /* Next Descriptor View 3 */
 172
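/*
 * Illustrative sketch (not part of the original file): how the prep_*
 * callbacks below compose a view-2 microblock control word from these
 * macros. The helper name is hypothetical; 'len' is in bytes and 'dwidth'
 * is one of the AT_XDMAC_CC_DWIDTH_* encodings.
 */
static inline u32 at_xdmac_example_mbr_ubc(size_t len, u32 dwidth)
{
        /* Callers must keep this below AT_XDMAC_MBR_UBC_UBLEN_MAX. */
        u32 ublen = len >> dwidth;

        return AT_XDMAC_MBR_UBC_NDV2    /* next descriptor view 2 */
               | AT_XDMAC_MBR_UBC_NDEN  /* update destination on fetch */
               | AT_XDMAC_MBR_UBC_NSEN  /* update source on fetch */
               | ublen;                 /* microblock length in data units */
}
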
 173#define AT_XDMAC_MAX_CHAN       0x20
 174#define AT_XDMAC_MAX_CSIZE      16      /* 16 data */
 175#define AT_XDMAC_MAX_DWIDTH     8       /* 64 bits */
 176#define AT_XDMAC_RESIDUE_MAX_RETRIES    5
 177
 178#define AT_XDMAC_DMA_BUSWIDTHS\
 179        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
 180        BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
 181        BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
 182        BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
 183        BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
 184
 185enum atc_status {
 186        AT_XDMAC_CHAN_IS_CYCLIC = 0,
 187        AT_XDMAC_CHAN_IS_PAUSED,
 188};
 189
 190struct at_xdmac_layout {
 191        /* Global Channel Read Suspend Register */
 192        u8                              grs;
 193        /* Global Write Suspend Register */
 194        u8                              gws;
 195        /* Global Channel Read Write Suspend Register */
 196        u8                              grws;
 197        /* Global Channel Read Write Resume Register */
 198        u8                              grwr;
 199        /* Global Channel Software Request Register */
 200        u8                              gswr;
 201        /* Global channel Software Request Status Register */
 202        u8                              gsws;
 203        /* Global Channel Software Flush Request Register */
 204        u8                              gswf;
 205        /* Channel reg base */
 206        u8                              chan_cc_reg_base;
 207        /* Whether the Source/Destination Interface must be specified */
 208        bool                            sdif;
 209        /* AXI queue priority configuration supported */
 210        bool                            axi_config;
 211};
 212
 213/* ----- Channels ----- */
 214struct at_xdmac_chan {
 215        struct dma_chan                 chan;
 216        void __iomem                    *ch_regs;
 217        u32                             mask;           /* Channel Mask */
 218        u32                             cfg;            /* Channel Configuration Register */
 219        u8                              perid;          /* Peripheral ID */
 220        u8                              perif;          /* Peripheral Interface */
 221        u8                              memif;          /* Memory Interface */
 222        u32                             save_cc;
 223        u32                             save_cim;
 224        u32                             save_cnda;
 225        u32                             save_cndc;
 226        u32                             irq_status;
 227        unsigned long                   status;
 228        struct tasklet_struct           tasklet;
 229        struct dma_slave_config         sconfig;
 230
 231        spinlock_t                      lock;
 232
 233        struct list_head                xfers_list;
 234        struct list_head                free_descs_list;
 235};
 236
 237
 238/* ----- Controller ----- */
 239struct at_xdmac {
 240        struct dma_device       dma;
 241        void __iomem            *regs;
 242        int                     irq;
 243        struct clk              *clk;
 244        u32                     save_gim;
 245        struct dma_pool         *at_xdmac_desc_pool;
 246        const struct at_xdmac_layout    *layout;
 247        struct at_xdmac_chan    chan[];
 248};
 249
 250
 251/* ----- Descriptors ----- */
 252
 253/* Linked List Descriptor */
 254struct at_xdmac_lld {
 255        dma_addr_t      mbr_nda;        /* Next Descriptor Member */
 256        u32             mbr_ubc;        /* Microblock Control Member */
 257        dma_addr_t      mbr_sa;         /* Source Address Member */
 258        dma_addr_t      mbr_da;         /* Destination Address Member */
 259        u32             mbr_cfg;        /* Configuration Register */
 260        u32             mbr_bc;         /* Block Control Register */
 261        u32             mbr_ds;         /* Data Stride Register */
 262        u32             mbr_sus;        /* Source Microblock Stride Register */
 263        u32             mbr_dus;        /* Destination Microblock Stride Register */
 264};
 265
 266/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 267struct at_xdmac_desc {
 268        struct at_xdmac_lld             lld;
 269        enum dma_transfer_direction     direction;
 270        struct dma_async_tx_descriptor  tx_dma_desc;
 271        struct list_head                desc_node;
 272        /* Following members are only used by the first descriptor */
 273        bool                            active_xfer;
 274        unsigned int                    xfer_size;
 275        struct list_head                descs_list;
 276        struct list_head                xfer_node;
 277} __aligned(sizeof(u64));
 278
 279static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
 280        .grs = 0x28,
 281        .gws = 0x2C,
 282        .grws = 0x30,
 283        .grwr = 0x34,
 284        .gswr = 0x38,
 285        .gsws = 0x3C,
 286        .gswf = 0x40,
 287        .chan_cc_reg_base = 0x50,
 288        .sdif = true,
 289        .axi_config = false,
 290};
 291
 292static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
 293        .grs = 0x30,
 294        .gws = 0x38,
 295        .grws = 0x40,
 296        .grwr = 0x44,
 297        .gswr = 0x48,
 298        .gsws = 0x4C,
 299        .gswf = 0x50,
 300        .chan_cc_reg_base = 0x60,
 301        .sdif = false,
 302        .axi_config = true,
 303};
 304
 305static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 306{
 307        return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
 308}
 309
 310#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
 311#define at_xdmac_write(atxdmac, reg, value) \
 312        writel_relaxed((value), (atxdmac)->regs + (reg))
 313
 314#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
 315#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
 316
 317static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
 318{
 319        return container_of(dchan, struct at_xdmac_chan, chan);
 320}
 321
 322static struct device *chan2dev(struct dma_chan *chan)
 323{
 324        return &chan->dev->device;
 325}
 326
 327static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
 328{
 329        return container_of(ddev, struct at_xdmac, dma);
 330}
 331
 332static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
 333{
 334        return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
 335}
 336
 337static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
 338{
 339        return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
 340}
 341
 342static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
 343{
 344        return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
 345}
 346
 347static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
 348{
 349        return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
 350}
 351
 352static inline u8 at_xdmac_get_dwidth(u32 cfg)
 353{
 354        return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
 355}
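
/*
 * Illustrative sketch (not part of the original file): the DWIDTH encoding is
 * the log2 of the access size, so the width in bytes is simply 1 << dwidth
 * (e.g. AT_XDMAC_CC_DWIDTH_WORD yields 4 bytes). The helper name is
 * hypothetical.
 */
static inline unsigned int at_xdmac_example_dwidth_to_bytes(u32 cfg)
{
        return 1U << at_xdmac_get_dwidth(cfg);
}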
 356
 357static unsigned int init_nr_desc_per_channel = 64;
 358module_param(init_nr_desc_per_channel, uint, 0644);
 359MODULE_PARM_DESC(init_nr_desc_per_channel,
 360                 "initial descriptors per channel (default: 64)");
 361
 362
 363static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
 364{
 365        return at_xdmac_read(to_at_xdmac(atchan->chan.device), AT_XDMAC_GS) & atchan->mask;
 366}
 367
 368static void at_xdmac_off(struct at_xdmac *atxdmac)
 369{
 370        at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
 371
 372        /* Wait until all channels are disabled. */
 373        while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
 374                cpu_relax();
 375
 376        at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
 377}
 378
 379/* Call with lock held. */
 380static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 381                                struct at_xdmac_desc *first)
 382{
 383        struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
 384        u32             reg;
 385
 386        dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
 387
 388        if (at_xdmac_chan_is_enabled(atchan))
 389                return;
 390
 391        /* Set transfer as active to not try to start it again. */
 392        first->active_xfer = true;
 393
 394        /* Tell xdmac where to get the first descriptor. */
 395        reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
 396        if (atxdmac->layout->sdif)
 397                reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
 398
 399        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
 400
 401        /*
 402         * When doing a non-cyclic transfer, we need to use the next
 403         * descriptor view 2 since some fields of the configuration register
 404         * depend on the transfer size and src/dest addresses.
 405         */
 406        if (at_xdmac_chan_is_cyclic(atchan))
 407                reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
 408        else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
 409                reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
 410        else
 411                reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
 412        /*
 413         * Even if the register will be updated from the configuration in the
 414         * descriptor when using view 2 or higher, the PROT bit won't be set
 415         * properly. This bit can be modified only by using the channel
 416         * configuration register.
 417         */
 418        at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
 419
 420        reg |= AT_XDMAC_CNDC_NDDUP
 421               | AT_XDMAC_CNDC_NDSUP
 422               | AT_XDMAC_CNDC_NDE;
 423        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
 424
 425        dev_vdbg(chan2dev(&atchan->chan),
 426                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
 427                 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
 428                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
 429                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
 430                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
 431                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
 432                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 433
 434        at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
 435        reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
 436        /*
 437         * Request Overflow Error is only for peripheral synchronized transfers
 438         */
 439        if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
 440                reg |= AT_XDMAC_CIE_ROIE;
 441
 442        /*
 443         * There is no end of list when doing cyclic DMA, so we need to get
 444         * an interrupt after each period.
 445         */
 446        if (at_xdmac_chan_is_cyclic(atchan))
 447                at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
 448                                    reg | AT_XDMAC_CIE_BIE);
 449        else
 450                at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
 451                                    reg | AT_XDMAC_CIE_LIE);
 452        at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
 453        dev_vdbg(chan2dev(&atchan->chan),
 454                 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
 455        wmb();
 456        at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
 457
 458        dev_vdbg(chan2dev(&atchan->chan),
 459                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
 460                 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
 461                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
 462                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
 463                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
 464                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
 465                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 466
 467}
 468
 469static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
 470{
 471        struct at_xdmac_desc    *desc = txd_to_at_desc(tx);
 472        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(tx->chan);
 473        dma_cookie_t            cookie;
 474        unsigned long           irqflags;
 475
 476        spin_lock_irqsave(&atchan->lock, irqflags);
 477        cookie = dma_cookie_assign(tx);
 478
 479        dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
 480                 __func__, atchan, desc);
 481        list_add_tail(&desc->xfer_node, &atchan->xfers_list);
 482        if (list_is_singular(&atchan->xfers_list))
 483                at_xdmac_start_xfer(atchan, desc);
 484
 485        spin_unlock_irqrestore(&atchan->lock, irqflags);
 486        return cookie;
 487}
 488
 489static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
 490                                                 gfp_t gfp_flags)
 491{
 492        struct at_xdmac_desc    *desc;
 493        struct at_xdmac         *atxdmac = to_at_xdmac(chan->device);
 494        dma_addr_t              phys;
 495
 496        desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
 497        if (desc) {
 498                INIT_LIST_HEAD(&desc->descs_list);
 499                dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
 500                desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
 501                desc->tx_dma_desc.phys = phys;
 502        }
 503
 504        return desc;
 505}
 506
 507static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
 508{
 509        memset(&desc->lld, 0, sizeof(desc->lld));
 510        INIT_LIST_HEAD(&desc->descs_list);
 511        desc->direction = DMA_TRANS_NONE;
 512        desc->xfer_size = 0;
 513        desc->active_xfer = false;
 514}
 515
 516/* Call must be protected by lock. */
 517static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
 518{
 519        struct at_xdmac_desc *desc;
 520
 521        if (list_empty(&atchan->free_descs_list)) {
 522                desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
 523        } else {
 524                desc = list_first_entry(&atchan->free_descs_list,
 525                                        struct at_xdmac_desc, desc_node);
 526                list_del(&desc->desc_node);
 527                at_xdmac_init_used_desc(desc);
 528        }
 529
 530        return desc;
 531}
 532
 533static void at_xdmac_queue_desc(struct dma_chan *chan,
 534                                struct at_xdmac_desc *prev,
 535                                struct at_xdmac_desc *desc)
 536{
 537        if (!prev || !desc)
 538                return;
 539
 540        prev->lld.mbr_nda = desc->tx_dma_desc.phys;
 541        prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
 542
 543        dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
 544                __func__, prev, &prev->lld.mbr_nda);
 545}
 546
 547static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
 548                                                  struct at_xdmac_desc *desc)
 549{
 550        if (!desc)
 551                return;
 552
 553        desc->lld.mbr_bc++;
 554
 555        dev_dbg(chan2dev(chan),
 556                "%s: incrementing the block count of the desc 0x%p\n",
 557                __func__, desc);
 558}
 559
 560static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
 561                                       struct of_dma *of_dma)
 562{
 563        struct at_xdmac         *atxdmac = of_dma->of_dma_data;
 564        struct at_xdmac_chan    *atchan;
 565        struct dma_chan         *chan;
 566        struct device           *dev = atxdmac->dma.dev;
 567
 568        if (dma_spec->args_count != 1) {
 569                dev_err(dev, "dma phandle args: bad number of args\n");
 570                return NULL;
 571        }
 572
 573        chan = dma_get_any_slave_channel(&atxdmac->dma);
 574        if (!chan) {
 575                dev_err(dev, "can't get a dma channel\n");
 576                return NULL;
 577        }
 578
 579        atchan = to_at_xdmac_chan(chan);
 580        atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
 581        atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
 582        atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
 583        dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
 584                 atchan->memif, atchan->perif, atchan->perid);
 585
 586        return chan;
 587}
 588
 589static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
 590                                      enum dma_transfer_direction direction)
 591{
 592        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
 593        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
 594        int                     csize, dwidth;
 595
 596        if (direction == DMA_DEV_TO_MEM) {
 597                atchan->cfg =
 598                        AT91_XDMAC_DT_PERID(atchan->perid)
 599                        | AT_XDMAC_CC_DAM_INCREMENTED_AM
 600                        | AT_XDMAC_CC_SAM_FIXED_AM
 601                        | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
 602                        | AT_XDMAC_CC_DSYNC_PER2MEM
 603                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
 604                        | AT_XDMAC_CC_TYPE_PER_TRAN;
 605                if (atxdmac->layout->sdif)
 606                        atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
 607                                       AT_XDMAC_CC_SIF(atchan->perif);
 608
 609                csize = ffs(atchan->sconfig.src_maxburst) - 1;
 610                if (csize < 0) {
 611                        dev_err(chan2dev(chan), "invalid src maxburst value\n");
 612                        return -EINVAL;
 613                }
 614                atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
 615                dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
 616                if (dwidth < 0) {
 617                        dev_err(chan2dev(chan), "invalid src addr width value\n");
 618                        return -EINVAL;
 619                }
 620                atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
 621        } else if (direction == DMA_MEM_TO_DEV) {
 622                atchan->cfg =
 623                        AT91_XDMAC_DT_PERID(atchan->perid)
 624                        | AT_XDMAC_CC_DAM_FIXED_AM
 625                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
 626                        | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
 627                        | AT_XDMAC_CC_DSYNC_MEM2PER
 628                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
 629                        | AT_XDMAC_CC_TYPE_PER_TRAN;
 630                if (atxdmac->layout->sdif)
 631                        atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
 632                                       AT_XDMAC_CC_SIF(atchan->memif);
 633
 634                csize = ffs(atchan->sconfig.dst_maxburst) - 1;
 635                if (csize < 0) {
 636                        dev_err(chan2dev(chan), "invalid dst maxburst value\n");
 637                        return -EINVAL;
 638                }
 639                atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
 640                dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
 641                if (dwidth < 0) {
 642                        dev_err(chan2dev(chan), "invalid dst addr width value\n");
 643                        return -EINVAL;
 644                }
 645                atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
 646        }
 647
 648        dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
 649
 650        return 0;
 651}
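
/*
 * Illustrative sketch (not part of the original file): the ffs()-based
 * encoding used above. A maxburst of 16 data items gives CSIZE
 * ffs(16) - 1 = 4, and a 4-byte address width gives DWIDTH ffs(4) - 1 = 2.
 * The helper name is hypothetical and, unlike the real code, it does no
 * error checking.
 */
static inline u32 at_xdmac_example_encode_burst(u32 maxburst, u32 addr_width)
{
        return AT_XDMAC_CC_CSIZE(ffs(maxburst) - 1) |
               AT_XDMAC_CC_DWIDTH(ffs(addr_width) - 1);
}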
 652
 653/*
 654 * Only check that the maxburst and addr width values are supported by
 655 * the controller, but not that the configuration is valid for the
 656 * transfer, since we don't know the direction at this stage.
 657 */
 658static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
 659{
 660        if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
 661            || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
 662                return -EINVAL;
 663
 664        if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
 665            || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
 666                return -EINVAL;
 667
 668        return 0;
 669}
 670
 671static int at_xdmac_set_slave_config(struct dma_chan *chan,
 672                                      struct dma_slave_config *sconfig)
 673{
 674        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
 675
 676        if (at_xdmac_check_slave_config(sconfig)) {
 677                dev_err(chan2dev(chan), "invalid slave configuration\n");
 678                return -EINVAL;
 679        }
 680
 681        memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
 682
 683        return 0;
 684}
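
/*
 * Illustrative sketch (not part of the original file): how a client driver
 * would typically reach the device_config callback above through the
 * dmaengine API. The helper name and the FIFO address parameter are
 * hypothetical.
 */
static int at_xdmac_example_client_config(struct dma_chan *chan,
                                          dma_addr_t per_fifo_addr)
{
        struct dma_slave_config cfg = {
                .dst_addr       = per_fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 16,
        };

        /* Ends up in at_xdmac_set_slave_config() via device_config. */
        return dmaengine_slave_config(chan, &cfg);
}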
 685
 686static struct dma_async_tx_descriptor *
 687at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 688                       unsigned int sg_len, enum dma_transfer_direction direction,
 689                       unsigned long flags, void *context)
 690{
 691        struct at_xdmac_chan            *atchan = to_at_xdmac_chan(chan);
 692        struct at_xdmac_desc            *first = NULL, *prev = NULL;
 693        struct scatterlist              *sg;
 694        int                             i;
 695        unsigned int                    xfer_size = 0;
 696        unsigned long                   irqflags;
 697        struct dma_async_tx_descriptor  *ret = NULL;
 698
 699        if (!sgl)
 700                return NULL;
 701
 702        if (!is_slave_direction(direction)) {
 703                dev_err(chan2dev(chan), "invalid DMA direction\n");
 704                return NULL;
 705        }
 706
 707        dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
 708                 __func__, sg_len,
 709                 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
 710                 flags);
 711
 712        /* Protect the sconfig field, which can be modified by set_slave_config. */
 713        spin_lock_irqsave(&atchan->lock, irqflags);
 714
 715        if (at_xdmac_compute_chan_conf(chan, direction))
 716                goto spin_unlock;
 717
 718        /* Prepare descriptors. */
 719        for_each_sg(sgl, sg, sg_len, i) {
 720                struct at_xdmac_desc    *desc = NULL;
 721                u32                     len, mem, dwidth, fixed_dwidth;
 722
 723                len = sg_dma_len(sg);
 724                mem = sg_dma_address(sg);
 725                if (unlikely(!len)) {
 726                        dev_err(chan2dev(chan), "sg data length is zero\n");
 727                        goto spin_unlock;
 728                }
 729                dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
 730                         __func__, i, len, mem);
 731
 732                desc = at_xdmac_get_desc(atchan);
 733                if (!desc) {
 734                        dev_err(chan2dev(chan), "can't get descriptor\n");
 735                        if (first)
 736                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
 737                        goto spin_unlock;
 738                }
 739
 740                /* Linked list descriptor setup. */
 741                if (direction == DMA_DEV_TO_MEM) {
 742                        desc->lld.mbr_sa = atchan->sconfig.src_addr;
 743                        desc->lld.mbr_da = mem;
 744                } else {
 745                        desc->lld.mbr_sa = mem;
 746                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
 747                }
 748                dwidth = at_xdmac_get_dwidth(atchan->cfg);
 749                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
 750                               ? dwidth
 751                               : AT_XDMAC_CC_DWIDTH_BYTE;
 752                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2                       /* next descriptor view */
 753                        | AT_XDMAC_MBR_UBC_NDEN                                 /* next descriptor dst parameter update */
 754                        | AT_XDMAC_MBR_UBC_NSEN                                 /* next descriptor src parameter update */
 755                        | (len >> fixed_dwidth);                                /* microblock length */
 756                desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
 757                                    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
 758                dev_dbg(chan2dev(chan),
 759                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
 760                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
 761
 762                /* Chain lld. */
 763                if (prev)
 764                        at_xdmac_queue_desc(chan, prev, desc);
 765
 766                prev = desc;
 767                if (!first)
 768                        first = desc;
 769
 770                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
 771                         __func__, desc, first);
 772                list_add_tail(&desc->desc_node, &first->descs_list);
 773                xfer_size += len;
 774        }
 775
 776
 777        first->tx_dma_desc.flags = flags;
 778        first->xfer_size = xfer_size;
 779        first->direction = direction;
 780        ret = &first->tx_dma_desc;
 781
 782spin_unlock:
 783        spin_unlock_irqrestore(&atchan->lock, irqflags);
 784        return ret;
 785}
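
/*
 * Illustrative sketch (not part of the original file): the client-side
 * sequence that exercises the prep_slave_sg callback above. The helper name
 * is hypothetical; the dmaengine calls are the standard ones.
 */
static int at_xdmac_example_submit_sg(struct dma_chan *chan,
                                      struct scatterlist *sgl,
                                      unsigned int nents)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
                                     DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        cookie = dmaengine_submit(tx);          /* calls at_xdmac_tx_submit() */
        if (dma_submit_error(cookie))
                return -EIO;

        dma_async_issue_pending(chan);          /* kick the transfer */
        return 0;
}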
 786
 787static struct dma_async_tx_descriptor *
 788at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
 789                         size_t buf_len, size_t period_len,
 790                         enum dma_transfer_direction direction,
 791                         unsigned long flags)
 792{
 793        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
 794        struct at_xdmac_desc    *first = NULL, *prev = NULL;
 795        unsigned int            periods = buf_len / period_len;
 796        int                     i;
 797        unsigned long           irqflags;
 798
 799        dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
 800                __func__, &buf_addr, buf_len, period_len,
 801                direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
 802
 803        if (!is_slave_direction(direction)) {
 804                dev_err(chan2dev(chan), "invalid DMA direction\n");
 805                return NULL;
 806        }
 807
 808        if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
 809                dev_err(chan2dev(chan), "channel currently used\n");
 810                return NULL;
 811        }
 812
 813        if (at_xdmac_compute_chan_conf(chan, direction))
 814                return NULL;
 815
 816        for (i = 0; i < periods; i++) {
 817                struct at_xdmac_desc    *desc = NULL;
 818
 819                spin_lock_irqsave(&atchan->lock, irqflags);
 820                desc = at_xdmac_get_desc(atchan);
 821                if (!desc) {
 822                        dev_err(chan2dev(chan), "can't get descriptor\n");
 823                        if (first)
 824                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
 825                        spin_unlock_irqrestore(&atchan->lock, irqflags);
 826                        return NULL;
 827                }
 828                spin_unlock_irqrestore(&atchan->lock, irqflags);
 829                dev_dbg(chan2dev(chan),
 830                        "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
 831                        __func__, desc, &desc->tx_dma_desc.phys);
 832
 833                if (direction == DMA_DEV_TO_MEM) {
 834                        desc->lld.mbr_sa = atchan->sconfig.src_addr;
 835                        desc->lld.mbr_da = buf_addr + i * period_len;
 836                } else {
 837                        desc->lld.mbr_sa = buf_addr + i * period_len;
 838                        desc->lld.mbr_da = atchan->sconfig.dst_addr;
 839                }
 840                desc->lld.mbr_cfg = atchan->cfg;
 841                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
 842                        | AT_XDMAC_MBR_UBC_NDEN
 843                        | AT_XDMAC_MBR_UBC_NSEN
 844                        | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
 845
 846                dev_dbg(chan2dev(chan),
 847                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
 848                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
 849
 850                /* Chain lld. */
 851                if (prev)
 852                        at_xdmac_queue_desc(chan, prev, desc);
 853
 854                prev = desc;
 855                if (!first)
 856                        first = desc;
 857
 858                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
 859                         __func__, desc, first);
 860                list_add_tail(&desc->desc_node, &first->descs_list);
 861        }
 862
 863        at_xdmac_queue_desc(chan, prev, first);
 864        first->tx_dma_desc.flags = flags;
 865        first->xfer_size = buf_len;
 866        first->direction = direction;
 867
 868        return &first->tx_dma_desc;
 869}
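
/*
 * Illustrative sketch (not part of the original file): a client setting up a
 * cyclic transfer (e.g. an audio ring buffer) that lands in the callback
 * above. The helper and callback names are hypothetical.
 */
static int at_xdmac_example_start_cyclic(struct dma_chan *chan,
                                         dma_addr_t buf, size_t buf_len,
                                         size_t period_len,
                                         void (*period_done)(void *),
                                         void *arg)
{
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
                                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        tx->callback = period_done;     /* invoked after each period (BIE) */
        tx->callback_param = arg;

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}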
 870
 871static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
 872{
 873        u32 width;
 874
 875        /*
 876         * Check the address alignment to select the greatest data width
 877         * we can use.
 878         *
 879         * Some XDMAC implementations don't provide dword transfers; in
 880         * that case selecting dword has the same behavior as selecting
 881         * word transfers.
 882         */
 883        if (!(addr & 7)) {
 884                width = AT_XDMAC_CC_DWIDTH_DWORD;
 885                dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
 886        } else if (!(addr & 3)) {
 887                width = AT_XDMAC_CC_DWIDTH_WORD;
 888                dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
 889        } else if (!(addr & 1)) {
 890                width = AT_XDMAC_CC_DWIDTH_HALFWORD;
 891                dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
 892        } else {
 893                width = AT_XDMAC_CC_DWIDTH_BYTE;
 894                dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
 895        }
 896
 897        return width;
 898}
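
/*
 * Illustrative sketch (not part of the original file): OR-ing the addresses
 * (and, where relevant, the length) before calling the helper above yields
 * the widest access that is legal for all of them, as done by the memcpy and
 * interleaved prep callbacks. The helper name is hypothetical.
 */
static inline u32 at_xdmac_example_common_width(struct dma_chan *chan,
                                                dma_addr_t src, dma_addr_t dst,
                                                size_t len)
{
        return at_xdmac_align_width(chan, src | dst | len);
}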
 899
 900static struct at_xdmac_desc *
 901at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
 902                                struct at_xdmac_chan *atchan,
 903                                struct at_xdmac_desc *prev,
 904                                dma_addr_t src, dma_addr_t dst,
 905                                struct dma_interleaved_template *xt,
 906                                struct data_chunk *chunk)
 907{
 908        struct at_xdmac_desc    *desc;
 909        u32                     dwidth;
 910        unsigned long           flags;
 911        size_t                  ublen;
 912        /*
 913         * WARNING: The channel configuration is set here since there is no
 914         * dmaengine_slave_config call in this case. Moreover, we don't know
 915         * the direction, which means we can't dynamically set the source and
 916         * dest interfaces, so we have to use the same one. Only interface 0
 917         * allows EBI access. Fortunately we can access DDR through both ports
 918         * (at least on SAMA5D4x), so using the same interface for source and
 919         * dest works around not knowing the direction.
 920         * ERRATA: Even if it is useless for memory transfers, the PERID must
 921         * not match the one of another channel, otherwise it could lead to
 922         * spurious flag status.
 923         * For SAMA7G5x case, the SIF and DIF fields are no longer used.
 924         * Thus, no need to have the SIF/DIF interfaces here.
 925         * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
 926         * zero.
 927         */
 928        u32                     chan_cc = AT_XDMAC_CC_PERID(0x7f)
 929                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
 930                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
 931
 932        dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
 933        if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
 934                dev_dbg(chan2dev(chan),
 935                        "%s: chunk too big (%zu, max size %lu)...\n",
 936                        __func__, chunk->size,
 937                        AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
 938                return NULL;
 939        }
 940
 941        if (prev)
 942                dev_dbg(chan2dev(chan),
 943                        "Adding items at the end of desc 0x%p\n", prev);
 944
 945        if (xt->src_inc) {
 946                if (xt->src_sgl)
 947                        chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
 948                else
 949                        chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
 950        }
 951
 952        if (xt->dst_inc) {
 953                if (xt->dst_sgl)
 954                        chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
 955                else
 956                        chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
 957        }
 958
 959        spin_lock_irqsave(&atchan->lock, flags);
 960        desc = at_xdmac_get_desc(atchan);
 961        spin_unlock_irqrestore(&atchan->lock, flags);
 962        if (!desc) {
 963                dev_err(chan2dev(chan), "can't get descriptor\n");
 964                return NULL;
 965        }
 966
 967        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
 968
 969        ublen = chunk->size >> dwidth;
 970
 971        desc->lld.mbr_sa = src;
 972        desc->lld.mbr_da = dst;
 973        desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
 974        desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
 975
 976        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
 977                | AT_XDMAC_MBR_UBC_NDEN
 978                | AT_XDMAC_MBR_UBC_NSEN
 979                | ublen;
 980        desc->lld.mbr_cfg = chan_cc;
 981
 982        dev_dbg(chan2dev(chan),
 983                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
 984                __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
 985                desc->lld.mbr_ubc, desc->lld.mbr_cfg);
 986
 987        /* Chain lld. */
 988        if (prev)
 989                at_xdmac_queue_desc(chan, prev, desc);
 990
 991        return desc;
 992}
 993
 994static struct dma_async_tx_descriptor *
 995at_xdmac_prep_interleaved(struct dma_chan *chan,
 996                          struct dma_interleaved_template *xt,
 997                          unsigned long flags)
 998{
 999        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1000        struct at_xdmac_desc    *prev = NULL, *first = NULL;
1001        dma_addr_t              dst_addr, src_addr;
1002        size_t                  src_skip = 0, dst_skip = 0, len = 0;
1003        struct data_chunk       *chunk;
1004        int                     i;
1005
1006        if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
1007                return NULL;
1008
1009        /*
1010         * TODO: Handle the case where we have to repeat a chain of
1011         * descriptors...
1012         */
1013        if ((xt->numf > 1) && (xt->frame_size > 1))
1014                return NULL;
1015
1016        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
1017                __func__, &xt->src_start, &xt->dst_start, xt->numf,
1018                xt->frame_size, flags);
1019
1020        src_addr = xt->src_start;
1021        dst_addr = xt->dst_start;
1022
1023        if (xt->numf > 1) {
1024                first = at_xdmac_interleaved_queue_desc(chan, atchan,
1025                                                        NULL,
1026                                                        src_addr, dst_addr,
1027                                                        xt, xt->sgl);
1028
1029                /* Length of the block is (BLEN+1) microblocks. */
1030                for (i = 0; i < xt->numf - 1; i++)
1031                        at_xdmac_increment_block_count(chan, first);
1032
1033                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1034                        __func__, first, first);
1035                list_add_tail(&first->desc_node, &first->descs_list);
1036        } else {
1037                for (i = 0; i < xt->frame_size; i++) {
1038                        size_t src_icg = 0, dst_icg = 0;
1039                        struct at_xdmac_desc *desc;
1040
1041                        chunk = xt->sgl + i;
1042
1043                        dst_icg = dmaengine_get_dst_icg(xt, chunk);
1044                        src_icg = dmaengine_get_src_icg(xt, chunk);
1045
1046                        src_skip = chunk->size + src_icg;
1047                        dst_skip = chunk->size + dst_icg;
1048
1049                        dev_dbg(chan2dev(chan),
1050                                "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
1051                                __func__, chunk->size, src_icg, dst_icg);
1052
1053                        desc = at_xdmac_interleaved_queue_desc(chan, atchan,
1054                                                               prev,
1055                                                               src_addr, dst_addr,
1056                                                               xt, chunk);
1057                        if (!desc) {
1058                                list_splice_init(&first->descs_list,
1059                                                 &atchan->free_descs_list);
1060                                return NULL;
1061                        }
1062
1063                        if (!first)
1064                                first = desc;
1065
1066                        dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1067                                __func__, desc, first);
1068                        list_add_tail(&desc->desc_node, &first->descs_list);
1069
1070                        if (xt->src_sgl)
1071                                src_addr += src_skip;
1072
1073                        if (xt->dst_sgl)
1074                                dst_addr += dst_skip;
1075
1076                        len += chunk->size;
1077                        prev = desc;
1078                }
1079        }
1080
1081        first->tx_dma_desc.cookie = -EBUSY;
1082        first->tx_dma_desc.flags = flags;
1083        first->xfer_size = len;
1084
1085        return &first->tx_dma_desc;
1086}
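
/*
 * Illustrative sketch (not part of the original file): a minimal mem-to-mem
 * interleaved request of the shape accepted above (one chunk repeated numf
 * times, no inter-chunk gaps). The helper name is hypothetical and kzalloc()
 * needs <linux/slab.h>.
 */
static struct dma_async_tx_descriptor *
at_xdmac_example_prep_interleaved(struct dma_chan *chan, dma_addr_t src,
                                  dma_addr_t dst, size_t chunk_size,
                                  size_t numf)
{
        struct dma_interleaved_template *xt;
        struct dma_async_tx_descriptor *tx;

        xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
        if (!xt)
                return NULL;

        xt->src_start = src;
        xt->dst_start = dst;
        xt->dir = DMA_MEM_TO_MEM;
        xt->src_inc = true;
        xt->dst_inc = true;
        xt->numf = numf;
        xt->frame_size = 1;
        xt->sgl[0].size = chunk_size;
        xt->sgl[0].icg = 0;

        tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
        kfree(xt);      /* the template is only needed during prep */
        return tx;
}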
1087
1088static struct dma_async_tx_descriptor *
1089at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1090                         size_t len, unsigned long flags)
1091{
1092        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1093        struct at_xdmac_desc    *first = NULL, *prev = NULL;
1094        size_t                  remaining_size = len, xfer_size = 0, ublen;
1095        dma_addr_t              src_addr = src, dst_addr = dest;
1096        u32                     dwidth;
1097        /*
1098         * WARNING: We don't know the direction, which means we can't
1099         * dynamically set the source and dest interfaces, so we have to use
1100         * the same one. Only interface 0 allows EBI access. Fortunately we
1101         * can access DDR through both ports (at least on SAMA5D4x), so using
1102         * the same interface for source and dest works around not knowing
1103         * the direction.
1104         * ERRATA: Even if it is useless for memory transfers, the PERID must
1105         * not match the one of another channel, otherwise it could lead to
1106         * spurious flag status.
1107         * For SAMA7G5x case, the SIF and DIF fields are no longer used.
1108         * Thus, no need to have the SIF/DIF interfaces here.
1109         * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1110         * zero.
1111         */
1112        u32                     chan_cc = AT_XDMAC_CC_PERID(0x7f)
1113                                        | AT_XDMAC_CC_DAM_INCREMENTED_AM
1114                                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
1115                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
1116                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
1117        unsigned long           irqflags;
1118
1119        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
1120                __func__, &src, &dest, len, flags);
1121
1122        if (unlikely(!len))
1123                return NULL;
1124
1125        dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
1126
1127        /* Prepare descriptors. */
1128        while (remaining_size) {
1129                struct at_xdmac_desc    *desc = NULL;
1130
1131                dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
1132
1133                spin_lock_irqsave(&atchan->lock, irqflags);
1134                desc = at_xdmac_get_desc(atchan);
1135                spin_unlock_irqrestore(&atchan->lock, irqflags);
1136                if (!desc) {
1137                        dev_err(chan2dev(chan), "can't get descriptor\n");
1138                        if (first)
1139                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
1140                        return NULL;
1141                }
1142
1143                /* Update src and dest addresses. */
1144                src_addr += xfer_size;
1145                dst_addr += xfer_size;
1146
1147                if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
1148                        xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
1149                else
1150                        xfer_size = remaining_size;
1151
1152                dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
1153
1154                /* Check remaining length and change data width if needed. */
1155                dwidth = at_xdmac_align_width(chan,
1156                                              src_addr | dst_addr | xfer_size);
1157                chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1158                chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1159
1160                ublen = xfer_size >> dwidth;
1161                remaining_size -= xfer_size;
1162
1163                desc->lld.mbr_sa = src_addr;
1164                desc->lld.mbr_da = dst_addr;
1165                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
1166                        | AT_XDMAC_MBR_UBC_NDEN
1167                        | AT_XDMAC_MBR_UBC_NSEN
1168                        | ublen;
1169                desc->lld.mbr_cfg = chan_cc;
1170
1171                dev_dbg(chan2dev(chan),
1172                         "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1173                         __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1174
1175                /* Chain lld. */
1176                if (prev)
1177                        at_xdmac_queue_desc(chan, prev, desc);
1178
1179                prev = desc;
1180                if (!first)
1181                        first = desc;
1182
1183                dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1184                         __func__, desc, first);
1185                list_add_tail(&desc->desc_node, &first->descs_list);
1186        }
1187
1188        first->tx_dma_desc.flags = flags;
1189        first->xfer_size = len;
1190
1191        return &first->tx_dma_desc;
1192}
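    /*
     * Illustrative only (not part of this driver): a minimal dmaengine client
     * sketch using the DMA_MEMCPY capability implemented above. The "dst",
     * "src" and "len" handles are hypothetical placeholders.
     *
     *      dma_cap_mask_t mask;
     *      struct dma_chan *chan;
     *      struct dma_async_tx_descriptor *tx;
     *      dma_cookie_t cookie;
     *
     *      dma_cap_zero(mask);
     *      dma_cap_set(DMA_MEMCPY, mask);
     *      chan = dma_request_channel(mask, NULL, NULL);
     *      if (!chan)
     *              return -ENODEV;
     *
     *      tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
     *                                     DMA_PREP_INTERRUPT);
     *      if (!tx) {
     *              dma_release_channel(chan);
     *              return -ENOMEM;
     *      }
     *
     *      cookie = dmaengine_submit(tx);
     *      dma_async_issue_pending(chan);
     *      dma_sync_wait(chan, cookie);
     *      dma_release_channel(chan);
     */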
1193
1194static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1195                                                         struct at_xdmac_chan *atchan,
1196                                                         dma_addr_t dst_addr,
1197                                                         size_t len,
1198                                                         int value)
1199{
1200        struct at_xdmac_desc    *desc;
1201        unsigned long           flags;
1202        size_t                  ublen;
1203        u32                     dwidth;
1204        /*
1205         * WARNING: The channel configuration is set here since there is no
1206         * dmaengine_slave_config call in this case. Moreover, we don't know
1207         * the direction, which means we can't dynamically set the source and
1208         * dest interfaces, so we have to use the same one. Only interface 0
1209         * allows EBI access. Fortunately, DDR can be accessed through both
1210         * ports (at least on SAMA5D4x), so using the same interface for
1211         * source and dest works around the unknown direction.
1212         * ERRATA: Even if unused for memory transfers, the PERID must not
1213         * match that of another channel; otherwise it could lead to spurious
1214         * flag status.
1215         * On SAMA7G5x, the SIF and DIF fields are no longer used, so there is
1216         * no need to set the SIF/DIF interfaces here.
1217         * On SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured as
1218         * zero.
1219         */
1220        u32                     chan_cc = AT_XDMAC_CC_PERID(0x7f)
1221                                        | AT_XDMAC_CC_DAM_UBS_AM
1222                                        | AT_XDMAC_CC_SAM_INCREMENTED_AM
1223                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
1224                                        | AT_XDMAC_CC_MEMSET_HW_MODE
1225                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
1226
1227        dwidth = at_xdmac_align_width(chan, dst_addr);
1228
1229        if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1230                dev_err(chan2dev(chan),
1231                        "%s: Transfer too large, aborting...\n",
1232                        __func__);
1233                return NULL;
1234        }
1235
1236        spin_lock_irqsave(&atchan->lock, flags);
1237        desc = at_xdmac_get_desc(atchan);
1238        spin_unlock_irqrestore(&atchan->lock, flags);
1239        if (!desc) {
1240                dev_err(chan2dev(chan), "can't get descriptor\n");
1241                return NULL;
1242        }
1243
1244        chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1245
1246        ublen = len >> dwidth;
1247
1248        desc->lld.mbr_da = dst_addr;
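            /*
             * In hardware memset mode, the controller takes the memory set
             * pattern from the Data Stride / Memory Set Pattern member
             * (mbr_ds) programmed just below.
             */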
1249        desc->lld.mbr_ds = value;
1250        desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1251                | AT_XDMAC_MBR_UBC_NDEN
1252                | AT_XDMAC_MBR_UBC_NSEN
1253                | ublen;
1254        desc->lld.mbr_cfg = chan_cc;
1255
1256        dev_dbg(chan2dev(chan),
1257                "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1258                __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1259                desc->lld.mbr_cfg);
1260
1261        return desc;
1262}
1263
1264static struct dma_async_tx_descriptor *
1265at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1266                         size_t len, unsigned long flags)
1267{
1268        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1269        struct at_xdmac_desc    *desc;
1270
1271        dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
1272                __func__, &dest, len, value, flags);
1273
1274        if (unlikely(!len))
1275                return NULL;
1276
1277        desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
            if (!desc)
                    return NULL;

1278        list_add_tail(&desc->desc_node, &desc->descs_list);
1279
1280        desc->tx_dma_desc.cookie = -EBUSY;
1281        desc->tx_dma_desc.flags = flags;
1282        desc->xfer_size = len;
1283
1284        return &desc->tx_dma_desc;
1285}
1286
1287static struct dma_async_tx_descriptor *
1288at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1289                            unsigned int sg_len, int value,
1290                            unsigned long flags)
1291{
1292        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1293        struct at_xdmac_desc    *desc, *pdesc = NULL,
1294                                *ppdesc = NULL, *first = NULL;
1295        struct scatterlist      *sg, *psg = NULL, *ppsg = NULL;
1296        size_t                  stride = 0, pstride = 0, len = 0;
1297        int                     i;
1298
1299        if (!sgl)
1300                return NULL;
1301
1302        dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1303                __func__, sg_len, value, flags);
1304
1305        /* Prepare descriptors. */
1306        for_each_sg(sgl, sg, sg_len, i) {
1307                dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1308                        __func__, &sg_dma_address(sg), sg_dma_len(sg),
1309                        value, flags);
1310                desc = at_xdmac_memset_create_desc(chan, atchan,
1311                                                   sg_dma_address(sg),
1312                                                   sg_dma_len(sg),
1313                                                   value);
1314                if (!desc && first)
1315                        list_splice_init(&first->descs_list,
1316                                         &atchan->free_descs_list);
                    if (!desc)
                            return NULL;
1317
1318                if (!first)
1319                        first = desc;
1320
1321                /* Update our strides */
1322                pstride = stride;
1323                if (psg)
1324                        stride = sg_dma_address(sg) -
1325                                (sg_dma_address(psg) + sg_dma_len(psg));
1326
1327                /*
1328                 * The scatterlist API gives us only the address and
1329                 * length of each element.
1330                 *
1331                 * Unfortunately, we don't have the stride, which we
1332                 * will need to compute.
1333                 *
1334                 * That makes us end up in a situation like this one:
1335                 *    len    stride    len    stride    len
1336                 * +-------+        +-------+        +-------+
1337                 * |  N-2  |        |  N-1  |        |   N   |
1338                 * +-------+        +-------+        +-------+
1339                 *
1340                 * We need all three of these elements (N-2, N-1 and N) to
1341                 * decide whether we need to queue N-1 or reuse N-2.
1343                 *
1344                 * We will only consider N if it is the last element.
1345                 */
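                    /*
                     * Worked example (illustrative): elements at 0x1000, 0x1800
                     * and 0x2000, each 0x400 bytes long, all yield a stride of
                     * 0x400. Equal strides and equal lengths let the N-1 and N
                     * descriptors be folded into N-2 by bumping its block count
                     * and destination stride instead of queueing new
                     * descriptors.
                     */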
1346                if (ppdesc && pdesc) {
1347                        if ((stride == pstride) &&
1348                            (sg_dma_len(ppsg) == sg_dma_len(psg))) {
1349                                dev_dbg(chan2dev(chan),
1350                                        "%s: desc 0x%p can be merged with desc 0x%p\n",
1351                                        __func__, pdesc, ppdesc);
1352
1353                                /*
1354                                 * Increment the block count of the
1355                                 * N-2 descriptor
1356                                 */
1357                                at_xdmac_increment_block_count(chan, ppdesc);
1358                                ppdesc->lld.mbr_dus = stride;
1359
1360                                /*
1361                                 * Put back the N-1 descriptor in the
1362                                 * free descriptor list
1363                                 */
1364                                list_add_tail(&pdesc->desc_node,
1365                                              &atchan->free_descs_list);
1366
1367                                /*
1368                                 * Make our N-1 descriptor pointer
1369                                 * point to the N-2 since they were
1370                                 * actually merged.
1371                                 */
1372                                pdesc = ppdesc;
1373
1374                        /*
1375                         * Rule out the case where we don't have
1376                         * pstride computed yet (our second sg
1377                         * element)
1378                         *
1379                         * We also want to catch the case where there
1380                         * would be a negative stride.
1381                         */
1382                        } else if (pstride ||
1383                                   sg_dma_address(sg) < sg_dma_address(psg)) {
1384                                /*
1385                                 * Queue the N-1 descriptor after the
1386                                 * N-2
1387                                 */
1388                                at_xdmac_queue_desc(chan, ppdesc, pdesc);
1389
1390                                /*
1391                                 * Add the N-1 descriptor to the list
1392                                 * of the descriptors used for this
1393                                 * transfer
1394                                 */
1395                                list_add_tail(&desc->desc_node,
1396                                              &first->descs_list);
1397                                dev_dbg(chan2dev(chan),
1398                                        "%s: add desc 0x%p to descs_list 0x%p\n",
1399                                        __func__, desc, first);
1400                        }
1401                }
1402
1403                /*
1404                 * If we are the last element, just see if we have the
1405                 * same size as the previous element.
1406                 *
1407                 * If so, we can merge it with the previous descriptor
1408                 * since we don't care about the stride anymore.
1409                 */
1410                if ((i == (sg_len - 1)) &&
1411                    sg_dma_len(psg) == sg_dma_len(sg)) {
1412                        dev_dbg(chan2dev(chan),
1413                                "%s: desc 0x%p can be merged with desc 0x%p\n",
1414                                __func__, desc, pdesc);
1415
1416                        /*
1417                         * Increment the block count of the N-1
1418                         * descriptor
1419                         */
1420                        at_xdmac_increment_block_count(chan, pdesc);
1421                        pdesc->lld.mbr_dus = stride;
1422
1423                        /*
1424                         * Put back the N descriptor in the free
1425                         * descriptor list
1426                         */
1427                        list_add_tail(&desc->desc_node,
1428                                      &atchan->free_descs_list);
1429                }
1430
1431                /* Update our descriptors */
1432                ppdesc = pdesc;
1433                pdesc = desc;
1434
1435                /* Update our scatter pointers */
1436                ppsg = psg;
1437                psg = sg;
1438
1439                len += sg_dma_len(sg);
1440        }
1441
1442        first->tx_dma_desc.cookie = -EBUSY;
1443        first->tx_dma_desc.flags = flags;
1444        first->xfer_size = len;
1445
1446        return &first->tx_dma_desc;
1447}
1448
1449static enum dma_status
1450at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1451                struct dma_tx_state *txstate)
1452{
1453        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1454        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1455        struct at_xdmac_desc    *desc, *_desc;
1456        struct list_head        *descs_list;
1457        enum dma_status         ret;
1458        int                     residue, retry;
1459        u32                     cur_nda, check_nda, cur_ubc, mask, value;
1460        u8                      dwidth = 0;
1461        unsigned long           flags;
1462        bool                    initd;
1463
1464        ret = dma_cookie_status(chan, cookie, txstate);
1465        if (ret == DMA_COMPLETE)
1466                return ret;
1467
1468        if (!txstate)
1469                return ret;
1470
1471        spin_lock_irqsave(&atchan->lock, flags);
1472
1473        desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1474
1475        /*
1476         * If the transfer has not been started yet, there is no need to
1477         * compute the residue: it is simply the transfer length.
1478         */
1479        if (!desc->active_xfer) {
1480                dma_set_residue(txstate, desc->xfer_size);
1481                goto spin_unlock;
1482        }
1483
1484        residue = desc->xfer_size;
1485        /*
1486         * Flush the FIFO: only relevant when the transfer is source
1487         * peripheral synchronized. The flush is needed before reading CUBC
1488         * because data still in the FIFO are not reported by CUBC. Reporting
1489         * a residue equal to the transfer length while data sit in the FIFO
1490         * can cause issues.
1491         * Use case: the Atmel USART has a timeout which fires when characters
1492         * have been received but no new character arrives for a while. On
1493         * timeout, it requests the residue. If the data are still in the DMA
1494         * FIFO, we would report a residue equal to the transfer length, i.e.
1495         * no data received. An application waiting for these data would then
1496         * hang, since no further USART timeout occurs without new data.
1497         */
1498        mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1499        value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
1500        if ((desc->lld.mbr_cfg & mask) == value) {
1501                at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1502                while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1503                        cpu_relax();
1504        }
1505
1506        /*
1507         * The easiest way to compute the residue would be to pause the DMA,
1508         * but doing so can lead to missing some data, as some devices don't
1509         * have a FIFO.
1510         * We need to read several registers because:
1511         * - the DMA is running, therefore a descriptor change is possible
1512         * while reading these registers;
1513         * - when a block transfer is done, the value of the CUBC register is
1514         * set back to its initial value until the fetch of the next
1515         * descriptor. This value would corrupt the residue calculation, so
1516         * we have to skip it.
1517         *
1518         * INITD --------                    ------------
1519         *              |____________________|
1520         *       _______________________  _______________
1521         * NDA       @desc2             \/   @desc3
1522         *       _______________________/\_______________
1523         *       __________  ___________  _______________
1524         * CUBC       0    \/ MAX desc1 \/  MAX desc2
1525         *       __________/\___________/\_______________
1526         *
1527         * Since descriptors are aligned on 64 bits, we can assume that
1528         * the update of NDA and CUBC is atomic.
1529         * Memory barriers are used to ensure the read order of the registers.
1530         * A maximum number of retries is set because, although unlikely, the loop could otherwise never end.
1531         */
1532        for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1533                check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1534                rmb();
1535                cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1536                rmb();
1537                initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1538                rmb();
1539                cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1540                rmb();
1541
1542                if ((check_nda == cur_nda) && initd)
1543                        break;
1544        }
1545
1546        if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1547                ret = DMA_ERROR;
1548                goto spin_unlock;
1549        }
1550
1551        /*
1552         * Flush FIFO: only relevant when the transfer is source peripheral
1553         * synchronized. Another flush is needed here because CUBC is updated
1554         * when the controller sends the data write command. This can lead to
1555         * reporting data that are not yet written to memory or to the device.
1556         * The FIFO flush ensures that the data are really written.
1557         */
1558        if ((desc->lld.mbr_cfg & mask) == value) {
1559                at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1560                while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1561                        cpu_relax();
1562        }
1563
1564        /*
1565         * Subtract the size of all microblocks already transferred, including
1566         * the current one, then add back the size remaining to be transferred
1567         * in the current microblock.
1568         */
1569        descs_list = &desc->descs_list;
1570        list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
1571                dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
1572                residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
1573                if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
1574                        break;
1575        }
1576        residue += cur_ubc << dwidth;
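            /*
             * Worked example (illustrative): three microblocks of 0x100 data
             * units each with dwidth = 2 (32-bit words), i.e. 0x400 bytes per
             * microblock and xfer_size = 0xc00. If the channel is executing
             * the second microblock with CUBC = 0x40, the loop subtracts
             * 0x400 + 0x400 and we add back 0x40 << 2 = 0x100, giving a
             * residue of 0x500 bytes: the whole third microblock plus what is
             * left of the second one.
             */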
1577
1578        dma_set_residue(txstate, residue);
1579
1580        dev_dbg(chan2dev(chan),
1581                 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1582                 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1583
1584spin_unlock:
1585        spin_unlock_irqrestore(&atchan->lock, flags);
1586        return ret;
1587}
1588
1589/* Call must be protected by lock. */
1590static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
1591                                    struct at_xdmac_desc *desc)
1592{
1593        dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1594
1595        /*
1596         * Remove the transfer from the transfer list then move the transfer
1597         * descriptors into the free descriptors list.
1598         */
1599        list_del(&desc->xfer_node);
1600        list_splice_init(&desc->descs_list, &atchan->free_descs_list);
1601}
1602
1603static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1604{
1605        struct at_xdmac_desc    *desc;
1606
1607        /*
1608         * If the channel is enabled, do nothing: advance_work will be
1609         * triggered again after the interrupt.
1610         */
1611        if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
1612                desc = list_first_entry(&atchan->xfers_list,
1613                                        struct at_xdmac_desc,
1614                                        xfer_node);
1615                dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1616                if (!desc->active_xfer)
1617                        at_xdmac_start_xfer(atchan, desc);
1618        }
1619}
1620
1621static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1622{
1623        struct at_xdmac_desc            *desc;
1624        struct dma_async_tx_descriptor  *txd;
1625
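            /*
             * Cyclic descriptors stay on xfers_list until the transfer is
             * terminated; here we only invoke the period callback.
             */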
1626        if (!list_empty(&atchan->xfers_list)) {
1627                desc = list_first_entry(&atchan->xfers_list,
1628                                        struct at_xdmac_desc, xfer_node);
1629                txd = &desc->tx_dma_desc;
1630
1631                if (txd->flags & DMA_PREP_INTERRUPT)
1632                        dmaengine_desc_get_callback_invoke(txd, NULL);
1633        }
1634}
1635
1636static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
1637{
1638        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1639        struct at_xdmac_desc    *bad_desc;
1640
1641        /*
1642         * The descriptor currently at the head of the active list is
1643         * broken. Since we don't have any way to report errors, we'll
1644         * just have to scream loudly and try to continue with other
1645         * descriptors queued (if any).
1646         */
1647        if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1648                dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1649        if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1650                dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1651        if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1652                dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1653
1654        spin_lock_irq(&atchan->lock);
1655
1656        /* Channel must be disabled first as it's not done automatically */
1657        at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1658        while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1659                cpu_relax();
1660
1661        bad_desc = list_first_entry(&atchan->xfers_list,
1662                                    struct at_xdmac_desc,
1663                                    xfer_node);
1664
1665        spin_unlock_irq(&atchan->lock);
1666
1667        /* Print bad descriptor's details if needed */
1668        dev_dbg(chan2dev(&atchan->chan),
1669                "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
1670                __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
1671                bad_desc->lld.mbr_ubc);
1672
1673        /* Then continue with usual descriptor management */
1674}
1675
1676static void at_xdmac_tasklet(struct tasklet_struct *t)
1677{
1678        struct at_xdmac_chan    *atchan = from_tasklet(atchan, t, tasklet);
1679        struct at_xdmac_desc    *desc;
1680        u32                     error_mask;
1681
1682        dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1683                __func__, atchan->irq_status);
1684
1685        error_mask = AT_XDMAC_CIS_RBEIS
1686                     | AT_XDMAC_CIS_WBEIS
1687                     | AT_XDMAC_CIS_ROIS;
1688
1689        if (at_xdmac_chan_is_cyclic(atchan)) {
1690                at_xdmac_handle_cyclic(atchan);
1691        } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1692                   || (atchan->irq_status & error_mask)) {
1693                struct dma_async_tx_descriptor  *txd;
1694
1695                if (atchan->irq_status & error_mask)
1696                        at_xdmac_handle_error(atchan);
1697
1698                spin_lock_irq(&atchan->lock);
1699                desc = list_first_entry(&atchan->xfers_list,
1700                                        struct at_xdmac_desc,
1701                                        xfer_node);
1702                dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1703                if (!desc->active_xfer) {
1704                        dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
1705                        spin_unlock_irq(&atchan->lock);
1706                        return;
1707                }
1708
1709                txd = &desc->tx_dma_desc;
1710
1711                at_xdmac_remove_xfer(atchan, desc);
1712                spin_unlock_irq(&atchan->lock);
1713
1714                dma_cookie_complete(txd);
1715                if (txd->flags & DMA_PREP_INTERRUPT)
1716                        dmaengine_desc_get_callback_invoke(txd, NULL);
1717
1718                dma_run_dependencies(txd);
1719
1720                spin_lock_irq(&atchan->lock);
1721                at_xdmac_advance_work(atchan);
1722                spin_unlock_irq(&atchan->lock);
1723        }
1724}
1725
1726static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1727{
1728        struct at_xdmac         *atxdmac = (struct at_xdmac *)dev_id;
1729        struct at_xdmac_chan    *atchan;
1730        u32                     imr, status, pending;
1731        u32                     chan_imr, chan_status;
1732        int                     i, ret = IRQ_NONE;
1733
1734        do {
1735                imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1736                status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1737                pending = status & imr;
1738
1739                dev_vdbg(atxdmac->dma.dev,
1740                         "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1741                         __func__, status, imr, pending);
1742
1743                if (!pending)
1744                        break;
1745
1746                /* We have to find which channel has generated the interrupt. */
1747                for (i = 0; i < atxdmac->dma.chancnt; i++) {
1748                        if (!((1 << i) & pending))
1749                                continue;
1750
1751                        atchan = &atxdmac->chan[i];
1752                        chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1753                        chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1754                        atchan->irq_status = chan_status & chan_imr;
1755                        dev_vdbg(atxdmac->dma.dev,
1756                                 "%s: chan%d: imr=0x%x, status=0x%x\n",
1757                                 __func__, i, chan_imr, chan_status);
1758                        dev_vdbg(chan2dev(&atchan->chan),
1759                                 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1760                                 __func__,
1761                                 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1762                                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1763                                 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1764                                 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1765                                 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1766                                 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1767
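                            /*
                             * On a read or write bus error, disable the channel
                             * right away; the tasklet scheduled below will
                             * report the error and complete the broken
                             * descriptor.
                             */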
1768                        if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1769                                at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1770
1771                        tasklet_schedule(&atchan->tasklet);
1772                        ret = IRQ_HANDLED;
1773                }
1774
1775        } while (pending);
1776
1777        return ret;
1778}
1779
1780static void at_xdmac_issue_pending(struct dma_chan *chan)
1781{
1782        struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1783        unsigned long flags;
1784
1785        dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1786
1787        if (!at_xdmac_chan_is_cyclic(atchan)) {
1788                spin_lock_irqsave(&atchan->lock, flags);
1789                at_xdmac_advance_work(atchan);
1790                spin_unlock_irqrestore(&atchan->lock, flags);
1791        }
1792
1793        return;
1794}
1795
1796static int at_xdmac_device_config(struct dma_chan *chan,
1797                                  struct dma_slave_config *config)
1798{
1799        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1800        int ret;
1801        unsigned long           flags;
1802
1803        dev_dbg(chan2dev(chan), "%s\n", __func__);
1804
1805        spin_lock_irqsave(&atchan->lock, flags);
1806        ret = at_xdmac_set_slave_config(chan, config);
1807        spin_unlock_irqrestore(&atchan->lock, flags);
1808
1809        return ret;
1810}
1811
1812static int at_xdmac_device_pause(struct dma_chan *chan)
1813{
1814        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1815        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1816        unsigned long           flags;
1817
1818        dev_dbg(chan2dev(chan), "%s\n", __func__);
1819
1820        if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1821                return 0;
1822
1823        spin_lock_irqsave(&atchan->lock, flags);
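            /*
             * Suspend the channel's read and write requests (GRWS) and wait
             * for any in-progress read/write access (RDIP/WRIP) to finish.
             */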
1824        at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
1825        while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1826               & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1827                cpu_relax();
1828        spin_unlock_irqrestore(&atchan->lock, flags);
1829
1830        return 0;
1831}
1832
1833static int at_xdmac_device_resume(struct dma_chan *chan)
1834{
1835        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1836        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1837        unsigned long           flags;
1838
1839        dev_dbg(chan2dev(chan), "%s\n", __func__);
1840
1841        spin_lock_irqsave(&atchan->lock, flags);
1842        if (!at_xdmac_chan_is_paused(atchan)) {
1843                spin_unlock_irqrestore(&atchan->lock, flags);
1844                return 0;
1845        }
1846
1847        at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1848        clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1849        spin_unlock_irqrestore(&atchan->lock, flags);
1850
1851        return 0;
1852}
1853
1854static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1855{
1856        struct at_xdmac_desc    *desc, *_desc;
1857        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1858        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
1859        unsigned long           flags;
1860
1861        dev_dbg(chan2dev(chan), "%s\n", __func__);
1862
1863        spin_lock_irqsave(&atchan->lock, flags);
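            /* Disable the channel and busy-wait until it has actually stopped. */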
1864        at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1865        while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1866                cpu_relax();
1867
1868        /* Cancel all pending transfers. */
1869        list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1870                at_xdmac_remove_xfer(atchan, desc);
1871
1872        clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1873        clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1874        spin_unlock_irqrestore(&atchan->lock, flags);
1875
1876        return 0;
1877}
1878
1879static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1880{
1881        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1882        struct at_xdmac_desc    *desc;
1883        int                     i;
1884
1885        if (at_xdmac_chan_is_enabled(atchan)) {
1886                dev_err(chan2dev(chan),
1887                        "can't allocate channel resources (channel enabled)\n");
1888                return -EIO;
1889        }
1890
1891        if (!list_empty(&atchan->free_descs_list)) {
1892                dev_err(chan2dev(chan),
1893                        "can't allocate channel resources (channel not free from a previous use)\n");
1894                return -EIO;
1895        }
1896
1897        for (i = 0; i < init_nr_desc_per_channel; i++) {
1898                desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
1899                if (!desc) {
1900                        dev_warn(chan2dev(chan),
1901                                "only %d descriptors have been allocated\n", i);
1902                        break;
1903                }
1904                list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1905        }
1906
1907        dma_cookie_init(chan);
1908
1909        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1910
1911        return i;
1912}
1913
1914static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1915{
1916        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1917        struct at_xdmac         *atxdmac = to_at_xdmac(chan->device);
1918        struct at_xdmac_desc    *desc, *_desc;
1919
1920        list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1921                dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1922                list_del(&desc->desc_node);
1923                dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1924        }
1925
1926        return;
1927}
1928
1929#ifdef CONFIG_PM
1930static int atmel_xdmac_prepare(struct device *dev)
1931{
1932        struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
1933        struct dma_chan         *chan, *_chan;
1934
1935        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1936                struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1937
1938                /* Don't suspend while a transfer is still running, except in the cyclic case. */
1939                if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1940                        return -EAGAIN;
1941        }
1942        return 0;
1943}
1944#else
1945#       define atmel_xdmac_prepare NULL
1946#endif
1947
1948#ifdef CONFIG_PM_SLEEP
1949static int atmel_xdmac_suspend(struct device *dev)
1950{
1951        struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
1952        struct dma_chan         *chan, *_chan;
1953
1954        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1955                struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
1956
1957                atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
1958                if (at_xdmac_chan_is_cyclic(atchan)) {
1959                        if (!at_xdmac_chan_is_paused(atchan))
1960                                at_xdmac_device_pause(chan);
1961                        atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1962                        atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1963                        atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1964                }
1965        }
1966        atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1967
1968        at_xdmac_off(atxdmac);
1969        clk_disable_unprepare(atxdmac->clk);
1970        return 0;
1971}
1972
1973static int atmel_xdmac_resume(struct device *dev)
1974{
1975        struct at_xdmac         *atxdmac = dev_get_drvdata(dev);
1976        struct at_xdmac_chan    *atchan;
1977        struct dma_chan         *chan, *_chan;
1978        int                     i;
1979        int ret;
1980
1981        ret = clk_prepare_enable(atxdmac->clk);
1982        if (ret)
1983                return ret;
1984
1985        /* Clear pending interrupts. */
1986        for (i = 0; i < atxdmac->dma.chancnt; i++) {
1987                atchan = &atxdmac->chan[i];
1988                while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1989                        cpu_relax();
1990        }
1991
1992        at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
1993        list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1994                atchan = to_at_xdmac_chan(chan);
1995                at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
1996                if (at_xdmac_chan_is_cyclic(atchan)) {
1997                        if (at_xdmac_chan_is_paused(atchan))
1998                                at_xdmac_device_resume(chan);
1999                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2000                        at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2001                        at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
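                            /*
                             * Make sure the restored channel registers are
                             * written before the channel is re-enabled.
                             */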
2002                        wmb();
2003                        at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
2004                }
2005        }
2006        return 0;
2007}
2008#endif /* CONFIG_PM_SLEEP */
2009
2010static void at_xdmac_axi_config(struct platform_device *pdev)
2011{
2012        struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2013        bool dev_m2m = false;
2014        u32 dma_requests;
2015
2016        if (!atxdmac->layout->axi_config)
2017                return; /* Not supported */
2018
2019        if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
2020                                  &dma_requests)) {
2021                dev_info(&pdev->dev, "controller in mem2mem mode.\n");
2022                dev_m2m = true;
2023        }
2024
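            /*
             * Tune the AXI read/write priorities (GCFG) and the weighted
             * arbiter (GWAC) for either memory-to-memory or peripheral
             * transfers.
             */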
2025        if (dev_m2m) {
2026                at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
2027                at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
2028        } else {
2029                at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
2030                at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
2031        }
2032}
2033
2034static int at_xdmac_probe(struct platform_device *pdev)
2035{
2036        struct at_xdmac *atxdmac;
2037        int             irq, size, nr_channels, i, ret;
2038        void __iomem    *base;
2039        u32             reg;
2040
2041        irq = platform_get_irq(pdev, 0);
2042        if (irq < 0)
2043                return irq;
2044
2045        base = devm_platform_ioremap_resource(pdev, 0);
2046        if (IS_ERR(base))
2047                return PTR_ERR(base);
2048
2049        /*
2050         * Read the number of xdmac channels. The read helper function can't
2051         * be used since atxdmac is not yet allocated and we need to know the
2052         * number of channels to do the allocation.
2053         */
2054        reg = readl_relaxed(base + AT_XDMAC_GTYPE);
2055        nr_channels = AT_XDMAC_NB_CH(reg);
2056        if (nr_channels > AT_XDMAC_MAX_CHAN) {
2057                dev_err(&pdev->dev, "invalid number of channels (%u)\n",
2058                        nr_channels);
2059                return -EINVAL;
2060        }
2061
2062        size = sizeof(*atxdmac);
2063        size += nr_channels * sizeof(struct at_xdmac_chan);
2064        atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2065        if (!atxdmac) {
2066                dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
2067                return -ENOMEM;
2068        }
2069
2070        atxdmac->regs = base;
2071        atxdmac->irq = irq;
2072
2073        atxdmac->layout = of_device_get_match_data(&pdev->dev);
2074        if (!atxdmac->layout)
2075                return -ENODEV;
2076
2077        atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
2078        if (IS_ERR(atxdmac->clk)) {
2079                dev_err(&pdev->dev, "can't get dma_clk\n");
2080                return PTR_ERR(atxdmac->clk);
2081        }
2082
2083        /* Do not use dev res to prevent races with tasklet */
2084        ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
2085        if (ret) {
2086                dev_err(&pdev->dev, "can't request irq\n");
2087                return ret;
2088        }
2089
2090        ret = clk_prepare_enable(atxdmac->clk);
2091        if (ret) {
2092                dev_err(&pdev->dev, "can't prepare or enable clock\n");
2093                goto err_free_irq;
2094        }
2095
2096        atxdmac->at_xdmac_desc_pool =
2097                dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
2098                                sizeof(struct at_xdmac_desc), 4, 0);
2099        if (!atxdmac->at_xdmac_desc_pool) {
2100                dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
2101                ret = -ENOMEM;
2102                goto err_clk_disable;
2103        }
2104
2105        dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
2106        dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
2107        dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
2108        dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
2109        dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
2110        dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
2111        /*
2112         * Without DMA_PRIVATE the driver is not able to allocate more than
2113         * one channel, second allocation fails in private_candidate.
2114         */
2115        dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
2116        atxdmac->dma.dev                                = &pdev->dev;
2117        atxdmac->dma.device_alloc_chan_resources        = at_xdmac_alloc_chan_resources;
2118        atxdmac->dma.device_free_chan_resources         = at_xdmac_free_chan_resources;
2119        atxdmac->dma.device_tx_status                   = at_xdmac_tx_status;
2120        atxdmac->dma.device_issue_pending               = at_xdmac_issue_pending;
2121        atxdmac->dma.device_prep_dma_cyclic             = at_xdmac_prep_dma_cyclic;
2122        atxdmac->dma.device_prep_interleaved_dma        = at_xdmac_prep_interleaved;
2123        atxdmac->dma.device_prep_dma_memcpy             = at_xdmac_prep_dma_memcpy;
2124        atxdmac->dma.device_prep_dma_memset             = at_xdmac_prep_dma_memset;
2125        atxdmac->dma.device_prep_dma_memset_sg          = at_xdmac_prep_dma_memset_sg;
2126        atxdmac->dma.device_prep_slave_sg               = at_xdmac_prep_slave_sg;
2127        atxdmac->dma.device_config                      = at_xdmac_device_config;
2128        atxdmac->dma.device_pause                       = at_xdmac_device_pause;
2129        atxdmac->dma.device_resume                      = at_xdmac_device_resume;
2130        atxdmac->dma.device_terminate_all               = at_xdmac_device_terminate_all;
2131        atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2132        atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2133        atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2134        atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2135
2136        /* Disable all chans and interrupts. */
2137        at_xdmac_off(atxdmac);
2138
2139        /* Init channels. */
2140        INIT_LIST_HEAD(&atxdmac->dma.channels);
2141        for (i = 0; i < nr_channels; i++) {
2142                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2143
2144                atchan->chan.device = &atxdmac->dma;
2145                list_add_tail(&atchan->chan.device_node,
2146                              &atxdmac->dma.channels);
2147
2148                atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2149                atchan->mask = 1 << i;
2150
2151                spin_lock_init(&atchan->lock);
2152                INIT_LIST_HEAD(&atchan->xfers_list);
2153                INIT_LIST_HEAD(&atchan->free_descs_list);
2154                tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
2155
2156                /* Clear pending interrupts. */
2157                while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2158                        cpu_relax();
2159        }
2160        platform_set_drvdata(pdev, atxdmac);
2161
2162        ret = dma_async_device_register(&atxdmac->dma);
2163        if (ret) {
2164                dev_err(&pdev->dev, "fail to register DMA engine device\n");
2165                goto err_clk_disable;
2166        }
2167
2168        ret = of_dma_controller_register(pdev->dev.of_node,
2169                                         at_xdmac_xlate, atxdmac);
2170        if (ret) {
2171                dev_err(&pdev->dev, "could not register of dma controller\n");
2172                goto err_dma_unregister;
2173        }
2174
2175        dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2176                 nr_channels, atxdmac->regs);
2177
2178        at_xdmac_axi_config(pdev);
2179
2180        return 0;
2181
2182err_dma_unregister:
2183        dma_async_device_unregister(&atxdmac->dma);
2184err_clk_disable:
2185        clk_disable_unprepare(atxdmac->clk);
2186err_free_irq:
2187        free_irq(atxdmac->irq, atxdmac);
2188        return ret;
2189}
2190
2191static int at_xdmac_remove(struct platform_device *pdev)
2192{
2193        struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2194        int             i;
2195
2196        at_xdmac_off(atxdmac);
2197        of_dma_controller_free(pdev->dev.of_node);
2198        dma_async_device_unregister(&atxdmac->dma);
2199        clk_disable_unprepare(atxdmac->clk);
2200
2201        free_irq(atxdmac->irq, atxdmac);
2202
2203        for (i = 0; i < atxdmac->dma.chancnt; i++) {
2204                struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2205
2206                tasklet_kill(&atchan->tasklet);
2207                at_xdmac_free_chan_resources(&atchan->chan);
2208        }
2209
2210        return 0;
2211}
2212
2213static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
2214        .prepare        = atmel_xdmac_prepare,
2215        SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2216};
2217
2218static const struct of_device_id atmel_xdmac_dt_ids[] = {
2219        {
2220                .compatible = "atmel,sama5d4-dma",
2221                .data = &at_xdmac_sama5d4_layout,
2222        }, {
2223                .compatible = "microchip,sama7g5-dma",
2224                .data = &at_xdmac_sama7g5_layout,
2225        }, {
2226                /* sentinel */
2227        }
2228};
2229MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
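    /*
     * Illustrative device tree usage (node names, addresses and the PERID
     * values are hypothetical); the macros come from dt-bindings/dma/at91.h:
     *
     *      dma0: dma-controller@f0008000 {
     *              compatible = "atmel,sama5d4-dma";
     *              #dma-cells = <1>;
     *      };
     *
     *      uart0: serial@f001c000 {
     *              dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) |
     *                             AT91_XDMAC_DT_PER_IF(1) |
     *                             AT91_XDMAC_DT_PERID(35))>,
     *                     <&dma0 (AT91_XDMAC_DT_MEM_IF(0) |
     *                             AT91_XDMAC_DT_PER_IF(1) |
     *                             AT91_XDMAC_DT_PERID(36))>;
     *              dma-names = "tx", "rx";
     *      };
     */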
2230
2231static struct platform_driver at_xdmac_driver = {
2232        .probe          = at_xdmac_probe,
2233        .remove         = at_xdmac_remove,
2234        .driver = {
2235                .name           = "at_xdmac",
2236                .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2237                .pm             = &atmel_xdmac_dev_pm_ops,
2238        }
2239};
2240
2241static int __init at_xdmac_init(void)
2242{
2243        return platform_driver_register(&at_xdmac_driver);
2244}
2245subsys_initcall(at_xdmac_init);
2246
2247static void __exit at_xdmac_exit(void)
2248{
2249        platform_driver_unregister(&at_xdmac_driver);
2250}
2251module_exit(at_xdmac_exit);
2252
2253MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
2254MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
2255MODULE_LICENSE("GPL");
2256