linux/drivers/dma/dma-jz4780.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC         0x00
#define JZ_DMA_REG_DIRQP        0x04
#define JZ_DMA_REG_DDR          0x08
#define JZ_DMA_REG_DDRS         0x0c
#define JZ_DMA_REG_DCKE         0x10
#define JZ_DMA_REG_DCKES        0x14
#define JZ_DMA_REG_DCKEC        0x18
#define JZ_DMA_REG_DMACP        0x1c
#define JZ_DMA_REG_DSIRQP       0x20
#define JZ_DMA_REG_DSIRQM       0x24
#define JZ_DMA_REG_DCIRQP       0x28
#define JZ_DMA_REG_DCIRQM       0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)      ((n) * 0x20)
#define JZ_DMA_REG_DSA          0x00
#define JZ_DMA_REG_DTA          0x04
#define JZ_DMA_REG_DTC          0x08
#define JZ_DMA_REG_DRT          0x0c
#define JZ_DMA_REG_DCS          0x10
#define JZ_DMA_REG_DCM          0x14
#define JZ_DMA_REG_DDA          0x18
#define JZ_DMA_REG_DSD          0x1c

#define JZ_DMA_DMAC_DMAE        BIT(0)
#define JZ_DMA_DMAC_AR          BIT(2)
#define JZ_DMA_DMAC_HLT         BIT(3)
#define JZ_DMA_DMAC_FAIC        BIT(27)
#define JZ_DMA_DMAC_FMSC        BIT(31)

#define JZ_DMA_DRT_AUTO         0x8

#define JZ_DMA_DCS_CTE          BIT(0)
#define JZ_DMA_DCS_HLT          BIT(2)
#define JZ_DMA_DCS_TT           BIT(3)
#define JZ_DMA_DCS_AR           BIT(4)
#define JZ_DMA_DCS_DES8         BIT(30)

#define JZ_DMA_DCM_LINK         BIT(0)
#define JZ_DMA_DCM_TIE          BIT(1)
#define JZ_DMA_DCM_STDE         BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT    8
#define JZ_DMA_DCM_TSZ_MASK     (0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT     12
#define JZ_DMA_DCM_SP_SHIFT     14
#define JZ_DMA_DCM_DAI          BIT(22)
#define JZ_DMA_DCM_SAI          BIT(23)

#define JZ_DMA_SIZE_4_BYTE      0x0
#define JZ_DMA_SIZE_1_BYTE      0x1
#define JZ_DMA_SIZE_2_BYTE      0x2
#define JZ_DMA_SIZE_16_BYTE     0x3
#define JZ_DMA_SIZE_32_BYTE     0x4
#define JZ_DMA_SIZE_64_BYTE     0x5
#define JZ_DMA_SIZE_128_BYTE    0x6

#define JZ_DMA_WIDTH_32_BIT     0x0
#define JZ_DMA_WIDTH_8_BIT      0x1
#define JZ_DMA_WIDTH_16_BIT     0x2

#define JZ_DMA_BUSWIDTHS        (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)  | \
                                 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
                                 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET  0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT     BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA    BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM         BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC      BIT(3)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
        uint32_t dcm;
        uint32_t dsa;
        uint32_t dta;
        uint32_t dtc;
};
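
/*
 * Illustrative example (not part of the driver logic): with a 32-byte
 * transfer size programmed in DCM (transfer_shift = 5), a 64 KiB block is
 * described by 65536 >> 5 = 2048 in the low 24 bits of @dtc. If the next
 * descriptor sits 16 bytes past the descriptor base, the upper 8 bits hold
 * 16 >> 4 = 1, giving dtc = (1 << 24) | 2048.
 */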

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE  PAGE_SIZE
#define JZ_DMA_MAX_DESC         \
        (JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))
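
/*
 * For example, with 4 KiB pages each descriptor block holds
 * 4096 / sizeof(struct jz4780_dma_hwdesc) = 4096 / 16 = 256 descriptors.
 */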

struct jz4780_dma_desc {
        struct virt_dma_desc vdesc;

        struct jz4780_dma_hwdesc *desc;
        dma_addr_t desc_phys;
        unsigned int count;
        enum dma_transaction_type type;
        uint32_t status;
};

struct jz4780_dma_chan {
        struct virt_dma_chan vchan;
        unsigned int id;
        struct dma_pool *desc_pool;

        uint32_t transfer_type;
        uint32_t transfer_shift;
        struct dma_slave_config config;

        struct jz4780_dma_desc *desc;
        unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
        unsigned int nb_channels;
        unsigned int transfer_ord_max;
        unsigned long flags;
};

struct jz4780_dma_dev {
        struct dma_device dma_device;
        void __iomem *chn_base;
        void __iomem *ctrl_base;
        struct clk *clk;
        unsigned int irq;
        const struct jz4780_dma_soc_data *soc_data;

        uint32_t chan_reserved;
        struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
        uint32_t transfer_type;
        int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
        struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
        struct jz4780_dma_chan *jzchan)
{
        return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
                            dma_device);
}

static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
        unsigned int chn, unsigned int reg)
{
        return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
        unsigned int chn, unsigned int reg, uint32_t val)
{
        writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
        unsigned int reg)
{
        return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
        unsigned int reg, uint32_t val)
{
        writel(val, jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
        unsigned int chn)
{
        if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
                unsigned int reg;

                if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
                        reg = JZ_DMA_REG_DCKE;
                else
                        reg = JZ_DMA_REG_DCKES;

                jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
        }
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
        unsigned int chn)
{
        if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
                        !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
                jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
        struct jz4780_dma_chan *jzchan, unsigned int count,
        enum dma_transaction_type type)
{
        struct jz4780_dma_desc *desc;

        if (count > JZ_DMA_MAX_DESC)
                return NULL;

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
                                    &desc->desc_phys);
        if (!desc->desc) {
                kfree(desc);
                return NULL;
        }

        desc->count = count;
        desc->type = type;
        return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
        struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

        dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
        kfree(desc);
}

static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
        unsigned long val, uint32_t *shift)
{
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        int ord = ffs(val) - 1;

        /*
         * 8-byte transfer sizes are unsupported, so fall back to 4. If the
         * requested size is larger than the maximum, just limit it. Falling
         * back in this way is perfectly safe, since we never exceed the
         * maximum burst size supported by the device; the only effect is
         * reduced efficiency, which is better than refusing to perform the
         * request at all.
         */
        if (ord == 3)
                ord = 2;
        else if (ord > jzdma->soc_data->transfer_ord_max)
                ord = jzdma->soc_data->transfer_ord_max;

        *shift = ord;

        switch (ord) {
        case 0:
                return JZ_DMA_SIZE_1_BYTE;
        case 1:
                return JZ_DMA_SIZE_2_BYTE;
        case 2:
                return JZ_DMA_SIZE_4_BYTE;
        case 4:
                return JZ_DMA_SIZE_16_BYTE;
        case 5:
                return JZ_DMA_SIZE_32_BYTE;
        case 6:
                return JZ_DMA_SIZE_64_BYTE;
        default:
                return JZ_DMA_SIZE_128_BYTE;
        }
}
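
/*
 * Worked example for the helper above: with addr = 0x1000, len = 0x2000 and
 * width * maxburst = 64, val = 0x1000 | 0x2000 | 0x40 has its lowest set bit
 * at position 6, so ffs(val) - 1 = 6 and (assuming transfer_ord_max >= 6, as
 * on the JZ4770 and JZ4780) the channel uses 64-byte transfer units.
 */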

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
        struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
        enum dma_transfer_direction direction)
{
        struct dma_slave_config *config = &jzchan->config;
        uint32_t width, maxburst, tsz;

        if (direction == DMA_MEM_TO_DEV) {
                desc->dcm = JZ_DMA_DCM_SAI;
                desc->dsa = addr;
                desc->dta = config->dst_addr;

                width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
        } else {
                desc->dcm = JZ_DMA_DCM_DAI;
                desc->dsa = config->src_addr;
                desc->dta = addr;

                width = config->src_addr_width;
                maxburst = config->src_maxburst;
        }

        /*
         * This calculates the maximum transfer size that can be used with the
         * given address, length, width and maximum burst size. The address
         * must be aligned to the transfer size, the total length must be
         * divisible by the transfer size, and we must not use more than the
         * maximum burst specified by the user.
         */
        tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
                                       &jzchan->transfer_shift);

        switch (width) {
        case DMA_SLAVE_BUSWIDTH_1_BYTE:
        case DMA_SLAVE_BUSWIDTH_2_BYTES:
                /*
                 * These values happen to match the JZ_DMA_WIDTH_8_BIT and
                 * JZ_DMA_WIDTH_16_BIT register encodings, so they can be
                 * used as-is.
                 */
                break;
        case DMA_SLAVE_BUSWIDTH_4_BYTES:
                width = JZ_DMA_WIDTH_32_BIT;
                break;
        default:
                return -EINVAL;
        }

        desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
        desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
        desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

        desc->dtc = len >> jzchan->transfer_shift;
        return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
        void *context)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_desc *desc;
        unsigned int i;
        int err;

        desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
        if (!desc)
                return NULL;

        for (i = 0; i < sg_len; i++) {
                err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
                                              sg_dma_address(&sgl[i]),
                                              sg_dma_len(&sgl[i]),
                                              direction);
                if (err < 0) {
                        jz4780_dma_desc_free(&desc->vdesc);
                        return NULL;
                }

                desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

                if (i != (sg_len - 1)) {
                        /* Automatically proceed to the next descriptor. */
                        desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

                        /*
                         * The upper 8 bits of the DTC field in the descriptor
                         * must be set to (offset from descriptor base of next
                         * descriptor >> 4).
                         */
                        desc->desc[i].dtc |=
                                (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
                }
        }

        return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
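
/*
 * A sketch of how a client typically reaches the callback above through the
 * generic dmaengine API (hypothetical consumer code, not part of this
 * driver):
 *
 *      chan = dma_request_chan(dev, "tx");
 *      dmaengine_slave_config(chan, &cfg);
 *      txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *                                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *      dmaengine_submit(txd);
 *      dma_async_issue_pending(chan);
 *
 * dmaengine_prep_slave_sg() dispatches here via dd->device_prep_slave_sg,
 * which is set up in jz4780_dma_probe().
 */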

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_desc *desc;
        unsigned int periods, i;
        int err;

        if (buf_len % period_len)
                return NULL;

        periods = buf_len / period_len;

        desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
        if (!desc)
                return NULL;

        for (i = 0; i < periods; i++) {
                err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
                                              period_len, direction);
                if (err < 0) {
                        jz4780_dma_desc_free(&desc->vdesc);
                        return NULL;
                }

                buf_addr += period_len;

                /*
                 * Set the link bit to indicate that the controller should
                 * automatically proceed to the next descriptor. In
                 * jz4780_dma_begin(), this will be cleared if we need to issue
                 * an interrupt after each period.
                 */
                desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

                /*
                 * The upper 8 bits of the DTC field in the descriptor must be
                 * set to (offset from descriptor base of next descriptor >> 4).
                 * If this is the last descriptor, link it back to the first,
                 * i.e. leave offset set to 0, otherwise point to the next one.
                 */
                if (i != (periods - 1)) {
                        desc->desc[i].dtc |=
                                (((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
                }
        }

        return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}
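
/*
 * Cyclic transfers are typically used by clients such as audio drivers; a
 * hypothetical consumer sketch:
 *
 *      txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *                                      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *      txd->callback = period_done;    // invoked once per period
 *      dmaengine_submit(txd);
 *      dma_async_issue_pending(chan);
 *
 * buf_len must be a whole number of periods, or the callback above returns
 * NULL.
 */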

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
        struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        size_t len, unsigned long flags)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_desc *desc;
        uint32_t tsz;

        desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
        if (!desc)
                return NULL;

        tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
                                       &jzchan->transfer_shift);

        jzchan->transfer_type = JZ_DMA_DRT_AUTO;

        desc->desc[0].dsa = src;
        desc->desc[0].dta = dest;
        desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
                            tsz << JZ_DMA_DCM_TSZ_SHIFT |
                            JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
                            JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
        desc->desc[0].dtc = len >> jzchan->transfer_shift;

        return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        struct virt_dma_desc *vdesc;
        unsigned int i;
        dma_addr_t desc_phys;

        if (!jzchan->desc) {
                vdesc = vchan_next_desc(&jzchan->vchan);
                if (!vdesc)
                        return;

                list_del(&vdesc->node);

                jzchan->desc = to_jz4780_dma_desc(vdesc);
                jzchan->curr_hwdesc = 0;

                if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
                        /*
                         * The DMA controller doesn't support triggering an
                         * interrupt after processing each descriptor, only
                         * after processing an entire terminated list of
                         * descriptors. For a cyclic DMA setup the list of
                         * descriptors is not terminated so we can never get an
                         * interrupt.
                         *
                         * If the user requested a callback for a cyclic DMA
                         * setup then we work around this hardware limitation
                         * here by degrading to a set of unlinked descriptors
                         * which we will submit in sequence in response to the
                         * completion of processing the previous descriptor.
                         */
                        for (i = 0; i < jzchan->desc->count; i++)
                                jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
                }
        } else {
                /*
                 * There is an existing transfer, therefore this must be one
                 * for which we unlinked the descriptors above. Advance to the
                 * next one in the list.
                 */
                jzchan->curr_hwdesc =
                        (jzchan->curr_hwdesc + 1) % jzchan->desc->count;
        }

        /* Enable the channel's clock. */
        jz4780_dma_chan_enable(jzdma, jzchan->id);

        /* Use 4-word descriptors. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

        /* Set transfer type. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
                              jzchan->transfer_type);

        /*
         * Set the transfer count. This is redundant for a descriptor-driven
         * transfer. However, there can be a delay between the transfer start
         * time and when the DTCn register contains the new transfer count.
         * Setting it explicitly ensures the residue is computed correctly at
         * all times.
         */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
                                jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

        /* Write descriptor address and initiate descriptor fetch. */
        desc_phys = jzchan->desc->desc_phys +
                    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

        /* Enable the channel. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
                              JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&jzchan->vchan.lock, flags);

        if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
                jz4780_dma_begin(jzchan);

        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&jzchan->vchan.lock, flags);

        /* Clear the DMA status and stop the transfer. */
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
        if (jzchan->desc) {
                vchan_terminate_vdesc(&jzchan->desc->vdesc);
                jzchan->desc = NULL;
        }

        jz4780_dma_chan_disable(jzdma, jzchan->id);

        vchan_get_all_descriptors(&jzchan->vchan, &head);

        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

        vchan_dma_desc_free_list(&jzchan->vchan, &head);
        return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

        vchan_synchronize(&jzchan->vchan);
        jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
        struct dma_slave_config *config)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

        if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
           || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
                return -EINVAL;

        /* Copy the rest of the slave configuration; it is used later. */
        memcpy(&jzchan->config, config, sizeof(jzchan->config));

        return 0;
}
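
/*
 * Example of a configuration this callback accepts (illustrative values;
 * fifo_phys is hypothetical): a 32-bit device FIFO written in 16-word
 * bursts.
 *
 *      struct dma_slave_config cfg = {
 *              .dst_addr = fifo_phys,
 *              .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *              .dst_maxburst = 16,
 *      };
 *      dmaengine_slave_config(chan, &cfg);
 *
 * An 8-byte bus width is rejected with -EINVAL, since the hardware cannot
 * perform 8-byte transfer units.
 */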

static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
        struct jz4780_dma_desc *desc, unsigned int next_sg)
{
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        unsigned int count = 0;
        unsigned int i;

        for (i = next_sg; i < desc->count; i++)
                count += desc->desc[i].dtc & GENMASK(23, 0);

        if (next_sg != 0)
                count += jz4780_dma_chn_readl(jzdma, jzchan->id,
                                         JZ_DMA_REG_DTC);

        return count << jzchan->transfer_shift;
}
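
/*
 * Example residue computation: with transfer_shift = 2 (4-byte units), two
 * still-pending descriptors of 1024 and 512 units and a hardware DTC count
 * of 100 for the current one, the residue is (1024 + 512 + 100) << 2 =
 * 6544 bytes.
 */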

static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;
        unsigned long residue = 0;

        status = dma_cookie_status(chan, cookie, txstate);
        if ((status == DMA_COMPLETE) || (txstate == NULL))
                return status;

        spin_lock_irqsave(&jzchan->vchan.lock, flags);

        vdesc = vchan_find_desc(&jzchan->vchan, cookie);
        if (vdesc) {
                /* On the issued list, so hasn't been processed yet */
                residue = jz4780_dma_desc_residue(jzchan,
                                        to_jz4780_dma_desc(vdesc), 0);
        } else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
                residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
                                        jzchan->curr_hwdesc + 1);
        }
        dma_set_residue(txstate, residue);

        if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
            && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
                status = DMA_ERROR;

        spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
        return status;
}

static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
                                struct jz4780_dma_chan *jzchan)
{
        uint32_t dcs;
        bool ack = true;

        spin_lock(&jzchan->vchan.lock);

        dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
        jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

        if (dcs & JZ_DMA_DCS_AR) {
                dev_warn(&jzchan->vchan.chan.dev->device,
                         "address error (DCS=0x%x)\n", dcs);
        }

        if (dcs & JZ_DMA_DCS_HLT) {
                dev_warn(&jzchan->vchan.chan.dev->device,
                         "channel halt (DCS=0x%x)\n", dcs);
        }

        if (jzchan->desc) {
                jzchan->desc->status = dcs;

                if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
                        if (jzchan->desc->type == DMA_CYCLIC) {
                                vchan_cyclic_callback(&jzchan->desc->vdesc);

                                jz4780_dma_begin(jzchan);
                        } else if (dcs & JZ_DMA_DCS_TT) {
                                vchan_cookie_complete(&jzchan->desc->vdesc);
                                jzchan->desc = NULL;

                                jz4780_dma_begin(jzchan);
                        } else {
                                /* False positive - continue the transfer */
                                ack = false;
                                jz4780_dma_chn_writel(jzdma, jzchan->id,
                                                      JZ_DMA_REG_DCS,
                                                      JZ_DMA_DCS_CTE);
                        }
                }
        } else {
                dev_err(&jzchan->vchan.chan.dev->device,
                        "channel IRQ with no active transfer\n");
        }

        spin_unlock(&jzchan->vchan.lock);

        return ack;
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
        struct jz4780_dma_dev *jzdma = data;
        unsigned int nb_channels = jzdma->soc_data->nb_channels;
        unsigned long pending;
        uint32_t dmac;
        int i;

        pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

        for_each_set_bit(i, &pending, nb_channels) {
                if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
                        pending &= ~BIT(i);
        }

        /* Clear halt and address error status of all channels. */
        dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
        dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

        /* Clear interrupt pending status. */
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

        return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

        jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
                                            chan->device->dev,
                                            JZ_DMA_DESC_BLOCK_SIZE,
                                            PAGE_SIZE, 0);
        if (!jzchan->desc_pool) {
                dev_err(&chan->dev->device,
                        "failed to allocate descriptor pool\n");
                return -ENOMEM;
        }

        return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

        vchan_free_chan_resources(&jzchan->vchan);
        dma_pool_destroy(jzchan->desc_pool);
        jzchan->desc_pool = NULL;
}

static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
        struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
        struct jz4780_dma_filter_data *data = param;

        if (data->channel > -1) {
                if (data->channel != jzchan->id)
                        return false;
        } else if (jzdma->chan_reserved & BIT(jzchan->id)) {
                return false;
        }

        jzchan->transfer_type = data->transfer_type;

        return true;
}

static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
        struct of_dma *ofdma)
{
        struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
        dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
        struct jz4780_dma_filter_data data;

        if (dma_spec->args_count != 2)
                return NULL;

        data.transfer_type = dma_spec->args[0];
        data.channel = dma_spec->args[1];

        if (data.channel > -1) {
                if (data.channel >= jzdma->soc_data->nb_channels) {
                        dev_err(jzdma->dma_device.dev,
                                "device requested non-existent channel %u\n",
                                data.channel);
                        return NULL;
                }

                /* Can only select a channel marked as reserved. */
                if (!(jzdma->chan_reserved & BIT(data.channel))) {
                        dev_err(jzdma->dma_device.dev,
                                "device requested unreserved channel %u\n",
                                data.channel);
                        return NULL;
                }

                jzdma->chan[data.channel].transfer_type = data.transfer_type;

                return dma_get_slave_channel(
                        &jzdma->chan[data.channel].vchan.chan);
        } else {
                return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
                                             ofdma->of_node);
        }
}
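
/*
 * Consumers reference the controller with two cells: a request type and a
 * channel number, where 0xffffffff (-1) lets the driver pick any unreserved
 * channel. A hypothetical devicetree snippet:
 *
 *      dma: dma-controller@13420000 {
 *              compatible = "ingenic,jz4780-dma";
 *              #dma-cells = <2>;
 *      };
 *
 *      uart0: serial@10030000 {
 *              dmas = <&dma 0x14 0xffffffff>, <&dma 0x15 0xffffffff>;
 *              dma-names = "tx", "rx";
 *      };
 */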

static int jz4780_dma_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        const struct jz4780_dma_soc_data *soc_data;
        struct jz4780_dma_dev *jzdma;
        struct jz4780_dma_chan *jzchan;
        struct dma_device *dd;
        struct resource *res;
        int i, ret;

        if (!dev->of_node) {
                dev_err(dev, "This driver must be probed from devicetree\n");
                return -EINVAL;
        }

        soc_data = device_get_match_data(dev);
        if (!soc_data)
                return -EINVAL;

        jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
                             soc_data->nb_channels), GFP_KERNEL);
        if (!jzdma)
                return -ENOMEM;

        jzdma->soc_data = soc_data;
        platform_set_drvdata(pdev, jzdma);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "failed to get I/O memory\n");
                return -EINVAL;
        }

        jzdma->chn_base = devm_ioremap_resource(dev, res);
        if (IS_ERR(jzdma->chn_base))
                return PTR_ERR(jzdma->chn_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                jzdma->ctrl_base = devm_ioremap_resource(dev, res);
                if (IS_ERR(jzdma->ctrl_base))
                        return PTR_ERR(jzdma->ctrl_base);
        } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
                /*
                 * On JZ4780, if the second memory resource was not supplied,
                 * assume we're using an old devicetree, and calculate the
                 * offset to the control registers.
                 */
                jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
        } else {
                dev_err(dev, "failed to get I/O memory\n");
                return -EINVAL;
        }

        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(dev, "failed to get IRQ: %d\n", ret);
                return ret;
        }

        jzdma->irq = ret;

        ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
                          jzdma);
        if (ret) {
                dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
                return ret;
        }

        jzdma->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(jzdma->clk)) {
                dev_err(dev, "failed to get clock\n");
                ret = PTR_ERR(jzdma->clk);
                goto err_free_irq;
        }

        ret = clk_prepare_enable(jzdma->clk);
        if (ret) {
                dev_err(dev, "failed to enable clock\n");
                goto err_free_irq;
        }

        /* Property is optional; if it doesn't exist, the value will remain 0. */
        of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
                                   0, &jzdma->chan_reserved);

        dd = &jzdma->dma_device;

        dma_cap_set(DMA_MEMCPY, dd->cap_mask);
        dma_cap_set(DMA_SLAVE, dd->cap_mask);
        dma_cap_set(DMA_CYCLIC, dd->cap_mask);

        dd->dev = dev;
        dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
        dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
        dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
        dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
        dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
        dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
        dd->device_config = jz4780_dma_config;
        dd->device_terminate_all = jz4780_dma_terminate_all;
        dd->device_synchronize = jz4780_dma_synchronize;
        dd->device_tx_status = jz4780_dma_tx_status;
        dd->device_issue_pending = jz4780_dma_issue_pending;
        dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
        dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
        dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        /*
         * Enable DMA controller, mark all channels as not programmable.
         * Also set the FMSC bit - it increases MSC performance, so it makes
         * little sense not to enable it.
         */
        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
                               JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

        if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
                jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

        INIT_LIST_HEAD(&dd->channels);

        for (i = 0; i < soc_data->nb_channels; i++) {
                jzchan = &jzdma->chan[i];
                jzchan->id = i;

                vchan_init(&jzchan->vchan, dd);
                jzchan->vchan.desc_free = jz4780_dma_desc_free;
        }

        ret = dmaenginem_async_device_register(dd);
        if (ret) {
                dev_err(dev, "failed to register device\n");
                goto err_disable_clk;
        }

        /* Register with OF DMA helpers. */
        ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
                                         jzdma);
        if (ret) {
                dev_err(dev, "failed to register OF DMA controller\n");
                goto err_disable_clk;
        }

        dev_info(dev, "JZ4780 DMA controller initialised\n");
        return 0;

err_disable_clk:
        clk_disable_unprepare(jzdma->clk);

err_free_irq:
        free_irq(jzdma->irq, jzdma);
        return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
        struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
        int i;

        of_dma_controller_free(pdev->dev.of_node);

        free_irq(jzdma->irq, jzdma);

        for (i = 0; i < jzdma->soc_data->nb_channels; i++)
                tasklet_kill(&jzdma->chan[i].vchan.task);

        return 0;
}

static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
        .nb_channels = 6,
        .transfer_ord_max = 5,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
        .nb_channels = 6,
        .transfer_ord_max = 5,
        .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
        .nb_channels = 6,
        .transfer_ord_max = 6,
        .flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
        .nb_channels = 32,
        .transfer_ord_max = 7,
        .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
        { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
        { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
        { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
        { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
        {},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
        .probe          = jz4780_dma_probe,
        .remove         = jz4780_dma_remove,
        .driver = {
                .name   = "jz4780-dma",
                .of_match_table = of_match_ptr(jz4780_dma_dt_match),
        },
};

static int __init jz4780_dma_init(void)
{
        return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
        platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");