linux/drivers/dma/s3c24xx-dma.c
/*
 * S3C24XX DMA handling
 *
 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
 *
 * based on amba-pl08x.c
 *
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
 * that can be routed to any of the 4 to 8 hardware channels.
 *
 * Therefore on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to serve all physical signals at once,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * Open items:
 * - bursts
 */

#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MAX_DMA_CHANNELS        8

#define S3C24XX_DISRC                   0x00
#define S3C24XX_DISRCC                  0x04
#define S3C24XX_DISRCC_INC_INCREMENT    0
#define S3C24XX_DISRCC_INC_FIXED        BIT(0)
#define S3C24XX_DISRCC_LOC_AHB          0
#define S3C24XX_DISRCC_LOC_APB          BIT(1)

#define S3C24XX_DIDST                   0x08
#define S3C24XX_DIDSTC                  0x0c
#define S3C24XX_DIDSTC_INC_INCREMENT    0
#define S3C24XX_DIDSTC_INC_FIXED        BIT(0)
#define S3C24XX_DIDSTC_LOC_AHB          0
#define S3C24XX_DIDSTC_LOC_APB          BIT(1)
#define S3C24XX_DIDSTC_INT_TC0          0
#define S3C24XX_DIDSTC_INT_RELOAD       BIT(2)

#define S3C24XX_DCON                    0x10

#define S3C24XX_DCON_TC_MASK            0xfffff
#define S3C24XX_DCON_DSZ_BYTE           (0 << 20)
#define S3C24XX_DCON_DSZ_HALFWORD       (1 << 20)
#define S3C24XX_DCON_DSZ_WORD           (2 << 20)
#define S3C24XX_DCON_DSZ_MASK           (3 << 20)
#define S3C24XX_DCON_DSZ_SHIFT          20
#define S3C24XX_DCON_AUTORELOAD         0
#define S3C24XX_DCON_NORELOAD           BIT(22)
#define S3C24XX_DCON_HWTRIG             BIT(23)
#define S3C24XX_DCON_HWSRC_SHIFT        24
#define S3C24XX_DCON_SERV_SINGLE        0
#define S3C24XX_DCON_SERV_WHOLE         BIT(27)
#define S3C24XX_DCON_TSZ_UNIT           0
#define S3C24XX_DCON_TSZ_BURST4         BIT(28)
#define S3C24XX_DCON_INT                BIT(29)
#define S3C24XX_DCON_SYNC_PCLK          0
#define S3C24XX_DCON_SYNC_HCLK          BIT(30)
#define S3C24XX_DCON_DEMAND             0
#define S3C24XX_DCON_HANDSHAKE          BIT(31)

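/*
 * Illustrative example for the DCON fields above (not a fixed recipe):
 * a 512-byte, word-wide, interrupting one-shot transfer would combine
 * roughly as
 *
 * dcon = S3C24XX_DCON_DSZ_WORD |  (transfer in 4-byte units)
 *        (512 / 4)             |  (transfer count in those units)
 *        S3C24XX_DCON_NORELOAD |
 *        S3C24XX_DCON_INT;        (interrupt on completion)
 *
 * mirroring how s3c24xx_dma_get_txd() and s3c24xx_dma_start_next_sg()
 * below build the value.
 */
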
#define S3C24XX_DSTAT                   0x14
#define S3C24XX_DSTAT_STAT_BUSY         BIT(20)
#define S3C24XX_DSTAT_CURRTC_MASK       0xfffff

#define S3C24XX_DMASKTRIG               0x20
#define S3C24XX_DMASKTRIG_SWTRIG        BIT(0)
#define S3C24XX_DMASKTRIG_ON            BIT(1)
#define S3C24XX_DMASKTRIG_STOP          BIT(2)

#define S3C24XX_DMAREQSEL               0x24
#define S3C24XX_DMAREQSEL_HW            BIT(0)

/*
 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
 * for a DMA source. Instead only specific channels are valid.
 * All of these SoCs have 4 physical channels and the number of request
 * source bits is 3. Additionally we also need 1 bit to mark the channel
 * as valid.
 * Therefore we separate the chansel element of the channel data into 4
 * parts of 4 bits each, to hold the information if the channel is valid
 * and the hw request source to use.
 *
 * Example:
 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
 * For it the chansel field would look like
 *
 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
 * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
 */
#define S3C24XX_CHANSEL_WIDTH           4
#define S3C24XX_CHANSEL_VALID           BIT(3)
#define S3C24XX_CHANSEL_REQ_MASK        7

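/*
 * A minimal sketch of decoding the nibble for physical channel phy_id on
 * these SoCs (not part of the driver, which open-codes this in
 * s3c24xx_dma_phy_valid() and s3c24xx_dma_start_next_sg()):
 *
 * u8 nibble = cdata->chansel >> (phy_id * S3C24XX_CHANSEL_WIDTH);
 * bool valid = nibble & S3C24XX_CHANSEL_VALID;
 * u8 hwsrc = nibble & S3C24XX_CHANSEL_REQ_MASK;
 */
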
/*
 * struct soc_data - vendor-specific config parameters for individual SoCs
 * @stride: spacing between the registers of each channel
 * @has_reqsel: does the controller use the newer request selection mechanism
 * @has_clocks: are controllable dma-clocks present
 */
struct soc_data {
        int stride;
        bool has_reqsel;
        bool has_clocks;
};

/*
 * enum s3c24xx_dma_chan_state - holds the virtual channel states
 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum s3c24xx_dma_chan_state {
        S3C24XX_DMA_CHAN_IDLE,
        S3C24XX_DMA_CHAN_RUNNING,
        S3C24XX_DMA_CHAN_WAITING,
};

/*
 * struct s3c24xx_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct s3c24xx_sg {
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        size_t len;
        struct list_head node;
};

/*
 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @at: sg currently being transferred
 * @width: transfer width
 * @disrcc: value for source control register
 * @didstc: value for destination control register
 * @dcon: base value for dcon register
 * @cyclic: indicate cyclic transfer
 */
struct s3c24xx_txd {
        struct virt_dma_desc vd;
        struct list_head dsg_list;
        struct list_head *at;
        u8 width;
        u32 disrcc;
        u32 didstc;
        u32 dcon;
        bool cyclic;
};

struct s3c24xx_dma_chan;

/*
 * struct s3c24xx_dma_phy - holder for the physical channels
 * @id: physical index to this channel
 * @valid: does the channel have all required elements
 * @base: virtual memory base (remapped) for this channel
 * @irq: interrupt for this channel
 * @clk: clock for this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: virtual channel currently being served by this physical channel
 * @host: a pointer to the host (internal use)
 */
struct s3c24xx_dma_phy {
        unsigned int                    id;
        bool                            valid;
        void __iomem                    *base;
        int                             irq;
        struct clk                      *clk;
        spinlock_t                      lock;
        struct s3c24xx_dma_chan         *serving;
        struct s3c24xx_dma_engine       *host;
};

/*
 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
 * @id: the id of the channel
 * @name: name of the channel
 * @vc: wrapped virtual channel
 * @phy: the physical channel utilized by this channel, if there is one
 * @cfg: slave configuration for this channel, set at runtime
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 */
struct s3c24xx_dma_chan {
        int id;
        const char *name;
        struct virt_dma_chan vc;
        struct s3c24xx_dma_phy *phy;
        struct dma_slave_config cfg;
        struct s3c24xx_txd *at;
        struct s3c24xx_dma_engine *host;
        enum s3c24xx_dma_chan_state state;
        bool slave;
};

/*
 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
 * @pdev: the corresponding platform device
 * @pdata: platform data passed in from the platform/machine
 * @sdata: soc data matched for the particular SoC revision
 * @base: virtual memory base (remapped)
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @phy_chans: array of data for the physical channels
 */
struct s3c24xx_dma_engine {
        struct platform_device                  *pdev;
        const struct s3c24xx_dma_platdata       *pdata;
        struct soc_data                         *sdata;
        void __iomem                            *base;
        struct dma_device                       slave;
        struct dma_device                       memcpy;
        struct s3c24xx_dma_phy                  *phy_chans;
};

/*
 * Physical channel handling
 */

/*
 * Check whether a certain channel is busy or not.
 */
static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
{
        unsigned int val = readl(phy->base + S3C24XX_DSTAT);
        return val & S3C24XX_DSTAT_STAT_BUSY;
}

static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
                                  struct s3c24xx_dma_phy *phy)
{
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
        struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
        int phyvalid;

        /* every phy is valid for memcpy channels */
        if (!s3cchan->slave)
                return true;

        /* On newer variants all phys can be used for all virtual channels */
        if (s3cdma->sdata->has_reqsel)
                return true;

        phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
        return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        struct s3c24xx_dma_phy *phy = NULL;
        unsigned long flags;
        int i;
        int ret;

        for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
                phy = &s3cdma->phy_chans[i];

                if (!phy->valid)
                        continue;

                if (!s3c24xx_dma_phy_valid(s3cchan, phy))
                        continue;

                spin_lock_irqsave(&phy->lock, flags);

                if (!phy->serving) {
                        phy->serving = s3cchan;
                        spin_unlock_irqrestore(&phy->lock, flags);
                        break;
                }

                spin_unlock_irqrestore(&phy->lock, flags);
        }

        /* No physical channel available, cope with it */
        if (i == s3cdma->pdata->num_phy_channels) {
                dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
                return NULL;
        }

        /* start the phy clock */
        if (s3cdma->sdata->has_clocks) {
                ret = clk_enable(phy->clk);
                if (ret) {
                        dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
                                phy->id, ret);
                        phy->serving = NULL;
                        return NULL;
                }
        }

        return phy;
}

/*
 * Mark the physical channel as free.
 *
 * This drops the link between the physical and virtual channel.
 */
static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
{
        struct s3c24xx_dma_engine *s3cdma = phy->host;

        if (s3cdma->sdata->has_clocks)
                clk_disable(phy->clk);

        phy->serving = NULL;
}

/*
 * Stops the channel by writing the stop bit.
 * This should not be used for an on-going transfer, but as a method of
 * shutting down a channel (eg, when it's no longer used) or terminating a
 * transfer.
 */
static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
{
        writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Virtual channel handling
 */

static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}

static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
{
        struct s3c24xx_dma_phy *phy = s3cchan->phy;
        struct s3c24xx_txd *txd = s3cchan->at;
        u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;

        return tc * txd->width;
}
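
/*
 * Example for s3c24xx_dma_getbytes_chan() above: if DSTAT still reports
 * 10 outstanding transfer units on a channel running with halfword
 * (2-byte) width, 20 bytes remain; s3c24xx_dma_tx_status() adds this to
 * the reported residue.
 */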

static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
                                  struct dma_slave_config *config)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        unsigned long flags;
        int ret = 0;

        /* Reject definitely invalid configurations */
        if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
            config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
                return -EINVAL;

        spin_lock_irqsave(&s3cchan->vc.lock, flags);

        if (!s3cchan->slave) {
                ret = -EINVAL;
                goto out;
        }

        s3cchan->cfg = *config;

out:
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
        return ret;
}

/*
 * Transfer handling
 */

static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
        return container_of(tx, struct s3c24xx_txd, vd.tx);
}

static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
{
        struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

        if (txd) {
                INIT_LIST_HEAD(&txd->dsg_list);
                txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
        }

        return txd;
}

static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
{
        struct s3c24xx_sg *dsg, *_dsg;

        list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
                list_del(&dsg->node);
                kfree(dsg);
        }

        kfree(txd);
}

static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
                                       struct s3c24xx_txd *txd)
{
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        struct s3c24xx_dma_phy *phy = s3cchan->phy;
        const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
        struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
        u32 dcon = txd->dcon;
        u32 val;

        /* transfer-size and -count from len and width */
        switch (txd->width) {
        case 1:
                dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
                break;
        case 2:
                dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
                break;
        case 4:
                dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
                break;
        }

        if (s3cchan->slave) {
                struct s3c24xx_dma_channel *cdata =
                                        &pdata->channels[s3cchan->id];

                if (s3cdma->sdata->has_reqsel) {
                        writel_relaxed((cdata->chansel << 1) |
                                                        S3C24XX_DMAREQSEL_HW,
                                        phy->base + S3C24XX_DMAREQSEL);
                } else {
                        int csel = cdata->chansel >> (phy->id *
                                                        S3C24XX_CHANSEL_WIDTH);

                        csel &= S3C24XX_CHANSEL_REQ_MASK;
                        dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
                        dcon |= S3C24XX_DCON_HWTRIG;
                }
        } else {
                if (s3cdma->sdata->has_reqsel)
                        writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
        }

        writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
        writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
        writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
        writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
        writel_relaxed(dcon, phy->base + S3C24XX_DCON);

        val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
        val &= ~S3C24XX_DMASKTRIG_STOP;
        val |= S3C24XX_DMASKTRIG_ON;

        /* trigger the dma operation for memcpy transfers */
        if (!s3cchan->slave)
                val |= S3C24XX_DMASKTRIG_SWTRIG;

        writel(val, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Set the initial DMA register values and start first sg.
 */
static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
{
        struct s3c24xx_dma_phy *phy = s3cchan->phy;
        struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
        struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);

        list_del(&txd->vd.node);

        s3cchan->at = txd;

        /* Wait for channel inactive */
        while (s3c24xx_dma_phy_busy(phy))
                cpu_relax();

        /* point to the first element of the sg list */
        txd->at = txd->dsg_list.next;
        s3c24xx_dma_start_next_sg(s3cchan, txd);
}

static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
                                struct s3c24xx_dma_chan *s3cchan)
{
        LIST_HEAD(head);

        vchan_get_all_descriptors(&s3cchan->vc, &head);
        vchan_dma_desc_free_list(&s3cchan->vc, &head);
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
{
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        struct s3c24xx_dma_phy *phy;

        phy = s3c24xx_dma_get_phy(s3cchan);
        if (!phy) {
                dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
                        s3cchan->name);
                s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
                return;
        }

        dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
                phy->id, s3cchan->name);

        s3cchan->phy = phy;
        s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;

        s3c24xx_dma_start_next_txd(s3cchan);
}

static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
        struct s3c24xx_dma_chan *s3cchan)
{
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

        dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
                phy->id, s3cchan->name);

        /*
         * We do this without taking the lock; we're really only concerned
         * about whether this pointer is NULL or not, and we're guaranteed
         * that this will only be called when it _already_ is non-NULL.
         */
        phy->serving = s3cchan;
        s3cchan->phy = phy;
        s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
        s3c24xx_dma_start_next_txd(s3cchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
{
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        struct s3c24xx_dma_chan *p, *next;

retry:
        next = NULL;

        /* Find a waiting virtual channel for the next transfer. */
        list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
                if (p->state == S3C24XX_DMA_CHAN_WAITING) {
                        next = p;
                        break;
                }

        if (!next) {
                list_for_each_entry(p, &s3cdma->slave.channels,
                                    vc.chan.device_node)
                        if (p->state == S3C24XX_DMA_CHAN_WAITING &&
                                      s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
                                next = p;
                                break;
                        }
        }

        /* Ensure that the physical channel is stopped */
        s3c24xx_dma_terminate_phy(s3cchan->phy);

        if (next) {
                bool success;

                /*
                 * Eww.  We know this isn't going to deadlock
                 * but lockdep probably doesn't.
                 */
                spin_lock(&next->vc.lock);
                /* Re-check the state now that we have the lock */
                success = next->state == S3C24XX_DMA_CHAN_WAITING;
                if (success)
                        s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
                spin_unlock(&next->vc.lock);

                /* If the state changed, try to find another channel */
                if (!success)
                        goto retry;
        } else {
                /* No more jobs, so free up the physical channel */
                s3c24xx_dma_put_phy(s3cchan->phy);
        }

        s3cchan->phy = NULL;
        s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
        struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

        if (!s3cchan->slave)
                dma_descriptor_unmap(&vd->tx);

        s3c24xx_dma_free_txd(txd);
}

static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
        struct s3c24xx_dma_phy *phy = data;
        struct s3c24xx_dma_chan *s3cchan = phy->serving;
        struct s3c24xx_txd *txd;

        dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

        /*
         * Interrupts happen to notify the completion of a transfer and the
         * channel should have moved into its stop state already on its own.
         * Therefore interrupts on channels not bound to a virtual channel
         * should never happen. Nevertheless send a terminate command to the
         * channel if the unlikely case happens.
         */
        if (unlikely(!s3cchan)) {
                dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
                        phy->id);

                s3c24xx_dma_terminate_phy(phy);

                return IRQ_HANDLED;
        }

        spin_lock(&s3cchan->vc.lock);
        txd = s3cchan->at;
        if (txd) {
                /* when more sg's are in this txd, start the next one */
                if (!list_is_last(txd->at, &txd->dsg_list)) {
                        txd->at = txd->at->next;
                        if (txd->cyclic)
                                vchan_cyclic_callback(&txd->vd);
                        s3c24xx_dma_start_next_sg(s3cchan, txd);
                } else if (!txd->cyclic) {
                        s3cchan->at = NULL;
                        vchan_cookie_complete(&txd->vd);

                        /*
                         * And start the next descriptor (if any),
                         * otherwise free this channel.
                         */
                        if (vchan_next_desc(&s3cchan->vc))
                                s3c24xx_dma_start_next_txd(s3cchan);
                        else
                                s3c24xx_dma_phy_free(s3cchan);
                } else {
                        vchan_cyclic_callback(&txd->vd);

                        /* Cyclic: reset at beginning */
                        txd->at = txd->dsg_list.next;
                        s3c24xx_dma_start_next_sg(s3cchan, txd);
                }
        }
        spin_unlock(&s3cchan->vc.lock);

        return IRQ_HANDLED;
}

/*
 * The DMA ENGINE API
 */

static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&s3cchan->vc.lock, flags);

        if (!s3cchan->phy && !s3cchan->at) {
                dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
                        s3cchan->id);
                ret = -EINVAL;
                goto unlock;
        }

        s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

        /* Mark physical channel as free */
        if (s3cchan->phy)
                s3c24xx_dma_phy_free(s3cchan);

        /* Dequeue current job */
        if (s3cchan->at) {
                s3c24xx_dma_desc_free(&s3cchan->at->vd);
                s3cchan->at = NULL;
        }

        /* Dequeue jobs not yet fired as well */
        s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
unlock:
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

        return ret;
}

static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
        /* Ensure all queued descriptors are freed */
        vchan_free_chan_resources(to_virt_chan(chan));
}

static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie, struct dma_tx_state *txstate)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_txd *txd;
        struct s3c24xx_sg *dsg;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;
        size_t bytes = 0;

        spin_lock_irqsave(&s3cchan->vc.lock, flags);
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE) {
                spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
                return ret;
        }

        /*
         * There's no point calculating the residue if there's
         * no txstate to store the value.
         */
        if (!txstate) {
                spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
                return ret;
        }

        vd = vchan_find_desc(&s3cchan->vc, cookie);
        if (vd) {
                /* On the issued list, so hasn't been processed yet */
                txd = to_s3c24xx_txd(&vd->tx);

                list_for_each_entry(dsg, &txd->dsg_list, node)
                        bytes += dsg->len;
        } else {
                /*
                 * Currently running, so sum over the pending sg's and
                 * the currently active one.
                 */
                txd = s3cchan->at;

                dsg = list_entry(txd->at, struct s3c24xx_sg, node);
                list_for_each_entry_from(dsg, &txd->dsg_list, node)
                        bytes += dsg->len;

                bytes += s3c24xx_dma_getbytes_chan(s3cchan);
        }
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

        /*
         * This cookie is not complete yet.
         * Report the number of bytes left in the active transactions and queue.
         */
        dma_set_residue(txstate, bytes);

        /* Whether waiting or running, we're in progress */
        return ret;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
                struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        struct s3c24xx_txd *txd;
        struct s3c24xx_sg *dsg;
        int src_mod, dest_mod;

        dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
                        len, s3cchan->name);

        if ((len & S3C24XX_DCON_TC_MASK) != len) {
                dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
                return NULL;
        }

        txd = s3c24xx_dma_get_txd();
        if (!txd)
                return NULL;

        dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
        if (!dsg) {
                s3c24xx_dma_free_txd(txd);
                return NULL;
        }
        list_add_tail(&dsg->node, &txd->dsg_list);

        dsg->src_addr = src;
        dsg->dst_addr = dest;
        dsg->len = len;

        /*
         * Determine a suitable transfer width.
         * The DMA controller cannot fetch/store information which is not
         * naturally aligned on the bus, i.e., a 4 byte fetch must start at
         * an address divisible by 4 - more generally addr % width must be 0.
         */
        src_mod = src % 4;
        dest_mod = dest % 4;
        switch (len % 4) {
        case 0:
                txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
                break;
        case 2:
                txd->width = ((src_mod == 2 || src_mod == 0) &&
                              (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
                break;
        default:
                txd->width = 1;
                break;
        }

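        /*
         * Worked examples of the width selection above: a 64-byte copy
         * between two word-aligned addresses uses 4-byte units; a 6-byte
         * copy between halfword-aligned addresses uses 2-byte units; a
         * 64-byte copy from 0x1002 to 0x2002 falls back to single bytes,
         * because the len % 4 == 0 case only accepts full word alignment.
         */
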
        txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
        txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
        txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
                     S3C24XX_DCON_SERV_WHOLE;

        return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
        enum dma_transfer_direction direction, unsigned long flags)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
        struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
        struct s3c24xx_txd *txd;
        struct s3c24xx_sg *dsg;
        unsigned sg_len;
        dma_addr_t slave_addr;
        u32 hwcfg = 0;
        int i;

        dev_dbg(&s3cdma->pdev->dev,
                "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
                size, period, s3cchan->name);

        if (!is_slave_direction(direction)) {
                dev_err(&s3cdma->pdev->dev,
                        "direction %d unsupported\n", direction);
                return NULL;
        }

        txd = s3c24xx_dma_get_txd();
        if (!txd)
                return NULL;

        txd->cyclic = true;

        if (cdata->handshake)
                txd->dcon |= S3C24XX_DCON_HANDSHAKE;

        switch (cdata->bus) {
        case S3C24XX_DMA_APB:
                txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
                hwcfg |= S3C24XX_DISRCC_LOC_APB;
                break;
        case S3C24XX_DMA_AHB:
                txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
                hwcfg |= S3C24XX_DISRCC_LOC_AHB;
                break;
        }

        /*
         * Always assume our peripheral destination is a fixed
         * address in memory.
         */
        hwcfg |= S3C24XX_DISRCC_INC_FIXED;

        /*
         * Individual dma operations are requested by the slave,
         * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
         */
        txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

        if (direction == DMA_MEM_TO_DEV) {
                txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
                              S3C24XX_DISRCC_INC_INCREMENT;
                txd->didstc = hwcfg;
                slave_addr = s3cchan->cfg.dst_addr;
                txd->width = s3cchan->cfg.dst_addr_width;
        } else {
                txd->disrcc = hwcfg;
                txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
                              S3C24XX_DIDSTC_INC_INCREMENT;
                slave_addr = s3cchan->cfg.src_addr;
                txd->width = s3cchan->cfg.src_addr_width;
        }

        sg_len = size / period;

        for (i = 0; i < sg_len; i++) {
                dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
                if (!dsg) {
                        s3c24xx_dma_free_txd(txd);
                        return NULL;
                }
                list_add_tail(&dsg->node, &txd->dsg_list);

                dsg->len = period;
                /* Check last period length */
                if (i == sg_len - 1)
                        dsg->len = size - period * i;
                if (direction == DMA_MEM_TO_DEV) {
                        dsg->src_addr = addr + period * i;
                        dsg->dst_addr = slave_addr;
                } else { /* DMA_DEV_TO_MEM */
                        dsg->src_addr = slave_addr;
                        dsg->dst_addr = addr + period * i;
                }
        }

        return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

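/*
 * Example for s3c24xx_dma_prep_dma_cyclic() above: a cyclic transfer with
 * size 4096 and period 1024 is split into four 1024-byte sg entries; after
 * the last entry completes, the interrupt handler rewinds txd->at to the
 * head of dsg_list, so the buffer is traversed indefinitely until the
 * channel is terminated.
 */
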
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
        const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
        struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
        struct s3c24xx_txd *txd;
        struct s3c24xx_sg *dsg;
        struct scatterlist *sg;
        dma_addr_t slave_addr;
        u32 hwcfg = 0;
        int tmp;

        dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %u bytes from %s\n",
                        sg_dma_len(sgl), s3cchan->name);

        txd = s3c24xx_dma_get_txd();
        if (!txd)
                return NULL;

        if (cdata->handshake)
                txd->dcon |= S3C24XX_DCON_HANDSHAKE;

        switch (cdata->bus) {
        case S3C24XX_DMA_APB:
                txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
                hwcfg |= S3C24XX_DISRCC_LOC_APB;
                break;
        case S3C24XX_DMA_AHB:
                txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
                hwcfg |= S3C24XX_DISRCC_LOC_AHB;
                break;
        }

        /*
         * Always assume our peripheral destination is a fixed
         * address in memory.
         */
        hwcfg |= S3C24XX_DISRCC_INC_FIXED;

        /*
         * Individual dma operations are requested by the slave,
         * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
         */
        txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

        if (direction == DMA_MEM_TO_DEV) {
                txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
                              S3C24XX_DISRCC_INC_INCREMENT;
                txd->didstc = hwcfg;
                slave_addr = s3cchan->cfg.dst_addr;
                txd->width = s3cchan->cfg.dst_addr_width;
        } else if (direction == DMA_DEV_TO_MEM) {
                txd->disrcc = hwcfg;
                txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
                              S3C24XX_DIDSTC_INC_INCREMENT;
                slave_addr = s3cchan->cfg.src_addr;
                txd->width = s3cchan->cfg.src_addr_width;
        } else {
                s3c24xx_dma_free_txd(txd);
                dev_err(&s3cdma->pdev->dev,
                        "direction %d unsupported\n", direction);
                return NULL;
        }

        for_each_sg(sgl, sg, sg_len, tmp) {
                dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
                if (!dsg) {
                        s3c24xx_dma_free_txd(txd);
                        return NULL;
                }
                list_add_tail(&dsg->node, &txd->dsg_list);

                dsg->len = sg_dma_len(sg);
                if (direction == DMA_MEM_TO_DEV) {
                        dsg->src_addr = sg_dma_address(sg);
                        dsg->dst_addr = slave_addr;
                } else { /* DMA_DEV_TO_MEM */
                        dsg->src_addr = slave_addr;
                        dsg->dst_addr = sg_dma_address(sg);
                }
        }

        return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
{
        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&s3cchan->vc.lock, flags);
        if (vchan_issue_pending(&s3cchan->vc)) {
                if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
                        s3c24xx_dma_phy_alloc_and_start(s3cchan);
        }
        spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
}

/*
 * Bringup and teardown
 */

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
                struct dma_device *dmadev, unsigned int channels, bool slave)
{
        struct s3c24xx_dma_chan *chan;
        int i;

        INIT_LIST_HEAD(&dmadev->channels);

        /*
         * Register as many memcpy channels as we have physical channels,
         * we won't always be able to use all but the code will have
         * to cope with that situation.
         */
        for (i = 0; i < channels; i++) {
                chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
                if (!chan) {
                        dev_err(dmadev->dev,
                                "%s no memory for channel\n", __func__);
                        return -ENOMEM;
                }

                chan->id = i;
                chan->host = s3cdma;
                chan->state = S3C24XX_DMA_CHAN_IDLE;

                if (slave) {
                        chan->slave = true;
                        chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
                        if (!chan->name)
                                return -ENOMEM;
                } else {
                        chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
                        if (!chan->name)
                                return -ENOMEM;
                }
                dev_dbg(dmadev->dev,
                         "initialize virtual channel \"%s\"\n",
                         chan->name);

                chan->vc.desc_free = s3c24xx_dma_desc_free;
                vchan_init(&chan->vc, dmadev);
        }
        dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
                 i, slave ? "slave" : "memcpy");
        return i;
}

static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
{
        struct s3c24xx_dma_chan *chan = NULL;
        struct s3c24xx_dma_chan *next;

        list_for_each_entry_safe(chan,
                                 next, &dmadev->channels, vc.chan.device_node)
                list_del(&chan->vc.chan.device_node);
}

/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
static struct soc_data soc_s3c2410 = {
        .stride = 0x40,
        .has_reqsel = false,
        .has_clocks = false,
};

/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2412 = {
        .stride = 0x40,
        .has_reqsel = true,
        .has_clocks = true,
};

/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2443 = {
        .stride = 0x100,
        .has_reqsel = true,
        .has_clocks = true,
};

static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
        {
                .name           = "s3c2410-dma",
                .driver_data    = (kernel_ulong_t)&soc_s3c2410,
        }, {
                .name           = "s3c2412-dma",
                .driver_data    = (kernel_ulong_t)&soc_s3c2412,
        }, {
                .name           = "s3c2443-dma",
                .driver_data    = (kernel_ulong_t)&soc_s3c2443,
        },
        { },
};

static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
{
        return (struct soc_data *)
                         platform_get_device_id(pdev)->driver_data;
}

static int s3c24xx_dma_probe(struct platform_device *pdev)
{
        const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
        struct s3c24xx_dma_engine *s3cdma;
        struct soc_data *sdata;
        struct resource *res;
        int ret;
        int i;

        if (!pdata) {
                dev_err(&pdev->dev, "platform data missing\n");
                return -ENODEV;
        }

        /* Basic sanity check */
        if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
                dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
                        pdata->num_phy_channels, MAX_DMA_CHANNELS);
                return -EINVAL;
        }

        sdata = s3c24xx_dma_get_soc_data(pdev);
        if (!sdata)
                return -EINVAL;

        s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
        if (!s3cdma)
                return -ENOMEM;

        s3cdma->pdev = pdev;
        s3cdma->pdata = pdata;
        s3cdma->sdata = sdata;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(s3cdma->base))
                return PTR_ERR(s3cdma->base);

        s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
                                         pdata->num_phy_channels,
                                         sizeof(struct s3c24xx_dma_phy),
                                         GFP_KERNEL);
        if (!s3cdma->phy_chans)
                return -ENOMEM;

        /* acquire irqs and clocks for all physical channels */
        for (i = 0; i < pdata->num_phy_channels; i++) {
                struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
                char clk_name[6];

                phy->id = i;
                phy->base = s3cdma->base + (i * sdata->stride);
                phy->host = s3cdma;

                phy->irq = platform_get_irq(pdev, i);
                if (phy->irq < 0) {
                        dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
                                i, phy->irq);
                        continue;
                }

                ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
                                       0, pdev->name, phy);
                if (ret) {
                        dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
                                i, ret);
                        continue;
                }

                if (sdata->has_clocks) {
                        sprintf(clk_name, "dma.%d", i);
                        phy->clk = devm_clk_get(&pdev->dev, clk_name);
                        if (IS_ERR(phy->clk)) {
                                dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
                                        i, PTR_ERR(phy->clk));
                                continue;
                        }

                        ret = clk_prepare(phy->clk);
                        if (ret) {
                                dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
                                        i, ret);
                                continue;
                        }
                }

                spin_lock_init(&phy->lock);
                phy->valid = true;

                dev_dbg(&pdev->dev, "physical channel %d is %s\n",
                        i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
        }

        /* Initialize memcpy engine */
        dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
        dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
        s3cdma->memcpy.dev = &pdev->dev;
        s3cdma->memcpy.device_free_chan_resources =
                                        s3c24xx_dma_free_chan_resources;
        s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
        s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
        s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
        s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
        s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;

        /* Initialize slave engine for SoC internal dedicated peripherals */
        dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
        dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
        s3cdma->slave.dev = &pdev->dev;
        s3cdma->slave.device_free_chan_resources =
                                        s3c24xx_dma_free_chan_resources;
        s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
        s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
        s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
        s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
        s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
        s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;

        /* Register as many memcpy channels as there are physical channels */
        ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
                                                pdata->num_phy_channels, false);
        if (ret <= 0) {
                dev_warn(&pdev->dev,
                         "%s failed to enumerate memcpy channels - %d\n",
                         __func__, ret);
                goto err_memcpy;
        }

        /* Register slave channels */
        ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
                                pdata->num_channels, true);
        if (ret <= 0) {
                dev_warn(&pdev->dev,
                        "%s failed to enumerate slave channels - %d\n",
                                __func__, ret);
                goto err_slave;
        }

        ret = dma_async_device_register(&s3cdma->memcpy);
        if (ret) {
                dev_warn(&pdev->dev,
                        "%s failed to register memcpy as an async device - %d\n",
                        __func__, ret);
                goto err_memcpy_reg;
        }

        ret = dma_async_device_register(&s3cdma->slave);
        if (ret) {
                dev_warn(&pdev->dev,
                        "%s failed to register slave as an async device - %d\n",
                        __func__, ret);
                goto err_slave_reg;
        }

        platform_set_drvdata(pdev, s3cdma);
        dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
                 pdata->num_phy_channels);

        return 0;

err_slave_reg:
        dma_async_device_unregister(&s3cdma->memcpy);
err_memcpy_reg:
        s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
err_slave:
        s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
err_memcpy:
        if (sdata->has_clocks)
                for (i = 0; i < pdata->num_phy_channels; i++) {
                        struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
                        if (phy->valid)
                                clk_unprepare(phy->clk);
                }

        return ret;
}

static int s3c24xx_dma_remove(struct platform_device *pdev)
{
        const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
        struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
        struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
        int i;

        dma_async_device_unregister(&s3cdma->slave);
        dma_async_device_unregister(&s3cdma->memcpy);

        s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
        s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);

        if (sdata->has_clocks)
                for (i = 0; i < pdata->num_phy_channels; i++) {
                        struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
                        if (phy->valid)
                                clk_unprepare(phy->clk);
                }

        return 0;
}

static struct platform_driver s3c24xx_dma_driver = {
        .driver         = {
                .name   = "s3c24xx-dma",
        },
        .id_table       = s3c24xx_dma_driver_ids,
        .probe          = s3c24xx_dma_probe,
        .remove         = s3c24xx_dma_remove,
};

module_platform_driver(s3c24xx_dma_driver);

bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
{
        struct s3c24xx_dma_chan *s3cchan;

        if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
                return false;

        s3cchan = to_s3c24xx_dma_chan(chan);

        return s3cchan->id == (int)param;
}
EXPORT_SYMBOL(s3c24xx_dma_filter);

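/*
 * A minimal sketch of requesting a slave channel with s3c24xx_dma_filter()
 * above from client code; the channel id value (here DMACH_SDI) is defined
 * by the platform and only assumed for illustration:
 *
 * dma_cap_mask_t mask;
 * struct dma_chan *chan;
 *
 * dma_cap_zero(mask);
 * dma_cap_set(DMA_SLAVE, mask);
 * chan = dma_request_channel(mask, s3c24xx_dma_filter, (void *)DMACH_SDI);
 */
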
MODULE_DESCRIPTION("S3C24XX DMA Driver");
MODULE_AUTHOR("Heiko Stuebner");
MODULE_LICENSE("GPL v2");