linux/drivers/dma/sa11x0-dma.c
/*
 * SA11x0 DMAengine support
 *
 * Copyright (C) 2012 Russell King
 *   Derived in part from arch/arm/mach-sa1100/dma.c,
 *   Copyright (C) 2000, 2001 by Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sa11x0-dma.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

#define NR_PHY_CHAN     6
#define DMA_ALIGN       3
#define DMA_MAX_SIZE    0x1fff
#define DMA_CHUNK_SIZE  0x1000

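/*
 * Each of the NR_PHY_CHAN physical channels owns a DMA_SIZE-byte register
 * block: DDAR selects the device and transfer parameters, DCSR is accessed
 * through separate set/clear/read addresses, and two buffer descriptors
 * (DBSA/DBTA and DBSB/DBTB) hold the start address and transfer count for
 * buffers A and B.  The engine ping-pongs between the two buffers; as used
 * by this driver, DCSR_BIU indicates which buffer is currently in use, so
 * the other one can be reloaded while a transfer is in progress.
 */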
#define DMA_DDAR        0x00
#define DMA_DCSR_S      0x04
#define DMA_DCSR_C      0x08
#define DMA_DCSR_R      0x0c
#define DMA_DBSA        0x10
#define DMA_DBTA        0x14
#define DMA_DBSB        0x18
#define DMA_DBTB        0x1c
#define DMA_SIZE        0x20

#define DCSR_RUN        (1 << 0)
#define DCSR_IE         (1 << 1)
#define DCSR_ERROR      (1 << 2)
#define DCSR_DONEA      (1 << 3)
#define DCSR_STRTA      (1 << 4)
#define DCSR_DONEB      (1 << 5)
#define DCSR_STRTB      (1 << 6)
#define DCSR_BIU        (1 << 7)

#define DDAR_RW         (1 << 0)        /* 0 = W, 1 = R */
#define DDAR_E          (1 << 1)        /* 0 = LE, 1 = BE */
#define DDAR_BS         (1 << 2)        /* 0 = BS4, 1 = BS8 */
#define DDAR_DW         (1 << 3)        /* 0 = 8b, 1 = 16b */
#define DDAR_Ser0UDCTr  (0x0 << 4)
#define DDAR_Ser0UDCRc  (0x1 << 4)
#define DDAR_Ser1SDLCTr (0x2 << 4)
#define DDAR_Ser1SDLCRc (0x3 << 4)
#define DDAR_Ser1UARTTr (0x4 << 4)
#define DDAR_Ser1UARTRc (0x5 << 4)
#define DDAR_Ser2ICPTr  (0x6 << 4)
#define DDAR_Ser2ICPRc  (0x7 << 4)
#define DDAR_Ser3UARTTr (0x8 << 4)
#define DDAR_Ser3UARTRc (0x9 << 4)
#define DDAR_Ser4MCP0Tr (0xa << 4)
#define DDAR_Ser4MCP0Rc (0xb << 4)
#define DDAR_Ser4MCP1Tr (0xc << 4)
#define DDAR_Ser4MCP1Rc (0xd << 4)
#define DDAR_Ser4SSPTr  (0xe << 4)
#define DDAR_Ser4SSPRc  (0xf << 4)

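/*
 * A prepared transfer is described by a sa11x0_dma_desc, which embeds the
 * generic virt_dma_desc and ends in a trailing array of sa11x0_dma_sg
 * entries.  Each entry is one hardware segment of at most DMA_MAX_SIZE
 * bytes, ready to be written to a DBSx/DBTx register pair.
 */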
struct sa11x0_dma_sg {
        u32                     addr;
        u32                     len;
};

struct sa11x0_dma_desc {
        struct virt_dma_desc    vd;

        u32                     ddar;
        size_t                  size;
        unsigned                period;
        bool                    cyclic;

        unsigned                sglen;
        struct sa11x0_dma_sg    sg[0];
};

struct sa11x0_dma_phy;

struct sa11x0_dma_chan {
        struct virt_dma_chan    vc;

        /* protected by c->vc.lock */
        struct sa11x0_dma_phy   *phy;
        enum dma_status         status;

        /* protected by d->lock */
        struct list_head        node;

        u32                     ddar;
        const char              *name;
};

struct sa11x0_dma_phy {
        void __iomem            *base;
        struct sa11x0_dma_dev   *dev;
        unsigned                num;

        struct sa11x0_dma_chan  *vchan;

        /* Protected by c->vc.lock */
        unsigned                sg_load;
        struct sa11x0_dma_desc  *txd_load;
        unsigned                sg_done;
        struct sa11x0_dma_desc  *txd_done;
        u32                     dbs[2];
        u32                     dbt[2];
        u32                     dcsr;
};

struct sa11x0_dma_dev {
        struct dma_device       slave;
        void __iomem            *base;
        spinlock_t              lock;
        struct tasklet_struct   task;
        struct list_head        chan_pending;
        struct sa11x0_dma_phy   phy[NR_PHY_CHAN];
};

static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct sa11x0_dma_chan, vc.chan);
}

static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev)
{
        return container_of(dmadev, struct sa11x0_dma_dev, slave);
}

static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c)
{
        struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

        return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL;
}

static void sa11x0_dma_free_desc(struct virt_dma_desc *vd)
{
        kfree(container_of(vd, struct sa11x0_dma_desc, vd));
}

static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd)
{
        list_del(&txd->vd.node);
        p->txd_load = txd;
        p->sg_load = 0;

        dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n",
                p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar);
}

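/*
 * Load the next segment of the currently loading descriptor into whichever
 * hardware buffer (A or B) is free.  When the current descriptor is
 * exhausted, peek at the next issued descriptor and chain straight into it
 * if it targets the same device (same DDAR); cyclic descriptors simply
 * wrap back to their first segment.
 */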
static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p,
        struct sa11x0_dma_chan *c)
{
        struct sa11x0_dma_desc *txd = p->txd_load;
        struct sa11x0_dma_sg *sg;
        void __iomem *base = p->base;
        unsigned dbsx, dbtx;
        u32 dcsr;

        if (!txd)
                return;

        dcsr = readl_relaxed(base + DMA_DCSR_R);

        /* Don't try to load the next transfer if both buffers are started */
        if ((dcsr & (DCSR_STRTA | DCSR_STRTB)) == (DCSR_STRTA | DCSR_STRTB))
                return;

        if (p->sg_load == txd->sglen) {
                if (!txd->cyclic) {
                        struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c);

                        /*
                         * We have reached the end of the current descriptor.
                         * Peek at the next descriptor, and if compatible with
                         * the current, start processing it.
                         */
                        if (txn && txn->ddar == txd->ddar) {
                                txd = txn;
                                sa11x0_dma_start_desc(p, txn);
                        } else {
                                p->txd_load = NULL;
                                return;
                        }
                } else {
                        /* Cyclic: reset back to beginning */
                        p->sg_load = 0;
                }
        }

        sg = &txd->sg[p->sg_load++];

        /* Select buffer to load according to channel status */
        if (((dcsr & (DCSR_BIU | DCSR_STRTB)) == (DCSR_BIU | DCSR_STRTB)) ||
            ((dcsr & (DCSR_BIU | DCSR_STRTA)) == 0)) {
                dbsx = DMA_DBSA;
                dbtx = DMA_DBTA;
                dcsr = DCSR_STRTA | DCSR_IE | DCSR_RUN;
        } else {
                dbsx = DMA_DBSB;
                dbtx = DMA_DBTB;
                dcsr = DCSR_STRTB | DCSR_IE | DCSR_RUN;
        }

        writel_relaxed(sg->addr, base + dbsx);
        writel_relaxed(sg->len, base + dbtx);
        writel(dcsr, base + DMA_DCSR_S);

        dev_dbg(p->dev->slave.dev, "pchan %u: load: DCSR:%02x DBS%c:%08x DBT%c:%08x\n",
                p->num, dcsr,
                'A' + (dbsx == DMA_DBSB), sg->addr,
                'A' + (dbtx == DMA_DBTB), sg->len);
}

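/*
 * One hardware buffer has finished.  Account the completed segment; once
 * the whole descriptor is done, either complete its cookie (slave_sg) or
 * invoke the cyclic callback and wrap around (cyclic), then try to keep
 * the free buffer loaded with the next segment.
 */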
static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p,
        struct sa11x0_dma_chan *c)
{
        struct sa11x0_dma_desc *txd = p->txd_done;

        if (++p->sg_done == txd->sglen) {
                if (!txd->cyclic) {
                        vchan_cookie_complete(&txd->vd);

                        p->sg_done = 0;
                        p->txd_done = p->txd_load;

                        if (!p->txd_done)
                                tasklet_schedule(&p->dev->task);
                } else {
                        if ((p->sg_done % txd->period) == 0)
                                vchan_cyclic_callback(&txd->vd);

                        /* Cyclic: reset back to beginning */
                        p->sg_done = 0;
                }
        }

        sa11x0_dma_start_sg(p, c);
}

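/*
 * Per-physical-channel interrupt handler.  Clears the reported status,
 * logs bus errors, and, provided the virtual channel is still bound to
 * this physical channel, retires one buffer per DONEA/DONEB bit set.
 */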
static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id)
{
        struct sa11x0_dma_phy *p = dev_id;
        struct sa11x0_dma_dev *d = p->dev;
        struct sa11x0_dma_chan *c;
        u32 dcsr;

        dcsr = readl_relaxed(p->base + DMA_DCSR_R);
        if (!(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB)))
                return IRQ_NONE;

        /* Clear reported status bits */
        writel_relaxed(dcsr & (DCSR_ERROR | DCSR_DONEA | DCSR_DONEB),
                p->base + DMA_DCSR_C);

        dev_dbg(d->slave.dev, "pchan %u: irq: DCSR:%02x\n", p->num, dcsr);

        if (dcsr & DCSR_ERROR) {
                dev_err(d->slave.dev, "pchan %u: error. DCSR:%02x DDAR:%08x DBSA:%08x DBTA:%08x DBSB:%08x DBTB:%08x\n",
                        p->num, dcsr,
                        readl_relaxed(p->base + DMA_DDAR),
                        readl_relaxed(p->base + DMA_DBSA),
                        readl_relaxed(p->base + DMA_DBTA),
                        readl_relaxed(p->base + DMA_DBSB),
                        readl_relaxed(p->base + DMA_DBTB));
        }

        c = p->vchan;
        if (c) {
                unsigned long flags;

                spin_lock_irqsave(&c->vc.lock, flags);
                /*
                 * Now that we're holding the lock, check that the vchan
                 * really is associated with this pchan before touching the
                 * hardware.  This should always succeed, because we won't
                 * change p->vchan or c->phy while the channel is actively
                 * transferring.
                 */
                if (c->phy == p) {
                        if (dcsr & DCSR_DONEA)
                                sa11x0_dma_complete(p, c);
                        if (dcsr & DCSR_DONEB)
                                sa11x0_dma_complete(p, c);
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
        }

        return IRQ_HANDLED;
}

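/*
 * Begin executing the next issued descriptor on the physical channel
 * bound to this virtual channel: program DDAR and prime both hardware
 * buffers.  Called with the virtual channel lock held.
 */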
static void sa11x0_dma_start_txd(struct sa11x0_dma_chan *c)
{
        struct sa11x0_dma_desc *txd = sa11x0_dma_next_desc(c);

        /* If the issued list is empty, we have no further txds to process */
        if (txd) {
                struct sa11x0_dma_phy *p = c->phy;

                sa11x0_dma_start_desc(p, txd);
                p->txd_done = txd;
                p->sg_done = 0;

                /* The channel should not have any transfers started */
                WARN_ON(readl_relaxed(p->base + DMA_DCSR_R) &
                                      (DCSR_STRTA | DCSR_STRTB));

                /* Clear the run and start bits before changing DDAR */
                writel_relaxed(DCSR_RUN | DCSR_STRTA | DCSR_STRTB,
                               p->base + DMA_DCSR_C);
                writel_relaxed(txd->ddar, p->base + DMA_DDAR);

                /* Try to start both buffers */
                sa11x0_dma_start_sg(p, c);
                sa11x0_dma_start_sg(p, c);
        }
}

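/*
 * Scheduling tasklet: release physical channels whose virtual channel has
 * run out of work, then hand free physical channels to virtual channels
 * waiting on the pending list and start their first descriptor.
 */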
static void sa11x0_dma_tasklet(unsigned long arg)
{
        struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg;
        struct sa11x0_dma_phy *p;
        struct sa11x0_dma_chan *c;
        unsigned pch, pch_alloc = 0;

        dev_dbg(d->slave.dev, "tasklet enter\n");

        list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) {
                spin_lock_irq(&c->vc.lock);
                p = c->phy;
                if (p && !p->txd_done) {
                        sa11x0_dma_start_txd(c);
                        if (!p->txd_done) {
                                /* No current txd associated with this channel */
                                dev_dbg(d->slave.dev, "pchan %u: free\n", p->num);

                                /* Mark this channel free */
                                c->phy = NULL;
                                p->vchan = NULL;
                        }
                }
                spin_unlock_irq(&c->vc.lock);
        }

        spin_lock_irq(&d->lock);
        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                p = &d->phy[pch];

                if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
                        c = list_first_entry(&d->chan_pending,
                                struct sa11x0_dma_chan, node);
                        list_del_init(&c->node);

                        pch_alloc |= 1 << pch;

                        /* Mark this channel allocated */
                        p->vchan = c;

                        dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
                }
        }
        spin_unlock_irq(&d->lock);

        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                if (pch_alloc & (1 << pch)) {
                        p = &d->phy[pch];
                        c = p->vchan;

                        spin_lock_irq(&c->vc.lock);
                        c->phy = p;

                        sa11x0_dma_start_txd(c);
                        spin_unlock_irq(&c->vc.lock);
                }
        }

        dev_dbg(d->slave.dev, "tasklet exit\n");
}


static void sa11x0_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        list_del_init(&c->node);
        spin_unlock_irqrestore(&d->lock, flags);

        vchan_free_chan_resources(&c->vc);
}

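/*
 * Return the engine's current transfer position for residue reporting:
 * pick the buffer the DCSR start/BIU bits indicate is active and read
 * back its DBSx address register.
 */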
static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p)
{
        unsigned reg;
        u32 dcsr;

        dcsr = readl_relaxed(p->base + DMA_DCSR_R);

        if ((dcsr & (DCSR_BIU | DCSR_STRTA)) == DCSR_STRTA ||
            (dcsr & (DCSR_BIU | DCSR_STRTB)) == DCSR_BIU)
                reg = DMA_DBSA;
        else
                reg = DMA_DBSB;

        return readl_relaxed(p->base + reg);
}

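/*
 * Report descriptor status and residue.  A cookie still sitting on the
 * issued list owes its full size; for the descriptor currently on the
 * hardware, the residue is the unconsumed remainder of the active segment
 * plus all segments not yet started.
 */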
static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        struct virt_dma_desc *vd;
        unsigned long flags;
        enum dma_status ret;

        ret = dma_cookie_status(&c->vc.chan, cookie, state);
        if (ret == DMA_COMPLETE)
                return ret;

        if (!state)
                return c->status;

        spin_lock_irqsave(&c->vc.lock, flags);
        p = c->phy;

        /*
         * If the cookie is on our issue queue, then the residue is
         * its total size.
         */
        vd = vchan_find_desc(&c->vc, cookie);
        if (vd) {
                state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size;
        } else if (!p) {
                state->residue = 0;
        } else {
                struct sa11x0_dma_desc *txd;
                size_t bytes = 0;

                if (p->txd_done && p->txd_done->vd.tx.cookie == cookie)
                        txd = p->txd_done;
                else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie)
                        txd = p->txd_load;
                else
                        txd = NULL;

                ret = c->status;
                if (txd) {
                        dma_addr_t addr = sa11x0_dma_pos(p);
                        unsigned i;

                        dev_vdbg(d->slave.dev, "tx_status: addr:%pad\n", &addr);

                        for (i = 0; i < txd->sglen; i++) {
                                dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n",
                                        i, txd->sg[i].addr, txd->sg[i].len);
                                if (addr >= txd->sg[i].addr &&
                                    addr < txd->sg[i].addr + txd->sg[i].len) {
                                        unsigned len;

                                        len = txd->sg[i].len -
                                                (addr - txd->sg[i].addr);
                                        dev_vdbg(d->slave.dev, "tx_status: [%u] +%x\n",
                                                i, len);
                                        bytes += len;
                                        i++;
                                        break;
                                }
                        }
                        for (; i < txd->sglen; i++) {
                                dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x ++\n",
                                        i, txd->sg[i].addr, txd->sg[i].len);
                                bytes += txd->sg[i].len;
                        }
                }
                state->residue = bytes;
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        dev_vdbg(d->slave.dev, "tx_status: bytes 0x%x\n", state->residue);

        return ret;
}

/*
 * Move pending txds to the issued list, and re-init pending list.
 * If not already pending, add this channel to the list of pending
 * channels and trigger the tasklet to run.
 */
static void sa11x0_dma_issue_pending(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        unsigned long flags;

        spin_lock_irqsave(&c->vc.lock, flags);
        if (vchan_issue_pending(&c->vc)) {
                if (!c->phy) {
                        spin_lock(&d->lock);
                        if (list_empty(&c->node)) {
                                list_add_tail(&c->node, &d->chan_pending);
                                tasklet_schedule(&d->task);
                                dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
                        }
                        spin_unlock(&d->lock);
                }
        } else
                dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
        spin_unlock_irqrestore(&c->vc.lock, flags);
}

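/*
 * Prepare a slave scatter-gather transfer.  The direction must match the
 * channel's native DDAR direction and every buffer must be word aligned.
 * Entries longer than DMA_MAX_SIZE are split into roughly equal,
 * alignment-preserving hardware segments.
 */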
static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sg, unsigned int sglen,
        enum dma_transfer_direction dir, unsigned long flags, void *context)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_desc *txd;
        struct scatterlist *sgent;
        unsigned i, j = sglen;
        size_t size = 0;

        /* SA11x0 channels can only operate in their native direction */
        if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
                dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
                        &c->vc, c->ddar, dir);
                return NULL;
        }

        /* Do not allow zero-sized txds */
        if (sglen == 0)
                return NULL;

        for_each_sg(sg, sgent, sglen, i) {
                dma_addr_t addr = sg_dma_address(sgent);
                unsigned int len = sg_dma_len(sgent);

                if (len > DMA_MAX_SIZE)
                        j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1;
                if (addr & DMA_ALIGN) {
                        dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %pad\n",
                                &c->vc, &addr);
                        return NULL;
                }
        }

        txd = kzalloc(struct_size(txd, sg, j), GFP_ATOMIC);
        if (!txd) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
                return NULL;
        }

        j = 0;
        for_each_sg(sg, sgent, sglen, i) {
                dma_addr_t addr = sg_dma_address(sgent);
                unsigned len = sg_dma_len(sgent);

                size += len;

                do {
                        unsigned tlen = len;

                        /*
                         * Check whether the transfer will fit.  If not, try
                         * to split the transfer up such that we end up with
                         * equal chunks - but make sure that we preserve the
                         * alignment.  This avoids small segments.
                         */
                        if (tlen > DMA_MAX_SIZE) {
                                unsigned mult = DIV_ROUND_UP(tlen,
                                        DMA_MAX_SIZE & ~DMA_ALIGN);

                                tlen = (tlen / mult) & ~DMA_ALIGN;
                        }

                        txd->sg[j].addr = addr;
                        txd->sg[j].len = tlen;

                        addr += tlen;
                        len -= tlen;
                        j++;
                } while (len);
        }

        txd->ddar = c->ddar;
        txd->size = size;
        txd->sglen = j;

        dev_dbg(chan->device->dev, "vchan %p: txd %p: size %zu nr %u\n",
                &c->vc, &txd->vd, txd->size, txd->sglen);

        return vchan_tx_prep(&c->vc, &txd->vd, flags);
}

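/*
 * Prepare a cyclic (circular buffer) transfer.  Each period is split into
 * sgperiod hardware segments of at most DMA_MAX_SIZE bytes; the completion
 * path wraps back to the first segment instead of retiring the descriptor
 * and signals the client through vchan_cyclic_callback().
 */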
static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
        struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
        enum dma_transfer_direction dir, unsigned long flags)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_desc *txd;
        unsigned i, j, k, sglen, sgperiod;

        /* SA11x0 channels can only operate in their native direction */
        if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) {
                dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n",
                        &c->vc, c->ddar, dir);
                return NULL;
        }

        sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN);
        sglen = size * sgperiod / period;

        /* Do not allow zero-sized txds */
        if (sglen == 0)
                return NULL;

        txd = kzalloc(struct_size(txd, sg, sglen), GFP_ATOMIC);
        if (!txd) {
                dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc);
                return NULL;
        }

        for (i = k = 0; i < size / period; i++) {
                size_t tlen, len = period;

                for (j = 0; j < sgperiod; j++, k++) {
                        tlen = len;

                        if (tlen > DMA_MAX_SIZE) {
                                unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN);
                                tlen = (tlen / mult) & ~DMA_ALIGN;
                        }

                        txd->sg[k].addr = addr;
                        txd->sg[k].len = tlen;
                        addr += tlen;
                        len -= tlen;
                }

                WARN_ON(len != 0);
        }

        WARN_ON(k != sglen);

        txd->ddar = c->ddar;
        txd->size = size;
        txd->sglen = sglen;
        txd->cyclic = 1;
        txd->period = sgperiod;

        return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}

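/*
 * Apply a dma_slave_config to the channel's DDAR template: fold the
 * device FIFO address into DDAR and set the DDAR_DW/DDAR_BS bits from the
 * configured bus width and burst size.  Only 1- and 2-byte widths and
 * bursts of 4 or 8 are accepted.
 */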
static int sa11x0_dma_device_config(struct dma_chan *chan,
                                    struct dma_slave_config *cfg)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW);
        dma_addr_t addr;
        enum dma_slave_buswidth width;
        u32 maxburst;

        if (ddar & DDAR_RW) {
                addr = cfg->src_addr;
                width = cfg->src_addr_width;
                maxburst = cfg->src_maxburst;
        } else {
                addr = cfg->dst_addr;
                width = cfg->dst_addr_width;
                maxburst = cfg->dst_maxburst;
        }

        if ((width != DMA_SLAVE_BUSWIDTH_1_BYTE &&
             width != DMA_SLAVE_BUSWIDTH_2_BYTES) ||
            (maxburst != 4 && maxburst != 8))
                return -EINVAL;

        if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
                ddar |= DDAR_DW;
        if (maxburst == 8)
                ddar |= DDAR_BS;

        dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %pad width %u burst %u\n",
                &c->vc, &addr, width, maxburst);

        c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6;

        return 0;
}

static int sa11x0_dma_device_pause(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        LIST_HEAD(head);
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_IN_PROGRESS) {
                c->status = DMA_PAUSED;

                p = c->phy;
                if (p) {
                        writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
                } else {
                        spin_lock(&d->lock);
                        list_del_init(&c->node);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        LIST_HEAD(head);
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
        spin_lock_irqsave(&c->vc.lock, flags);
        if (c->status == DMA_PAUSED) {
                c->status = DMA_IN_PROGRESS;

                p = c->phy;
                if (p) {
                        writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S);
                } else if (!list_empty(&c->vc.desc_issued)) {
                        spin_lock(&d->lock);
                        list_add_tail(&c->node, &d->chan_pending);
                        spin_unlock(&d->lock);
                }
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);

        return 0;
}

static int sa11x0_dma_device_terminate_all(struct dma_chan *chan)
{
        struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
        struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device);
        struct sa11x0_dma_phy *p;
        LIST_HEAD(head);
        unsigned long flags;

        dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
        /* Clear the tx descriptor lists */
        spin_lock_irqsave(&c->vc.lock, flags);
        vchan_get_all_descriptors(&c->vc, &head);

        p = c->phy;
        if (p) {
                dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num);
                /* vchan is assigned to a pchan - stop the channel */
                writel(DCSR_RUN | DCSR_IE |
                       DCSR_STRTA | DCSR_DONEA |
                       DCSR_STRTB | DCSR_DONEB,
                       p->base + DMA_DCSR_C);

                if (p->txd_load) {
                        if (p->txd_load != p->txd_done)
                                list_add_tail(&p->txd_load->vd.node, &head);
                        p->txd_load = NULL;
                }
                if (p->txd_done) {
                        list_add_tail(&p->txd_done->vd.node, &head);
                        p->txd_done = NULL;
                }
                c->phy = NULL;
                spin_lock(&d->lock);
                p->vchan = NULL;
                spin_unlock(&d->lock);
                tasklet_schedule(&d->task);
        }
        spin_unlock_irqrestore(&c->vc.lock, flags);
        vchan_dma_desc_free_list(&c->vc, &head);

        return 0;
}

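/*
 * Static table of the virtual channels this driver exposes, one per
 * SA11x0 peripheral request line.  The DDAR value names the device and
 * its native direction; the string is what clients pass to
 * sa11x0_dma_filter_fn() (or match via the dma_slave_map below).
 */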
struct sa11x0_dma_channel_desc {
        u32 ddar;
        const char *name;
};

#define CD(d1, d2) { .ddar = DDAR_##d1 | d2, .name = #d1 }
static const struct sa11x0_dma_channel_desc chan_desc[] = {
        CD(Ser0UDCTr, 0),
        CD(Ser0UDCRc, DDAR_RW),
        CD(Ser1SDLCTr, 0),
        CD(Ser1SDLCRc, DDAR_RW),
        CD(Ser1UARTTr, 0),
        CD(Ser1UARTRc, DDAR_RW),
        CD(Ser2ICPTr, 0),
        CD(Ser2ICPRc, DDAR_RW),
        CD(Ser3UARTTr, 0),
        CD(Ser3UARTRc, DDAR_RW),
        CD(Ser4MCP0Tr, 0),
        CD(Ser4MCP0Rc, DDAR_RW),
        CD(Ser4MCP1Tr, 0),
        CD(Ser4MCP1Rc, DDAR_RW),
        CD(Ser4SSPTr, 0),
        CD(Ser4SSPRc, DDAR_RW),
};

static const struct dma_slave_map sa11x0_dma_map[] = {
        { "sa11x0-ir", "tx", "Ser2ICPTr" },
        { "sa11x0-ir", "rx", "Ser2ICPRc" },
        { "sa11x0-ssp", "tx", "Ser4SSPTr" },
        { "sa11x0-ssp", "rx", "Ser4SSPRc" },
};

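/*
 * Initialise and register the dma_device: install the driver's callbacks
 * and create one virtual channel for every entry in chan_desc before
 * registering with the dmaengine core.
 */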
static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
        struct device *dev)
{
        unsigned i;

        INIT_LIST_HEAD(&dmadev->channels);
        dmadev->dev = dev;
        dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources;
        dmadev->device_config = sa11x0_dma_device_config;
        dmadev->device_pause = sa11x0_dma_device_pause;
        dmadev->device_resume = sa11x0_dma_device_resume;
        dmadev->device_terminate_all = sa11x0_dma_device_terminate_all;
        dmadev->device_tx_status = sa11x0_dma_tx_status;
        dmadev->device_issue_pending = sa11x0_dma_issue_pending;

        for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
                struct sa11x0_dma_chan *c;

                c = kzalloc(sizeof(*c), GFP_KERNEL);
                if (!c) {
                        dev_err(dev, "no memory for channel %u\n", i);
                        return -ENOMEM;
                }

                c->status = DMA_IN_PROGRESS;
                c->ddar = chan_desc[i].ddar;
                c->name = chan_desc[i].name;
                INIT_LIST_HEAD(&c->node);

                c->vc.desc_free = sa11x0_dma_free_desc;
                vchan_init(&c->vc, dmadev);
        }

        return dma_async_device_register(dmadev);
}

static int sa11x0_dma_request_irq(struct platform_device *pdev, int nr,
        void *data)
{
        int irq = platform_get_irq(pdev, nr);

        if (irq <= 0)
                return -ENXIO;

        return request_irq(irq, sa11x0_dma_irq, 0, dev_name(&pdev->dev), data);
}

static void sa11x0_dma_free_irq(struct platform_device *pdev, int nr,
        void *data)
{
        int irq = platform_get_irq(pdev, nr);

        if (irq > 0)
                free_irq(irq, data);
}

static void sa11x0_dma_free_channels(struct dma_device *dmadev)
{
        struct sa11x0_dma_chan *c, *cn;

        list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) {
                list_del(&c->vc.chan.device_node);
                tasklet_kill(&c->vc.task);
                kfree(c);
        }
}

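/*
 * Probe: map the register block, reset all six physical channels, claim
 * one interrupt per channel, and register the slave-capable dma_device
 * (DMA_SLAVE and DMA_CYCLIC, 1/2-byte widths, both directions).
 */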
static int sa11x0_dma_probe(struct platform_device *pdev)
{
        struct sa11x0_dma_dev *d;
        struct resource *res;
        unsigned i;
        int ret;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENXIO;

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d) {
                ret = -ENOMEM;
                goto err_alloc;
        }

        spin_lock_init(&d->lock);
        INIT_LIST_HEAD(&d->chan_pending);

        d->slave.filter.fn = sa11x0_dma_filter_fn;
        d->slave.filter.mapcnt = ARRAY_SIZE(sa11x0_dma_map);
        d->slave.filter.map = sa11x0_dma_map;

        d->base = ioremap(res->start, resource_size(res));
        if (!d->base) {
                ret = -ENOMEM;
                goto err_ioremap;
        }

        tasklet_init(&d->task, sa11x0_dma_tasklet, (unsigned long)d);

        for (i = 0; i < NR_PHY_CHAN; i++) {
                struct sa11x0_dma_phy *p = &d->phy[i];

                p->dev = d;
                p->num = i;
                p->base = d->base + i * DMA_SIZE;
                writel_relaxed(DCSR_RUN | DCSR_IE | DCSR_ERROR |
                        DCSR_DONEA | DCSR_STRTA | DCSR_DONEB | DCSR_STRTB,
                        p->base + DMA_DCSR_C);
                writel_relaxed(0, p->base + DMA_DDAR);

                ret = sa11x0_dma_request_irq(pdev, i, p);
                if (ret) {
                        while (i) {
                                i--;
                                sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
                        }
                        goto err_irq;
                }
        }

        dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
        dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
        d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg;
        d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic;
        d->slave.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        d->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
        d->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
                                   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES);
        ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev);
        if (ret) {
                dev_warn(d->slave.dev, "failed to register slave async device: %d\n",
                        ret);
                goto err_slave_reg;
        }

        platform_set_drvdata(pdev, d);
        return 0;

 err_slave_reg:
        sa11x0_dma_free_channels(&d->slave);
        for (i = 0; i < NR_PHY_CHAN; i++)
                sa11x0_dma_free_irq(pdev, i, &d->phy[i]);
 err_irq:
        tasklet_kill(&d->task);
        iounmap(d->base);
 err_ioremap:
        kfree(d);
 err_alloc:
        return ret;
}

static int sa11x0_dma_remove(struct platform_device *pdev)
{
        struct sa11x0_dma_dev *d = platform_get_drvdata(pdev);
        unsigned pch;

        dma_async_device_unregister(&d->slave);

        sa11x0_dma_free_channels(&d->slave);
        for (pch = 0; pch < NR_PHY_CHAN; pch++)
                sa11x0_dma_free_irq(pdev, pch, &d->phy[pch]);
        tasklet_kill(&d->task);
        iounmap(d->base);
        kfree(d);

        return 0;
}

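/*
 * System PM: on suspend each physical channel is stopped and its buffer
 * addresses, transfer counts and relevant DCSR bits are saved, swapped if
 * necessary so that dbs[0]/dbt[0] always describe the buffer that was in
 * use.  On resume the registers are reprogrammed from this saved state
 * (DDAR comes from the channel's current descriptor) and the transfer is
 * restarted.
 */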
static int sa11x0_dma_suspend(struct device *dev)
{
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
        unsigned pch;

        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                struct sa11x0_dma_phy *p = &d->phy[pch];
                u32 dcsr, saved_dcsr;

                dcsr = saved_dcsr = readl_relaxed(p->base + DMA_DCSR_R);
                if (dcsr & DCSR_RUN) {
                        writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C);
                        dcsr = readl_relaxed(p->base + DMA_DCSR_R);
                }

                saved_dcsr &= DCSR_RUN | DCSR_IE;
                if (dcsr & DCSR_BIU) {
                        p->dbs[0] = readl_relaxed(p->base + DMA_DBSB);
                        p->dbt[0] = readl_relaxed(p->base + DMA_DBTB);
                        p->dbs[1] = readl_relaxed(p->base + DMA_DBSA);
                        p->dbt[1] = readl_relaxed(p->base + DMA_DBTA);
                        saved_dcsr |= (dcsr & DCSR_STRTA ? DCSR_STRTB : 0) |
                                      (dcsr & DCSR_STRTB ? DCSR_STRTA : 0);
                } else {
                        p->dbs[0] = readl_relaxed(p->base + DMA_DBSA);
                        p->dbt[0] = readl_relaxed(p->base + DMA_DBTA);
                        p->dbs[1] = readl_relaxed(p->base + DMA_DBSB);
                        p->dbt[1] = readl_relaxed(p->base + DMA_DBTB);
                        saved_dcsr |= dcsr & (DCSR_STRTA | DCSR_STRTB);
                }
                p->dcsr = saved_dcsr;

                writel(DCSR_STRTA | DCSR_STRTB, p->base + DMA_DCSR_C);
        }

        return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
        struct sa11x0_dma_dev *d = dev_get_drvdata(dev);
        unsigned pch;

        for (pch = 0; pch < NR_PHY_CHAN; pch++) {
                struct sa11x0_dma_phy *p = &d->phy[pch];
                struct sa11x0_dma_desc *txd = NULL;
                u32 dcsr = readl_relaxed(p->base + DMA_DCSR_R);

                WARN_ON(dcsr & (DCSR_BIU | DCSR_STRTA | DCSR_STRTB | DCSR_RUN));

                if (p->txd_done)
                        txd = p->txd_done;
                else if (p->txd_load)
                        txd = p->txd_load;

                if (!txd)
                        continue;

                writel_relaxed(txd->ddar, p->base + DMA_DDAR);

                writel_relaxed(p->dbs[0], p->base + DMA_DBSA);
                writel_relaxed(p->dbt[0], p->base + DMA_DBTA);
                writel_relaxed(p->dbs[1], p->base + DMA_DBSB);
                writel_relaxed(p->dbt[1], p->base + DMA_DBTB);
                writel_relaxed(p->dcsr, p->base + DMA_DCSR_S);
        }

        return 0;
}

static const struct dev_pm_ops sa11x0_dma_pm_ops = {
        .suspend_noirq = sa11x0_dma_suspend,
        .resume_noirq = sa11x0_dma_resume,
        .freeze_noirq = sa11x0_dma_suspend,
        .thaw_noirq = sa11x0_dma_resume,
        .poweroff_noirq = sa11x0_dma_suspend,
        .restore_noirq = sa11x0_dma_resume,
};

static struct platform_driver sa11x0_dma_driver = {
        .driver = {
                .name   = "sa11x0-dma",
                .pm     = &sa11x0_dma_pm_ops,
        },
        .probe          = sa11x0_dma_probe,
        .remove         = sa11x0_dma_remove,
};

bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param)
{
        if (chan->device->dev->driver == &sa11x0_dma_driver.driver) {
                struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
                const char *p = param;

                return !strcmp(c->name, p);
        }
        return false;
}
EXPORT_SYMBOL(sa11x0_dma_filter_fn);

static int __init sa11x0_dma_init(void)
{
        return platform_driver_register(&sa11x0_dma_driver);
}
subsys_initcall(sa11x0_dma_init);

static void __exit sa11x0_dma_exit(void)
{
        platform_driver_unregister(&sa11x0_dma_driver);
}
module_exit(sa11x0_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SA-11x0 DMA driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sa11x0-dma");