linux/drivers/dma/mpc512x_dma.c
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory to
 * memory transfers are supported (tested using the dmatest module).
 */

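/*
 * Usage sketch (illustrative only, not part of the driver): a dmaengine
 * client could drive a memory-to-memory transfer through this engine roughly
 * as follows. The local variables (chan, tx, dst, src, len) are hypothetical
 * and all error handling is omitted.
 *
 *      dma_cap_mask_t mask;
 *      struct dma_chan *chan;
 *      struct dma_async_tx_descriptor *tx;
 *      dma_cookie_t cookie;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, NULL, NULL);
 *      tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *                                                 DMA_CTRL_ACK);
 *      cookie = tx->tx_submit(tx);
 *      dma_async_issue_pending(chan);
 *      ...
 *      dma_release_channel(chan);
 *
 * Alternatively, the dmatest module can be loaded to exercise the driver.
 */
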
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS     64

/* Macro definitions */
#define MPC_DMA_CHANNELS        64
#define MPC_DMA_TCD_OFFSET      0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG      (1 << 31)
#define MPC_DMA_DMACR_ERGA      (1 << 3)
#define MPC_DMA_DMACR_ERCA      (1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD       (1 << 31)
#define MPC_DMA_DMAES_GPE       (1 << 15)
#define MPC_DMA_DMAES_CPE       (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
                                (((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE       (1 << 7)
#define MPC_DMA_DMAES_SOE       (1 << 6)
#define MPC_DMA_DMAES_DAE       (1 << 5)
#define MPC_DMA_DMAES_DOE       (1 << 4)
#define MPC_DMA_DMAES_NCE       (1 << 3)
#define MPC_DMA_DMAES_SGE       (1 << 2)
#define MPC_DMA_DMAES_SBE       (1 << 1)
#define MPC_DMA_DMAES_DBE       (1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE    (1 << 6)

#define MPC_DMA_TSIZE_1         0x00
#define MPC_DMA_TSIZE_2         0x01
#define MPC_DMA_TSIZE_4         0x02
#define MPC_DMA_TSIZE_16        0x04
#define MPC_DMA_TSIZE_32        0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
        /* 0x00 */
        u32 dmacr;              /* DMA control register */
        u32 dmaes;              /* DMA error status */
        /* 0x08 */
        u32 dmaerqh;            /* DMA enable request high(channels 63~32) */
        u32 dmaerql;            /* DMA enable request low(channels 31~0) */
        u32 dmaeeih;            /* DMA enable error interrupt high(ch63~32) */
        u32 dmaeeil;            /* DMA enable error interrupt low(ch31~0) */
        /* 0x18 */
        u8 dmaserq;             /* DMA set enable request */
        u8 dmacerq;             /* DMA clear enable request */
        u8 dmaseei;             /* DMA set enable error interrupt */
        u8 dmaceei;             /* DMA clear enable error interrupt */
        /* 0x1c */
        u8 dmacint;             /* DMA clear interrupt request */
        u8 dmacerr;             /* DMA clear error */
        u8 dmassrt;             /* DMA set start bit */
        u8 dmacdne;             /* DMA clear DONE status bit */
        /* 0x20 */
        u32 dmainth;            /* DMA interrupt request high(ch63~32) */
        u32 dmaintl;            /* DMA interrupt request low(ch31~0) */
        u32 dmaerrh;            /* DMA error high(ch63~32) */
        u32 dmaerrl;            /* DMA error low(ch31~0) */
        /* 0x30 */
        u32 dmahrsh;            /* DMA hw request status high(ch63~32) */
        u32 dmahrsl;            /* DMA hardware request status low(ch31~0) */
        union {
                u32 dmaihsa;    /* DMA interrupt high select AXE(ch63~32) */
                u32 dmagpor;    /* (General purpose register on MPC8308) */
        };
        u32 dmailsa;            /* DMA interrupt low select AXE(ch31~0) */
        /* 0x40 ~ 0xff */
        u32 reserve0[48];       /* Reserved */
        /* 0x100 */
        u8 dchpri[MPC_DMA_CHANNELS];
        /* DMA channels(0~63) priority */
};

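/*
 * Transfer Control Descriptor (TCD). This mirrors the 32-byte hardware TCD
 * layout; one TCD describes a single transfer or one link in a
 * scatter/gather chain.
 */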
struct __attribute__ ((__packed__)) mpc_dma_tcd {
        /* 0x00 */
        u32 saddr;              /* Source address */

        u32 smod:5;             /* Source address modulo */
        u32 ssize:3;            /* Source data transfer size */
        u32 dmod:5;             /* Destination address modulo */
        u32 dsize:3;            /* Destination data transfer size */
        u32 soff:16;            /* Signed source address offset */

        /* 0x08 */
        u32 nbytes;             /* Inner "minor" byte count */
        u32 slast;              /* Last source address adjustment */
        u32 daddr;              /* Destination address */

        /* 0x14 */
        u32 citer_elink:1;      /* Enable channel-to-channel linking on
                                 * minor loop complete
                                 */
        u32 citer_linkch:6;     /* Link channel for minor loop complete */
        u32 citer:9;            /* Current "major" iteration count */
        u32 doff:16;            /* Signed destination address offset */

        /* 0x18 */
        u32 dlast_sga;          /* Last Destination address adjustment/scatter
                                 * gather address
                                 */

        /* 0x1c */
        u32 biter_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 biter_linkch:6;
        u32 biter:9;            /* Beginning "major" iteration count */
        u32 bwc:2;              /* Bandwidth control */
        u32 major_linkch:6;     /* Link channel number */
        u32 done:1;             /* Channel done */
        u32 active:1;           /* Channel active */
        u32 major_elink:1;      /* Enable channel-to-channel linking on major
                                 * loop complete
                                 */
        u32 e_sg:1;             /* Enable scatter/gather processing */
        u32 d_req:1;            /* Disable request */
        u32 int_half:1;         /* Enable an interrupt when major counter is
                                 * half complete
                                 */
        u32 int_maj:1;          /* Enable an interrupt when major iteration
                                 * count completes
                                 */
        u32 start:1;            /* Channel start */
};

struct mpc_dma_desc {
        struct dma_async_tx_descriptor  desc;
        struct mpc_dma_tcd              *tcd;
        dma_addr_t                      tcd_paddr;
        int                             error;
        struct list_head                node;
};

struct mpc_dma_chan {
        struct dma_chan                 chan;
        struct list_head                free;
        struct list_head                prepared;
        struct list_head                queued;
        struct list_head                active;
        struct list_head                completed;
        struct mpc_dma_tcd              *tcd;
        dma_addr_t                      tcd_paddr;
        dma_cookie_t                    completed_cookie;

        /* Lock for this structure */
        spinlock_t                      lock;
};

struct mpc_dma {
        struct dma_device               dma;
        struct tasklet_struct           tasklet;
        struct mpc_dma_chan             channels[MPC_DMA_CHANNELS];
        struct mpc_dma_regs __iomem     *regs;
        struct mpc_dma_tcd __iomem      *tcd;
        int                             irq;
        int                             irq2;
        uint                            error_status;
        int                             is_mpc8308;

        /* Lock for error_status field in this structure */
        spinlock_t                      error_status_lock;
};

#define DRV_NAME        "mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct mpc_dma_chan, chan);
}

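/*
 * Note: chan_id is also the channel's index in mdma->channels[], which is
 * what makes the conversion below (container_of() on the indexed array
 * element) recover the parent struct mpc_dma.
 */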
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
        return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met when calling mpc_dma_execute():
 *      a) mchan->lock is acquired,
 *      b) mchan->active list is empty,
 *      c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
        struct mpc_dma_desc *first = NULL;
        struct mpc_dma_desc *prev = NULL;
        struct mpc_dma_desc *mdesc;
        int cid = mchan->chan.chan_id;

        /* Move all queued descriptors to active list */
        list_splice_tail_init(&mchan->queued, &mchan->active);

        /* Chain descriptors into one transaction */
        list_for_each_entry(mdesc, &mchan->active, node) {
                if (!first)
                        first = mdesc;

                if (!prev) {
                        prev = mdesc;
                        continue;
                }

                prev->tcd->dlast_sga = mdesc->tcd_paddr;
                prev->tcd->e_sg = 1;
                mdesc->tcd->start = 1;

                prev = mdesc;
        }

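        /* Interrupt when the last descriptor in the chain completes */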
        prev->tcd->int_maj = 1;

        /* Send first descriptor in chain into hardware */
        memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

        if (first != prev)
                mdma->tcd[cid].e_sg = 1;
        out_8(&mdma->regs->dmassrt, cid);
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        u32 status = is | es;
        int ch;

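        /* Handle each channel with a pending interrupt or error */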
        while ((ch = fls(status) - 1) >= 0) {
                status &= ~(1 << ch);
                mchan = &mdma->channels[ch + off];

                spin_lock(&mchan->lock);

                out_8(&mdma->regs->dmacint, ch + off);
                out_8(&mdma->regs->dmacerr, ch + off);

                /* Check error status */
                if (es & (1 << ch))
                        list_for_each_entry(mdesc, &mchan->active, node)
                                mdesc->error = -EIO;

                /* Execute queued descriptors */
                list_splice_tail_init(&mchan->active, &mchan->completed);
                if (!list_empty(&mchan->queued))
                        mpc_dma_execute(mchan);

                spin_unlock(&mchan->lock);
        }
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
        struct mpc_dma *mdma = data;
        uint es;

        /* Save error status register */
        es = in_be32(&mdma->regs->dmaes);
        spin_lock(&mdma->error_status_lock);
        if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
                mdma->error_status = es;
        spin_unlock(&mdma->error_status_lock);

        /* Handle interrupt on each channel */
        if (mdma->dma.chancnt > 32) {
                mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
                                        in_be32(&mdma->regs->dmaerrh), 32);
        }
        mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
                                        in_be32(&mdma->regs->dmaerrl), 0);

        /* Schedule tasklet */
        tasklet_schedule(&mdma->tasklet);

        return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
        dma_cookie_t last_cookie = 0;
        struct mpc_dma_chan *mchan;
        struct mpc_dma_desc *mdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < mdma->dma.chancnt; i++) {
                mchan = &mdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                if (!list_empty(&mchan->completed))
                        list_splice_tail_init(&mchan->completed, &list);
                spin_unlock_irqrestore(&mchan->lock, flags);

                if (list_empty(&list))
                        continue;

                /* Execute callbacks and run dependencies */
                list_for_each_entry(mdesc, &list, node) {
                        desc = &mdesc->desc;

                        if (desc->callback)
                                desc->callback(desc->callback_param);

                        last_cookie = desc->cookie;
                        dma_run_dependencies(desc);
                }

                /* Free descriptors */
                spin_lock_irqsave(&mchan->lock, flags);
                list_splice_tail_init(&list, &mchan->free);
                mchan->completed_cookie = last_cookie;
                spin_unlock_irqrestore(&mchan->lock, flags);
        }
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
        struct mpc_dma *mdma = (void *)data;
        unsigned long flags;
        uint es;

        spin_lock_irqsave(&mdma->error_status_lock, flags);
        es = mdma->error_status;
        mdma->error_status = 0;
        spin_unlock_irqrestore(&mdma->error_status_lock, flags);

        /* Print nice error report */
        if (es) {
                dev_err(mdma->dma.dev,
                        "Hardware reported the following error(s) on channel %u:\n",
                                                      MPC_DMA_DMAES_ERRCHN(es));

                if (es & MPC_DMA_DMAES_GPE)
                        dev_err(mdma->dma.dev, "- Group Priority Error\n");
                if (es & MPC_DMA_DMAES_CPE)
                        dev_err(mdma->dma.dev, "- Channel Priority Error\n");
                if (es & MPC_DMA_DMAES_SAE)
                        dev_err(mdma->dma.dev, "- Source Address Error\n");
                if (es & MPC_DMA_DMAES_SOE)
                        dev_err(mdma->dma.dev, "- Source Offset"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_DAE)
                        dev_err(mdma->dma.dev, "- Destination Address"
                                                                " Error\n");
                if (es & MPC_DMA_DMAES_DOE)
                        dev_err(mdma->dma.dev, "- Destination Offset"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_NCE)
                        dev_err(mdma->dma.dev, "- NBytes/Citer"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_SGE)
                        dev_err(mdma->dma.dev, "- Scatter/Gather"
                                                " Configuration Error\n");
                if (es & MPC_DMA_DMAES_SBE)
                        dev_err(mdma->dma.dev, "- Source Bus Error\n");
                if (es & MPC_DMA_DMAES_DBE)
                        dev_err(mdma->dma.dev, "- Destination Bus Error\n");
        }

        mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
        struct mpc_dma_desc *mdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        mdesc = container_of(txd, struct mpc_dma_desc, desc);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* If channel is idle, execute all queued descriptors */
        if (list_empty(&mchan->active))
                mpc_dma_execute(mchan);

        /* Update cookie; valid cookies are positive, so wrap back to 1 */
        cookie = mchan->chan.cookie + 1;
        if (cookie <= 0)
                cookie = 1;

        mchan->chan.cookie = cookie;
        mdesc->desc.cookie = cookie;

        spin_unlock_irqrestore(&mchan->lock, flags);

        return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        /* Alloc DMA memory for Transfer Control Descriptors */
        tcd = dma_alloc_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                        &tcd_paddr, GFP_KERNEL);
        if (!tcd)
                return -ENOMEM;

        /* Alloc descriptors for this channel */
        for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
                mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
                if (!mdesc) {
                        dev_notice(mdma->dma.dev, "Memory allocation error. "
                                        "Allocated only %u descriptors\n", i);
                        break;
                }

                dma_async_tx_descriptor_init(&mdesc->desc, chan);
                mdesc->desc.flags = DMA_CTRL_ACK;
                mdesc->desc.tx_submit = mpc_dma_tx_submit;

                mdesc->tcd = &tcd[i];
                mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

                list_add_tail(&mdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0) {
                dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                                tcd, tcd_paddr);
                return -ENOMEM;
        }

        spin_lock_irqsave(&mchan->lock, flags);
        mchan->tcd = tcd;
        mchan->tcd_paddr = tcd_paddr;
        list_splice_tail_init(&descs, &mchan->free);
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Enable Error Interrupt */
        out_8(&mdma->regs->dmaseei, chan->chan_id);

        return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc, *tmp;
        struct mpc_dma_tcd *tcd;
        dma_addr_t tcd_paddr;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&mchan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&mchan->prepared));
        BUG_ON(!list_empty(&mchan->queued));
        BUG_ON(!list_empty(&mchan->active));
        BUG_ON(!list_empty(&mchan->completed));

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);
        tcd = mchan->tcd;
        tcd_paddr = mchan->tcd_paddr;

        spin_unlock_irqrestore(&mchan->lock, flags);

        /* Free DMA memory used by descriptors */
        dma_free_coherent(mdma->dma.dev,
                        MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
                                                                tcd, tcd_paddr);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node)
                kfree(mdesc);

        /* Disable Error Interrupt */
        out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
        /*
         * We are posting descriptors to the hardware as soon as
         * they are ready, so this function does nothing.
         */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
               struct dma_tx_state *txstate)
{
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        unsigned long flags;
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        spin_lock_irqsave(&mchan->lock, flags);
        last_used = mchan->chan.cookie;
        last_complete = mchan->completed_cookie;
        spin_unlock_irqrestore(&mchan->lock, flags);

        dma_set_tx_state(txstate, last_complete, last_used, 0);
        return dma_async_is_complete(cookie, last_complete, last_used);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
                                        size_t len, unsigned long flags)
{
        struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
        struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
        struct mpc_dma_desc *mdesc = NULL;
        struct mpc_dma_tcd *tcd;
        unsigned long iflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, iflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
                                                                        node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, iflags);

        if (!mdesc) {
                /* try to free completed descriptors */
                mpc_dma_process_completed(mdma);
                return NULL;
        }

        mdesc->error = 0;
        tcd = mdesc->tcd;

        /* Prepare Transfer Control Descriptor for this transaction */
        memset(tcd, 0, sizeof(struct mpc_dma_tcd));

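        /*
         * Pick the widest transfer size for which source, destination and
         * length are all aligned (OR-ing the three lets a single
         * IS_ALIGNED() check cover them all).
         */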
        if (IS_ALIGNED(src | dst | len, 32)) {
                tcd->ssize = MPC_DMA_TSIZE_32;
                tcd->dsize = MPC_DMA_TSIZE_32;
                tcd->soff = 32;
                tcd->doff = 32;
        } else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
                /* MPC8308 doesn't support 16 byte transfers */
                tcd->ssize = MPC_DMA_TSIZE_16;
                tcd->dsize = MPC_DMA_TSIZE_16;
                tcd->soff = 16;
                tcd->doff = 16;
        } else if (IS_ALIGNED(src | dst | len, 4)) {
                tcd->ssize = MPC_DMA_TSIZE_4;
                tcd->dsize = MPC_DMA_TSIZE_4;
                tcd->soff = 4;
                tcd->doff = 4;
        } else if (IS_ALIGNED(src | dst | len, 2)) {
                tcd->ssize = MPC_DMA_TSIZE_2;
                tcd->dsize = MPC_DMA_TSIZE_2;
                tcd->soff = 2;
                tcd->doff = 2;
        } else {
                tcd->ssize = MPC_DMA_TSIZE_1;
                tcd->dsize = MPC_DMA_TSIZE_1;
                tcd->soff = 1;
                tcd->doff = 1;
        }

        tcd->saddr = src;
        tcd->daddr = dst;
        tcd->nbytes = len;
        tcd->biter = 1;
        tcd->citer = 1;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, iflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, iflags);

        return &mdesc->desc;
}

static int __devinit mpc_dma_probe(struct platform_device *op,
                                        const struct of_device_id *match)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct mpc_dma *mdma;
        struct mpc_dma_chan *mchan;
        struct resource res;
        ulong regs_start, regs_size;
        int retval, i;

        mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
        if (!mdma) {
                dev_err(dev, "Memory exhausted!\n");
                return -ENOMEM;
        }

        mdma->irq = irq_of_parse_and_map(dn, 0);
        if (mdma->irq == NO_IRQ) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
                mdma->is_mpc8308 = 1;
                mdma->irq2 = irq_of_parse_and_map(dn, 1);
                if (mdma->irq2 == NO_IRQ) {
                        dev_err(dev, "Error mapping IRQ!\n");
                        return -EINVAL;
                }
        }

        retval = of_address_to_resource(dn, 0, &res);
        if (retval) {
                dev_err(dev, "Error parsing memory region!\n");
                return retval;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
                dev_err(dev, "Error requesting memory region!\n");
                return -EBUSY;
        }

        mdma->regs = devm_ioremap(dev, regs_start, regs_size);
        if (!mdma->regs) {
                dev_err(dev, "Error mapping memory region!\n");
                return -ENOMEM;
        }

        mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
                                                        + MPC_DMA_TCD_OFFSET);

        retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
                                                                        mdma);
        if (retval) {
                dev_err(dev, "Error requesting IRQ!\n");
                return -EINVAL;
        }

        if (mdma->is_mpc8308) {
                retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
                                DRV_NAME, mdma);
                if (retval) {
                        dev_err(dev, "Error requesting IRQ2!\n");
                        return -EINVAL;
                }
        }

        spin_lock_init(&mdma->error_status_lock);

        dma = &mdma->dma;
        dma->dev = dev;
        if (!mdma->is_mpc8308)
                dma->chancnt = MPC_DMA_CHANNELS;
        else
                dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
        dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = mpc_dma_free_chan_resources;
        dma->device_issue_pending = mpc_dma_issue_pending;
        dma->device_tx_status = mpc_dma_tx_status;
        dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_MEMCPY, dma->cap_mask);

        for (i = 0; i < dma->chancnt; i++) {
                mchan = &mdma->channels[i];

                mchan->chan.device = dma;
                mchan->chan.chan_id = i;
                mchan->chan.cookie = 1;
                mchan->completed_cookie = mchan->chan.cookie;

                INIT_LIST_HEAD(&mchan->free);
                INIT_LIST_HEAD(&mchan->prepared);
                INIT_LIST_HEAD(&mchan->queued);
                INIT_LIST_HEAD(&mchan->active);
                INIT_LIST_HEAD(&mchan->completed);

                spin_lock_init(&mchan->lock);
                list_add_tail(&mchan->chan.device_node, &dma->channels);
        }

        tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

        /*
         * Configure DMA Engine:
         * - Dynamic clock,
         * - Round-robin group arbitration,
         * - Round-robin channel arbitration.
         */
        if (!mdma->is_mpc8308) {
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
                                        MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

                /* Disable hardware DMA requests */
                out_be32(&mdma->regs->dmaerqh, 0);
                out_be32(&mdma->regs->dmaerql, 0);

                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeih, 0);
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

                /* Route interrupts to IPIC */
                out_be32(&mdma->regs->dmaihsa, 0);
                out_be32(&mdma->regs->dmailsa, 0);
        } else {
                /* MPC8308 has 16 channels and lacks some registers */
                out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

                /* enable snooping */
                out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
                /* Disable error interrupts */
                out_be32(&mdma->regs->dmaeeil, 0);

                /* Clear interrupts status */
                out_be32(&mdma->regs->dmaintl, 0xFFFF);
                out_be32(&mdma->regs->dmaerrl, 0xFFFF);
        }

        /* Register DMA engine */
        dev_set_drvdata(dev, mdma);
        retval = dma_async_device_register(dma);
        if (retval) {
                devm_free_irq(dev, mdma->irq, mdma);
                irq_dispose_mapping(mdma->irq);
        }

        return retval;
}

static int __devexit mpc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct mpc_dma *mdma = dev_get_drvdata(dev);

        dma_async_device_unregister(&mdma->dma);
        devm_free_irq(dev, mdma->irq, mdma);
        irq_dispose_mapping(mdma->irq);

        return 0;
}

static struct of_device_id mpc_dma_match[] = {
        { .compatible = "fsl,mpc5121-dma", },
        {},
};

static struct of_platform_driver mpc_dma_driver = {
        .probe          = mpc_dma_probe,
        .remove         = __devexit_p(mpc_dma_remove),
        .driver = {
                .name = DRV_NAME,
                .owner = THIS_MODULE,
                .of_match_table = mpc_dma_match,
        },
};

static int __init mpc_dma_init(void)
{
        return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);

static void __exit mpc_dma_exit(void)
{
        of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");