linux/drivers/dma/fsldma.c
/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which is
 *   also compatible with the MPC8560, MPC8555, MPC8548, MPC8641, and others.
 *   Support for the MPC8349 DMA controller is included as well.
 *
 * This driver instructs the DMA controller to issue the PCI Read Multiple
 * command for PCI read operations, instead of using the default PCI Read Line
 * command. Please be aware that this setting may result in read pre-fetching
 * on some platforms.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"

#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

static const char msg_ld_oom[] = "No free memory for link descriptor";

/*
 * Register Helpers
 */

static void set_sr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->sr, val, 32);
}

static u32 get_sr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->sr, 32);
}

static void set_mr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->mr, val, 32);
}

static u32 get_mr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->mr, 32);
}

static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_bcr(struct fsldma_chan *chan, u32 val)
{
	DMA_OUT(chan, &chan->regs->bcr, val, 32);
}

static u32 get_bcr(struct fsldma_chan *chan)
{
	return DMA_IN(chan, &chan->regs->bcr, 32);
}

/*
 * Descriptor Helpers
 */

static void set_desc_cnt(struct fsldma_chan *chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(chan, count, 32);
}

static void set_desc_src(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}

static void set_desc_dst(struct fsldma_chan *chan,
			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}

static void set_desc_next(struct fsldma_chan *chan,
			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}

static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	u64 snoop_bits;

	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;

	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
			| snoop_bits, 64);
}

/*
 * DMA Engine Hardware Control Helpers
 */

static void dma_init(struct fsldma_chan *chan)
{
	/* Reset the channel */
	set_mr(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Put the channel into the following modes:
		 * EIE - Error interrupt enable
		 * EOLNIE - End of links interrupt enable
		 * BWC - Bandwidth sharing among channels
		 */
		set_mr(chan, FSL_DMA_MR_BWC | FSL_DMA_MR_EIE
			| FSL_DMA_MR_EOLNIE);
		break;
	case FSL_DMA_IP_83XX:
		/* Put the channel into the following modes:
		 * EOTIE - End-of-transfer interrupt enable
		 * PRC_RM - PCI read multiple
		 */
		set_mr(chan, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM);
		break;
	}
}

static int dma_is_idle(struct fsldma_chan *chan)
{
	u32 sr = get_sr(chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

/*
 * Start the DMA controller
 *
 * Preconditions:
 * - the CDAR register must point to the start descriptor
 * - the MRn[CS] bit must be cleared
 */
static void dma_start(struct fsldma_chan *chan)
{
	u32 mode;

	mode = get_mr(chan);

	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		set_bcr(chan, 0);
		mode |= FSL_DMA_MR_EMP_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMP_EN;
	}

	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
		mode |= FSL_DMA_MR_EMS_EN;
	} else {
		mode &= ~FSL_DMA_MR_EMS_EN;
		mode |= FSL_DMA_MR_CS;
	}

	set_mr(chan, mode);
}

static void dma_halt(struct fsldma_chan *chan)
{
	u32 mode;
	int i;

	/* read the mode register */
	mode = get_mr(chan);

	/*
	 * The 85xx controller supports channel abort, which will stop
	 * the current transfer. On 83xx, this bit is the transfer error
	 * mask bit, which should not be changed.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		mode |= FSL_DMA_MR_CA;
		set_mr(chan, mode);

		mode &= ~FSL_DMA_MR_CA;
	}

	/* stop the DMA controller */
	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
	set_mr(chan, mode);

	/* wait for the DMA controller to become idle */
	for (i = 0; i < 100; i++) {
		if (dma_is_idle(chan))
			return;

		udelay(10);
	}

	if (!dma_is_idle(chan))
		chan_err(chan, "DMA halt timeout!\n");
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the loop is
 * active, the DMA reads from a small window starting at the source
 * address (SA): with a loop size of 4, the DMA reads from SA, SA + 1,
 * SA + 2, SA + 3, then wraps back to SA, SA + 1, and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_SAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_SAHTS_MASK;
		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
 * @chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the loop
 * is active, the DMA writes to a small window starting at the target
 * address (TA): with a loop size of 4, the DMA writes to TA, TA + 1,
 * TA + 2, TA + 3, then wraps back to TA, TA + 1, and so on.
 */
static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
	u32 mode;

	mode = get_mr(chan);

	switch (size) {
	case 0:
		mode &= ~FSL_DMA_MR_DAHE;
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		mode &= ~FSL_DMA_MR_DAHTS_MASK;
		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
		break;
	}

	set_mr(chan, mode);
}

/**
 * fsl_chan_set_request_count - Set DMA Request Count for external control
 * @chan : Freescale DMA channel
 * @size     : Number of bytes to transfer in a single request
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA request count is the number of bytes allowed to be transferred
 * before the channel pauses; a new assertion of DREQ# then resumes channel
 * operation.
 *
 * A size of 0 disables external pause control. The maximum size is 1024.
 */
static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
	u32 mode;

	BUG_ON(size > 1024);

	mode = get_mr(chan);
	mode &= ~FSL_DMA_MR_BWC_MASK;
	mode |= (__ilog2(size) << 24) & FSL_DMA_MR_BWC_MASK;

	set_mr(chan, mode);
}

/**
 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * The Freescale DMA channel can be controlled by the external signal DREQ#.
 * The DMA Request Count feature should be used in addition to this feature
 * to set the number of bytes to transfer before pausing the channel.
 */
static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}

/**
 * fsl_chan_toggle_ext_start - Toggle channel external start status
 * @chan : Freescale DMA channel
 * @enable   : 0 is disabled, 1 is enabled.
 *
 * When external start is enabled, the channel can be started by the
 * external DMA start pin, so dma_start() does not begin the transfer
 * immediately; the DMA channel waits until the control pin is
 * asserted.
 */
static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
	if (enable)
		chan->feature |= FSL_DMA_CHAN_START_EXT;
	else
		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}

int fsl_dma_external_start(struct dma_chan *dchan, int enable)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	fsl_chan_toggle_ext_start(chan, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(fsl_dma_external_start);

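/*
 * Example (illustrative sketch, not part of this driver): a client that
 * owns a channel on this controller could defer transfer start to the
 * external DMA start pin via the helper exported above. The function
 * name is an assumption made for the example.
 */
#if 0
static int example_use_external_start(struct dma_chan *chan)
{
	/* let the DREQ# pin start the channel instead of dma_start() */
	return fsl_dma_external_start(chan, 1);
}
#endif
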
static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);

	if (list_empty(&chan->ld_pending))
		goto out_splice;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 *
	 * This will un-set the EOL bit of the existing transaction, and the
	 * last link in this transaction will become the EOL descriptor.
	 */
	set_desc_next(chan, &tail->hw, desc->async_tx.phys);

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
out_splice:
	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}

static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
	struct fsl_desc_sw *child;
	dma_cookie_t cookie = -EINVAL;

	spin_lock_bh(&chan->desc_lock);

#ifdef CONFIG_PM
	if (unlikely(chan->pm_state != RUNNING)) {
		chan_dbg(chan, "cannot submit due to suspend\n");
		spin_unlock_bh(&chan->desc_lock);
		return -1;
	}
#endif

	/*
	 * assign cookies to all of the software descriptors
	 * that make up this transaction
	 */
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* put this transaction onto the tail of the pending queue */
	append_ld_queue(chan, desc);

	spin_unlock_bh(&chan->desc_lock);

	return cookie;
}

/**
 * fsl_dma_free_descriptor - Free descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 * @desc: descriptor to be freed
 */
static void fsl_dma_free_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	list_del(&desc->node);
	chan_dbg(chan, "LD %p free\n", desc);
	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
 * @chan : Freescale DMA channel
 *
 * Return - The allocated descriptor, or NULL on failure.
 */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		chan_dbg(chan, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = fsl_dma_tx_submit;
	desc->async_tx.phys = pdesc;

	chan_dbg(chan, "LD %p allocated\n", desc);

	return desc;
}

/**
 * fsldma_clean_completed_descriptor - free all descriptors which
 * have been completed and acked
 * @chan: Freescale DMA channel
 *
 * This function is used on all completed and acked descriptors.
 * All descriptors should only be freed in this function.
 */
static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node)
		if (async_tx_test_ack(&desc->async_tx))
			fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsldma_run_tx_complete_actions - cleanup a single link descriptor
 * @chan: Freescale DMA channel
 * @desc: descriptor to cleanup and free
 * @cookie: Freescale DMA transaction identifier
 *
 * This function is used on a descriptor which has been executed by the DMA
 * controller. It will run any callbacks and submit any dependencies.
 */
static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc, dma_cookie_t cookie)
{
	struct dma_async_tx_descriptor *txd = &desc->async_tx;
	dma_cookie_t ret = cookie;

	BUG_ON(txd->cookie < 0);

	if (txd->cookie > 0) {
		ret = txd->cookie;

		dma_descriptor_unmap(txd);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	/* Run any dependencies */
	dma_run_dependencies(txd);

	return ret;
}

/**
 * fsldma_clean_running_descriptor - move a completed descriptor from
 * ld_running to ld_completed
 * @chan: Freescale DMA channel
 * @desc: the descriptor which is completed
 *
 * Free the descriptor directly if it has been acked by the async_tx API,
 * or move it to the ld_completed queue.
 */
static void fsldma_clean_running_descriptor(struct fsldma_chan *chan,
		struct fsl_desc_sw *desc)
{
	/* Remove from the list of transactions */
	list_del(&desc->node);

	/*
	 * the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/*
		 * Move this descriptor to the list of descriptors which is
		 * completed, but still awaiting the 'ack' bit to be set.
		 */
		list_add_tail(&desc->node, &chan->ld_completed);
		return;
	}

	dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}

/**
 * fsl_chan_xfer_ld_queue - transfer any pending transactions
 * @chan : Freescale DMA channel
 *
 * HARDWARE STATE: idle
 * LOCKING: must hold chan->desc_lock
 */
static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/*
	 * If the list of pending descriptors is empty, then we
	 * don't need to do any work at all
	 */
	if (list_empty(&chan->ld_pending)) {
		chan_dbg(chan, "no pending LDs\n");
		return;
	}

	/*
	 * The DMA controller is not idle, which means that the interrupt
	 * handler will start any queued transactions when it runs after
	 * this transaction finishes
	 */
	if (!chan->idle) {
		chan_dbg(chan, "DMA controller still busy\n");
		return;
	}

	/*
	 * If there are some link descriptors which have not been
	 * transferred, we need to start the controller
	 */

	/*
	 * Move all elements from the queue of pending transactions
	 * onto the list of running transactions
	 */
	chan_dbg(chan, "idle, starting controller\n");
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/*
	 * The 85xx DMA controller doesn't clear the channel start bit
	 * automatically at the end of a transfer. Therefore we must clear
	 * it in software before starting the transfer.
	 */
	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
		u32 mode;

		mode = get_mr(chan);
		mode &= ~FSL_DMA_MR_CS;
		set_mr(chan, mode);
	}

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_cdar(chan, desc->async_tx.phys);
	get_cdar(chan);

	dma_start(chan);
	chan->idle = false;
}

/**
 * fsldma_cleanup_descriptors - cleanup link descriptors which are completed
 * and move them to ld_completed, where they wait to be freed once the
 * 'ack' flag is set
 * @chan: Freescale DMA channel
 *
 * This function is used on descriptors which have been executed by the DMA
 * controller. It will run any callbacks, submit any dependencies, then
 * free these descriptors if the 'ack' flag is set.
 */
static void fsldma_cleanup_descriptors(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	dma_addr_t curr_phys = get_cdar(chan);
	int seen_current = 0;

	fsldma_clean_completed_descriptor(chan);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
		/*
		 * do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/*
		 * stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (desc->async_tx.phys == curr_phys) {
			seen_current = 1;
			if (!dma_is_idle(chan))
				break;
		}

		cookie = fsldma_run_tx_complete_actions(chan, desc, cookie);

		fsldma_clean_running_descriptor(chan, desc);
	}

	/*
	 * Start any pending transactions automatically
	 *
	 * In the ideal case, we keep the DMA controller busy while we go
	 * ahead and free the descriptors below.
	 */
	fsl_chan_xfer_ld_queue(chan);

	if (cookie > 0)
		chan->common.completed_cookie = cookie;
}

/**
 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
 * @chan : Freescale DMA channel
 *
 * This function will create a dma pool for descriptor allocation.
 *
 * Return - The number of descriptors allocated.
 */
static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 1;

	/*
	 * The descriptor must be aligned to 32 bytes to meet the FSL DMA
	 * specification requirement.
	 */
	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
					  sizeof(struct fsl_desc_sw),
					  __alignof__(struct fsl_desc_sw), 0);
	if (!chan->desc_pool) {
		chan_err(chan, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	/* there is at least one descriptor free to be allocated */
	return 1;
}

/**
 * fsldma_free_desc_list - Free all descriptors in a queue
 * @chan: Freescale DMA channel
 * @list: the list to free
 *
 * LOCKING: must hold chan->desc_lock
 */
static void fsldma_free_desc_list(struct fsldma_chan *chan,
				  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
					  struct list_head *list)
{
	struct fsl_desc_sw *desc, *_desc;

	list_for_each_entry_safe_reverse(desc, _desc, list, node)
		fsl_dma_free_descriptor(chan, desc);
}

/**
 * fsl_dma_free_chan_resources - Free all resources of the channel.
 * @chan : Freescale DMA channel
 */
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	chan_dbg(chan, "free all channel resources\n");
	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	spin_unlock_bh(&chan->desc_lock);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static struct dma_async_tx_descriptor *
fsl_dma_prep_memcpy(struct dma_chan *dchan,
	dma_addr_t dma_dst, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsldma_chan *chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;

	if (!dchan)
		return NULL;

	if (!len)
		return NULL;

	chan = to_fsl_chan(dchan);

	do {

		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(chan);
		if (!new) {
			chan_err(chan, "%s\n", msg_ld_oom);
			goto fail;
		}

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(chan, &new->hw, copy);
		set_desc_src(chan, &new->hw, dma_src);
		set_desc_dst(chan, &new->hw, dma_dst);

		if (!first)
			first = new;
		else
			set_desc_next(chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(chan, new);

	return &first->async_tx;

fail:
	if (!first)
		return NULL;

	fsldma_free_desc_list_reverse(chan, &first->tx_list);
	return NULL;
}

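/*
 * Example (illustrative sketch, not part of this driver): a dmaengine
 * client reaches fsl_dma_prep_memcpy() through the generic API rather
 * than calling it directly. This assumes @dst and @src were already DMA
 * mapped for the channel's device; the function name is hypothetical.
 */
#if 0
static dma_cookie_t example_memcpy(struct dma_chan *chan,
				   dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* ends up in fsl_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks fsl_chan_xfer_ld_queue() */
	return cookie;
}
#endif
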
static int fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct fsldma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);

	/* Halt the DMA engine */
	dma_halt(chan);

	/* Remove and free all of the descriptors in the LD queue */
	fsldma_free_desc_list(chan, &chan->ld_pending);
	fsldma_free_desc_list(chan, &chan->ld_running);
	fsldma_free_desc_list(chan, &chan->ld_completed);
	chan->idle = true;

	spin_unlock_bh(&chan->desc_lock);
	return 0;
}

static int fsl_dma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct fsldma_chan *chan;
	int size;

	if (!dchan)
		return -EINVAL;

	chan = to_fsl_chan(dchan);

	/* make sure the channel supports setting burst size */
	if (!chan->set_request_count)
		return -ENXIO;

	/* we set the controller burst size depending on direction */
	if (config->direction == DMA_MEM_TO_DEV)
		size = config->dst_addr_width * config->dst_maxburst;
	else
		size = config->src_addr_width * config->src_maxburst;

	chan->set_request_count(chan, size);
	return 0;
}

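/*
 * Example (illustrative sketch, not part of this driver): a slave client
 * sets the request count indirectly through dmaengine_slave_config().
 * With a 4-byte bus width and a maxburst of 64, the channel pauses every
 * 256 bytes until DREQ# is asserted again. The function name and values
 * are assumptions made for the example.
 */
#if 0
static int example_config(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst = 64,
	};

	return dmaengine_slave_config(chan, &cfg);	/* -> fsl_dma_device_config() */
}
#endif
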
/**
 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
 * @chan : Freescale DMA channel
 */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);

	spin_lock_bh(&chan->desc_lock);
	fsl_chan_xfer_ld_queue(chan);
	spin_unlock_bh(&chan->desc_lock);
}

/**
 * fsl_tx_status - Determine the DMA status
 * @chan : Freescale DMA channel
 */
static enum dma_status fsl_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&chan->desc_lock);
	fsldma_cleanup_descriptors(chan);
	spin_unlock_bh(&chan->desc_lock);

	return dma_cookie_status(dchan, cookie, txstate);
}

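/*
 * Example (illustrative sketch, not part of this driver): callers
 * typically poll completion through the generic helper, which lands in
 * fsl_tx_status(). The function name is hypothetical.
 */
#if 0
static bool example_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_COMPLETE;
}
#endif
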
/*----------------------------------------------------------------------------*/
/* Interrupt Handling                                                         */
/*----------------------------------------------------------------------------*/

static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
	struct fsldma_chan *chan = data;
	u32 stat;

	/* save and clear the status register */
	stat = get_sr(chan);
	set_sr(chan, stat);
	chan_dbg(chan, "irq: stat = 0x%x\n", stat);

	/* check that this was really our device */
	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		chan_err(chan, "Transfer Error!\n");

	/*
	 * Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		chan_dbg(chan, "irq: Programming Error INT\n");
		stat &= ~FSL_DMA_SR_PE;
		if (get_bcr(chan) != 0)
			chan_err(chan, "Programming Error!\n");
	}

	/*
	 * On the MPC8349, an EOCDI event means we need to update the
	 * cookie and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		chan_dbg(chan, "irq: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
	}

	/*
	 * If the current transfer is the end-of-transfer, we should
	 * clear the Channel Start bit to prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		chan_dbg(chan, "irq: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
	}

	/* check that the DMA controller is really idle */
	if (!dma_is_idle(chan))
		chan_err(chan, "irq: controller not idle!\n");

	/* check that we handled all of the bits */
	if (stat)
		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);

	/*
	 * Schedule the tasklet to handle all cleanup of the current
	 * transaction. It will start a new transaction if there is
	 * one pending.
	 */
	tasklet_schedule(&chan->tasklet);
	chan_dbg(chan, "irq: Exit\n");
	return IRQ_HANDLED;
}

static void dma_do_tasklet(unsigned long data)
{
	struct fsldma_chan *chan = (struct fsldma_chan *)data;

	chan_dbg(chan, "tasklet entry\n");

	spin_lock_bh(&chan->desc_lock);

	/* the hardware is now idle and ready for more */
	chan->idle = true;

	/* Run all cleanup for descriptors which have been completed */
	fsldma_cleanup_descriptors(chan);

	spin_unlock_bh(&chan->desc_lock);

	chan_dbg(chan, "tasklet exit\n");
}

static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{
	struct fsldma_device *fdev = data;
	struct fsldma_chan *chan;
	unsigned int handled = 0;
	u32 gsr, mask;
	int i;

	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
						   : in_le32(fdev->regs);
	mask = 0xff000000;
	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (gsr & mask) {
			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
			fsldma_chan_irq(irq, chan);
			handled++;
		}

		gsr &= ~mask;
		mask >>= 8;
	}

	return IRQ_RETVAL(handled);
}

static void fsldma_free_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int i;

	if (fdev->irq) {
		dev_dbg(fdev->dev, "free per-controller IRQ\n");
		free_irq(fdev->irq, fdev);
		return;
	}

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (chan && chan->irq) {
			chan_dbg(chan, "free per-channel IRQ\n");
			free_irq(chan->irq, chan);
		}
	}
}

static int fsldma_request_irqs(struct fsldma_device *fdev)
{
	struct fsldma_chan *chan;
	int ret;
	int i;

	/* if we have a per-controller IRQ, use that */
	if (fdev->irq) {
		dev_dbg(fdev->dev, "request per-controller IRQ\n");
		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
				  "fsldma-controller", fdev);
		return ret;
	}

	/* no per-controller IRQ, use the per-channel IRQs */
	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq) {
			chan_err(chan, "interrupts property missing in device tree\n");
			ret = -ENODEV;
			goto out_unwind;
		}

		chan_dbg(chan, "request per-channel IRQ\n");
		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
				  "fsldma-chan", chan);
		if (ret) {
			chan_err(chan, "unable to request per-channel IRQ\n");
			goto out_unwind;
		}
	}

	return 0;

out_unwind:
	/* channel i never got its IRQ requested, so start unwinding at i - 1 */
	for (i--; i >= 0; i--) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		if (!chan->irq)
			continue;

		free_irq(chan->irq, chan);
	}

	return ret;
}

/*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem                                                     */
/*----------------------------------------------------------------------------*/

static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsldma_chan *chan;
	struct resource res;
	int err;

	/* alloc channel */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan) {
		err = -ENOMEM;
		goto out_return;
	}

	/* ioremap registers for use */
	chan->regs = of_iomap(node, 0);
	if (!chan->regs) {
		dev_err(fdev->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free_chan;
	}

	err = of_address_to_resource(node, 0, &res);
	if (err) {
		dev_err(fdev->dev, "unable to find 'reg' property\n");
		goto out_iounmap_regs;
	}

	chan->feature = feature;
	if (!fdev->feature)
		fdev->feature = chan->feature;

	/*
	 * If the DMA device's feature is different from the feature
	 * of its channels, report the bug
	 */
	WARN_ON(fdev->feature != chan->feature);

	chan->dev = fdev->dev;
	chan->id = (res.start & 0xfff) < 0x300 ?
		   ((res.start - 0x100) & 0xfff) >> 7 :
		   ((res.start - 0x200) & 0xfff) >> 7;
	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "too many channels for device\n");
		err = -EINVAL;
		goto out_iounmap_regs;
	}

	fdev->chan[chan->id] = chan;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);

	/* Initialize the channel */
	dma_init(chan);

	/* Clear cdar registers */
	set_cdar(chan, 0);

	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
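		/* fall through: 85xx channels also get the 83xx helpers */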
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
		chan->set_request_count = fsl_chan_set_request_count;
	}

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ld_pending);
	INIT_LIST_HEAD(&chan->ld_running);
	INIT_LIST_HEAD(&chan->ld_completed);
	chan->idle = true;
#ifdef CONFIG_PM
	chan->pm_state = RUNNING;
#endif

	chan->common.device = &fdev->common;
	dma_cookie_init(&chan->common);

	/* find the IRQ line, if it exists in the device tree */
	chan->irq = irq_of_parse_and_map(node, 0);

	/* Add the channel to DMA device channel list */
	list_add_tail(&chan->common.device_node, &fdev->common.channels);

	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
		 chan->irq ? chan->irq : fdev->irq);

	return 0;

out_iounmap_regs:
	iounmap(chan->regs);
out_free_chan:
	kfree(chan);
out_return:
	return err;
}

static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
	irq_dispose_mapping(chan->irq);
	list_del(&chan->common.device_node);
	iounmap(chan->regs);
	kfree(chan);
}

static int fsldma_of_probe(struct platform_device *op)
{
	struct fsldma_device *fdev;
	struct device_node *child;
	int err;

	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
	if (!fdev) {
		err = -ENOMEM;
		goto out_return;
	}

	fdev->dev = &op->dev;
	INIT_LIST_HEAD(&fdev->common.channels);

	/* ioremap the registers for use */
	fdev->regs = of_iomap(op->dev.of_node, 0);
	if (!fdev->regs) {
		dev_err(&op->dev, "unable to ioremap registers\n");
		err = -ENOMEM;
		goto out_free;
	}

	/* map the channel IRQ if it exists, but don't hook up the handler yet */
	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);

	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
	fdev->common.device_tx_status = fsl_tx_status;
	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
	fdev->common.device_config = fsl_dma_device_config;
	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
	fdev->common.dev = &op->dev;

	fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS;
	fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));

	platform_set_drvdata(op, fdev);

	/*
	 * We cannot use of_platform_bus_probe() because there is no
	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
	 * channel object.
	 */
	for_each_child_of_node(op->dev.of_node, child) {
		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
				"fsl,eloplus-dma-channel");
		}

		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
			fsl_dma_chan_probe(fdev, child,
				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
				"fsl,elo-dma-channel");
		}
	}

	/*
	 * Hook up the IRQ handler(s)
	 *
	 * If we have a per-controller interrupt, we prefer that to the
	 * per-channel interrupts to reduce the number of shared interrupt
	 * handlers on the same IRQ line
	 */
	err = fsldma_request_irqs(fdev);
	if (err) {
		dev_err(fdev->dev, "unable to request IRQs\n");
		goto out_free_fdev;
	}

	dma_async_device_register(&fdev->common);
	return 0;

out_free_fdev:
	irq_dispose_mapping(fdev->irq);
	iounmap(fdev->regs);
out_free:
	kfree(fdev);
out_return:
	return err;
}

static int fsldma_of_remove(struct platform_device *op)
{
	struct fsldma_device *fdev;
	unsigned int i;

	fdev = platform_get_drvdata(op);
	dma_async_device_unregister(&fdev->common);

	fsldma_free_irqs(fdev);

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		if (fdev->chan[i])
			fsl_dma_chan_remove(fdev->chan[i]);
	}

	iounmap(fdev->regs);
	kfree(fdev);

	return 0;
}

#ifdef CONFIG_PM
static int fsldma_suspend_late(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		if (unlikely(!chan->idle))
			goto out;
		chan->regs_save.mr = get_mr(chan);
		chan->pm_state = SUSPENDED;
		spin_unlock_bh(&chan->desc_lock);
	}
	return 0;

out:
	/* the failing channel still holds its lock; earlier ones do not */
	chan->pm_state = RUNNING;
	spin_unlock_bh(&chan->desc_lock);

	while (--i >= 0) {
		chan = fdev->chan[i];
		if (!chan)
			continue;
		spin_lock_bh(&chan->desc_lock);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}
	return -EBUSY;
}

static int fsldma_resume_early(struct device *dev)
{
	struct fsldma_device *fdev = dev_get_drvdata(dev);
	struct fsldma_chan *chan;
	u32 mode;
	int i;

	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
		chan = fdev->chan[i];
		if (!chan)
			continue;

		spin_lock_bh(&chan->desc_lock);
		mode = chan->regs_save.mr
			& ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA;
		set_mr(chan, mode);
		chan->pm_state = RUNNING;
		spin_unlock_bh(&chan->desc_lock);
	}

	return 0;
}

static const struct dev_pm_ops fsldma_pm_ops = {
	.suspend_late	= fsldma_suspend_late,
	.resume_early	= fsldma_resume_early,
};
#endif

static const struct of_device_id fsldma_of_ids[] = {
	{ .compatible = "fsl,elo3-dma", },
	{ .compatible = "fsl,eloplus-dma", },
	{ .compatible = "fsl,elo-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

static struct platform_driver fsldma_of_driver = {
	.driver = {
		.name = "fsl-elo-dma",
		.of_match_table = fsldma_of_ids,
#ifdef CONFIG_PM
		.pm = &fsldma_pm_ops,
#endif
	},
	.probe = fsldma_of_probe,
	.remove = fsldma_of_remove,
};

/*----------------------------------------------------------------------------*/
/* Module Init / Exit                                                         */
/*----------------------------------------------------------------------------*/

static __init int fsldma_init(void)
{
	pr_info("Freescale Elo series DMA driver\n");
	return platform_driver_register(&fsldma_of_driver);
}

static void __exit fsldma_exit(void)
{
	platform_driver_unregister(&fsldma_of_driver);
}

subsys_initcall(fsldma_init);
module_exit(fsldma_exit);

MODULE_DESCRIPTION("Freescale Elo series DMA driver");
MODULE_LICENSE("GPL");