/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
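
/*
 * Client usage sketch (illustrative only; the filter routine and its
 * parameter below are hypothetical, not part of this driver):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
 *	tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 */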

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
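
/*
 * A descriptor cycles through these states in order:
 * FREE -> PREP (plucked from the DMAC pool by prep_xxx) -> BUSY
 * (submitted to the PL330 core) -> DONE (xfer finished) -> FREE
 * (spliced back into the pool by the tasklet once the client
 * callback has run).
 */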

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of descriptors queued for transfer */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC.
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan peripherals[0]; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
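
/*
 * Each descriptor thus has two faces: 'txd' is what the generic
 * DMA Engine API sees, while 'px', 'rqcfg' and 'req' describe the
 * same transfer to the PL330 core. prep_xxx fills in both and
 * tx_submit moves the descriptor onto the channel's work_list.
 */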

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
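
/*
 * Note that the client callbacks above run with no spinlock held, so
 * a callback may legitimately prepare and submit further descriptors;
 * only the final splice back into the DMAC pool takes pool_lock.
 */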

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
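
/*
 * fill_queue() keeps the PL330 core fed: BUSY entries are skipped and
 * PREP entries submitted until the core returns -EAGAIN, which it does
 * once its per-thread queue (two requests deep) is full.
 */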

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit the reqs immediately following the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
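
/*
 * dma_pl330_rqcb() is the xfer_cb hooked up in _init_desc() and is
 * invoked by the PL330 core from IRQ context (via pl330_update()), so
 * it only flips the status; completion callbacks and pool maintenance
 * are deferred to the tasklet.
 */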

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	/* Kill the tasklet before taking the lock: tasklet_kill() may
	 * busy-wait on a running tasklet and must not be called with
	 * interrupts disabled.
	 */
	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * prep_xxx returns the last descriptor of the circular list it built,
 * so the argument to submit corresponds to the last descriptor of
 * that list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
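
/*
 * Cookie assignment example: with chan->cookie at 41 and a chain of
 * three descriptors, the two earlier nodes get cookies 42 and 43 and
 * the last gets 44, which also becomes the new chan->cookie.
 * dma_cookie_t is signed, so on overflow the counter wraps back to 1,
 * never to 0 or a negative value.
 */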

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.rqtype = peri->rqtype;
	desc->req.peri = peri->peri_id;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than those that
	 * can be programmed with 256 bytes of MC buffer, but since a
	 * req size is seldom going to be word-unaligned and larger
	 * than 200MB, we take it easy.
	 * Also, should the limit be reached, we'd rather have the
	 * platform increase the MC buffer size than complicate this
	 * driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
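
/*
 * Worked example: an 8-byte (64-bit) data bus with a 16-deep data
 * buffer gives burst_len = 8 * 16 = 128; with brst_size = 3 (8-byte
 * beats) that is 128 >> 3 = 16 beats, the PL330 maximum. For
 * len = 4104 bytes (513 beats) the loop then settles on the largest
 * beat count that divides the request, i.e. burst_len = 9.
 */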

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len || !peri))
		return NULL;

	if (peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
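
/*
 * E.g. a memcpy of 4100 bytes on an 8-byte bus: 4100 % 8 != 0 but
 * 4100 % 4 == 0, so burst settles at 4 bytes, brst_size becomes 2
 * (1 << 2 == 4) and get_burst_len() then picks the beat count.
 */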

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i, burst_size;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = peri->fifo_addr;
	burst_size = peri->burst_sz;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			/* Return the already plucked descs to the pool */
			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = burst_size;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
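
/*
 * Note that each sg entry becomes its own PL330 request, with the
 * peripheral FIFO address held constant on the device side and the
 * burst length fixed at a single beat per request; peripherals that
 * want longer bursts would need this relaxed.
 */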

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;

	pdat = adev->dev.platform_data;

	if (!pdat || !pdat->nr_valid_peri) {
		dev_err(&adev->dev, "platform data missing\n");
		return -ENODEV;
	}

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
				+ sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat->mcbuf_sz;

	res = &adev->res;
	if (!request_mem_region(res->start, resource_size(res),
				"dma-pl330")) {
		ret = -EBUSY;
		goto probe_err0;
	}

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	for (i = 0; i < pdat->nr_valid_peri; i++) {
		struct dma_pl330_peri *peri = &pdat->peri[i];
		pch = &pdmac->peripherals[i];

		switch (peri->rqtype) {
		case MEMTOMEM:
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			break;
		case MEMTODEV:
		case DEVTOMEM:
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			break;
		default:
			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
			continue;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.private = peri;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	amba_set_drvdata(adev, pdmac);

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
probe_err0:
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Unregister from the DMA Engine core before tearing down */
	dma_async_device_unregister(&pdmac->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");