linux/arch/arm/plat-samsung/s3c-pl330.c
<<
>>
Prefs
   1/* linux/arch/arm/plat-samsung/s3c-pl330.c
   2 *
   3 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
   4 *      Jaswinder Singh <jassi.brar@samsung.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 */
  11
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/slab.h>
  17#include <linux/platform_device.h>
  18#include <linux/clk.h>
  19#include <linux/err.h>
  20
  21#include <asm/hardware/pl330.h>
  22
  23#include <plat/s3c-pl330-pdata.h>
  24
/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channel threads currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs (dmac_list).
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 * @clk: Pointer of DMAC operation clock.
 */
struct s3c_pl330_dmac {
	unsigned		busy_chan;
	enum dma_ch		*peri;
	struct list_head	node;
	struct pl330_info	*pi;
	struct kmem_cache	*kmcache;
	struct clk		*clk;
};
  42
/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client, handed back in the callback.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer descriptor for the PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void			*token;
	struct list_head	node;
	struct pl330_xfer	px;
	struct s3c_pl330_chan	*chan;
};
  56
/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 *	a Physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 *	NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client (CIRCULAR, AUTOSTART).
 * @sdaddr: Device-side address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels (chan_list).
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to ping-pong data to the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be next executed.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 *	channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 *	channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void				*pl330_chan_id;
	enum dma_ch			id;
	unsigned int			options;
	unsigned long			sdaddr;
	struct list_head		node;
	struct pl330_req		*lrq;
	struct list_head		xfer_list;
	struct pl330_req		req[2];
	s3c2410_dma_cbfn_t		callback_fn;
	struct pl330_reqcfg		rqcfg;
	struct s3c_pl330_xfer		*xfer_head;
	struct s3c_pl330_dmac		*dmac;
	struct s3c2410_dma_client	*client;
};
  92
  93/* All DMACs in the platform */
  94static LIST_HEAD(dmac_list);
  95
  96/* All channels to peripherals in the platform */
  97static LIST_HEAD(chan_list);
  98
  99/*
 100 * Since we add resources(DMACs and Channels) to the global pool,
 101 * we need to guard access to the resources using a global lock
 102 */
 103static DEFINE_SPINLOCK(res_lock);
 104
 105/* Returns the channel with ID 'id' in the chan_list */
 106static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
 107{
 108        struct s3c_pl330_chan *ch;
 109
 110        list_for_each_entry(ch, &chan_list, node)
 111                if (ch->id == id)
 112                        return ch;
 113
 114        return NULL;
 115}
 116
 117/* Allocate a new channel with ID 'id' and add to chan_list */
 118static void chan_add(const enum dma_ch id)
 119{
 120        struct s3c_pl330_chan *ch = id_to_chan(id);
 121
 122        /* Return if the channel already exists */
 123        if (ch)
 124                return;
 125
 126        ch = kmalloc(sizeof(*ch), GFP_KERNEL);
 127        /* Return silently to work with other channels */
 128        if (!ch)
 129                return;
 130
 131        ch->id = id;
 132        ch->dmac = NULL;
 133
 134        list_add_tail(&ch->node, &chan_list);
 135}
 136
 137/* If the channel is not yet acquired by any client */
 138static bool chan_free(struct s3c_pl330_chan *ch)
 139{
 140        if (!ch)
 141                return false;
 142
 143        /* Channel points to some DMAC only when it's acquired */
 144        return ch->dmac ? false : true;
 145}
 146
 147/*
 148 * Returns 0 is peripheral i/f is invalid or not present on the dmac.
 149 * Index + 1, otherwise.
 150 */
 151static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
 152{
 153        enum dma_ch *id = dmac->peri;
 154        int i;
 155
 156        /* Discount invalid markers */
 157        if (ch_id == DMACH_MAX)
 158                return 0;
 159
 160        for (i = 0; i < PL330_MAX_PERI; i++)
 161                if (id[i] == ch_id)
 162                        return i + 1;
 163
 164        return 0;
 165}
 166
 167/* If all channel threads of the DMAC are busy */
 168static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
 169{
 170        struct pl330_info *pi = dmac->pi;
 171
 172        return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
 173}
 174
 175/*
 176 * Returns the number of free channels that
 177 * can be handled by this dmac only.
 178 */
 179static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
 180{
 181        enum dma_ch *id = dmac->peri;
 182        struct s3c_pl330_dmac *d;
 183        struct s3c_pl330_chan *ch;
 184        unsigned found, count = 0;
 185        enum dma_ch p;
 186        int i;
 187
 188        for (i = 0; i < PL330_MAX_PERI; i++) {
 189                p = id[i];
 190                ch = id_to_chan(p);
 191
 192                if (p == DMACH_MAX || !chan_free(ch))
 193                        continue;
 194
 195                found = 0;
 196                list_for_each_entry(d, &dmac_list, node) {
 197                        if (d != dmac && iface_of_dmac(d, ch->id)) {
 198                                found = 1;
 199                                break;
 200                        }
 201                }
 202                if (!found)
 203                        count++;
 204        }
 205
 206        return count;
 207}
 208
 209/*
 210 * Measure of suitability of 'dmac' handling 'ch'
 211 *
 212 * 0 indicates 'dmac' can not handle 'ch' either
 213 * because it is not supported by the hardware or
 214 * because all dmac channels are currently busy.
 215 *
 * A >0 value indicates 'dmac' has the capability.
 217 * The bigger the value the more suitable the dmac.
 218 */
 219#define MAX_SUIT        UINT_MAX
 220#define MIN_SUIT        0
 221
 222static unsigned suitablility(struct s3c_pl330_dmac *dmac,
 223                struct s3c_pl330_chan *ch)
 224{
 225        struct pl330_info *pi = dmac->pi;
 226        enum dma_ch *id = dmac->peri;
 227        struct s3c_pl330_dmac *d;
 228        unsigned s;
 229        int i;
 230
 231        s = MIN_SUIT;
 232        /* If all the DMAC channel threads are busy */
 233        if (dmac_busy(dmac))
 234                return s;
 235
 236        for (i = 0; i < PL330_MAX_PERI; i++)
 237                if (id[i] == ch->id)
 238                        break;
 239
 240        /* If the 'dmac' can't talk to 'ch' */
 241        if (i == PL330_MAX_PERI)
 242                return s;
 243
 244        s = MAX_SUIT;
 245        list_for_each_entry(d, &dmac_list, node) {
 246                /*
 247                 * If some other dmac can talk to this
 248                 * peri and has some channel free.
 249                 */
 250                if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
 251                        s = 0;
 252                        break;
 253                }
 254        }
 255        if (s)
 256                return s;
 257
 258        s = 100;
 259
 260        /* Good if free chans are more, bad otherwise */
 261        s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);
 262
 263        return s;
 264}
 265
 266/* More than one DMAC may have capability to transfer data with the
 267 * peripheral. This function assigns most suitable DMAC to manage the
 268 * channel and hence communicate with the peripheral.
 269 */
 270static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
 271{
 272        struct s3c_pl330_dmac *d, *dmac = NULL;
 273        unsigned sn, sl = MIN_SUIT;
 274
 275        list_for_each_entry(d, &dmac_list, node) {
 276                sn = suitablility(d, ch);
 277
 278                if (sn == MAX_SUIT)
 279                        return d;
 280
 281                if (sn > sl)
 282                        dmac = d;
 283        }
 284
 285        return dmac;
 286}
 287
 288/* Acquire the channel for peripheral 'id' */
 289static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
 290{
 291        struct s3c_pl330_chan *ch = id_to_chan(id);
 292        struct s3c_pl330_dmac *dmac;
 293
 294        /* If the channel doesn't exist or is already acquired */
 295        if (!ch || !chan_free(ch)) {
 296                ch = NULL;
 297                goto acq_exit;
 298        }
 299
 300        dmac = map_chan_to_dmac(ch);
 301        /* If couldn't map */
 302        if (!dmac) {
 303                ch = NULL;
 304                goto acq_exit;
 305        }
 306
 307        dmac->busy_chan++;
 308        ch->dmac = dmac;
 309
 310acq_exit:
 311        return ch;
 312}
 313
/*
 * Remove 'xfer' from its channel's queue, if it is queued, keeping
 * the channel's xfer_head pointer consistent.
 */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/*
	 * Find xfer's successor, wrapping around to the first entry
	 * when xfer is the last entry in the queue.
	 */
	if (xfer->node.next == &ch->xfer_list)
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		ch->xfer_head = t;	/* head moves on to the successor */

	list_del(&xfer->node);
}
 353
/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/*
	 * Advance xfer_head to the next entry, wrapping back to the
	 * start of the list when xfer is the last entry — this is
	 * what keeps a CIRCULAR channel cycling through its buffers.
	 */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
					struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
					struct s3c_pl330_xfer, node);

	/* Non-CIRCULAR (or forced) consumption removes the node */
	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}
 380
/*
 * Append 'xfer' to the channel's queue; 'front' pushes it to the
 * head instead (used when requeueing after a full engine).
 * Also keeps xfer_head pointing at the next xfer to be submitted.
 */
static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/*
	 * If the head is already submitted (CIRCULAR head), make the
	 * new xfer the next submission candidate instead.
	 */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}
 404
 405static inline void _finish_off(struct s3c_pl330_xfer *xfer,
 406                enum s3c2410_dma_buffresult res, int ffree)
 407{
 408        struct s3c_pl330_chan *ch;
 409
 410        if (!xfer)
 411                return;
 412
 413        ch = xfer->chan;
 414
 415        /* Do callback */
 416        if (ch->callback_fn)
 417                ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
 418
 419        /* Force Free or if buffer is not needed anymore */
 420        if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
 421                kmem_cache_free(ch->dmac->kmcache, xfer);
 422}
 423
/*
 * Attach the next queued xfer to PL330 request 'r' and submit it
 * to the PL330 core.
 * Returns 0 on success — also when 'r' was already in flight, or
 * when a full engine forced the xfer back onto the queue — and a
 * negative pl330_submit_req() error otherwise.
 * Called with res_lock held by all call sites in this file.
 */
static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;

			/*
			 * Start from the largest burst length the data
			 * buffer allows: bus-width bytes per beat times
			 * buffer depth, divided by the burst size.
			 */
			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			/* Shrink until the xfer divides evenly into bursts */
			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			/* Device xfers always use single-beat bursts */
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			/* Hard failure: complete the xfer with an error */
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}
 490
/*
 * Common completion handler for a finished pl330_req: free the req
 * slot and immediately try to keep the channel fed by resubmitting
 * 'r', then report the finished xfer's result to the client.
 */
static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
	struct pl330_req *r, enum pl330_op_err err)
{
	unsigned long flags;
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	/* Mark 'r' free so s3c_pl330_submit() can reuse it */
	r->x = NULL;

	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
	else
		res = S3C2410_RES_ERR;

	/* If last request had some xfer */
	if (xl) {
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		/* Client callback runs without res_lock held */
		_finish_off(xfer, res, 0);
	} else {
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
			__func__, __LINE__);
	}
}
 524
 525static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
 526{
 527        struct pl330_req *r = token;
 528        struct s3c_pl330_chan *ch = container_of(r,
 529                                        struct s3c_pl330_chan, req[0]);
 530        s3c_pl330_rq(ch, r, err);
 531}
 532
 533static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
 534{
 535        struct pl330_req *r = token;
 536        struct s3c_pl330_chan *ch = container_of(r,
 537                                        struct s3c_pl330_chan, req[1]);
 538        s3c_pl330_rq(ch, r, err);
 539}
 540
 541/* Release an acquired channel */
 542static void chan_release(struct s3c_pl330_chan *ch)
 543{
 544        struct s3c_pl330_dmac *dmac;
 545
 546        if (chan_free(ch))
 547                return;
 548
 549        dmac = ch->dmac;
 550        ch->dmac = NULL;
 551        dmac->busy_chan--;
 552}
 553
/*
 * Perform operation 'op' on channel 'id'.
 * START submits any pending xfers and starts the channel thread;
 * STOP aborts the active xfer; FLUSH additionally completes every
 * queued xfer with S3C2410_RES_ABORT. PAUSE/RESUME/TIMEOUT/STARTED
 * are accepted but do nothing.
 * Returns 0 on success, -EINVAL for a bad channel or op, or the
 * error from pl330_chan_ctrl().
 */
int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		/* No-ops: report success without touching the hardware */
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	/* Index of the req other than the last-submitted one */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		/* Drop the lock: _finish_off invokes the client callback */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			/* Again, callbacks run without res_lock held */
			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			/* Re-read: head may change while the lock is dropped */
			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
 662
/*
 * Queue a buffer of 'size' bytes at 'addr' for transfer on channel
 * 'id'. 'token' is handed back to the client in the buffdone
 * callback. Direction and the device-side address come from the
 * channel setup done via s3c2410_dma_devconfig().
 * Returns 0 on success, or -EINVAL/-ENOMEM.
 */
int s3c2410_dma_enqueue(enum dma_ch id, void *token,
			dma_addr_t addr, int size)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x)
		s3c_pl330_submit(ch, &ch->req[idx]);
	else
		s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	/*
	 * NOTE(review): ch->options is read after res_lock is dropped,
	 * which races with a concurrent s3c2410_dma_setflags — confirm
	 * callers serialize flag changes against enqueue.
	 */
	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
 730
/*
 * Acquire the channel for peripheral 'id' on behalf of 'client':
 * map it to a DMAC, claim a PL330 channel thread there and reset
 * the channel to defaults (word bursts, no options, no callback,
 * direction invalid until s3c2410_dma_devconfig() is called).
 * Returns 0 on success, -EBUSY if no channel/thread is available.
 */
int s3c2410_dma_request(enum dma_ch id,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		/* No free channel thread on the DMAC: undo the acquire */
		chan_release(ch);
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction until devconfig fixes it */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	/* Both ping-pong reqs share the channel's one configuration */
	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
 797
/*
 * Release channel 'id' previously acquired by 'client': flush the
 * hardware, abort and complete every in-flight and queued xfer with
 * S3C2410_RES_ABORT, then return the channel and its PL330 thread
 * to the pool.
 * Returns 0 on success (also for a bad/free channel), -EBUSY if
 * 'client' is not the channel's owner.
 */
int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		/* Drop the lock: _finish_off invokes the client callback */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		/* Same lock dance for the other ping-pong request */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and Abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);
 872
 873int s3c2410_dma_config(enum dma_ch id, int xferunit)
 874{
 875        struct s3c_pl330_chan *ch;
 876        struct pl330_info *pi;
 877        unsigned long flags;
 878        int i, dbwidth, ret = 0;
 879
 880        spin_lock_irqsave(&res_lock, flags);
 881
 882        ch = id_to_chan(id);
 883
 884        if (!ch || chan_free(ch)) {
 885                ret = -EINVAL;
 886                goto cfg_exit;
 887        }
 888
 889        pi = ch->dmac->pi;
 890        dbwidth = pi->pcfg.data_bus_width / 8;
 891
 892        /* Max size of xfer can be pcfg.data_bus_width */
 893        if (xferunit > dbwidth) {
 894                ret = -EINVAL;
 895                goto cfg_exit;
 896        }
 897
 898        i = 0;
 899        while (xferunit != (1 << i))
 900                i++;
 901
 902        /* If valid value */
 903        if (xferunit == (1 << i))
 904                ch->rqcfg.brst_size = i;
 905        else
 906                ret = -EINVAL;
 907
 908cfg_exit:
 909        spin_unlock_irqrestore(&res_lock, flags);
 910
 911        return ret;
 912}
 913EXPORT_SYMBOL(s3c2410_dma_config);
 914
 915/* Options that are supported by this driver */
 916#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
 917
 918int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
 919{
 920        struct s3c_pl330_chan *ch;
 921        unsigned long flags;
 922        int ret = 0;
 923
 924        spin_lock_irqsave(&res_lock, flags);
 925
 926        ch = id_to_chan(id);
 927
 928        if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
 929                ret = -EINVAL;
 930        else
 931                ch->options = options;
 932
 933        spin_unlock_irqrestore(&res_lock, flags);
 934
 935        return 0;
 936}
 937EXPORT_SYMBOL(s3c2410_dma_setflags);
 938
 939int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
 940{
 941        struct s3c_pl330_chan *ch;
 942        unsigned long flags;
 943        int ret = 0;
 944
 945        spin_lock_irqsave(&res_lock, flags);
 946
 947        ch = id_to_chan(id);
 948
 949        if (!ch || chan_free(ch))
 950                ret = -EINVAL;
 951        else
 952                ch->callback_fn = rtn;
 953
 954        spin_unlock_irqrestore(&res_lock, flags);
 955
 956        return ret;
 957}
 958EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
 959
 960int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
 961                          unsigned long address)
 962{
 963        struct s3c_pl330_chan *ch;
 964        unsigned long flags;
 965        int ret = 0;
 966
 967        spin_lock_irqsave(&res_lock, flags);
 968
 969        ch = id_to_chan(id);
 970
 971        if (!ch || chan_free(ch)) {
 972                ret = -EINVAL;
 973                goto devcfg_exit;
 974        }
 975
 976        switch (source) {
 977        case S3C2410_DMASRC_HW: /* P->M */
 978                ch->req[0].rqtype = DEVTOMEM;
 979                ch->req[1].rqtype = DEVTOMEM;
 980                ch->rqcfg.src_inc = 0;
 981                ch->rqcfg.dst_inc = 1;
 982                break;
 983        case S3C2410_DMASRC_MEM: /* M->P */
 984                ch->req[0].rqtype = MEMTODEV;
 985                ch->req[1].rqtype = MEMTODEV;
 986                ch->rqcfg.src_inc = 1;
 987                ch->rqcfg.dst_inc = 0;
 988                break;
 989        default:
 990                ret = -EINVAL;
 991                goto devcfg_exit;
 992        }
 993
 994        ch->sdaddr = address;
 995
 996devcfg_exit:
 997        spin_unlock_irqrestore(&res_lock, flags);
 998
 999        return ret;
1000}
1001EXPORT_SYMBOL(s3c2410_dma_devconfig);
1002
/**
 * s3c2410_dma_getposition - Read the current source/destination addresses.
 * @id: Channel ID as used with s3c2410_dma_request().
 * @src: Filled with the channel's current source address.
 * @dst: Filled with the channel's current destination address.
 *
 * Returns 0 on success, -EINVAL on a bad channel, or the negative error
 * from pl330_chan_status().
 *
 * NOTE(review): unlike the other channel accessors in this file, this
 * reads the channel without taking res_lock — confirm whether that is
 * intentional (status reads may be racy by design) or an oversight.
 */
int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct pl330_chanstatus status;
	int ret;

	if (!ch || chan_free(ch))
		return -EINVAL;

	ret = pl330_chan_status(ch->pl330_chan_id, &status);
	if (ret < 0)
		return ret;

	*src = status.src_addr;
	*dst = status.dst_addr;

	return 0;
}
1022
1023static irqreturn_t pl330_irq_handler(int irq, void *data)
1024{
1025        if (pl330_update(data))
1026                return IRQ_HANDLED;
1027        else
1028                return IRQ_NONE;
1029}
1030
1031static int pl330_probe(struct platform_device *pdev)
1032{
1033        struct s3c_pl330_dmac *s3c_pl330_dmac;
1034        struct s3c_pl330_platdata *pl330pd;
1035        struct pl330_info *pl330_info;
1036        struct resource *res;
1037        int i, ret, irq;
1038
1039        pl330pd = pdev->dev.platform_data;
1040
1041        /* Can't do without the list of _32_ peripherals */
1042        if (!pl330pd || !pl330pd->peri) {
1043                dev_err(&pdev->dev, "platform data missing!\n");
1044                return -ENODEV;
1045        }
1046
1047        pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
1048        if (!pl330_info)
1049                return -ENOMEM;
1050
1051        pl330_info->pl330_data = NULL;
1052        pl330_info->dev = &pdev->dev;
1053
1054        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1055        if (!res) {
1056                ret = -ENODEV;
1057                goto probe_err1;
1058        }
1059
1060        request_mem_region(res->start, resource_size(res), pdev->name);
1061
1062        pl330_info->base = ioremap(res->start, resource_size(res));
1063        if (!pl330_info->base) {
1064                ret = -ENXIO;
1065                goto probe_err2;
1066        }
1067
1068        irq = platform_get_irq(pdev, 0);
1069        if (irq < 0) {
1070                ret = irq;
1071                goto probe_err3;
1072        }
1073
1074        ret = request_irq(irq, pl330_irq_handler, 0,
1075                        dev_name(&pdev->dev), pl330_info);
1076        if (ret)
1077                goto probe_err4;
1078
1079        /* Allocate a new DMAC */
1080        s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
1081        if (!s3c_pl330_dmac) {
1082                ret = -ENOMEM;
1083                goto probe_err5;
1084        }
1085
1086        /* Get operation clock and enable it */
1087        s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
1088        if (IS_ERR(s3c_pl330_dmac->clk)) {
1089                dev_err(&pdev->dev, "Cannot get operation clock.\n");
1090                ret = -EINVAL;
1091                goto probe_err6;
1092        }
1093        clk_enable(s3c_pl330_dmac->clk);
1094
1095        ret = pl330_add(pl330_info);
1096        if (ret)
1097                goto probe_err7;
1098
1099        /* Hook the info */
1100        s3c_pl330_dmac->pi = pl330_info;
1101
1102        /* No busy channels */
1103        s3c_pl330_dmac->busy_chan = 0;
1104
1105        s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
1106                                sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
1107
1108        if (!s3c_pl330_dmac->kmcache) {
1109                ret = -ENOMEM;
1110                goto probe_err8;
1111        }
1112
1113        /* Get the list of peripherals */
1114        s3c_pl330_dmac->peri = pl330pd->peri;
1115
1116        /* Attach to the list of DMACs */
1117        list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
1118
1119        /* Create a channel for each peripheral in the DMAC
1120         * that is, if it doesn't already exist
1121         */
1122        for (i = 0; i < PL330_MAX_PERI; i++)
1123                if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
1124                        chan_add(s3c_pl330_dmac->peri[i]);
1125
1126        printk(KERN_INFO
1127                "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
1128        printk(KERN_INFO
1129                "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
1130                pl330_info->pcfg.data_buf_dep,
1131                pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
1132                pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
1133
1134        return 0;
1135
1136probe_err8:
1137        pl330_del(pl330_info);
1138probe_err7:
1139        clk_disable(s3c_pl330_dmac->clk);
1140        clk_put(s3c_pl330_dmac->clk);
1141probe_err6:
1142        kfree(s3c_pl330_dmac);
1143probe_err5:
1144        free_irq(irq, pl330_info);
1145probe_err4:
1146probe_err3:
1147        iounmap(pl330_info->base);
1148probe_err2:
1149        release_mem_region(res->start, resource_size(res));
1150probe_err1:
1151        kfree(pl330_info);
1152
1153        return ret;
1154}
1155
1156static int pl330_remove(struct platform_device *pdev)
1157{
1158        struct s3c_pl330_dmac *dmac, *d;
1159        struct s3c_pl330_chan *ch;
1160        unsigned long flags;
1161        int del, found;
1162
1163        if (!pdev->dev.platform_data)
1164                return -EINVAL;
1165
1166        spin_lock_irqsave(&res_lock, flags);
1167
1168        found = 0;
1169        list_for_each_entry(d, &dmac_list, node)
1170                if (d->pi->dev == &pdev->dev) {
1171                        found = 1;
1172                        break;
1173                }
1174
1175        if (!found) {
1176                spin_unlock_irqrestore(&res_lock, flags);
1177                return 0;
1178        }
1179
1180        dmac = d;
1181
1182        /* Remove all Channels that are managed only by this DMAC */
1183        list_for_each_entry(ch, &chan_list, node) {
1184
1185                /* Only channels that are handled by this DMAC */
1186                if (iface_of_dmac(dmac, ch->id))
1187                        del = 1;
1188                else
1189                        continue;
1190
1191                /* Don't remove if some other DMAC has it too */
1192                list_for_each_entry(d, &dmac_list, node)
1193                        if (d != dmac && iface_of_dmac(d, ch->id)) {
1194                                del = 0;
1195                                break;
1196                        }
1197
1198                if (del) {
1199                        spin_unlock_irqrestore(&res_lock, flags);
1200                        s3c2410_dma_free(ch->id, ch->client);
1201                        spin_lock_irqsave(&res_lock, flags);
1202                        list_del(&ch->node);
1203                        kfree(ch);
1204                }
1205        }
1206
1207        /* Disable operation clock */
1208        clk_disable(dmac->clk);
1209        clk_put(dmac->clk);
1210
1211        /* Remove the DMAC */
1212        list_del(&dmac->node);
1213        kfree(dmac);
1214
1215        spin_unlock_irqrestore(&res_lock, flags);
1216
1217        return 0;
1218}
1219
/* Platform glue: matches devices registered under the name "s3c-pl330" */
static struct platform_driver pl330_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c-pl330",
	},
	.probe		= pl330_probe,
	.remove		= pl330_remove,
};
1228
/* Module entry point: register the platform driver with the core. */
static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);
1234
1235static void __exit pl330_exit(void)
1236{
1237        platform_driver_unregister(&pl330_driver);
1238        return;
1239}
1240module_exit(pl330_exit);
1241
1242MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
1243MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
1244MODULE_LICENSE("GPL");
1245