linux/drivers/dma/sh/shdma.c
/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum size of a single DMA transfer is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE  2
#define SH_DMA_SLAVE_NUMBER 256
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        __raw_writel(data, shdev->chan_reg +
                     shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
        __raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
        return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
        u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

        if (shdev->pdata->dmaor_is_32bit)
                return __raw_readl(addr);
        else
                return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
        u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

        if (shdev->pdata->dmaor_is_32bit)
                __raw_writel(data, addr);
        else
                __raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

        return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev);
        dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

        spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
        unsigned short dmaor;
        unsigned long flags;

        spin_lock_irqsave(&sh_dmae_lock, flags);

        dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

        if (shdev->pdata->chclr_present) {
                int i;
                for (i = 0; i < shdev->pdata->channel_num; i++) {
                        struct sh_dmae_chan *sh_chan = shdev->chan[i];
                        if (sh_chan)
                                chclr_write(sh_chan, 0);
                }
        }

        dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

        dmaor = dmaor_read(shdev);

        spin_unlock_irqrestore(&sh_dmae_lock, flags);

        if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
                dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
        if (shdev->pdata->dmaor_init & ~dmaor)
                dev_warn(shdev->shdma_dev.dma_dev.dev,
                         "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
                         dmaor, shdev->pdata->dmaor_init);
        return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
        u32 chcr = chcr_read(sh_chan);

        if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
                return true; /* working */

        return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
                ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

        if (cnt >= pdata->ts_shift_num)
                cnt = 0;

        return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        int i;

        for (i = 0; i < pdata->ts_shift_num; i++)
                if (pdata->ts_shift[i] == l2size)
                        break;

        if (i == pdata->ts_shift_num)
                i = 0;

        return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
                ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}
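
/*
 * Illustrative sketch of the TS-field round trip implemented by the two
 * helpers above, using hypothetical platform data (the real ts_shift[]
 * table and mask/shift values come from the SoC setup code):
 *
 *      assume pdata->ts_shift[] = { 0, 1, 2, 3 }, so index 2 means
 *      2^2 = 4-byte transfer units;
 *
 *      u32 chcr  = log2size_to_chcr(sh_chan, 2);   // packs index 2 into the
 *                                                  // split TS low/high bits
 *      int shift = calc_xmit_shift(sh_chan, chcr); // extracts the index and
 *                                                  // returns ts_shift[2] == 2
 *
 * A size with no matching ts_shift[] entry falls back to index 0 in both
 * directions.
 */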

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
        sh_dmae_writel(sh_chan, hw->sar, SAR);
        sh_dmae_writel(sh_chan, hw->dar, DAR);
        sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        u32 chcr = chcr_read(sh_chan);

        if (shdev->pdata->needs_tend_set)
                sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

        chcr |= CHCR_DE | shdev->chcr_ie_bit;
        chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
        /*
         * Default configuration for dual address memory-memory transfer.
         * 0x400 represents auto-request.
         */
        u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
                                                   LOG2_DEFAULT_XFER_SIZE);
        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
        chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
        /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
        chcr_write(sh_chan, val);

        return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
        u16 __iomem *addr = shdev->dmars;
        unsigned int shift = chan_pdata->dmars_bit;

        if (dmae_is_busy(sh_chan))
                return -EBUSY;

        if (pdata->no_dmars)
                return 0;

        /* in the case of a missing DMARS resource use first memory window */
        if (!addr)
                addr = (u16 __iomem *)shdev->chan_reg;
        addr += chan_pdata->dmars / sizeof(u16);

        __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
                     addr);

        return 0;
}
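
/*
 * Worked example for the DMARS update above (register contents are
 * hypothetical): two channels share one 16-bit DMARS register, one MID/RID
 * byte each, and this channel has dmars_bit == 8.  Writing val == 0xc1:
 *
 *      old value            = 0x00a2   (the other channel keeps its 0xa2)
 *      mask = 0xff00 >> 8   = 0x00ff
 *      new value            = (0x00a2 & 0x00ff) | (0xc1 << 8) = 0xc1a2
 *
 * so only the byte that belongs to this channel is rewritten.
 */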

static void sh_dmae_start_xfer(struct shdma_chan *schan,
                               struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
                sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
                sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
        /* Get the ld start address from ld_queue */
        dmae_set_reg(sh_chan, &sh_desc->hw);
        dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
                               int slave_id)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);

        if (slave_id >= 0) {
                const struct sh_dmae_slave_config *cfg =
                        sh_chan->config;

                dmae_set_dmars(sh_chan, cfg->mid_rid);
                dmae_set_chcr(sh_chan, cfg->chcr);
        } else {
                dmae_init(sh_chan);
        }
}

static const struct sh_dmae_slave_config *dmae_find_slave(
        struct sh_dmae_chan *sh_chan, int slave_id)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        struct sh_dmae_pdata *pdata = shdev->pdata;
        const struct sh_dmae_slave_config *cfg;
        int i;

        if (slave_id >= SH_DMA_SLAVE_NUMBER)
                return NULL;

        for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
                if (cfg->slave_id == slave_id)
                        return cfg;

        return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
                             int slave_id, bool try)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
        if (!cfg)
                return -ENXIO;

        if (!try)
                sh_chan->config = cfg;

        return 0;
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
        struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
        u32 chcr = chcr_read(sh_chan);

        chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
        chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
                              struct shdma_desc *sdesc,
                              dma_addr_t src, dma_addr_t dst, size_t *len)
{
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);

        if (*len > schan->max_xfer_len)
                *len = schan->max_xfer_len;

        sh_desc->hw.sar = src;
        sh_desc->hw.dar = dst;
        sh_desc->hw.tcr = *len;

        return 0;
}
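
/*
 * Sketch of how the shdma-base core is expected to use the callback above
 * (sizes are only illustrative): a 40 MiB memcpy request exceeds
 * max_xfer_len = SH_DMA_TCR_MAX + 1 = 16 MiB, so each desc_setup() call
 * clamps *len and the core keeps issuing further descriptors with advanced
 * src/dst until the request is covered: 16 MiB + 16 MiB + 8 MiB.
 */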

static void sh_dmae_halt(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);

        if (!(chcr_read(sh_chan) & CHCR_TE))
                return false;

        /* DMA stop */
        dmae_halt(sh_chan);

        return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
                                  struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
                                                    shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        /*
         * hw.tcr holds the transfer length in bytes, while the TCR register
         * counts down the remaining transfer units, so scale the register
         * value up before subtracting it.
         */
        return sh_desc->hw.tcr -
                (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
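
/*
 * Worked example for the residue calculation above (numbers illustrative):
 * a transfer was set up with hw.tcr = 1024 bytes and xmit_shift = 2, i.e.
 * 4-byte units.  If the channel is halted while the TCR register still
 * reads 64 remaining units, the amount already transferred is
 *
 *      1024 - (64 << 2) = 1024 - 256 = 768 bytes
 *
 * which shdma-base reports as the partial transfer count on termination.
 */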

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
        bool ret;

        /* halt the dma controller */
        sh_dmae_ctl_stop(shdev);

        /* We cannot detect which channel caused the error; we have to reset all of them */
        ret = shdma_reset(&shdev->shdma_dev);

        sh_dmae_rst(shdev);

        return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
        struct sh_dmae_device *shdev = data;

        if (!(dmaor_read(shdev) & DMAOR_AE))
                return IRQ_NONE;

        sh_dmae_reset(shdev);
        return IRQ_HANDLED;
}

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
                                   struct shdma_desc *sdesc)
{
        struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);
        struct sh_dmae_desc *sh_desc = container_of(sdesc,
                                        struct sh_dmae_desc, shdma_desc);
        u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
        u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

        return  (sdesc->direction == DMA_DEV_TO_MEM &&
                 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
                (sdesc->direction != DMA_DEV_TO_MEM &&
                 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
        /* Fast path out if NMIF is not asserted for this controller */
        if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
                return false;

        return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
                               unsigned long cmd, void *data)
{
        struct sh_dmae_device *shdev;
        int ret = NOTIFY_DONE;
        bool triggered;

        /*
         * Only concern ourselves with NMI events.
         *
         * Normally we would check the die chain value, but as this needs
         * to be architecture independent, check for NMI context instead.
         */
        if (!in_nmi())
                return NOTIFY_DONE;

        rcu_read_lock();
        list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
                /*
                 * Only stop if one of the controllers has NMIF asserted,
                 * we do not want to interfere with regular address error
                 * handling or NMI events that don't concern the DMACs.
                 */
                triggered = sh_dmae_nmi_notify(shdev);
                if (triggered)
                        ret = NOTIFY_OK;
        }
        rcu_read_unlock();

        return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
        .notifier_call  = sh_dmae_nmi_handler,

        /* Run before NMI debug handler and KGDB */
        .priority       = 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
                                        int irq, unsigned long flags)
{
        const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
        struct shdma_dev *sdev = &shdev->shdma_dev;
        struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
        struct sh_dmae_chan *sh_chan;
        struct shdma_chan *schan;
        int err;

        sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
        if (!sh_chan) {
                dev_err(sdev->dma_dev.dev,
                        "No free memory for allocating dma channels!\n");
                return -ENOMEM;
        }

        schan = &sh_chan->shdma_chan;
        schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

        shdma_chan_probe(sdev, schan, id);

        sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

        /* set up channel irq */
        if (pdev->id >= 0)
                snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
                         "sh-dmae%d.%d", pdev->id, id);
        else
                snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
                         "sh-dma%d", id);

        err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
        if (err) {
                dev_err(sdev->dma_dev.dev,
                        "DMA channel %d request_irq error %d\n",
                        id, err);
                goto err_no_irq;
        }

        shdev->chan[id] = sh_chan;
        return 0;

err_no_irq:
        /* remove from dmaengine device node */
        shdma_chan_remove(schan);
        kfree(sh_chan);
        return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
        struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
        struct shdma_chan *schan;
        int i;

        shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
                struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);
                BUG_ON(!schan);

                shdma_free_irq(&sh_chan->shdma_chan);

                shdma_chan_remove(schan);
                kfree(sh_chan);
        }
        dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
        return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);

        return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
        return 0;
}

static int sh_dmae_resume(struct device *dev)
{
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
        int i, ret;

        ret = sh_dmae_rst(shdev);
        if (ret < 0)
                dev_err(dev, "Failed to reset!\n");

        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];

                if (!sh_chan->shdma_chan.desc_num)
                        continue;

                if (sh_chan->shdma_chan.slave_id >= 0) {
                        const struct sh_dmae_slave_config *cfg = sh_chan->config;
                        dmae_set_dmars(sh_chan, cfg->mid_rid);
                        dmae_set_chcr(sh_chan, cfg->chcr);
                } else {
                        dmae_init(sh_chan);
                }
        }

        return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
        .suspend                = sh_dmae_suspend,
        .resume                 = sh_dmae_resume,
        .runtime_suspend        = sh_dmae_runtime_suspend,
        .runtime_resume         = sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
        struct sh_dmae_chan *sh_chan = container_of(schan,
                                        struct sh_dmae_chan, shdma_chan);

        /*
         * Implicit BUG_ON(!sh_chan->config)
         * This is an exclusive slave DMA operation, may only be called after a
         * successful slave configuration.
         */
        return sh_chan->config->addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
        return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
        .desc_completed = sh_dmae_desc_completed,
        .halt_channel = sh_dmae_halt,
        .channel_busy = sh_dmae_channel_busy,
        .slave_addr = sh_dmae_slave_addr,
        .desc_setup = sh_dmae_desc_setup,
        .set_slave = sh_dmae_set_slave,
        .setup_xfer = sh_dmae_setup_xfer,
        .start_xfer = sh_dmae_start_xfer,
        .embedded_desc = sh_dmae_embedded_desc,
        .chan_irq = sh_dmae_chan_irq,
        .get_partial = sh_dmae_get_partial,
};

static int sh_dmae_probe(struct platform_device *pdev)
{
        struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
        unsigned long irqflags = IRQF_DISABLED,
                chan_flag[SH_DMAE_MAX_CHANNELS] = {};
        int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
        struct sh_dmae_device *shdev;
        struct dma_device *dma_dev;
        struct resource *chan, *dmars, *errirq_res, *chanirq_res;

        /* get platform data */
        if (!pdata || !pdata->channel_num)
                return -ENODEV;

        chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* DMARS area is optional */
        dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        /*
         * IRQ resources:
         * 1. there must always be at least one IRQ IO-resource. On SH4 it is
         *    the error IRQ, in which case it is the only IRQ in this resource:
         *    start == end. If it is the only IRQ resource, all channels also
         *    use the same IRQ.
         * 2. DMA channel IRQ resources can be specified one per resource or in
         *    ranges (start != end).
         * 3. if and only if all events (channels and, optionally, error) on
         *    this controller use the same IRQ, only one IRQ resource can be
         *    specified, otherwise there must be one IRQ per channel, even if
         *    some of them are equal.
         * 4. if all IRQs on this controller are equal, or if some specific
         *    IRQs specify IORESOURCE_IRQ_SHAREABLE in their resources, they
         *    will be requested with the IRQF_SHARED flag.
         * A hypothetical resource layout following these rules is sketched
         * below.
         */
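        /*
         * Hypothetical board-code sketch of the rules above (addresses and
         * IRQ numbers are made up, not taken from any real board file):
         *
         *      static struct resource hypothetical_dmae_resources[] = {
         *              {
         *                      .start  = 0xfe008020,   // channel/DMAOR registers
         *                      .end    = 0xfe00808f,
         *                      .flags  = IORESOURCE_MEM,
         *              }, {
         *                      .start  = 0xfe009000,   // DMARS (optional)
         *                      .end    = 0xfe009017,
         *                      .flags  = IORESOURCE_MEM,
         *              }, {
         *                      .start  = 34,           // error IRQ alone: start == end
         *                      .end    = 34,
         *                      .flags  = IORESOURCE_IRQ,
         *              }, {
         *                      .start  = 35,           // four channel IRQs as one
         *                      .end    = 38,           // range (rule 2); add
         *                      .flags  = IORESOURCE_IRQ, // IORESOURCE_IRQ_SHAREABLE
         *              },                                // for rule 4
         *      };
         */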
        errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!chan || !errirq_res)
                return -ENODEV;

        if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
                dev_err(&pdev->dev, "DMAC register region already claimed\n");
                return -EBUSY;
        }

        if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
                dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
                err = -EBUSY;
                goto ermrdmars;
        }

        err = -ENOMEM;
        shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                goto ealloc;
        }

        dma_dev = &shdev->shdma_dev.dma_dev;

        shdev->chan_reg = ioremap(chan->start, resource_size(chan));
        if (!shdev->chan_reg)
                goto emapchan;
        if (dmars) {
                shdev->dmars = ioremap(dmars->start, resource_size(dmars));
                if (!shdev->dmars)
                        goto emapdmars;
        }

        if (!pdata->slave_only)
                dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        if (pdata->slave && pdata->slave_num)
                dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        /* Default transfer size of 4 bytes requires 4-byte alignment */
        dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

        shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
        shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
        err = shdma_init(&pdev->dev, &shdev->shdma_dev,
                              pdata->channel_num);
        if (err < 0)
                goto eshdma;

        /* platform data */
        shdev->pdata = pdev->dev.platform_data;

        if (pdata->chcr_offset)
                shdev->chcr_offset = pdata->chcr_offset;
        else
                shdev->chcr_offset = CHCR;

        if (pdata->chcr_ie_bit)
                shdev->chcr_ie_bit = pdata->chcr_ie_bit;
        else
                shdev->chcr_ie_bit = CHCR_IE;

        platform_set_drvdata(pdev, shdev);

        pm_runtime_enable(&pdev->dev);
        err = pm_runtime_get_sync(&pdev->dev);
        if (err < 0)
                dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

        spin_lock_irq(&sh_dmae_lock);
        list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
        spin_unlock_irq(&sh_dmae_lock);

        /* reset dma controller - only needed as a test */
        err = sh_dmae_rst(shdev);
        if (err)
                goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

        if (!chanirq_res)
                chanirq_res = errirq_res;
        else
                irqres++;

        if (chanirq_res == errirq_res ||
            (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
                irqflags = IRQF_SHARED;

        errirq = errirq_res->start;

        err = request_irq(errirq, sh_dmae_err, irqflags,
                          "DMAC Address Error", shdev);
        if (err) {
                dev_err(&pdev->dev,
                        "DMA failed requesting irq #%d, error %d\n",
                        errirq, err);
                goto eirq_err;
        }

#else
        chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

        if (chanirq_res->start == chanirq_res->end &&
            !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
                /* Special case - all multiplexed */
                for (; irq_cnt < pdata->channel_num; irq_cnt++) {
                        if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
                                chan_irq[irq_cnt] = chanirq_res->start;
                                chan_flag[irq_cnt] = IRQF_SHARED;
                        } else {
                                irq_cap = 1;
                                break;
                        }
                }
        } else {
                do {
                        for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
                                if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
                                        irq_cap = 1;
                                        break;
                                }

                                if ((errirq_res->flags & IORESOURCE_BITS) ==
                                    IORESOURCE_IRQ_SHAREABLE)
                                        chan_flag[irq_cnt] = IRQF_SHARED;
                                else
                                        chan_flag[irq_cnt] = IRQF_DISABLED;
                                dev_dbg(&pdev->dev,
                                        "Found IRQ %d for channel %d\n",
                                        i, irq_cnt);
                                chan_irq[irq_cnt++] = i;
                        }

                        if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
                                break;

                        chanirq_res = platform_get_resource(pdev,
                                                IORESOURCE_IRQ, ++irqres);
                } while (irq_cnt < pdata->channel_num && chanirq_res);
        }

        /* Create DMA Channel */
        for (i = 0; i < irq_cnt; i++) {
                err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
                if (err)
                        goto chan_probe_err;
        }

        if (irq_cap)
                dev_notice(&pdev->dev, "Attempting to register %d DMA "
                           "channels when a maximum of %d are supported.\n",
                           pdata->channel_num, SH_DMAE_MAX_CHANNELS);

        pm_runtime_put(&pdev->dev);

        err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
        if (err < 0)
                goto edmadevreg;

        return err;

edmadevreg:
        pm_runtime_get(&pdev->dev);

chan_probe_err:
        sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
        free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        platform_set_drvdata(pdev, NULL);
        shdma_cleanup(&shdev->shdma_dev);
eshdma:
        if (dmars)
                iounmap(shdev->dmars);
emapdmars:
        iounmap(shdev->chan_reg);
        synchronize_rcu();
emapchan:
        kfree(shdev);
ealloc:
        if (dmars)
                release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
        release_mem_region(chan->start, resource_size(chan));

        return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
        struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
        struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
        struct resource *res;
        int errirq = platform_get_irq(pdev, 0);

        dma_async_device_unregister(dma_dev);

        if (errirq > 0)
                free_irq(errirq, shdev);

        spin_lock_irq(&sh_dmae_lock);
        list_del_rcu(&shdev->node);
        spin_unlock_irq(&sh_dmae_lock);

        pm_runtime_disable(&pdev->dev);

        sh_dmae_chan_remove(shdev);
        shdma_cleanup(&shdev->shdma_dev);

        if (shdev->dmars)
                iounmap(shdev->dmars);
        iounmap(shdev->chan_reg);

        platform_set_drvdata(pdev, NULL);

        synchronize_rcu();
        kfree(shdev);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res)
                release_mem_region(res->start, resource_size(res));
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res)
                release_mem_region(res->start, resource_size(res));

        return 0;
}

static struct platform_driver sh_dmae_driver = {
        .driver         = {
                .owner  = THIS_MODULE,
                .pm     = &sh_dmae_pm,
                .name   = SH_DMAE_DRV_NAME,
        },
        .remove         = sh_dmae_remove,
        .shutdown       = sh_dmae_shutdown,
};

static int __init sh_dmae_init(void)
{
        /* Wire up NMI handling */
        int err = register_die_notifier(&sh_dmae_nmi_notifier);
        if (err)
                return err;

        return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
        platform_driver_unregister(&sh_dmae_driver);

        unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);