linux/drivers/dma/sh/rcar-hpbdma.c
/*
 * Copyright (C) 2011-2013 Renesas Electronics Corporation
 * Copyright (C) 2013 Cogent Embedded, Inc.
 *
 * This file is based on drivers/dma/sh/shdma.c
 *
 * Renesas SuperH DMA Engine support
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - The SuperH DMAC has no hardware DMA chain mode.
 * - The maximum DMA size is 16 MiB.
 */

#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shdma-base.h>
#include <linux/slab.h>

/* DMA channel registers */
#define HPB_DMAE_DSAR0  0x00
#define HPB_DMAE_DDAR0  0x04
#define HPB_DMAE_DTCR0  0x08
#define HPB_DMAE_DSAR1  0x0C
#define HPB_DMAE_DDAR1  0x10
#define HPB_DMAE_DTCR1  0x14
#define HPB_DMAE_DSASR  0x18
#define HPB_DMAE_DDASR  0x1C
#define HPB_DMAE_DTCSR  0x20
#define HPB_DMAE_DPTR   0x24
#define HPB_DMAE_DCR    0x28
#define HPB_DMAE_DCMDR  0x2C
#define HPB_DMAE_DSTPR  0x30
#define HPB_DMAE_DSTSR  0x34
#define HPB_DMAE_DDBGR  0x38
#define HPB_DMAE_DDBGR2 0x3C
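/* Each channel has its own 0x40-byte register window within chan_reg */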
#define HPB_DMAE_CHAN(n)        (0x40 * (n))

/* DMA command register (DCMDR) bits */
#define HPB_DMAE_DCMDR_BDOUT    BIT(7)
#define HPB_DMAE_DCMDR_DQSPD    BIT(6)
#define HPB_DMAE_DCMDR_DQSPC    BIT(5)
#define HPB_DMAE_DCMDR_DMSPD    BIT(4)
#define HPB_DMAE_DCMDR_DMSPC    BIT(3)
#define HPB_DMAE_DCMDR_DQEND    BIT(2)
#define HPB_DMAE_DCMDR_DNXT     BIT(1)
#define HPB_DMAE_DCMDR_DMEN     BIT(0)

/* DMA forced stop register (DSTPR) bits */
#define HPB_DMAE_DSTPR_DMSTP    BIT(0)

/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DQSTS    BIT(2)
#define HPB_DMAE_DSTSR_DMSTS    BIT(0)

/* DMA common registers */
#define HPB_DMAE_DTIMR          0x00
#define HPB_DMAE_DINTSR0        0x0C
#define HPB_DMAE_DINTSR1        0x10
#define HPB_DMAE_DINTCR0        0x14
#define HPB_DMAE_DINTCR1        0x18
#define HPB_DMAE_DINTMR0        0x1C
#define HPB_DMAE_DINTMR1        0x20
#define HPB_DMAE_DACTSR0        0x24
#define HPB_DMAE_DACTSR1        0x28
#define HPB_DMAE_HSRSTR(n)      (0x40 + (n) * 4)
#define HPB_DMAE_HPB_DMASPR(n)  (0x140 + (n) * 4)
#define HPB_DMAE_HPB_DMLVLR0    0x160
#define HPB_DMAE_HPB_DMLVLR1    0x164
#define HPB_DMAE_HPB_DMSHPT0    0x168
#define HPB_DMAE_HPB_DMSHPT1    0x16C

#define HPB_DMA_SLAVE_NUMBER    256
#define HPB_DMA_TCR_MAX         0x01000000      /* 16 MiB */

struct hpb_dmae_chan {
        struct shdma_chan shdma_chan;
        int xfer_mode;                  /* DMA transfer mode */
#define XFER_SINGLE     1
#define XFER_DOUBLE     2
        unsigned plane_idx;             /* current DMA information set */
        bool first_desc;                /* first/next transfer */
        int xmit_shift;                 /* log_2(bytes_per_xfer) */
        void __iomem *base;
        const struct hpb_dmae_slave_config *cfg;
        char dev_id[16];                /* unique name per channel */
        dma_addr_t slave_addr;
};

struct hpb_dmae_device {
        struct shdma_dev shdma_dev;
        spinlock_t reg_lock;            /* common register operation lock */
        struct hpb_dmae_pdata *pdata;
        void __iomem *chan_reg;
        void __iomem *comm_reg;
        void __iomem *reset_reg;
        void __iomem *mode_reg;
};

struct hpb_dmae_regs {
        u32 sar; /* SAR / source address */
        u32 dar; /* DAR / destination address */
        u32 tcr; /* TCR / transfer count */
};

struct hpb_desc {
        struct shdma_desc shdma_desc;
        struct hpb_dmae_regs hw;
        unsigned plane_idx;
};

#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
                                struct hpb_dmae_device, shdma_dev.dma_dev)

static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
{
        iowrite32(data, hpb_dc->base + reg);
}

static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
{
        return ioread32(hpb_dc->base + reg);
}

static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
        iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
}

static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
        iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
}

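/*
 * The interrupt status, clear and mask registers are split in two:
 * DINTSR0/DINTCR0/DINTMR0 cover hardware channels 0-31, the *1 variants
 * cover channels 32 and up.
 */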
static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
{
        u32 v;

        if (ch < 32)
                v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
        else
                v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
        return v & 0x1;
}

static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
        if (ch < 32)
                iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
        else
                iowrite32((0x1 << (ch - 32)),
                          hpbdev->comm_reg + HPB_DMAE_DINTCR1);
}

static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
        iowrite32(data, hpbdev->mode_reg);
}

static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
{
        return ioread32(hpbdev->mode_reg);
}

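/*
 * Unmask the transfer-complete interrupt of one hardware channel by
 * setting its bit in the appropriate DINTMR register, read-modify-write
 * under the common register lock.
 */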
static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
{
        u32 intreg;

        spin_lock_irq(&hpbdev->reg_lock);
        if (ch < 32) {
                intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
                iowrite32(BIT(ch) | intreg,
                          hpbdev->comm_reg + HPB_DMAE_DINTMR0);
        } else {
                intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
                iowrite32(BIT(ch - 32) | intreg,
                          hpbdev->comm_reg + HPB_DMAE_DINTMR1);
        }
        spin_unlock_irq(&hpbdev->reg_lock);
}

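/*
 * Pulse the asynchronous reset bits for a channel: assert them, poll until
 * the hardware reports them set (up to roughly 100 ms), then deassert.
 */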
static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
{
        u32 rstr;
        int timeout = 10000;    /* 100 ms */

        spin_lock(&hpbdev->reg_lock);
        rstr = ioread32(hpbdev->reset_reg);
        rstr |= data;
        iowrite32(rstr, hpbdev->reset_reg);
        do {
                rstr = ioread32(hpbdev->reset_reg);
                if ((rstr & data) == data)
                        break;
                udelay(10);
        } while (timeout--);

        if (timeout < 0)
                dev_err(hpbdev->shdma_dev.dma_dev.dev,
                        "%s timeout\n", __func__);

        rstr &= ~data;
        iowrite32(rstr, hpbdev->reset_reg);
        spin_unlock(&hpbdev->reg_lock);
}

static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
                                    u32 mask, u32 data)
{
        u32 mode;

        spin_lock_irq(&hpbdev->reg_lock);
        mode = asyncmdr_read(hpbdev);
        mode &= ~mask;
        mode |= data;
        asyncmdr_write(hpbdev, mode);
        spin_unlock_irq(&hpbdev->reg_lock);
}

static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
{
        dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
}

static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
{
        u32 ch;

        for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
                hsrstr_write(hpbdev, ch);
}

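/*
 * Derive log2(bytes per transfer unit) from the source/destination port
 * data size bits programmed into DCR; TCR values are expressed in these
 * units rather than bytes.
 */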
static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
{
        struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
        struct hpb_dmae_pdata *pdata = hpbdev->pdata;
        int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
        int i;

        switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
        case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
        default:
                i = XMIT_SZ_8BIT;
                break;
        case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
                i = XMIT_SZ_16BIT;
                break;
        case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
                i = XMIT_SZ_32BIT;
                break;
        }
        return pdata->ts_shift[i];
}

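/*
 * Program one of the two DMA information sets: plane 0 uses DSAR0/DDAR0/
 * DTCR0, plane 1 uses DSAR1/DDAR1/DTCR1. TCR is written in transfer
 * units, hence the shift by xmit_shift.
 */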
static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
                             struct hpb_dmae_regs *hw, unsigned plane)
{
        ch_reg_write(hpb_chan, hw->sar,
                     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
        ch_reg_write(hpb_chan, hw->dar,
                     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
        ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
                     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
}

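/*
 * Kick the channel: DMEN enables the transfer; for a follow-on transfer
 * DNXT is set as well, so the controller proceeds with the other DMA
 * information set.
 */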
static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
{
        ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
                     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
}

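/*
 * Stop the channel: terminate the transfer queue (DQEND), force-stop the
 * DMAC (DMSTP) and reset the driver's plane bookkeeping.
 */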
static void hpb_dmae_halt(struct shdma_chan *schan)
{
        struct hpb_dmae_chan *chan = to_chan(schan);

        ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
        ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);

        chan->plane_idx = 0;
        chan->first_desc = true;
}

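/* Look up the platform-data slave configuration matching a slave ID */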
static const struct hpb_dmae_slave_config *
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
{
        struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
        struct hpb_dmae_pdata *pdata = hpbdev->pdata;
        int i;

        if (slave_id >= HPB_DMA_SLAVE_NUMBER)
                return NULL;

        for (i = 0; i < pdata->num_slaves; i++)
                if (pdata->slaves[i].id == slave_id)
                        return pdata->slaves + i;

        return NULL;
}

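/*
 * Submit one descriptor to the hardware. In double transfer mode the two
 * DMA information sets are used alternately, so the plane index toggles
 * after every submission.
 */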
static void hpb_dmae_start_xfer(struct shdma_chan *schan,
                                struct shdma_desc *sdesc)
{
        struct hpb_dmae_chan *chan = to_chan(schan);
        struct hpb_dmae_device *hpbdev = to_dev(chan);
        struct hpb_desc *desc = to_desc(sdesc);

        if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
                hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);

        desc->plane_idx = chan->plane_idx;
        hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
        hpb_dmae_start(chan, !chan->first_desc);

        if (chan->xfer_mode == XFER_DOUBLE) {
                chan->plane_idx ^= 1;
                chan->first_desc = false;
        }
}

static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
                                    struct shdma_desc *sdesc)
{
        /*
         * This is correct since we always have at most a single
         * outstanding DMA transfer per channel, and by the time
         * we get the completion interrupt the transfer is done.
         * This will change if we ever use alternating DMA
         * information sets and submit two descriptors at once.
         */
        return true;
}

static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
        struct hpb_dmae_chan *chan = to_chan(schan);
        struct hpb_dmae_device *hpbdev = to_dev(chan);
        int ch = chan->cfg->dma_ch;

        /* Check for a completed DMA transfer */
        if (dintsr_read(hpbdev, ch)) {
                /* Clear the interrupt status */
                dintcr_write(hpbdev, ch);
                return true;
        }
        return false;
}

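/*
 * Fill in the hardware image of one descriptor. Requests longer than the
 * 16 MiB TCR limit are truncated here; the shdma core then covers the
 * remainder with further descriptors.
 */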
static int hpb_dmae_desc_setup(struct shdma_chan *schan,
                               struct shdma_desc *sdesc,
                               dma_addr_t src, dma_addr_t dst, size_t *len)
{
        struct hpb_desc *desc = to_desc(sdesc);

        if (*len > (size_t)HPB_DMA_TCR_MAX)
                *len = (size_t)HPB_DMA_TCR_MAX;

        desc->hw.sar = src;
        desc->hw.dar = dst;
        desc->hw.tcr = *len;

        return 0;
}

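/*
 * Report the number of bytes already transferred for an in-flight
 * descriptor: DTCR holds the remaining transfer count, so the difference
 * from the programmed count, scaled to bytes, is the completed portion.
 */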
static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
                                   struct shdma_desc *sdesc)
{
        struct hpb_desc *desc = to_desc(sdesc);
        struct hpb_dmae_chan *chan = to_chan(schan);
        u32 tcr = ch_reg_read(chan, desc->plane_idx ?
                              HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);

        return (desc->hw.tcr - tcr) << chan->xmit_shift;
}

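/*
 * In double transfer mode the queue status bit (DQSTS) reflects channel
 * activity; in single transfer mode the plain DMA status bit (DMSTS) does.
 */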
static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
{
        struct hpb_dmae_chan *chan = to_chan(schan);
        u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

        if (chan->xfer_mode == XFER_DOUBLE)
                return dstsr & HPB_DMAE_DSTSR_DQSTS;
        else
                return dstsr & HPB_DMAE_DSTSR_DMSTS;
}

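/*
 * Bind the channel to its hardware resources: locate the per-channel
 * register window, request the completion IRQ and program DCR/DPTR from
 * the slave configuration.
 */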
static int
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
                              const struct hpb_dmae_slave_config *cfg)
{
        struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
        struct hpb_dmae_pdata *pdata = hpbdev->pdata;
        const struct hpb_dmae_channel *channel = pdata->channels;
        int slave_id = cfg->id;
        int i, err;

        for (i = 0; i < pdata->num_channels; i++, channel++) {
                if (channel->s_id == slave_id) {
                        struct device *dev = hpb_chan->shdma_chan.dev;

                        hpb_chan->base = hpbdev->chan_reg +
                                HPB_DMAE_CHAN(cfg->dma_ch);

                        dev_dbg(dev, "Detected Slave device\n");
                        dev_dbg(dev, " -- slave_id       : 0x%x\n", slave_id);
                        dev_dbg(dev, " -- cfg->dma_ch    : %d\n", cfg->dma_ch);
                        dev_dbg(dev, " -- channel->ch_irq: %d\n",
                                channel->ch_irq);
                        break;
                }
        }

        /*
         * Bail out if no channel matched the slave ID; otherwise "channel"
         * would point past the end of the array below.
         */
        if (i == pdata->num_channels)
                return -ENODEV;

        err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
                                IRQF_SHARED, hpb_chan->dev_id);
        if (err) {
                dev_err(hpb_chan->shdma_chan.dev,
                        "DMA channel request_irq %d failed with error %d\n",
                        channel->ch_irq, err);
                return err;
        }

        hpb_chan->plane_idx = 0;
        hpb_chan->first_desc = true;

        if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
                hpb_chan->xfer_mode = XFER_SINGLE;
        } else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
                   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
                hpb_chan->xfer_mode = XFER_DOUBLE;
        } else {
                dev_err(hpb_chan->shdma_chan.dev, "DCR setting error\n");
                return -EINVAL;
        }

        if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
                hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
        ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
        ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
        hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
        hpb_dmae_enable_int(hpbdev, cfg->dma_ch);

        return 0;
}

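/*
 * shdma set_slave callback: with try == true only check that the slave ID
 * is known; otherwise latch the configuration and claim the hardware
 * channel resources.
 */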
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
                              dma_addr_t slave_addr, bool try)
{
        struct hpb_dmae_chan *chan = to_chan(schan);
        const struct hpb_dmae_slave_config *sc =
                hpb_dmae_find_slave(chan, slave_id);

        if (!sc)
                return -ENODEV;
        if (try)
                return 0;
        chan->cfg = sc;
        chan->slave_addr = slave_addr ? : sc->addr;
        return hpb_dmae_alloc_chan_resources(chan, sc);
}

static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
        /* Nothing to do here: the channel is fully set up in set_slave() */
}

static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
        struct hpb_dmae_chan *chan = to_chan(schan);

        return chan->slave_addr;
}

static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
{
        return &((struct hpb_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops hpb_dmae_ops = {
        .desc_completed = hpb_dmae_desc_completed,
        .halt_channel = hpb_dmae_halt,
        .channel_busy = hpb_dmae_channel_busy,
        .slave_addr = hpb_dmae_slave_addr,
        .desc_setup = hpb_dmae_desc_setup,
        .set_slave = hpb_dmae_set_slave,
        .setup_xfer = hpb_dmae_setup_xfer,
        .start_xfer = hpb_dmae_start_xfer,
        .embedded_desc = hpb_dmae_embedded_desc,
        .chan_irq = hpb_dmae_chan_irq,
        .get_partial = hpb_dmae_get_partial,
};

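/*
 * Create one shdma channel and give it a unique name (derived from the
 * platform device ID) that is later used as the IRQ handler name.
 */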
static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
{
        struct shdma_dev *sdev = &hpbdev->shdma_dev;
        struct platform_device *pdev =
                to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
        struct hpb_dmae_chan *new_hpb_chan;
        struct shdma_chan *schan;

        /* Alloc channel */
        new_hpb_chan = devm_kzalloc(&pdev->dev,
                                    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
        if (!new_hpb_chan) {
                dev_err(hpbdev->shdma_dev.dma_dev.dev,
                        "No free memory for allocating DMA channels!\n");
                return -ENOMEM;
        }

        schan = &new_hpb_chan->shdma_chan;
        schan->max_xfer_len = HPB_DMA_TCR_MAX;

        shdma_chan_probe(sdev, schan, id);

        if (pdev->id >= 0)
                snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
                         "hpb-dmae%d.%d", pdev->id, id);
        else
                snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
                         "hpb-dma.%d", id);

        return 0;
}

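/*
 * A board registers this controller through struct hpb_dmae_pdata passed
 * as platform data. A minimal sketch, for illustration only; the slave
 * ID, channel number, IRQ and DCR value below are hypothetical, not taken
 * from a real board file:
 *
 *      static const struct hpb_dmae_slave_config board_slaves[] = {
 *              {
 *                      .id     = 0x20,
 *                      .addr   = 0xffe60018,
 *                      .dcr    = HPB_DMAE_DCR_SPDS_32BIT |
 *                                HPB_DMAE_DCR_DPDS_32BIT,
 *                      .dma_ch = 21,
 *              },
 *      };
 *
 *      static const struct hpb_dmae_channel board_channels[] = {
 *              { .s_id = 0x20, .ch_irq = 103 },
 *      };
 *
 *      static struct hpb_dmae_pdata board_pdata = {
 *              .slaves       = board_slaves,
 *              .num_slaves   = ARRAY_SIZE(board_slaves),
 *              .channels     = board_channels,
 *              .num_channels = ARRAY_SIZE(board_channels),
 *              ...
 *      };
 */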
static int hpb_dmae_probe(struct platform_device *pdev)
{
        struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
        struct hpb_dmae_device *hpbdev;
        struct dma_device *dma_dev;
        struct resource *chan, *comm, *rest, *mode, *irq_res;
        int err, i;

        /* Get platform data */
        if (!pdata || !pdata->num_channels)
                return -ENODEV;

        chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);

        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!irq_res)
                return -ENODEV;

        hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
                              GFP_KERNEL);
        if (!hpbdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
                return -ENOMEM;
        }

        hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
        if (IS_ERR(hpbdev->chan_reg))
                return PTR_ERR(hpbdev->chan_reg);

        hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
        if (IS_ERR(hpbdev->comm_reg))
                return PTR_ERR(hpbdev->comm_reg);

        hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
        if (IS_ERR(hpbdev->reset_reg))
                return PTR_ERR(hpbdev->reset_reg);

        hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
        if (IS_ERR(hpbdev->mode_reg))
                return PTR_ERR(hpbdev->mode_reg);

        dma_dev = &hpbdev->shdma_dev.dma_dev;

        spin_lock_init(&hpbdev->reg_lock);

        /* Platform data */
        hpbdev->pdata = pdata;

        pm_runtime_enable(&pdev->dev);
        err = pm_runtime_get_sync(&pdev->dev);
        if (err < 0)
                dev_err(&pdev->dev, "%s: pm_runtime_get_sync() failed: %d\n",
                        __func__, err);

        /* Reset DMA controller */
        hpb_dmae_reset(hpbdev);

        pm_runtime_put(&pdev->dev);

        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        hpbdev->shdma_dev.ops = &hpb_dmae_ops;
        hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
        err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
        if (err < 0)
                goto error;

        /* Create DMA channels */
        for (i = 0; i < pdata->num_channels; i++)
                hpb_dmae_chan_probe(hpbdev, i);

        platform_set_drvdata(pdev, hpbdev);
        err = dma_async_device_register(dma_dev);
        if (!err)
                return 0;

        shdma_cleanup(&hpbdev->shdma_dev);
error:
        pm_runtime_disable(&pdev->dev);
        return err;
}

static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
        struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
        struct shdma_chan *schan;
        int i;

        shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
                BUG_ON(!schan);

                shdma_chan_remove(schan);
        }
        dma_dev->chancnt = 0;
}

static int hpb_dmae_remove(struct platform_device *pdev)
{
        struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);

        pm_runtime_disable(&pdev->dev);

        hpb_dmae_chan_remove(hpbdev);

        return 0;
}

static void hpb_dmae_shutdown(struct platform_device *pdev)
{
        struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

        hpb_dmae_ctl_stop(hpbdev);
}

static struct platform_driver hpb_dmae_driver = {
        .probe          = hpb_dmae_probe,
        .remove         = hpb_dmae_remove,
        .shutdown       = hpb_dmae_shutdown,
        .driver = {
                .owner  = THIS_MODULE,
                .name   = "hpb-dma-engine",
        },
};
module_platform_driver(hpb_dmae_driver);

MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
MODULE_LICENSE("GPL");