linux/drivers/dma/uniphier-xdmac.c
// SPDX-License-Identifier: GPL-2.0
/*
 * External DMA controller driver for UniPhier SoCs
 * Copyright 2019 Socionext Inc.
 * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
 */

#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

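/* byte offset between consecutive per-channel register blocks */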
#define XDMAC_CH_WIDTH          0x100

#define XDMAC_TFA               0x08
#define XDMAC_TFA_MCNT_MASK     GENMASK(23, 16)
#define XDMAC_TFA_MASK          GENMASK(5, 0)
#define XDMAC_SADM              0x10
#define XDMAC_SADM_STW_MASK     GENMASK(25, 24)
#define XDMAC_SADM_SAM          BIT(4)
#define XDMAC_SADM_SAM_FIXED    XDMAC_SADM_SAM
#define XDMAC_SADM_SAM_INC      0
#define XDMAC_DADM              0x14
#define XDMAC_DADM_DTW_MASK     XDMAC_SADM_STW_MASK
#define XDMAC_DADM_DAM          XDMAC_SADM_SAM
#define XDMAC_DADM_DAM_FIXED    XDMAC_SADM_SAM_FIXED
#define XDMAC_DADM_DAM_INC      XDMAC_SADM_SAM_INC
#define XDMAC_EXSAD             0x18
#define XDMAC_EXDAD             0x1c
#define XDMAC_SAD               0x20
#define XDMAC_DAD               0x24
#define XDMAC_ITS               0x28
#define XDMAC_ITS_MASK          GENMASK(25, 0)
#define XDMAC_TNUM              0x2c
#define XDMAC_TNUM_MASK         GENMASK(15, 0)
#define XDMAC_TSS               0x30
#define XDMAC_TSS_REQ           BIT(0)
#define XDMAC_IEN               0x34
#define XDMAC_IEN_ERRIEN        BIT(1)
#define XDMAC_IEN_ENDIEN        BIT(0)
#define XDMAC_STAT              0x40
#define XDMAC_STAT_TENF         BIT(0)
#define XDMAC_IR                0x44
#define XDMAC_IR_ERRF           BIT(1)
#define XDMAC_IR_ENDF           BIT(0)
#define XDMAC_ID                0x48
#define XDMAC_ID_ERRIDF         BIT(1)
#define XDMAC_ID_ENDIDF         BIT(0)

#define XDMAC_MAX_CHANS         16
#define XDMAC_INTERVAL_CLKS     20
#define XDMAC_MAX_WORDS         XDMAC_TNUM_MASK

/* mask off the low-order bits to keep the maximum transfer size aligned */
#define XDMAC_MAX_WORD_SIZE     (XDMAC_ITS_MASK & ~GENMASK(3, 0))

#define UNIPHIER_XDMAC_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

struct uniphier_xdmac_desc_node {
        dma_addr_t src;
        dma_addr_t dst;
        u32 burst_size;
        u32 nr_burst;
};

struct uniphier_xdmac_desc {
        struct virt_dma_desc vd;

        unsigned int nr_node;
        unsigned int cur_node;
        enum dma_transfer_direction dir;
        struct uniphier_xdmac_desc_node nodes[];
};

struct uniphier_xdmac_chan {
        struct virt_dma_chan vc;
        struct uniphier_xdmac_device *xdev;
        struct uniphier_xdmac_desc *xd;
        void __iomem *reg_ch_base;
        struct dma_slave_config sconfig;
        int id;
        unsigned int req_factor;
};

struct uniphier_xdmac_device {
        struct dma_device ddev;
        void __iomem *reg_base;
        int nr_chans;
        struct uniphier_xdmac_chan channels[];
};

static struct uniphier_xdmac_chan *
to_uniphier_xdmac_chan(struct virt_dma_chan *vc)
{
        return container_of(vc, struct uniphier_xdmac_chan, vc);
}

static struct uniphier_xdmac_desc *
to_uniphier_xdmac_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct uniphier_xdmac_desc, vd);
}

/* xc->vc.lock must be held by caller */
static struct uniphier_xdmac_desc *
uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
{
        struct virt_dma_desc *vd;

        vd = vchan_next_desc(&xc->vc);
        if (!vd)
                return NULL;

        list_del(&vd->node);

        return to_uniphier_xdmac_desc(vd);
}

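/*
 * Program one descriptor node into the channel registers and kick off
 * the transfer. As used by this driver, XDMAC_ITS takes the burst size
 * in bytes and XDMAC_TNUM the number of bursts; for example (figures
 * illustrative), a 4096-byte slave segment with a 4-byte bus width and
 * a maxburst of 8 is programmed as ITS = 32 and TNUM = 128.
 */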
/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
                                      struct uniphier_xdmac_desc *xd)
{
        u32 src_mode, src_width;
        u32 dst_mode, dst_width;
        dma_addr_t src_addr, dst_addr;
        u32 val, its, tnum;
        enum dma_slave_buswidth buswidth;

        src_addr = xd->nodes[xd->cur_node].src;
        dst_addr = xd->nodes[xd->cur_node].dst;
        its      = xd->nodes[xd->cur_node].burst_size;
        tnum     = xd->nodes[xd->cur_node].nr_burst;

        /*
         * The bus width on the MEM side must be 4 or 8 bytes; it affects
         * neither the DEV-side width nor the transfer size.
         */
        if (xd->dir == DMA_DEV_TO_MEM) {
                src_mode = XDMAC_SADM_SAM_FIXED;
                buswidth = xc->sconfig.src_addr_width;
        } else {
                src_mode = XDMAC_SADM_SAM_INC;
                buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
        }
        /* STW/DTW hold log2 of the bus width (buswidth is a power of two) */
        src_width = FIELD_PREP(XDMAC_SADM_STW_MASK, __ffs(buswidth));

        if (xd->dir == DMA_MEM_TO_DEV) {
                dst_mode = XDMAC_DADM_DAM_FIXED;
                buswidth = xc->sconfig.dst_addr_width;
        } else {
                dst_mode = XDMAC_DADM_DAM_INC;
                buswidth = DMA_SLAVE_BUSWIDTH_8_BYTES;
        }
        dst_width = FIELD_PREP(XDMAC_DADM_DTW_MASK, __ffs(buswidth));

        /* setup transfer factor */
        val = FIELD_PREP(XDMAC_TFA_MCNT_MASK, XDMAC_INTERVAL_CLKS);
        val |= FIELD_PREP(XDMAC_TFA_MASK, xc->req_factor);
        writel(val, xc->reg_ch_base + XDMAC_TFA);

        /* setup the channel */
        writel(lower_32_bits(src_addr), xc->reg_ch_base + XDMAC_SAD);
        writel(upper_32_bits(src_addr), xc->reg_ch_base + XDMAC_EXSAD);

        writel(lower_32_bits(dst_addr), xc->reg_ch_base + XDMAC_DAD);
        writel(upper_32_bits(dst_addr), xc->reg_ch_base + XDMAC_EXDAD);

        src_mode |= src_width;
        dst_mode |= dst_width;
        writel(src_mode, xc->reg_ch_base + XDMAC_SADM);
        writel(dst_mode, xc->reg_ch_base + XDMAC_DADM);

        writel(its, xc->reg_ch_base + XDMAC_ITS);
        writel(tnum, xc->reg_ch_base + XDMAC_TNUM);

        /* enable interrupt */
        writel(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN,
               xc->reg_ch_base + XDMAC_IEN);

        /* start XDMAC */
        val = readl(xc->reg_ch_base + XDMAC_TSS);
        val |= XDMAC_TSS_REQ;
        writel(val, xc->reg_ch_base + XDMAC_TSS);
}

/* xc->vc.lock must be held by caller */
static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
{
        u32 val;

        /* disable interrupt */
        val = readl(xc->reg_ch_base + XDMAC_IEN);
        val &= ~(XDMAC_IEN_ENDIEN | XDMAC_IEN_ERRIEN);
        writel(val, xc->reg_ch_base + XDMAC_IEN);

        /* stop XDMAC by clearing the request bit */
        val = readl(xc->reg_ch_base + XDMAC_TSS);
        val &= ~XDMAC_TSS_REQ;
        writel(val, xc->reg_ch_base + XDMAC_TSS);

        /* wait until the transfer is stopped */
        return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
                                         !(val & XDMAC_STAT_TENF), 100, 1000);
}

/* xc->vc.lock must be held by caller */
static void uniphier_xdmac_start(struct uniphier_xdmac_chan *xc)
{
        struct uniphier_xdmac_desc *xd;

        xd = uniphier_xdmac_next_desc(xc);
        if (xd)
                uniphier_xdmac_chan_start(xc, xd);

        /* set the descriptor on the channel even if xd is NULL */
        xc->xd = xd;
}

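/*
 * Per-channel interrupt handling: on an error, stop the channel; on a
 * normal end, advance to the next node of the in-flight descriptor, or
 * complete its cookie and start the next queued descriptor.
 */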
static void uniphier_xdmac_chan_irq(struct uniphier_xdmac_chan *xc)
{
        u32 stat;
        int ret;

        spin_lock(&xc->vc.lock);

        stat = readl(xc->reg_ch_base + XDMAC_ID);

        if (stat & XDMAC_ID_ERRIDF) {
                ret = uniphier_xdmac_chan_stop(xc);
                if (ret)
                        dev_err(xc->xdev->ddev.dev,
                                "DMA transfer error (channel also failed to stop)\n");
                else
                        dev_err(xc->xdev->ddev.dev,
                                "DMA transfer error\n");

        } else if ((stat & XDMAC_ID_ENDIDF) && xc->xd) {
                xc->xd->cur_node++;
                if (xc->xd->cur_node >= xc->xd->nr_node) {
                        vchan_cookie_complete(&xc->xd->vd);
                        uniphier_xdmac_start(xc);
                } else {
                        uniphier_xdmac_chan_start(xc, xc->xd);
                }
        }

        /* write the status bits back to clear them */
        writel(stat, xc->reg_ch_base + XDMAC_IR);

        spin_unlock(&xc->vc.lock);
}

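/*
 * All channels share one interrupt line (requested with IRQF_SHARED in
 * probe), so every channel's status is checked on each interrupt.
 */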
static irqreturn_t uniphier_xdmac_irq_handler(int irq, void *dev_id)
{
        struct uniphier_xdmac_device *xdev = dev_id;
        int i;

        for (i = 0; i < xdev->nr_chans; i++)
                uniphier_xdmac_chan_irq(&xdev->channels[i]);

        return IRQ_HANDLED;
}

static void uniphier_xdmac_free_chan_resources(struct dma_chan *chan)
{
        vchan_free_chan_resources(to_virt_chan(chan));
}

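/*
 * Worked example (illustrative): with XDMAC_MAX_WORD_SIZE = 0x3fffff0
 * bytes, a 0x8000000-byte (128 MiB) memcpy becomes node 0 with
 * burst_size = 0x3fffff0 and nr_burst = 2, plus node 1 carrying the
 * 0x20-byte remainder with nr_burst = 1.
 */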
static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                               dma_addr_t src, size_t len, unsigned long flags)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_xdmac_desc *xd;
        unsigned int nr;
        size_t burst_size, tlen;
        int i;

        if (!len || len > XDMAC_MAX_WORD_SIZE * XDMAC_MAX_WORDS)
                return NULL;

        /*
         * At most two nodes are needed: one moving the bulk in
         * XDMAC_MAX_WORD_SIZE bursts and, if the length is not an exact
         * multiple of that size, one moving the remainder. This also
         * keeps the loop below from reaching len == 0, where burst_size
         * would become zero.
         */
        nr = (len > XDMAC_MAX_WORD_SIZE && len % XDMAC_MAX_WORD_SIZE) ? 2 : 1;

        xd = kzalloc(struct_size(xd, nodes, nr), GFP_NOWAIT);
        if (!xd)
                return NULL;

        for (i = 0; i < nr; i++) {
                burst_size = min_t(size_t, len, XDMAC_MAX_WORD_SIZE);
                xd->nodes[i].src = src;
                xd->nodes[i].dst = dst;
                xd->nodes[i].burst_size = burst_size;
                xd->nodes[i].nr_burst = len / burst_size;
                tlen = rounddown(len, burst_size);
                src += tlen;
                dst += tlen;
                len -= tlen;
        }

        xd->dir = DMA_MEM_TO_MEM;
        xd->nr_node = nr;
        xd->cur_node = 0;

        return vchan_tx_prep(vc, &xd->vd, flags);
}

static struct dma_async_tx_descriptor *
uniphier_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                             unsigned int sg_len,
                             enum dma_transfer_direction direction,
                             unsigned long flags, void *context)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
        struct uniphier_xdmac_desc *xd;
        struct scatterlist *sg;
        enum dma_slave_buswidth buswidth;
        u32 maxburst;
        int i;

        if (!is_slave_direction(direction))
                return NULL;

        if (direction == DMA_DEV_TO_MEM) {
                buswidth = xc->sconfig.src_addr_width;
                maxburst = xc->sconfig.src_maxburst;
        } else {
                buswidth = xc->sconfig.dst_addr_width;
                maxburst = xc->sconfig.dst_maxburst;
        }

        if (!maxburst)
                maxburst = 1;
        if (maxburst > xc->xdev->ddev.max_burst) {
                dev_err(xc->xdev->ddev.dev,
                        "Exceeds maximum number of burst words\n");
                return NULL;
        }

        xd = kzalloc(struct_size(xd, nodes, sg_len), GFP_NOWAIT);
        if (!xd)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                xd->nodes[i].src = (direction == DMA_DEV_TO_MEM)
                        ? xc->sconfig.src_addr : sg_dma_address(sg);
                xd->nodes[i].dst = (direction == DMA_MEM_TO_DEV)
                        ? xc->sconfig.dst_addr : sg_dma_address(sg);
                xd->nodes[i].burst_size = maxburst * buswidth;
                xd->nodes[i].nr_burst =
                        sg_dma_len(sg) / xd->nodes[i].burst_size;

                /*
                 * A transfer whose size is not a multiple of the unit
                 * size (the number of burst words * bus width) is
                 * currently not allowed, because the driver has no way
                 * to transfer the residue. To transfer a buffer of
                 * arbitrary size, 'src_maxburst' or 'dst_maxburst' of
                 * dma_slave_config must be set to 1.
                 */
                if (sg_dma_len(sg) % xd->nodes[i].burst_size) {
                        dev_err(xc->xdev->ddev.dev,
                                "Unaligned transfer size: %d\n", sg_dma_len(sg));
                        kfree(xd);
                        return NULL;
                }

                if (xd->nodes[i].nr_burst > XDMAC_MAX_WORDS) {
                        dev_err(xc->xdev->ddev.dev,
                                "Exceeds maximum transfer size\n");
                        kfree(xd);
                        return NULL;
                }
        }

        xd->dir = direction;
        xd->nr_node = sg_len;
        xd->cur_node = 0;

        return vchan_tx_prep(vc, &xd->vd, flags);
}

static int uniphier_xdmac_slave_config(struct dma_chan *chan,
                                       struct dma_slave_config *config)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);

        memcpy(&xc->sconfig, config, sizeof(*config));

        return 0;
}
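
/*
 * A client drives this driver through the generic dmaengine slave API;
 * a minimal sketch (all names and values illustrative):
 *
 *     struct dma_slave_config cfg = {
 *             .direction      = DMA_MEM_TO_DEV,
 *             .dst_addr       = fifo_phys,
 *             .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *             .dst_maxburst   = 1,
 *     };
 *     dmaengine_slave_config(chan, &cfg);
 *     desc = dmaengine_prep_slave_single(chan, buf_dma, len,
 *                                        DMA_MEM_TO_DEV,
 *                                        DMA_PREP_INTERRUPT);
 *     dmaengine_submit(desc);
 *     dma_async_issue_pending(chan);
 *
 * A maxburst of 1 permits arbitrary transfer sizes; see the alignment
 * check in uniphier_xdmac_prep_slave_sg().
 */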

static int uniphier_xdmac_terminate_all(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
        unsigned long flags;
        int ret = 0;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);

        if (xc->xd) {
                vchan_terminate_vdesc(&xc->xd->vd);
                xc->xd = NULL;
                ret = uniphier_xdmac_chan_stop(xc);
        }

        vchan_get_all_descriptors(vc, &head);

        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);

        return ret;
}

static void uniphier_xdmac_synchronize(struct dma_chan *chan)
{
        vchan_synchronize(to_virt_chan(chan));
}

static void uniphier_xdmac_issue_pending(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct uniphier_xdmac_chan *xc = to_uniphier_xdmac_chan(vc);
        unsigned long flags;

        spin_lock_irqsave(&vc->lock, flags);

        if (vchan_issue_pending(vc) && !xc->xd)
                uniphier_xdmac_start(xc);

        spin_unlock_irqrestore(&vc->lock, flags);
}

static void uniphier_xdmac_desc_free(struct virt_dma_desc *vd)
{
        kfree(to_uniphier_xdmac_desc(vd));
}

static void uniphier_xdmac_chan_init(struct uniphier_xdmac_device *xdev,
                                     int ch)
{
        struct uniphier_xdmac_chan *xc = &xdev->channels[ch];

        xc->xdev = xdev;
        xc->reg_ch_base = xdev->reg_base + XDMAC_CH_WIDTH * ch;
        xc->vc.desc_free = uniphier_xdmac_desc_free;

        vchan_init(&xc->vc, &xdev->ddev);
}

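/*
 * Translate a two-cell DMA specifier: cell 0 selects the channel and
 * cell 1 gives the request factor programmed into XDMAC_TFA. A consumer
 * node is expected to look roughly like (values illustrative):
 *
 *     dmas = <&xdmac 4 22>;
 *     dma-names = "rx";
 */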
static struct dma_chan *of_dma_uniphier_xlate(struct of_phandle_args *dma_spec,
                                              struct of_dma *ofdma)
{
        struct uniphier_xdmac_device *xdev = ofdma->of_dma_data;
        int chan_id = dma_spec->args[0];

        if (chan_id >= xdev->nr_chans)
                return NULL;

        xdev->channels[chan_id].id = chan_id;
        xdev->channels[chan_id].req_factor = dma_spec->args[1];

        return dma_get_slave_channel(&xdev->channels[chan_id].vc.chan);
}

static int uniphier_xdmac_probe(struct platform_device *pdev)
{
        struct uniphier_xdmac_device *xdev;
        struct device *dev = &pdev->dev;
        struct dma_device *ddev;
        int irq;
        u32 nr_chans;
        int i, ret;

        if (of_property_read_u32(dev->of_node, "dma-channels", &nr_chans))
                return -EINVAL;
        if (nr_chans > XDMAC_MAX_CHANS)
                nr_chans = XDMAC_MAX_CHANS;

        xdev = devm_kzalloc(dev, struct_size(xdev, channels, nr_chans),
                            GFP_KERNEL);
        if (!xdev)
                return -ENOMEM;

        xdev->nr_chans = nr_chans;
        xdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xdev->reg_base))
                return PTR_ERR(xdev->reg_base);

        ddev = &xdev->ddev;
        ddev->dev = dev;
        dma_cap_zero(ddev->cap_mask);
        dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
        dma_cap_set(DMA_SLAVE, ddev->cap_mask);
        ddev->src_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
        ddev->dst_addr_widths = UNIPHIER_XDMAC_BUSWIDTHS;
        ddev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
                           BIT(DMA_MEM_TO_MEM);
        ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        ddev->max_burst = XDMAC_MAX_WORDS;
        ddev->device_free_chan_resources = uniphier_xdmac_free_chan_resources;
        ddev->device_prep_dma_memcpy = uniphier_xdmac_prep_dma_memcpy;
        ddev->device_prep_slave_sg = uniphier_xdmac_prep_slave_sg;
        ddev->device_config = uniphier_xdmac_slave_config;
        ddev->device_terminate_all = uniphier_xdmac_terminate_all;
        ddev->device_synchronize = uniphier_xdmac_synchronize;
        ddev->device_tx_status = dma_cookie_status;
        ddev->device_issue_pending = uniphier_xdmac_issue_pending;
        INIT_LIST_HEAD(&ddev->channels);

        for (i = 0; i < nr_chans; i++)
                uniphier_xdmac_chan_init(xdev, i);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        ret = devm_request_irq(dev, irq, uniphier_xdmac_irq_handler,
                               IRQF_SHARED, "xdmac", xdev);
        if (ret) {
                dev_err(dev, "Failed to request IRQ\n");
                return ret;
        }

        ret = dma_async_device_register(ddev);
        if (ret) {
                dev_err(dev, "Failed to register XDMA device\n");
                return ret;
        }

        ret = of_dma_controller_register(dev->of_node,
                                         of_dma_uniphier_xlate, xdev);
        if (ret) {
                dev_err(dev, "Failed to register XDMA controller\n");
                goto out_unregister_dmac;
        }

        platform_set_drvdata(pdev, xdev);

        dev_info(dev, "UniPhier XDMAC driver (%u channels)\n", nr_chans);

        return 0;

out_unregister_dmac:
        dma_async_device_unregister(ddev);

        return ret;
}

static int uniphier_xdmac_remove(struct platform_device *pdev)
{
        struct uniphier_xdmac_device *xdev = platform_get_drvdata(pdev);
        struct dma_device *ddev = &xdev->ddev;
        struct dma_chan *chan;
        int ret;

        /*
         * Before reaching here, almost all descriptors have been freed by the
         * ->device_free_chan_resources() hook. However, each channel might
         * still be holding one descriptor that was in-flight at that moment.
         * Terminate it to make sure this hardware is no longer running. Then,
         * free the channel resources once again to avoid a memory leak.
         */
        list_for_each_entry(chan, &ddev->channels, device_node) {
                ret = dmaengine_terminate_sync(chan);
                if (ret)
                        return ret;
                uniphier_xdmac_free_chan_resources(chan);
        }

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(ddev);

        return 0;
}

static const struct of_device_id uniphier_xdmac_match[] = {
        { .compatible = "socionext,uniphier-xdmac" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_xdmac_match);

static struct platform_driver uniphier_xdmac_driver = {
        .probe = uniphier_xdmac_probe,
        .remove = uniphier_xdmac_remove,
        .driver = {
                .name = "uniphier-xdmac",
                .of_match_table = uniphier_xdmac_match,
        },
};
module_platform_driver(uniphier_xdmac_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("UniPhier external DMA controller driver");
MODULE_LICENSE("GPL v2");