linux/drivers/dma/dma-axi-dmac.c
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 *
 * Licensed under the GPL-2.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces is selected at
 * configuration time. An interface can either be connected to a central memory
 * interconnect, which allows access to system memory, or it can be connected
 * to a dedicated bus which is directly connected to a data port on a
 * peripheral. Since these are configuration options of the core that are
 * selected when it is instantiated, they can not be changed by software at
 * runtime. By extension this means that each channel is uni-directional. It
 * can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus only connected to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */
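
/*
 * A minimal consumer-side sketch (not part of this driver) of how a
 * peripheral driver would typically use such a channel through the standard
 * dmaengine API. The "rx" dma-names entry, buf_phys, buf_len and period_len
 * are assumptions for illustration:
 *
 *      struct dma_chan *chan = dma_request_chan(dev, "rx");
 *      struct dma_async_tx_descriptor *tx;
 *
 *      tx = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *                                     DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *      dmaengine_submit(tx);
 *      dma_async_issue_pending(chan);
 */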

#define AXI_DMAC_REG_IRQ_MASK           0x80
#define AXI_DMAC_REG_IRQ_PENDING        0x84
#define AXI_DMAC_REG_IRQ_SOURCE         0x88

#define AXI_DMAC_REG_CTRL               0x400
#define AXI_DMAC_REG_TRANSFER_ID        0x404
#define AXI_DMAC_REG_START_TRANSFER     0x408
#define AXI_DMAC_REG_FLAGS              0x40c
#define AXI_DMAC_REG_DEST_ADDRESS       0x410
#define AXI_DMAC_REG_SRC_ADDRESS        0x414
#define AXI_DMAC_REG_X_LENGTH           0x418
#define AXI_DMAC_REG_Y_LENGTH           0x41c
#define AXI_DMAC_REG_DEST_STRIDE        0x420
#define AXI_DMAC_REG_SRC_STRIDE         0x424
#define AXI_DMAC_REG_TRANSFER_DONE      0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS             0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR   0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR  0x438

#define AXI_DMAC_CTRL_ENABLE            BIT(0)
#define AXI_DMAC_CTRL_PAUSE             BIT(1)

#define AXI_DMAC_IRQ_SOT                BIT(0)
#define AXI_DMAC_IRQ_EOT                BIT(1)

#define AXI_DMAC_FLAG_CYCLIC            BIT(0)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

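/*
 * A segment that has been handed to the hardware carries the transfer ID the
 * core assigned to it (read back from AXI_DMAC_REG_TRANSFER_ID); a segment
 * that is not currently in flight is marked with AXI_DMAC_SG_UNUSED.
 */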
struct axi_dmac_sg {
        dma_addr_t src_addr;
        dma_addr_t dest_addr;
        unsigned int x_len;
        unsigned int y_len;
        unsigned int dest_stride;
        unsigned int src_stride;
        unsigned int id;
        bool schedule_when_free;
};

struct axi_dmac_desc {
        struct virt_dma_desc vdesc;
        bool cyclic;

        unsigned int num_submitted;
        unsigned int num_completed;
        unsigned int num_sgs;
        struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
        struct virt_dma_chan vchan;

        struct axi_dmac_desc *next_desc;
        struct list_head active_descs;
        enum dma_transfer_direction direction;

        unsigned int src_width;
        unsigned int dest_width;
        unsigned int src_type;
        unsigned int dest_type;

        unsigned int max_length;
        unsigned int align_mask;

        bool hw_cyclic;
        bool hw_2d;
};

struct axi_dmac {
        void __iomem *base;
        int irq;

        struct clk *clk;

        struct dma_device dma_dev;
        struct axi_dmac_chan chan;

        struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
        return container_of(chan->vchan.chan.device, struct axi_dmac,
                dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
        return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
        unsigned int val)
{
        writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
        return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
        return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
        return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
        if (len == 0 || len > chan->max_length)
                return false;
        if ((len & chan->align_mask) != 0) /* Not aligned */
                return false;
        return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
        if ((addr & chan->align_mask) != 0) /* Not aligned */
                return false;
        return true;
}

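/*
 * Submit the next segment of the next pending descriptor to the hardware. The
 * core can queue up multiple transfers; a non-zero read of
 * AXI_DMAC_REG_START_TRANSFER means the queue is currently full, in which
 * case submission is retried from the next start-of-transfer interrupt.
 */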
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        struct virt_dma_desc *vdesc;
        struct axi_dmac_desc *desc;
        struct axi_dmac_sg *sg;
        unsigned int flags = 0;
        unsigned int val;

        val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
        if (val) /* Queue is full, wait for the next SOT IRQ */
                return;

        desc = chan->next_desc;

        if (!desc) {
                vdesc = vchan_next_desc(&chan->vchan);
                if (!vdesc)
                        return;
                list_move_tail(&vdesc->node, &chan->active_descs);
                desc = to_axi_dmac_desc(vdesc);
        }
        sg = &desc->sg[desc->num_submitted];

        /* Already queued in cyclic mode. Wait for it to finish */
        if (sg->id != AXI_DMAC_SG_UNUSED) {
                sg->schedule_when_free = true;
                return;
        }

        desc->num_submitted++;
        if (desc->num_submitted == desc->num_sgs) {
                if (desc->cyclic)
                        desc->num_submitted = 0; /* Start again */
                else
                        chan->next_desc = NULL;
        } else {
                chan->next_desc = desc;
        }

        sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

        if (axi_dmac_dest_is_mem(chan)) {
                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
        }

        if (axi_dmac_src_is_mem(chan)) {
                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
        }

        /*
         * If the hardware supports cyclic transfers, there is no callback to
         * call, and the descriptor consists of a single segment, enable
         * hardware cyclic mode to avoid unnecessary interrupts.
         */
        if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
                desc->num_sgs == 1)
                flags |= AXI_DMAC_FLAG_CYCLIC;

        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
        return list_first_entry_or_null(&chan->active_descs,
                struct axi_dmac_desc, vdesc.node);
}

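/*
 * Walk the active descriptors and retire every segment whose ID is set in the
 * completed_transfers bitmap read from AXI_DMAC_REG_TRANSFER_DONE. Returns
 * true if a segment was waiting for its queue slot to become free, in which
 * case the caller should schedule the next transfer.
 */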
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
        unsigned int completed_transfers)
{
        struct axi_dmac_desc *active;
        struct axi_dmac_sg *sg;
        bool start_next = false;

        active = axi_dmac_active_desc(chan);
        if (!active)
                return false;

        do {
                sg = &active->sg[active->num_completed];
                if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
                        break;
                if (!(BIT(sg->id) & completed_transfers))
                        break;
                active->num_completed++;
                sg->id = AXI_DMAC_SG_UNUSED;
                if (sg->schedule_when_free) {
                        sg->schedule_when_free = false;
                        start_next = true;
                }

                if (active->cyclic)
                        vchan_cyclic_callback(&active->vdesc);

                if (active->num_completed == active->num_sgs) {
                        if (active->cyclic) {
                                active->num_completed = 0; /* wrap around */
                        } else {
                                list_del(&active->vdesc.node);
                                vchan_cookie_complete(&active->vdesc);
                                active = axi_dmac_active_desc(chan);
                        }
                }
        } while (active);

        return start_next;
}

static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
        struct axi_dmac *dmac = devid;
        unsigned int pending;
        bool start_next = false;

        pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
        if (!pending)
                return IRQ_NONE;

        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

        spin_lock(&dmac->chan.vchan.lock);
        /* One or more transfers have finished */
        if (pending & AXI_DMAC_IRQ_EOT) {
                unsigned int completed;

                completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
                start_next = axi_dmac_transfer_done(&dmac->chan, completed);
        }
        /* Space has become available in the descriptor queue */
        if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
                axi_dmac_start_transfer(&dmac->chan);
        spin_unlock(&dmac->chan.vchan.lock);

        return IRQ_HANDLED;
}

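/*
 * Disable the core by clearing the control register and drop all pending and
 * active descriptors. The descriptors are collected on a local list so they
 * can be freed outside of the channel lock.
 */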
static int axi_dmac_terminate_all(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
        chan->next_desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        list_splice_tail_init(&chan->active_descs, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&chan->vchan, &head);

        return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

        vchan_synchronize(&chan->vchan);
}

static void axi_dmac_issue_pending(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;

        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (vchan_issue_pending(&chan->vchan))
                axi_dmac_start_transfer(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
        struct axi_dmac_desc *desc;
        unsigned int i;

        desc = kzalloc(sizeof(struct axi_dmac_desc) +
                sizeof(struct axi_dmac_sg) * num_sgs, GFP_NOWAIT);
        if (!desc)
                return NULL;

        for (i = 0; i < num_sgs; i++)
                desc->sg[i].id = AXI_DMAC_SG_UNUSED;

        desc->num_sgs = num_sgs;

        return desc;
}

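/*
 * Map each scatterlist entry onto its own one-dimensional segment
 * (y_len == 1). Any entry that violates the length or alignment constraints
 * of the channel causes the whole preparation to fail.
 */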
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
        struct dma_chan *c, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        struct scatterlist *sg;
        unsigned int i;

        if (direction != chan->direction)
                return NULL;

        desc = axi_dmac_alloc_desc(sg_len);
        if (!desc)
                return NULL;

        for_each_sg(sgl, sg, sg_len, i) {
                if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
                    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
                        kfree(desc);
                        return NULL;
                }

                if (direction == DMA_DEV_TO_MEM)
                        desc->sg[i].dest_addr = sg_dma_address(sg);
                else
                        desc->sg[i].src_addr = sg_dma_address(sg);
                desc->sg[i].x_len = sg_dma_len(sg);
                desc->sg[i].y_len = 1;
        }

        desc->cyclic = false;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

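/*
 * Split the buffer into one segment per period. The cyclic flag makes the
 * submission and completion counters wrap around, so the segments are
 * replayed indefinitely until the transfer is terminated.
 */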
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
        struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        unsigned int num_periods, i;

        if (direction != chan->direction)
                return NULL;

        if (!axi_dmac_check_len(chan, buf_len) ||
            !axi_dmac_check_addr(chan, buf_addr))
                return NULL;

        if (period_len == 0 || buf_len % period_len)
                return NULL;

        num_periods = buf_len / period_len;

        desc = axi_dmac_alloc_desc(num_periods);
        if (!desc)
                return NULL;

        for (i = 0; i < num_periods; i++) {
                if (direction == DMA_DEV_TO_MEM)
                        desc->sg[i].dest_addr = buf_addr;
                else
                        desc->sg[i].src_addr = buf_addr;
                desc->sg[i].x_len = period_len;
                desc->sg[i].y_len = 1;
                buf_addr += period_len;
        }

        desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

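/*
 * Only single-chunk frames (frame_size == 1) are supported. On cores with 2D
 * support, numf frames of sgl[0].size bytes are mapped onto the X/Y length
 * registers, with a stride of the chunk size plus the inter-chunk gap.
 * Without 2D support only gapless templates are accepted and the frames
 * collapse into a single one-dimensional transfer.
 */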
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
        struct dma_chan *c, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        size_t dst_icg, src_icg;

        if (xt->frame_size != 1)
                return NULL;

        if (xt->dir != chan->direction)
                return NULL;

        if (axi_dmac_src_is_mem(chan)) {
                if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
                        return NULL;
        }

        if (axi_dmac_dest_is_mem(chan)) {
                if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
                        return NULL;
        }

        dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
        src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

        if (chan->hw_2d) {
                if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
                    !axi_dmac_check_len(chan, xt->numf))
                        return NULL;
                if (xt->sgl[0].size + dst_icg > chan->max_length ||
                    xt->sgl[0].size + src_icg > chan->max_length)
                        return NULL;
        } else {
                if (dst_icg != 0 || src_icg != 0)
                        return NULL;
                if (chan->max_length / xt->sgl[0].size < xt->numf)
                        return NULL;
                if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
                        return NULL;
        }

        desc = axi_dmac_alloc_desc(1);
        if (!desc)
                return NULL;

        if (axi_dmac_src_is_mem(chan)) {
                desc->sg[0].src_addr = xt->src_start;
                desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
        }

        if (axi_dmac_dest_is_mem(chan)) {
                desc->sg[0].dest_addr = xt->dst_start;
                desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
        }

        if (chan->hw_2d) {
                desc->sg[0].x_len = xt->sgl[0].size;
                desc->sg[0].y_len = xt->numf;
        } else {
                desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
                desc->sg[0].y_len = 1;
        }

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
        vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
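/*
 * A minimal sketch of a matching devicetree fragment, with illustrative
 * address, interrupt, clock and width values. It describes a single channel
 * (reg = 0) that reads from a streaming port and writes to system memory,
 * i.e. a DMA_DEV_TO_MEM channel; the bus-type constants come from
 * dt-bindings/dma/axi-dmac.h:
 *
 *      dma-controller@44a30000 {
 *              compatible = "adi,axi-dmac-1.00.a";
 *              reg = <0x44a30000 0x1000>;
 *              interrupts = <0 57 0>;
 *              clocks = <&clkc 16>;
 *              #dma-cells = <1>;
 *
 *              adi,channels {
 *                      #address-cells = <1>;
 *                      #size-cells = <0>;
 *
 *                      dma-channel@0 {
 *                              reg = <0>;
 *                              adi,source-bus-type = <AXI_DMAC_BUS_TYPE_AXI_STREAM>;
 *                              adi,source-bus-width = <64>;
 *                              adi,destination-bus-type = <AXI_DMAC_BUS_TYPE_AXI_MM>;
 *                              adi,destination-bus-width = <64>;
 *                              adi,length-width = <24>;
 *                              adi,cyclic;
 *                      };
 *              };
 *      };
 */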
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
        struct axi_dmac_chan *chan)
{
        u32 val;
        int ret;

        ret = of_property_read_u32(of_chan, "reg", &val);
        if (ret)
                return ret;

        /* We only support 1 channel for now */
        if (val != 0)
                return -EINVAL;

        ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
        if (ret)
                return ret;
        if (val > AXI_DMAC_BUS_TYPE_FIFO)
                return -EINVAL;
        chan->src_type = val;

        ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
        if (ret)
                return ret;
        if (val > AXI_DMAC_BUS_TYPE_FIFO)
                return -EINVAL;
        chan->dest_type = val;

        ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
        if (ret)
                return ret;
        chan->src_width = val / 8;

        ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
        if (ret)
                return ret;
        chan->dest_width = val / 8;

        ret = of_property_read_u32(of_chan, "adi,length-width", &val);
        if (ret)
                return ret;

        if (val >= 32)
                chan->max_length = UINT_MAX;
        else
                chan->max_length = (1ULL << val) - 1;

        chan->align_mask = max(chan->dest_width, chan->src_width) - 1;

        if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_MEM;
        else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_DEV;
        else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
                chan->direction = DMA_DEV_TO_MEM;
        else
                chan->direction = DMA_DEV_TO_DEV;

        chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
        chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");

        return 0;
}

static int axi_dmac_probe(struct platform_device *pdev)
{
        struct device_node *of_channels, *of_chan;
        struct dma_device *dma_dev;
        struct axi_dmac *dmac;
        struct resource *res;
        int ret;

        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
                return -ENOMEM;

        dmac->irq = platform_get_irq(pdev, 0);
        if (dmac->irq < 0)
                return dmac->irq;
        if (dmac->irq == 0)
                return -EINVAL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmac->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dmac->base))
                return PTR_ERR(dmac->base);

        dmac->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dmac->clk))
                return PTR_ERR(dmac->clk);

        INIT_LIST_HEAD(&dmac->chan.active_descs);

        of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
        if (of_channels == NULL)
                return -ENODEV;

        for_each_child_of_node(of_channels, of_chan) {
                ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
                if (ret) {
                        of_node_put(of_chan);
                        of_node_put(of_channels);
                        return -EINVAL;
                }
        }
        of_node_put(of_channels);

        pdev->dev.dma_parms = &dmac->dma_parms;
        dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);

        dma_dev = &dmac->dma_dev;
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
        dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = axi_dmac_issue_pending;
        dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
        dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
        dma_dev->device_terminate_all = axi_dmac_terminate_all;
        dma_dev->device_synchronize = axi_dmac_synchronize;
        dma_dev->dev = &pdev->dev;
        dma_dev->chancnt = 1;
        dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
        dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
        dma_dev->directions = BIT(dmac->chan.direction);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        INIT_LIST_HEAD(&dma_dev->channels);

        dmac->chan.vchan.desc_free = axi_dmac_desc_free;
        vchan_init(&dmac->chan.vchan, dma_dev);

        ret = clk_prepare_enable(dmac->clk);
        if (ret < 0)
                return ret;

        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_clk_disable;

        ret = of_dma_controller_register(pdev->dev.of_node,
                of_dma_xlate_by_chan_id, dma_dev);
        if (ret)
                goto err_unregister_device;

        ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, 0,
                dev_name(&pdev->dev), dmac);
        if (ret)
                goto err_unregister_of;

        platform_set_drvdata(pdev, dmac);

        return 0;

err_unregister_of:
        of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
        dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
        clk_disable_unprepare(dmac->clk);

        return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
        struct axi_dmac *dmac = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);
        free_irq(dmac->irq, dmac);
        tasklet_kill(&dmac->chan.vchan.task);
        dma_async_device_unregister(&dmac->dma_dev);
        clk_disable_unprepare(dmac->clk);

        return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
        { .compatible = "adi,axi-dmac-1.00.a" },
        { },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
        .driver = {
                .name = "dma-axi-dmac",
                .of_match_table = axi_dmac_of_match_table,
        },
        .probe = axi_dmac_probe,
        .remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");