linux/drivers/dma/dma-axi-dmac.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Analog Devices AXI-DMAC core
 *
 * Copyright 2013-2019 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/fpga/adi-axi-common.h>

#include <dt-bindings/dma/axi-dmac.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * The AXI-DMAC is a soft IP core that is used in FPGA designs. The core has
 * various instantiation parameters which decide the exact feature set
 * supported by the core.
 *
 * Each channel of the core has a source interface and a destination interface.
 * The number of channels and the type of the channel interfaces are selected
 * at configuration time. An interface can either be connected to a central
 * memory interconnect, which allows access to system memory, or it can be
 * connected to a dedicated bus which is directly connected to a data port on
 * a peripheral. Given that those are configuration options of the core that
 * are selected when it is instantiated, they can not be changed by software
 * at runtime. By extension this means that each channel is uni-directional.
 * It can either be device to memory or memory to device, but not both. Also,
 * since the device side is a dedicated data bus connected only to a single
 * peripheral, there is no address that can or needs to be configured for the
 * device side.
 */

#define AXI_DMAC_REG_IRQ_MASK           0x80
#define AXI_DMAC_REG_IRQ_PENDING        0x84
#define AXI_DMAC_REG_IRQ_SOURCE         0x88

#define AXI_DMAC_REG_CTRL               0x400
#define AXI_DMAC_REG_TRANSFER_ID        0x404
#define AXI_DMAC_REG_START_TRANSFER     0x408
#define AXI_DMAC_REG_FLAGS              0x40c
#define AXI_DMAC_REG_DEST_ADDRESS       0x410
#define AXI_DMAC_REG_SRC_ADDRESS        0x414
#define AXI_DMAC_REG_X_LENGTH           0x418
#define AXI_DMAC_REG_Y_LENGTH           0x41c
#define AXI_DMAC_REG_DEST_STRIDE        0x420
#define AXI_DMAC_REG_SRC_STRIDE         0x424
#define AXI_DMAC_REG_TRANSFER_DONE      0x428
#define AXI_DMAC_REG_ACTIVE_TRANSFER_ID 0x42c
#define AXI_DMAC_REG_STATUS             0x430
#define AXI_DMAC_REG_CURRENT_SRC_ADDR   0x434
#define AXI_DMAC_REG_CURRENT_DEST_ADDR  0x438
#define AXI_DMAC_REG_PARTIAL_XFER_LEN   0x44c
#define AXI_DMAC_REG_PARTIAL_XFER_ID    0x450

#define AXI_DMAC_CTRL_ENABLE            BIT(0)
#define AXI_DMAC_CTRL_PAUSE             BIT(1)

#define AXI_DMAC_IRQ_SOT                BIT(0)
#define AXI_DMAC_IRQ_EOT                BIT(1)

#define AXI_DMAC_FLAG_CYCLIC            BIT(0)
#define AXI_DMAC_FLAG_LAST              BIT(1)
#define AXI_DMAC_FLAG_PARTIAL_REPORT    BIT(2)

#define AXI_DMAC_FLAG_PARTIAL_XFER_DONE BIT(31)

/* The maximum ID allocated by the hardware is 31 */
#define AXI_DMAC_SG_UNUSED 32U

struct axi_dmac_sg {
        dma_addr_t src_addr;
        dma_addr_t dest_addr;
        unsigned int x_len;
        unsigned int y_len;
        unsigned int dest_stride;
        unsigned int src_stride;
        unsigned int id;
        unsigned int partial_len;
        bool schedule_when_free;
};

struct axi_dmac_desc {
        struct virt_dma_desc vdesc;
        bool cyclic;
        bool have_partial_xfer;

        unsigned int num_submitted;
        unsigned int num_completed;
        unsigned int num_sgs;
        struct axi_dmac_sg sg[];
};

struct axi_dmac_chan {
        struct virt_dma_chan vchan;

        struct axi_dmac_desc *next_desc;
        struct list_head active_descs;
        enum dma_transfer_direction direction;

        unsigned int src_width;
        unsigned int dest_width;
        unsigned int src_type;
        unsigned int dest_type;

        unsigned int max_length;
        unsigned int address_align_mask;
        unsigned int length_align_mask;

        bool hw_partial_xfer;
        bool hw_cyclic;
        bool hw_2d;
};

struct axi_dmac {
        void __iomem *base;
        int irq;

        struct clk *clk;

        struct dma_device dma_dev;
        struct axi_dmac_chan chan;

        struct device_dma_parameters dma_parms;
};

static struct axi_dmac *chan_to_axi_dmac(struct axi_dmac_chan *chan)
{
        return container_of(chan->vchan.chan.device, struct axi_dmac,
                dma_dev);
}

static struct axi_dmac_chan *to_axi_dmac_chan(struct dma_chan *c)
{
        return container_of(c, struct axi_dmac_chan, vchan.chan);
}

static struct axi_dmac_desc *to_axi_dmac_desc(struct virt_dma_desc *vdesc)
{
        return container_of(vdesc, struct axi_dmac_desc, vdesc);
}

static void axi_dmac_write(struct axi_dmac *axi_dmac, unsigned int reg,
        unsigned int val)
{
        writel(val, axi_dmac->base + reg);
}

static int axi_dmac_read(struct axi_dmac *axi_dmac, unsigned int reg)
{
        return readl(axi_dmac->base + reg);
}

static int axi_dmac_src_is_mem(struct axi_dmac_chan *chan)
{
        return chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
{
        return chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM;
}

static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
{
        if (len == 0)
                return false;
        if ((len & chan->length_align_mask) != 0) /* Not aligned */
                return false;
        return true;
}

static bool axi_dmac_check_addr(struct axi_dmac_chan *chan, dma_addr_t addr)
{
        if ((addr & chan->address_align_mask) != 0) /* Not aligned */
                return false;
        return true;
}

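/*
 * Submit the next segment of the current (or next issued) descriptor to the
 * hardware. If the START_TRANSFER register reads back non-zero the
 * controller's request queue is full and we wait for the next
 * start-of-transfer interrupt before submitting more. Called with the
 * channel lock held.
 */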
static void axi_dmac_start_transfer(struct axi_dmac_chan *chan)
{
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        struct virt_dma_desc *vdesc;
        struct axi_dmac_desc *desc;
        struct axi_dmac_sg *sg;
        unsigned int flags = 0;
        unsigned int val;

        val = axi_dmac_read(dmac, AXI_DMAC_REG_START_TRANSFER);
        if (val) /* Queue is full, wait for the next SOT IRQ */
                return;

        desc = chan->next_desc;

        if (!desc) {
                vdesc = vchan_next_desc(&chan->vchan);
                if (!vdesc)
                        return;
                list_move_tail(&vdesc->node, &chan->active_descs);
                desc = to_axi_dmac_desc(vdesc);
        }
        sg = &desc->sg[desc->num_submitted];

        /* Already queued in cyclic mode. Wait for it to finish */
        if (sg->id != AXI_DMAC_SG_UNUSED) {
                sg->schedule_when_free = true;
                return;
        }

        desc->num_submitted++;
        if (desc->num_submitted == desc->num_sgs ||
            desc->have_partial_xfer) {
                if (desc->cyclic)
                        desc->num_submitted = 0; /* Start again */
                else
                        chan->next_desc = NULL;
                flags |= AXI_DMAC_FLAG_LAST;
        } else {
                chan->next_desc = desc;
        }

        sg->id = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_ID);

        if (axi_dmac_dest_is_mem(chan)) {
                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, sg->dest_addr);
                axi_dmac_write(dmac, AXI_DMAC_REG_DEST_STRIDE, sg->dest_stride);
        }

        if (axi_dmac_src_is_mem(chan)) {
                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, sg->src_addr);
                axi_dmac_write(dmac, AXI_DMAC_REG_SRC_STRIDE, sg->src_stride);
        }

        /*
         * If the hardware supports cyclic transfers and there is no callback to
         * call and only a single segment, enable hw cyclic mode to avoid
         * unnecessary interrupts.
         */
        if (chan->hw_cyclic && desc->cyclic && !desc->vdesc.tx.callback &&
                desc->num_sgs == 1)
                flags |= AXI_DMAC_FLAG_CYCLIC;

        if (chan->hw_partial_xfer)
                flags |= AXI_DMAC_FLAG_PARTIAL_REPORT;

        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, sg->x_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, sg->y_len - 1);
        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_START_TRANSFER, 1);
}

static struct axi_dmac_desc *axi_dmac_active_desc(struct axi_dmac_chan *chan)
{
        return list_first_entry_or_null(&chan->active_descs,
                struct axi_dmac_desc, vdesc.node);
}

static inline unsigned int axi_dmac_total_sg_bytes(struct axi_dmac_chan *chan,
        struct axi_dmac_sg *sg)
{
        if (chan->hw_2d)
                return sg->x_len * sg->y_len;
        else
                return sg->x_len;
}

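/*
 * Read the length and transfer ID of each partially completed transfer
 * reported by the hardware and attach the partial length to the matching
 * segment of an active descriptor, repeating until the TRANSFER_DONE
 * register no longer flags pending partial transfers.
 */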
static void axi_dmac_dequeue_partial_xfers(struct axi_dmac_chan *chan)
{
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        struct axi_dmac_desc *desc;
        struct axi_dmac_sg *sg;
        u32 xfer_done, len, id, i;
        bool found_sg;

        do {
                len = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_LEN);
                id  = axi_dmac_read(dmac, AXI_DMAC_REG_PARTIAL_XFER_ID);

                found_sg = false;
                list_for_each_entry(desc, &chan->active_descs, vdesc.node) {
                        for (i = 0; i < desc->num_sgs; i++) {
                                sg = &desc->sg[i];
                                if (sg->id == AXI_DMAC_SG_UNUSED)
                                        continue;
                                if (sg->id == id) {
                                        desc->have_partial_xfer = true;
                                        sg->partial_len = len;
                                        found_sg = true;
                                        break;
                                }
                        }
                        if (found_sg)
                                break;
                }

                if (found_sg) {
                        dev_dbg(dmac->dma_dev.dev,
                                "Found partial segment id=%u, len=%u\n",
                                id, len);
                } else {
                        dev_warn(dmac->dma_dev.dev,
                                 "Partial segment not found, id=%u, len=%u\n",
                                 id, len);
                }

                /* Check if we have any more partial transfers */
                xfer_done = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
                xfer_done = !(xfer_done & AXI_DMAC_FLAG_PARTIAL_XFER_DONE);

        } while (!xfer_done);
}

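/*
 * Called when the last completed segment was a partial transfer. The residue
 * is the sum, over that segment and all following segments of the
 * descriptor, of the bytes that were requested but not transferred.
 */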
static void axi_dmac_compute_residue(struct axi_dmac_chan *chan,
        struct axi_dmac_desc *active)
{
        struct dmaengine_result *rslt = &active->vdesc.tx_result;
        unsigned int start = active->num_completed - 1;
        struct axi_dmac_sg *sg;
        unsigned int i, total;

        rslt->result = DMA_TRANS_NOERROR;
        rslt->residue = 0;

        /*
         * We get here if the last completed segment is partial, which
         * means we can compute the residue from that segment onwards
         */
        for (i = start; i < active->num_sgs; i++) {
                sg = &active->sg[i];
                total = axi_dmac_total_sg_bytes(chan, sg);
                rslt->residue += (total - sg->partial_len);
        }
}

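/*
 * Walk the active descriptor and mark every segment whose transfer ID is set
 * in the completion mask as done. A completed cyclic descriptor wraps around
 * and stays active, while a non-cyclic descriptor is completed and removed
 * once all of its segments (or a partial transfer) have finished. Returns
 * true if a segment was waiting for its hardware slot to free up, in which
 * case the caller should start the next transfer.
 */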
static bool axi_dmac_transfer_done(struct axi_dmac_chan *chan,
        unsigned int completed_transfers)
{
        struct axi_dmac_desc *active;
        struct axi_dmac_sg *sg;
        bool start_next = false;

        active = axi_dmac_active_desc(chan);
        if (!active)
                return false;

        if (chan->hw_partial_xfer &&
            (completed_transfers & AXI_DMAC_FLAG_PARTIAL_XFER_DONE))
                axi_dmac_dequeue_partial_xfers(chan);

        do {
                sg = &active->sg[active->num_completed];
                if (sg->id == AXI_DMAC_SG_UNUSED) /* Not yet submitted */
                        break;
                if (!(BIT(sg->id) & completed_transfers))
                        break;
                active->num_completed++;
                sg->id = AXI_DMAC_SG_UNUSED;
                if (sg->schedule_when_free) {
                        sg->schedule_when_free = false;
                        start_next = true;
                }

                if (sg->partial_len)
                        axi_dmac_compute_residue(chan, active);

                if (active->cyclic)
                        vchan_cyclic_callback(&active->vdesc);

                if (active->num_completed == active->num_sgs ||
                    sg->partial_len) {
                        if (active->cyclic) {
                                active->num_completed = 0; /* wrap around */
                        } else {
                                list_del(&active->vdesc.node);
                                vchan_cookie_complete(&active->vdesc);
                                active = axi_dmac_active_desc(chan);
                        }
                }
        } while (active);

        return start_next;
}

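/*
 * Interrupt handler: acknowledge all pending interrupt sources, process
 * end-of-transfer completions and, when queue space has become available
 * (start-of-transfer) or a freed segment needs rescheduling, push the next
 * transfer to the hardware.
 */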
static irqreturn_t axi_dmac_interrupt_handler(int irq, void *devid)
{
        struct axi_dmac *dmac = devid;
        unsigned int pending;
        bool start_next = false;

        pending = axi_dmac_read(dmac, AXI_DMAC_REG_IRQ_PENDING);
        if (!pending)
                return IRQ_NONE;

        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_PENDING, pending);

        spin_lock(&dmac->chan.vchan.lock);
        /* One or more transfers have finished */
        if (pending & AXI_DMAC_IRQ_EOT) {
                unsigned int completed;

                completed = axi_dmac_read(dmac, AXI_DMAC_REG_TRANSFER_DONE);
                start_next = axi_dmac_transfer_done(&dmac->chan, completed);
        }
        /* Space has become available in the descriptor queue */
        if ((pending & AXI_DMAC_IRQ_SOT) || start_next)
                axi_dmac_start_transfer(&dmac->chan);
        spin_unlock(&dmac->chan.vchan.lock);

        return IRQ_HANDLED;
}

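/*
 * dmaengine device_terminate_all callback: disable the controller and free
 * all issued and active descriptors.
 */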
static int axi_dmac_terminate_all(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, 0);
        chan->next_desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        list_splice_tail_init(&chan->active_descs, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&chan->vchan, &head);

        return 0;
}

static void axi_dmac_synchronize(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);

        vchan_synchronize(&chan->vchan);
}

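/*
 * dmaengine device_issue_pending callback: enable the DMA controller and
 * start submitting newly issued descriptors to the hardware.
 */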
static void axi_dmac_issue_pending(struct dma_chan *c)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac *dmac = chan_to_axi_dmac(chan);
        unsigned long flags;

        axi_dmac_write(dmac, AXI_DMAC_REG_CTRL, AXI_DMAC_CTRL_ENABLE);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        if (vchan_issue_pending(&chan->vchan))
                axi_dmac_start_transfer(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

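/*
 * Allocate a descriptor with room for num_sgs segments, with every segment
 * initially marked as not yet submitted to the hardware.
 */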
static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
{
        struct axi_dmac_desc *desc;
        unsigned int i;

        desc = kzalloc(struct_size(desc, sg, num_sgs), GFP_NOWAIT);
        if (!desc)
                return NULL;

        for (i = 0; i < num_sgs; i++)
                desc->sg[i].id = AXI_DMAC_SG_UNUSED;

        desc->num_sgs = num_sgs;

        return desc;
}

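/*
 * Fill a run of segments describing num_periods contiguous blocks of
 * period_len bytes starting at addr. Periods larger than the controller's
 * maximum transfer length are split into equally sized, alignment-friendly
 * segments. Returns a pointer to the first unused segment.
 */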
static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
        enum dma_transfer_direction direction, dma_addr_t addr,
        unsigned int num_periods, unsigned int period_len,
        struct axi_dmac_sg *sg)
{
        unsigned int num_segments, i;
        unsigned int segment_size;
        unsigned int len;

        /* Split into multiple equally sized segments if necessary */
        num_segments = DIV_ROUND_UP(period_len, chan->max_length);
        segment_size = DIV_ROUND_UP(period_len, num_segments);
        /* Take care of alignment */
        segment_size = ((segment_size - 1) | chan->length_align_mask) + 1;

        for (i = 0; i < num_periods; i++) {
                len = period_len;

                while (len > segment_size) {
                        if (direction == DMA_DEV_TO_MEM)
                                sg->dest_addr = addr;
                        else
                                sg->src_addr = addr;
                        sg->x_len = segment_size;
                        sg->y_len = 1;
                        sg++;
                        addr += segment_size;
                        len -= segment_size;
                }

                if (direction == DMA_DEV_TO_MEM)
                        sg->dest_addr = addr;
                else
                        sg->src_addr = addr;
                sg->x_len = len;
                sg->y_len = 1;
                sg++;
                addr += len;
        }

        return sg;
}

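/*
 * dmaengine device_prep_slave_sg callback: translate a scatterlist into DMAC
 * segments, splitting entries that exceed the maximum transfer length and
 * rejecting entries that violate the address or length alignment constraints.
 */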
static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
        struct dma_chan *c, struct scatterlist *sgl,
        unsigned int sg_len, enum dma_transfer_direction direction,
        unsigned long flags, void *context)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        struct axi_dmac_sg *dsg;
        struct scatterlist *sg;
        unsigned int num_sgs;
        unsigned int i;

        if (direction != chan->direction)
                return NULL;

        num_sgs = 0;
        for_each_sg(sgl, sg, sg_len, i)
                num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);

        desc = axi_dmac_alloc_desc(num_sgs);
        if (!desc)
                return NULL;

        dsg = desc->sg;

        for_each_sg(sgl, sg, sg_len, i) {
                if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
                    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
                        kfree(desc);
                        return NULL;
                }

                dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
                        sg_dma_len(sg), dsg);
        }

        desc->cyclic = false;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

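/*
 * dmaengine device_prep_dma_cyclic callback: build a cyclic descriptor
 * covering buf_len bytes split into periods of period_len bytes, with each
 * period possibly consisting of multiple segments if it exceeds the maximum
 * transfer length.
 */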
static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
        struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
        size_t period_len, enum dma_transfer_direction direction,
        unsigned long flags)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        unsigned int num_periods, num_segments;

        if (direction != chan->direction)
                return NULL;

        if (!axi_dmac_check_len(chan, buf_len) ||
            !axi_dmac_check_addr(chan, buf_addr))
                return NULL;

        if (period_len == 0 || buf_len % period_len)
                return NULL;

        num_periods = buf_len / period_len;
        num_segments = DIV_ROUND_UP(period_len, chan->max_length);

        desc = axi_dmac_alloc_desc(num_periods * num_segments);
        if (!desc)
                return NULL;

        axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
                period_len, desc->sg);

        desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

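/*
 * dmaengine device_prep_interleaved_dma callback: map a single-frame
 * interleaved template onto one segment, using the hardware 2D mode (x/y
 * length plus stride) when available and falling back to a flat 1D transfer
 * when the gaps between chunks are zero.
 */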
static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
        struct dma_chan *c, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
        struct axi_dmac_desc *desc;
        size_t dst_icg, src_icg;

        if (xt->frame_size != 1)
                return NULL;

        if (xt->dir != chan->direction)
                return NULL;

        if (axi_dmac_src_is_mem(chan)) {
                if (!xt->src_inc || !axi_dmac_check_addr(chan, xt->src_start))
                        return NULL;
        }

        if (axi_dmac_dest_is_mem(chan)) {
                if (!xt->dst_inc || !axi_dmac_check_addr(chan, xt->dst_start))
                        return NULL;
        }

        dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
        src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);

        if (chan->hw_2d) {
                if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
                    xt->numf == 0)
                        return NULL;
                if (xt->sgl[0].size + dst_icg > chan->max_length ||
                    xt->sgl[0].size + src_icg > chan->max_length)
                        return NULL;
        } else {
                if (dst_icg != 0 || src_icg != 0)
                        return NULL;
                if (chan->max_length / xt->sgl[0].size < xt->numf)
                        return NULL;
                if (!axi_dmac_check_len(chan, xt->sgl[0].size * xt->numf))
                        return NULL;
        }

        desc = axi_dmac_alloc_desc(1);
        if (!desc)
                return NULL;

        if (axi_dmac_src_is_mem(chan)) {
                desc->sg[0].src_addr = xt->src_start;
                desc->sg[0].src_stride = xt->sgl[0].size + src_icg;
        }

        if (axi_dmac_dest_is_mem(chan)) {
                desc->sg[0].dest_addr = xt->dst_start;
                desc->sg[0].dest_stride = xt->sgl[0].size + dst_icg;
        }

        if (chan->hw_2d) {
                desc->sg[0].x_len = xt->sgl[0].size;
                desc->sg[0].y_len = xt->numf;
        } else {
                desc->sg[0].x_len = xt->sgl[0].size * xt->numf;
                desc->sg[0].y_len = 1;
        }

        if (flags & DMA_CYCLIC)
                desc->cyclic = true;

        return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void axi_dmac_free_chan_resources(struct dma_chan *c)
{
        vchan_free_chan_resources(to_virt_chan(c));
}

static void axi_dmac_desc_free(struct virt_dma_desc *vdesc)
{
        kfree(container_of(vdesc, struct axi_dmac_desc, vdesc));
}

static bool axi_dmac_regmap_rdwr(struct device *dev, unsigned int reg)
{
        switch (reg) {
        case AXI_DMAC_REG_IRQ_MASK:
        case AXI_DMAC_REG_IRQ_SOURCE:
        case AXI_DMAC_REG_IRQ_PENDING:
        case AXI_DMAC_REG_CTRL:
        case AXI_DMAC_REG_TRANSFER_ID:
        case AXI_DMAC_REG_START_TRANSFER:
        case AXI_DMAC_REG_FLAGS:
        case AXI_DMAC_REG_DEST_ADDRESS:
        case AXI_DMAC_REG_SRC_ADDRESS:
        case AXI_DMAC_REG_X_LENGTH:
        case AXI_DMAC_REG_Y_LENGTH:
        case AXI_DMAC_REG_DEST_STRIDE:
        case AXI_DMAC_REG_SRC_STRIDE:
        case AXI_DMAC_REG_TRANSFER_DONE:
        case AXI_DMAC_REG_ACTIVE_TRANSFER_ID:
        case AXI_DMAC_REG_STATUS:
        case AXI_DMAC_REG_CURRENT_SRC_ADDR:
        case AXI_DMAC_REG_CURRENT_DEST_ADDR:
        case AXI_DMAC_REG_PARTIAL_XFER_LEN:
        case AXI_DMAC_REG_PARTIAL_XFER_ID:
                return true;
        default:
                return false;
        }
}

static const struct regmap_config axi_dmac_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
        .max_register = AXI_DMAC_REG_PARTIAL_XFER_ID,
        .readable_reg = axi_dmac_regmap_rdwr,
        .writeable_reg = axi_dmac_regmap_rdwr,
};

/*
 * The configuration stored in the devicetree matches the configuration
 * parameters of the peripheral instance and allows the driver to know which
 * features are implemented and how it should behave.
 */
static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
        struct axi_dmac_chan *chan)
{
        u32 val;
        int ret;

        ret = of_property_read_u32(of_chan, "reg", &val);
        if (ret)
                return ret;

        /* We only support 1 channel for now */
        if (val != 0)
                return -EINVAL;

        ret = of_property_read_u32(of_chan, "adi,source-bus-type", &val);
        if (ret)
                return ret;
        if (val > AXI_DMAC_BUS_TYPE_FIFO)
                return -EINVAL;
        chan->src_type = val;

        ret = of_property_read_u32(of_chan, "adi,destination-bus-type", &val);
        if (ret)
                return ret;
        if (val > AXI_DMAC_BUS_TYPE_FIFO)
                return -EINVAL;
        chan->dest_type = val;

        ret = of_property_read_u32(of_chan, "adi,source-bus-width", &val);
        if (ret)
                return ret;
        chan->src_width = val / 8;

        ret = of_property_read_u32(of_chan, "adi,destination-bus-width", &val);
        if (ret)
                return ret;
        chan->dest_width = val / 8;

        chan->address_align_mask = max(chan->dest_width, chan->src_width) - 1;

        if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_MEM;
        else if (!axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
                chan->direction = DMA_MEM_TO_DEV;
        else if (axi_dmac_dest_is_mem(chan) && !axi_dmac_src_is_mem(chan))
                chan->direction = DMA_DEV_TO_MEM;
        else
                chan->direction = DMA_DEV_TO_DEV;

        return 0;
}

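/*
 * Detect the capabilities of this core instance by writing to
 * feature-specific registers and reading back which bits stick: cyclic
 * support, 2D support, the maximum transfer length and the effective length
 * alignment. Partial transfer reporting is available from core version 4.2.a
 * onwards.
 */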
static int axi_dmac_detect_caps(struct axi_dmac *dmac)
{
        struct axi_dmac_chan *chan = &dmac->chan;
        unsigned int version;

        version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);

        axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
                chan->hw_cyclic = true;

        axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
                chan->hw_2d = true;

        axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
        chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
        if (chan->max_length != UINT_MAX)
                chan->max_length++;

        axi_dmac_write(dmac, AXI_DMAC_REG_DEST_ADDRESS, 0xffffffff);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_DEST_ADDRESS) == 0 &&
            chan->dest_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
                dev_err(dmac->dma_dev.dev,
                        "Destination memory-mapped interface not supported.");
                return -ENODEV;
        }

        axi_dmac_write(dmac, AXI_DMAC_REG_SRC_ADDRESS, 0xffffffff);
        if (axi_dmac_read(dmac, AXI_DMAC_REG_SRC_ADDRESS) == 0 &&
            chan->src_type == AXI_DMAC_BUS_TYPE_AXI_MM) {
                dev_err(dmac->dma_dev.dev,
                        "Source memory-mapped interface not supported.");
                return -ENODEV;
        }

        if (version >= ADI_AXI_PCORE_VER(4, 2, 'a'))
                chan->hw_partial_xfer = true;

        if (version >= ADI_AXI_PCORE_VER(4, 1, 'a')) {
                axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0x00);
                chan->length_align_mask =
                        axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
        } else {
                chan->length_align_mask = chan->address_align_mask;
        }

        return 0;
}

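/*
 * Platform driver probe: map the register space, parse the single channel
 * description from the devicetree, detect the hardware capabilities and
 * register the channel with the dmaengine framework and the OF DMA helpers.
 * A read/write regmap over the register space is also created, e.g. so the
 * registers can be inspected for debugging.
 */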
static int axi_dmac_probe(struct platform_device *pdev)
{
        struct device_node *of_channels, *of_chan;
        struct dma_device *dma_dev;
        struct axi_dmac *dmac;
        struct resource *res;
        int ret;

        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
                return -ENOMEM;

        dmac->irq = platform_get_irq(pdev, 0);
        if (dmac->irq < 0)
                return dmac->irq;
        if (dmac->irq == 0)
                return -EINVAL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmac->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(dmac->base))
                return PTR_ERR(dmac->base);

        dmac->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(dmac->clk))
                return PTR_ERR(dmac->clk);

        INIT_LIST_HEAD(&dmac->chan.active_descs);

        of_channels = of_get_child_by_name(pdev->dev.of_node, "adi,channels");
        if (of_channels == NULL)
                return -ENODEV;

        for_each_child_of_node(of_channels, of_chan) {
                ret = axi_dmac_parse_chan_dt(of_chan, &dmac->chan);
                if (ret) {
                        of_node_put(of_chan);
                        of_node_put(of_channels);
                        return -EINVAL;
                }
        }
        of_node_put(of_channels);

        pdev->dev.dma_parms = &dmac->dma_parms;
        dma_set_max_seg_size(&pdev->dev, UINT_MAX);

        dma_dev = &dmac->dma_dev;
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
        dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = axi_dmac_issue_pending;
        dma_dev->device_prep_slave_sg = axi_dmac_prep_slave_sg;
        dma_dev->device_prep_dma_cyclic = axi_dmac_prep_dma_cyclic;
        dma_dev->device_prep_interleaved_dma = axi_dmac_prep_interleaved;
        dma_dev->device_terminate_all = axi_dmac_terminate_all;
        dma_dev->device_synchronize = axi_dmac_synchronize;
        dma_dev->dev = &pdev->dev;
        dma_dev->chancnt = 1;
        dma_dev->src_addr_widths = BIT(dmac->chan.src_width);
        dma_dev->dst_addr_widths = BIT(dmac->chan.dest_width);
        dma_dev->directions = BIT(dmac->chan.direction);
        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        INIT_LIST_HEAD(&dma_dev->channels);

        dmac->chan.vchan.desc_free = axi_dmac_desc_free;
        vchan_init(&dmac->chan.vchan, dma_dev);

        ret = clk_prepare_enable(dmac->clk);
        if (ret < 0)
                return ret;

        ret = axi_dmac_detect_caps(dmac);
        if (ret)
                goto err_clk_disable;

        dma_dev->copy_align = (dmac->chan.address_align_mask + 1);

        axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto err_clk_disable;

        ret = of_dma_controller_register(pdev->dev.of_node,
                of_dma_xlate_by_chan_id, dma_dev);
        if (ret)
                goto err_unregister_device;

        ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED,
                dev_name(&pdev->dev), dmac);
        if (ret)
                goto err_unregister_of;

        platform_set_drvdata(pdev, dmac);

        devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config);

        return 0;

err_unregister_of:
        of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
        dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
        clk_disable_unprepare(dmac->clk);

        return ret;
}

static int axi_dmac_remove(struct platform_device *pdev)
{
        struct axi_dmac *dmac = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);
        free_irq(dmac->irq, dmac);
        tasklet_kill(&dmac->chan.vchan.task);
        dma_async_device_unregister(&dmac->dma_dev);
        clk_disable_unprepare(dmac->clk);

        return 0;
}

static const struct of_device_id axi_dmac_of_match_table[] = {
        { .compatible = "adi,axi-dmac-1.00.a" },
        { },
};
MODULE_DEVICE_TABLE(of, axi_dmac_of_match_table);

static struct platform_driver axi_dmac_driver = {
        .driver = {
                .name = "dma-axi-dmac",
                .of_match_table = axi_dmac_of_match_table,
        },
        .probe = axi_dmac_probe,
        .remove = axi_dmac_remove,
};
module_platform_driver(axi_dmac_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA controller driver for the AXI-DMAC controller");
MODULE_LICENSE("GPL v2");