linux/drivers/dma/mv_xor_v2.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF                     0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF                     0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF                     0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF                     0x00C
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK          0x7FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT         0
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK         0x1FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT        16
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF                   0x010
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK            0x3F3F
#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE       0x202
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE             0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF                     0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF                     0x018
#define   MV_XOR_V2_DMA_IMSG_THRD_MASK                  0x7FFF
#define   MV_XOR_V2_DMA_IMSG_TIMER_EN                   BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF                   0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF                    0x04C
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK           0xFFFF
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT          16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF                     0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF                     0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF                     0x100
#define   MV_XOR_V2_DMA_DESQ_CTRL_32B                   1
#define   MV_XOR_V2_DMA_DESQ_CTRL_128B                  7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF                     0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF                  0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF                      0x808
#define MV_XOR_V2_DMA_IMSG_TMOT                         0x810
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK            0x1FFF

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL                          0x4
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT      0
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL        64
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT      8
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL        8
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT     12
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL       4
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT     16
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL       4
#define MV_XOR_V2_GLOB_PAUSE                            0x014
#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL         0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE                    0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK                     0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE                    0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK                     0x224

#define MV_XOR_V2_MIN_DESC_SIZE                         32
#define MV_XOR_V2_EXT_DESC_SIZE                         128

#define MV_XOR_V2_DESC_RESERVED_SIZE                    12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE                 12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF                8

/*
 * Descriptors queue size. With 32-byte descriptors, up to 2^14
 * descriptors are allowed; with 128-byte descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128-byte descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM                              1024
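
/*
 * For reference: with 128-byte descriptors this ring occupies
 * 1024 * 128 bytes = 128 KiB of DMA-coherent memory, allocated in
 * mv_xor_v2_probe() below.
 */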

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD  0x14
#define MV_XOR_V2_TIMER_THRD      0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: number of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
        u16 desc_id;
        u16 flags;
        u32 crc32_result;
        u32 desc_ctrl;

        /* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT     22
#define DESC_OP_MODE_SHIFT              28
#define DESC_OP_MODE_NOP                0       /* Idle operation */
#define DESC_OP_MODE_MEMCPY             1       /* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET             2       /* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT            3       /* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE        4       /* Mem-Compare operation */
#define DESC_OP_MODE_CRC32              5       /* CRC32 calculation */
#define DESC_OP_MODE_XOR                6       /* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6              7       /* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC          8       /* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE            BIT(16)
#define DESC_P_BUFFER_ENABLE            BIT(17)
#define DESC_IOD                        BIT(27)

        u32 buff_size;
        u32 fill_pattern_src_addr[4];
        u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
        u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};

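/*
 * Illustrative example, derived from the definitions above: a MEMCPY
 * descriptor that raises an interrupt on completion carries
 *
 *	desc_ctrl = (DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT) | DESC_IOD
 *	          = (1 << 28) | (1 << 27) = 0x18000000
 *
 * which is what mv_xor_v2_prep_dma_memcpy() below builds when
 * DMA_PREP_INTERRUPT is set.
 */
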
/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @clk: reference to the 'core' clock
 * @reg_clk: reference to the 'reg' clock
 * @irq_tasklet: tasklet that runs descriptor completion handling
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: next HW queue slot to receive a submitted descriptor
 * @msi_desc: MSI descriptor of the allocated interrupt
 */
struct mv_xor_v2_device {
        spinlock_t lock;
        void __iomem *dma_base;
        void __iomem *glob_base;
        struct clk *clk;
        struct clk *reg_clk;
        struct tasklet_struct irq_tasklet;
        struct list_head free_sw_desc;
        struct dma_device dmadev;
        struct dma_chan dmachan;
        dma_addr_t hw_desq;
        struct mv_xor_v2_descriptor *hw_desq_virt;
        struct mv_xor_v2_sw_desc *sw_desq;
        int desc_size;
        unsigned int npendings;
        unsigned int hw_queue_idx;
        struct msi_desc *msi_desc;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
        int idx;
        struct dma_async_tx_descriptor async_tx;
        struct mv_xor_v2_descriptor hw_desc;
        struct list_head free_list;
};

/*
 * Set the data buffer addresses in a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
                                        struct mv_xor_v2_descriptor *desc,
                                        dma_addr_t src, int index)
{
        int arr_index = ((index >> 1) * 3);

        /*
         * Fill the buffer's addresses in the descriptor.
         *
         * The format of the buffer addresses for two consecutive
         * buffers X and X + 1:
         *
         *  First word:  Buffer-DX-Address-Low[31:0]
         *  Second word: Buffer-DX+1-Address-Low[31:0]
         *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
         *               DX-Buffer-Address-High[47:32] [15:0]
         */
        if ((index & 0x1) == 0) {
                desc->data_buff_addr[arr_index] = lower_32_bits(src);

                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
                desc->data_buff_addr[arr_index + 2] |=
                        upper_32_bits(src) & 0xFFFF;
        } else {
                desc->data_buff_addr[arr_index + 1] =
                        lower_32_bits(src);

                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
                desc->data_buff_addr[arr_index + 2] |=
                        (upper_32_bits(src) & 0xFFFF) << 16;
        }
}
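
/*
 * Worked example (illustrative, addresses chosen arbitrarily): for a
 * pair of 40-bit sources A0 = 0x12_34567890 (index 0) and
 * A1 = 0x34_89ABCDEF (index 1), arr_index is 0 for both and the
 * packed words become:
 *
 *	data_buff_addr[0] = 0x34567890	A0[31:0]
 *	data_buff_addr[1] = 0x89ABCDEF	A1[31:0]
 *	data_buff_addr[2] = 0x00340012	(A1[47:32] << 16) | A0[47:32]
 */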

/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
                                       int num_of_desc)
{
        /* write the number of new descriptors in the DESQ. */
        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
                                          int num_of_desc)
{
        /* write the number of descriptors to deallocate from the DESQ. */
        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
        writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

        return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
        u32 reg;

        /* Configure the descriptor-count threshold, and enable the timer */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
        reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
        reg |= MV_XOR_V2_DONE_IMSG_THRD;
        reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

        /* Configure the timer threshold */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
        reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
        reg |= MV_XOR_V2_TIMER_THRD;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}
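
/*
 * Net effect of the configuration above (a reading of the code, not
 * datasheet-verified): completion interrupts are coalesced, firing
 * once MV_XOR_V2_DONE_IMSG_THRD (0x14 = 20) descriptors have
 * completed, with the IMSG timer (threshold MV_XOR_V2_TIMER_THRD)
 * bounding the latency when fewer descriptors are pending.
 */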

static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
        struct mv_xor_v2_device *xor_dev = data;
        unsigned int ndescs;
        u32 reg;

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

        ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
                  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

        /* No descriptors to process */
        if (!ndescs)
                return IRQ_NONE;

        /* schedule a tasklet to handle descriptors callbacks */
        tasklet_schedule(&xor_dev->irq_tasklet);

        return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
        void *dest_hw_desc;
        dma_cookie_t cookie;
        struct mv_xor_v2_sw_desc *sw_desc =
                container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
        struct mv_xor_v2_device *xor_dev =
                container_of(tx->chan, struct mv_xor_v2_device, dmachan);

        dev_dbg(xor_dev->dmadev.dev,
                "%s sw_desc %p: async_tx %p\n",
                __func__, sw_desc, &sw_desc->async_tx);

        /* assign cookie */
        spin_lock_bh(&xor_dev->lock);
        cookie = dma_cookie_assign(tx);

        /* copy the HW descriptor from the SW descriptor to the DESQ */
        dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;

        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

        xor_dev->npendings++;
        xor_dev->hw_queue_idx++;
        if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
                xor_dev->hw_queue_idx = 0;

        spin_unlock_bh(&xor_dev->lock);

        return cookie;
}
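
/*
 * Typical dmaengine client sequence reaching this driver (a sketch,
 * not part of this file; "chan" is assumed to be a channel obtained
 * via dma_request_chan()):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		// mv_xor_v2_tx_submit()
 *	dma_async_issue_pending(chan);		// mv_xor_v2_issue_pending()
 */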

/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        bool found = false;

        /* Lock the channel */
        spin_lock_bh(&xor_dev->lock);

        if (list_empty(&xor_dev->free_sw_desc)) {
                spin_unlock_bh(&xor_dev->lock);
                /* schedule tasklet to free some descriptors */
                tasklet_schedule(&xor_dev->irq_tasklet);
                return NULL;
        }

        list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
                if (async_tx_test_ack(&sw_desc->async_tx)) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                spin_unlock_bh(&xor_dev->lock);
                return NULL;
        }

        list_del(&sw_desc->free_list);

        /* Release the channel */
        spin_unlock_bh(&xor_dev->lock);

        return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
                          dma_addr_t src, size_t len, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev;

        xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

        dev_dbg(xor_dev->dmadev.dev,
                "%s len: %zu src %pad dest %pad flags: %ld\n",
                __func__, len, &src, &dest, flags);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        sw_desc->async_tx.flags = flags;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the MEMCPY control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

        if (flags & DMA_PREP_INTERRUPT)
                hw_descriptor->desc_ctrl |= DESC_IOD;

        /* Set source address */
        hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
        hw_descriptor->fill_pattern_src_addr[1] =
                upper_32_bits(src) & 0xFFFF;

        /* Set destination address */
        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
        hw_descriptor->fill_pattern_src_addr[3] =
                upper_32_bits(dest) & 0xFFFF;

        /* Set buffer size */
        hw_descriptor->buff_size = len;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
                       unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);
        int i;

        if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
                return NULL;

        dev_dbg(xor_dev->dmadev.dev,
                "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
                __func__, src_cnt, len, &dest, flags);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        sw_desc->async_tx.flags = flags;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the XOR control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
        hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

        if (flags & DMA_PREP_INTERRUPT)
                hw_descriptor->desc_ctrl |= DESC_IOD;

        /* Set the data buffers */
        for (i = 0; i < src_cnt; i++)
                mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

        hw_descriptor->desc_ctrl |=
                src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

        /* Set destination address */
        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
        hw_descriptor->fill_pattern_src_addr[3] =
                upper_32_bits(dest) & 0xFFFF;

        /* Set buffer size */
        hw_descriptor->buff_size = len;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}
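
/*
 * Illustrative example, computed from the defines above: a 4-source
 * XOR descriptor ends up with
 *
 *	(DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT) | DESC_P_BUFFER_ENABLE |
 *	(4 << DESC_NUM_ACTIVE_D_BUF_SHIFT)
 *	= 0x60000000 | 0x00020000 | 0x01000000 = 0x61020000
 *
 * plus DESC_IOD when DMA_PREP_INTERRUPT is requested.
 */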

/*
 * Prepare a HW descriptor for an interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
        struct mv_xor_v2_sw_desc *sw_desc;
        struct mv_xor_v2_descriptor *hw_descriptor;
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);

        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
        if (!sw_desc)
                return NULL;

        /* set the HW descriptor */
        hw_descriptor = &sw_desc->hw_desc;

        /* save the SW descriptor ID to restore when operation is done */
        hw_descriptor->desc_id = sw_desc->idx;

        /* Set the INTERRUPT control word */
        hw_descriptor->desc_ctrl =
                DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
        hw_descriptor->desc_ctrl |= DESC_IOD;

        /* return the async tx descriptor */
        return &sw_desc->async_tx;
}

/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
        struct mv_xor_v2_device *xor_dev =
                container_of(chan, struct mv_xor_v2_device, dmachan);

        spin_lock_bh(&xor_dev->lock);

        /*
         * update the engine with the number of descriptors to
         * process
         */
        mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
        xor_dev->npendings = 0;

        spin_unlock_bh(&xor_dev->lock);
}

static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
                                 int *pending_ptr)
{
        u32 reg;

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

        /* get the next pending descriptor index */
        *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
                        MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

        /* return the number of descriptors pending handling */
        return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
                MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}
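
/*
 * Example decode (illustrative): reg = 0x00230005 yields a read
 * pointer of 0x23 ((reg >> 16) & 0x1FFF) and 5 pending descriptors
 * (reg & 0x7FFF).
 */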

/*
 * handle the descriptors after HW process
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
        struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
        int pending_ptr, num_of_pending, i;
        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

        /* get the pending descriptors parameters */
        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

        /* loop over free descriptors */
        for (i = 0; i < num_of_pending; i++) {
                struct mv_xor_v2_descriptor *next_pending_hw_desc =
                        xor_dev->hw_desq_virt + pending_ptr;

                /* get the SW descriptor related to the HW descriptor */
                next_pending_sw_desc =
                        &xor_dev->sw_desq[next_pending_hw_desc->desc_id];

                /* call the callback */
                if (next_pending_sw_desc->async_tx.cookie > 0) {
                        /*
                         * update the channel's completed cookie - no
                         * lock is required here; the IMSG threshold
                         * provides the synchronization
                         */
                        dma_cookie_complete(&next_pending_sw_desc->async_tx);

                        dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
                        dmaengine_desc_get_callback_invoke(
                                        &next_pending_sw_desc->async_tx, NULL);
                }

                dma_run_dependencies(&next_pending_sw_desc->async_tx);

                /* Lock the channel */
                spin_lock_bh(&xor_dev->lock);

                /* add the SW descriptor to the free descriptors list */
                list_add(&next_pending_sw_desc->free_list,
                         &xor_dev->free_sw_desc);

                /* Release the channel */
                spin_unlock_bh(&xor_dev->lock);

                /* increment the next descriptor */
                pending_ptr++;
                if (pending_ptr >= MV_XOR_V2_DESC_NUM)
                        pending_ptr = 0;
        }

        if (num_of_pending != 0) {
                /* free the descriptors */
                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
        }
}

/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

        writel(msg->address_lo,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
        writel(msg->address_hi & 0xFFFF,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
        writel(msg->data,
               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}
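
/*
 * Background note (general platform-MSI behaviour, not specific to
 * this file): the engine signals completion by writing msg->data to
 * the captured msg->address; the registers programmed above tell the
 * XOR unit where, and what, to write.
 */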

static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
        u32 reg;

        /* write the DESQ size to the DMA engine */
        writel(MV_XOR_V2_DESC_NUM,
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

        /* write the DESQ address to the DMA engine */
        writel(lower_32_bits(xor_dev->hw_desq),
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
        writel(upper_32_bits(xor_dev->hw_desq),
               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

        /*
         * This is a temporary solution, until we activate the
         * SMMU. Set the attributes for reading & writing data buffers
         * & descriptors to:
         *
         *  - OuterShareable - Snoops will be performed on CPU caches
         *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
         *    and Allocate
         */
        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
                MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
                MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

        /*
         * BW CTRL - set values to optimize the XOR performance:
         *
         *  - Set WrBurstLen & RdBurstLen - the unit will issue
         *    maximum of 256B write/read transactions.
         *  - Limit the number of outstanding write & read data
         *    (OBB/IBB) requests to the maximal value.
         */
        reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
               (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
                MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

        /* Disable the AXI timer feature */
        reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

        /* enable the DMA engine */
        writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

        return 0;
}

static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

        /* Set the stop bit to halt the XOR unit */
        writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

        return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

        mv_xor_v2_set_desc_size(xor_dev);
        mv_xor_v2_enable_imsg_thrd(xor_dev);
        mv_xor_v2_descq_init(xor_dev);

        return 0;
}

static int mv_xor_v2_probe(struct platform_device *pdev)
{
        struct mv_xor_v2_device *xor_dev;
        struct resource *res;
        int i, ret = 0;
        struct dma_device *dma_dev;
        struct mv_xor_v2_sw_desc *sw_desc;
        struct msi_desc *msi_desc;

        BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
                     MV_XOR_V2_EXT_DESC_SIZE);

        xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
        if (!xor_dev)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xor_dev->dma_base))
                return PTR_ERR(xor_dev->dma_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(xor_dev->glob_base))
                return PTR_ERR(xor_dev->glob_base);

        platform_set_drvdata(pdev, xor_dev);

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
        if (ret)
                return ret;

        xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
        if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
                if (!IS_ERR(xor_dev->reg_clk)) {
                        ret = clk_prepare_enable(xor_dev->reg_clk);
                        if (ret)
                                return ret;
                } else {
                        return PTR_ERR(xor_dev->reg_clk);
                }
        }

        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
                ret = -EPROBE_DEFER;
                goto disable_reg_clk;
        }
        if (!IS_ERR(xor_dev->clk)) {
                ret = clk_prepare_enable(xor_dev->clk);
                if (ret)
                        goto disable_reg_clk;
        }

        ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
                                             mv_xor_v2_set_msi_msg);
        if (ret)
                goto disable_clk;

        msi_desc = first_msi_entry(&pdev->dev);
        if (!msi_desc) {
                ret = -ENODEV;
                goto free_msi_irqs;
        }
        xor_dev->msi_desc = msi_desc;

        ret = devm_request_irq(&pdev->dev, msi_desc->irq,
                               mv_xor_v2_interrupt_handler, 0,
                               dev_name(&pdev->dev), xor_dev);
        if (ret)
                goto free_msi_irqs;

        tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
                     (unsigned long) xor_dev);

        xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

        dma_cookie_init(&xor_dev->dmachan);

        /*
         * allocate coherent memory for hardware descriptors
         * note: writecombine gives slightly better performance, but
         * requires that we explicitly flush the writes
         */
        xor_dev->hw_desq_virt =
                dma_alloc_coherent(&pdev->dev,
                                   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                                   &xor_dev->hw_desq, GFP_KERNEL);
        if (!xor_dev->hw_desq_virt) {
                ret = -ENOMEM;
                goto free_msi_irqs;
        }

        /* alloc memory for the SW descriptors */
        xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
                                        MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
                                        GFP_KERNEL);
        if (!xor_dev->sw_desq) {
                ret = -ENOMEM;
                goto free_hw_desq;
        }

        spin_lock_init(&xor_dev->lock);

        /* init the free SW descriptors list */
        INIT_LIST_HEAD(&xor_dev->free_sw_desc);

        /* add all SW descriptors to the free list */
        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
                struct mv_xor_v2_sw_desc *sw_desc =
                        xor_dev->sw_desq + i;
                sw_desc->idx = i;
                dma_async_tx_descriptor_init(&sw_desc->async_tx,
                                             &xor_dev->dmachan);
                sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
                async_tx_ack(&sw_desc->async_tx);

                list_add(&sw_desc->free_list,
                         &xor_dev->free_sw_desc);
        }

        dma_dev = &xor_dev->dmadev;

        /* set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_XOR, dma_dev->cap_mask);
        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

        /* init dma link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        /* set base routines */
        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
        dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
        dma_dev->max_xor = 8;
        dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

        xor_dev->dmachan.device = dma_dev;

        list_add_tail(&xor_dev->dmachan.device_node,
                      &dma_dev->channels);

        mv_xor_v2_enable_imsg_thrd(xor_dev);

        mv_xor_v2_descq_init(xor_dev);

        ret = dma_async_device_register(dma_dev);
        if (ret)
                goto free_hw_desq;

        dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

        return 0;

free_hw_desq:
        dma_free_coherent(&pdev->dev,
                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                          xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
        platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
        clk_disable_unprepare(xor_dev->clk);
disable_reg_clk:
        clk_disable_unprepare(xor_dev->reg_clk);
        return ret;
}

static int mv_xor_v2_remove(struct platform_device *pdev)
{
        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

        dma_async_device_unregister(&xor_dev->dmadev);

        dma_free_coherent(&pdev->dev,
                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
                          xor_dev->hw_desq_virt, xor_dev->hw_desq);

        devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);

        platform_msi_domain_free_irqs(&pdev->dev);

        tasklet_kill(&xor_dev->irq_tasklet);

        clk_disable_unprepare(xor_dev->clk);

        return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_v2_dt_ids[] = {
        { .compatible = "marvell,xor-v2", },
        {},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
#endif

static struct platform_driver mv_xor_v2_driver = {
        .probe          = mv_xor_v2_probe,
        .suspend        = mv_xor_v2_suspend,
        .resume         = mv_xor_v2_resume,
        .remove         = mv_xor_v2_remove,
        .driver         = {
                .name   = "mv_xor_v2",
                .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
        },
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");