linux/drivers/dma/mv_xor_v2.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2015-2016 Marvell International Ltd.
   4 *
   5 */
   6
   7#include <linux/clk.h>
   8#include <linux/dma-mapping.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/module.h>
  12#include <linux/msi.h>
  13#include <linux/of.h>
  14#include <linux/of_irq.h>
  15#include <linux/platform_device.h>
  16#include <linux/spinlock.h>
  17
  18#include "dmaengine.h"
  19
  20/* DMA Engine Registers */
  21#define MV_XOR_V2_DMA_DESQ_BALR_OFF                     0x000
  22#define MV_XOR_V2_DMA_DESQ_BAHR_OFF                     0x004
  23#define MV_XOR_V2_DMA_DESQ_SIZE_OFF                     0x008
  24#define MV_XOR_V2_DMA_DESQ_DONE_OFF                     0x00C
  25#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK          0x7FFF
  26#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT         0
  27#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK         0x1FFF
  28#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT        16
  29#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF                   0x010
  30#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK            0x3F3F
  31#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE       0x202
  32#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE             0x3C3C
  33#define MV_XOR_V2_DMA_IMSG_CDAT_OFF                     0x014
  34#define MV_XOR_V2_DMA_IMSG_THRD_OFF                     0x018
  35#define   MV_XOR_V2_DMA_IMSG_THRD_MASK                  0x7FFF
  36#define   MV_XOR_V2_DMA_IMSG_TIMER_EN                   BIT(18)
  37#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF                   0x01C
  38  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
  39#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF                    0x04C
  40#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK           0xFFFF
  41#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT          16
  42#define MV_XOR_V2_DMA_IMSG_BALR_OFF                     0x050
  43#define MV_XOR_V2_DMA_IMSG_BAHR_OFF                     0x054
  44#define MV_XOR_V2_DMA_DESQ_CTRL_OFF                     0x100
  45#define   MV_XOR_V2_DMA_DESQ_CTRL_32B                   1
  46#define   MV_XOR_V2_DMA_DESQ_CTRL_128B                  7
  47#define MV_XOR_V2_DMA_DESQ_STOP_OFF                     0x800
  48#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF                  0x804
  49#define MV_XOR_V2_DMA_DESQ_ADD_OFF                      0x808
  50#define MV_XOR_V2_DMA_IMSG_TMOT                         0x810
  51#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK            0x1FFF
  52
  53/* XOR Global registers */
  54#define MV_XOR_V2_GLOB_BW_CTRL                          0x4
  55#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT      0
  56#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL        64
  57#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT      8
  58#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL        8
  59#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT     12
  60#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL       4
  61#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT     16
  62#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL       4
  63#define MV_XOR_V2_GLOB_PAUSE                            0x014
  64#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL         0x8
  65#define MV_XOR_V2_GLOB_SYS_INT_CAUSE                    0x200
  66#define MV_XOR_V2_GLOB_SYS_INT_MASK                     0x204
  67#define MV_XOR_V2_GLOB_MEM_INT_CAUSE                    0x220
  68#define MV_XOR_V2_GLOB_MEM_INT_MASK                     0x224
  69
  70#define MV_XOR_V2_MIN_DESC_SIZE                         32
  71#define MV_XOR_V2_EXT_DESC_SIZE                         128
  72
  73#define MV_XOR_V2_DESC_RESERVED_SIZE                    12
  74#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE                 12
  75
  76#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF                8
  77
  78/*
  79 * Descriptors queue size. With 32 bytes descriptors, up to 2^14
  80 * descriptors are allowed, with 128 bytes descriptors, up to 2^12
  81 * descriptors are allowed. This driver uses 128 bytes descriptors,
  82 * but experimentation has shown that a set of 1024 descriptors is
  83 * sufficient to reach a good level of performance.
  84 */
  85#define MV_XOR_V2_DESC_NUM                              1024
  86
  87/*
  88 * Threshold values for descriptors and timeout, determined by
  89 * experimentation as giving a good level of performance.
  90 */
  91#define MV_XOR_V2_DONE_IMSG_THRD  0x14
  92#define MV_XOR_V2_TIMER_THRD      0xB0
  93
  94/**
  95 * struct mv_xor_v2_descriptor - DMA HW descriptor
  96 * @desc_id: used by S/W and is not affected by H/W.
  97 * @flags: error and status flags
  98 * @crc32_result: CRC32 calculation result
  99 * @desc_ctrl: operation mode and control flags
 100 * @buff_size: amount of bytes to be processed
 101 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 102 * AW-Attributes
 103 * @data_buff_addr: Source (and might be RAID6 destination)
 104 * addresses of data buffers in RAID5 and RAID6
 105 * @reserved: reserved
 106 */
 107struct mv_xor_v2_descriptor {
 108        u16 desc_id;
 109        u16 flags;
 110        u32 crc32_result;
 111        u32 desc_ctrl;
 112
 113        /* Definitions for desc_ctrl */
 114#define DESC_NUM_ACTIVE_D_BUF_SHIFT     22
 115#define DESC_OP_MODE_SHIFT              28
 116#define DESC_OP_MODE_NOP                0       /* Idle operation */
 117#define DESC_OP_MODE_MEMCPY             1       /* Pure-DMA operation */
 118#define DESC_OP_MODE_MEMSET             2       /* Mem-Fill operation */
 119#define DESC_OP_MODE_MEMINIT            3       /* Mem-Init operation */
 120#define DESC_OP_MODE_MEM_COMPARE        4       /* Mem-Compare operation */
 121#define DESC_OP_MODE_CRC32              5       /* CRC32 calculation */
 122#define DESC_OP_MODE_XOR                6       /* RAID5 (XOR) operation */
 123#define DESC_OP_MODE_RAID6              7       /* RAID6 P&Q-generation */
 124#define DESC_OP_MODE_RAID6_REC          8       /* RAID6 Recovery */
 125#define DESC_Q_BUFFER_ENABLE            BIT(16)
 126#define DESC_P_BUFFER_ENABLE            BIT(17)
 127#define DESC_IOD                        BIT(27)
 128
 129        u32 buff_size;
 130        u32 fill_pattern_src_addr[4];
 131        u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
 132        u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
 133};
 134
 135/**
 136 * struct mv_xor_v2_device - implements a xor device
 137 * @lock: lock for the engine
 138 * @clk: reference to the 'core' clock
 139 * @reg_clk: reference to the 'reg' clock
 140 * @dma_base: memory mapped DMA register base
 141 * @glob_base: memory mapped global register base
 142 * @irq_tasklet: tasklet used for IRQ handling call-backs
 143 * @free_sw_desc: linked list of free SW descriptors
 144 * @dmadev: dma device
 145 * @dmachan: dma channel
 146 * @hw_desq: HW descriptors queue
 147 * @hw_desq_virt: virtual address of DESCQ
 148 * @sw_desq: SW descriptors queue
 149 * @desc_size: HW descriptor size
  150 * @npendings: number of pending descriptors (for which tx_submit has
  151 * been called, but not yet issue_pending)
  152 * @hw_queue_idx: HW queue index
  153 * @msi_desc: local interrupt descriptor information
  154 */
 155struct mv_xor_v2_device {
 156        spinlock_t lock;
 157        void __iomem *dma_base;
 158        void __iomem *glob_base;
 159        struct clk *clk;
 160        struct clk *reg_clk;
 161        struct tasklet_struct irq_tasklet;
 162        struct list_head free_sw_desc;
 163        struct dma_device dmadev;
 164        struct dma_chan dmachan;
 165        dma_addr_t hw_desq;
 166        struct mv_xor_v2_descriptor *hw_desq_virt;
 167        struct mv_xor_v2_sw_desc *sw_desq;
 168        int desc_size;
 169        unsigned int npendings;
 170        unsigned int hw_queue_idx;
 171        struct msi_desc *msi_desc;
 172};
 173
 174/**
 175 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 176 * @idx: descriptor index
 177 * @async_tx: support for the async_tx api
  178 * @hw_desc: associated HW descriptor
  179 * @free_list: node of the free SW descriptors list
  180 */
 181struct mv_xor_v2_sw_desc {
 182        int idx;
 183        struct dma_async_tx_descriptor async_tx;
 184        struct mv_xor_v2_descriptor hw_desc;
 185        struct list_head free_list;
 186};
 187
 188/*
  189 * Fill the data buffer addresses into a HW descriptor
 190 */
 191static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
 192                                        struct mv_xor_v2_descriptor *desc,
 193                                        dma_addr_t src, int index)
 194{
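             /* Each pair of buffers occupies three 32-bit words, hence the step of 3 */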
 195        int arr_index = ((index >> 1) * 3);
 196
 197        /*
 198         * Fill the buffer's addresses to the descriptor.
 199         *
 200         * The format of the buffers address for 2 sequential buffers
 201         * X and X + 1:
 202         *
 203         *  First word:  Buffer-DX-Address-Low[31:0]
 204         *  Second word: Buffer-DX+1-Address-Low[31:0]
 205         *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16]
 206         *               DX-Buffer-Address-High[47:32] [15:0]
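              *
              * For example (hypothetical addresses), with buffers
              * D0 = 0x12_3456_7000 and D1 = 0x34_89AB_C000:
              *
              *  First word:  0x34567000
              *  Second word: 0x89ABC000
              *  Third word:  0x00340012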
 207         */
 208        if ((index & 0x1) == 0) {
 209                desc->data_buff_addr[arr_index] = lower_32_bits(src);
 210
 211                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
 212                desc->data_buff_addr[arr_index + 2] |=
 213                        upper_32_bits(src) & 0xFFFF;
 214        } else {
 215                desc->data_buff_addr[arr_index + 1] =
 216                        lower_32_bits(src);
 217
 218                desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
 219                desc->data_buff_addr[arr_index + 2] |=
 220                        (upper_32_bits(src) & 0xFFFF) << 16;
 221        }
 222}
 223
 224/*
 225 * notify the engine of new descriptors, and update the available index.
 226 */
 227static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
 228                                       int num_of_desc)
 229{
 230        /* write the number of new descriptors in the DESQ. */
 231        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
 232}
 233
 234/*
 235 * free HW descriptors
 236 */
 237static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
 238                                          int num_of_desc)
 239{
  240        /* write the number of descriptors to free from the DESQ. */
 241        writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
 242}
 243
 244/*
 245 * Set descriptor size
 246 * Return the HW descriptor size in bytes
 247 */
 248static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
 249{
 250        writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
 251               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
 252
 253        return MV_XOR_V2_EXT_DESC_SIZE;
 254}
 255
 256/*
  257 * Set the IMSG (interrupt message) coalescing thresholds: descriptor count and timer
 258 */
 259static inline
 260void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
 261{
 262        u32 reg;
 263
  264        /* Configure the completed-descriptor count threshold and enable the timer */
 265        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
 266        reg &= ~MV_XOR_V2_DMA_IMSG_THRD_MASK;
 267        reg |= MV_XOR_V2_DONE_IMSG_THRD;
 268        reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
 269        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
 270
 271        /* Configure Timer Threshold */
 272        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
 273        reg &= ~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK;
 274        reg |= MV_XOR_V2_TIMER_THRD;
 275        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
 276}
 277
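     /*
      * MSI interrupt handler: read how many descriptors the engine has
      * completed and, if there are any, defer their processing to the
      * tasklet.
      */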
 278static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 279{
 280        struct mv_xor_v2_device *xor_dev = data;
 281        unsigned int ndescs;
 282        u32 reg;
 283
 284        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
 285
 286        ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
 287                  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
 288
 289        /* No descriptors to process */
 290        if (!ndescs)
 291                return IRQ_NONE;
 292
 293        /* schedule a tasklet to handle descriptors callbacks */
 294        tasklet_schedule(&xor_dev->irq_tasklet);
 295
 296        return IRQ_HANDLED;
 297}
 298
 299/*
 300 * submit a descriptor to the DMA engine
 301 */
 302static dma_cookie_t
 303mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 304{
 305        void *dest_hw_desc;
 306        dma_cookie_t cookie;
 307        struct mv_xor_v2_sw_desc *sw_desc =
 308                container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
 309        struct mv_xor_v2_device *xor_dev =
 310                container_of(tx->chan, struct mv_xor_v2_device, dmachan);
 311
 312        dev_dbg(xor_dev->dmadev.dev,
 313                "%s sw_desc %p: async_tx %p\n",
 314                __func__, sw_desc, &sw_desc->async_tx);
 315
  316        /* assign cookie */
 317        spin_lock_bh(&xor_dev->lock);
 318        cookie = dma_cookie_assign(tx);
 319
 320        /* copy the HW descriptor from the SW descriptor to the DESQ */
 321        dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 322
 323        memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 324
 325        xor_dev->npendings++;
 326        xor_dev->hw_queue_idx++;
 327        if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
 328                xor_dev->hw_queue_idx = 0;
 329
 330        spin_unlock_bh(&xor_dev->lock);
 331
 332        return cookie;
 333}
 334
 335/*
 336 * Prepare a SW descriptor
 337 */
 338static struct mv_xor_v2_sw_desc *
 339mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
 340{
 341        struct mv_xor_v2_sw_desc *sw_desc;
 342        bool found = false;
 343
 344        /* Lock the channel */
 345        spin_lock_bh(&xor_dev->lock);
 346
 347        if (list_empty(&xor_dev->free_sw_desc)) {
 348                spin_unlock_bh(&xor_dev->lock);
 349                /* schedule tasklet to free some descriptors */
 350                tasklet_schedule(&xor_dev->irq_tasklet);
 351                return NULL;
 352        }
 353
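             /* Only reuse descriptors that the client has already acked */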
 354        list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
 355                if (async_tx_test_ack(&sw_desc->async_tx)) {
 356                        found = true;
 357                        break;
 358                }
 359        }
 360
 361        if (!found) {
 362                spin_unlock_bh(&xor_dev->lock);
 363                return NULL;
 364        }
 365
 366        list_del(&sw_desc->free_list);
 367
 368        /* Release the channel */
 369        spin_unlock_bh(&xor_dev->lock);
 370
 371        return sw_desc;
 372}
 373
 374/*
 375 * Prepare a HW descriptor for a memcpy operation
 376 */
 377static struct dma_async_tx_descriptor *
 378mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
 379                          dma_addr_t src, size_t len, unsigned long flags)
 380{
 381        struct mv_xor_v2_sw_desc *sw_desc;
 382        struct mv_xor_v2_descriptor *hw_descriptor;
 383        struct mv_xor_v2_device *xor_dev;
 384
 385        xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
 386
 387        dev_dbg(xor_dev->dmadev.dev,
 388                "%s len: %zu src %pad dest %pad flags: %ld\n",
 389                __func__, len, &src, &dest, flags);
 390
 391        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
 392        if (!sw_desc)
 393                return NULL;
 394
 395        sw_desc->async_tx.flags = flags;
 396
 397        /* set the HW descriptor */
 398        hw_descriptor = &sw_desc->hw_desc;
 399
 400        /* save the SW descriptor ID to restore when operation is done */
 401        hw_descriptor->desc_id = sw_desc->idx;
 402
 403        /* Set the MEMCPY control word */
 404        hw_descriptor->desc_ctrl =
 405                DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
 406
 407        if (flags & DMA_PREP_INTERRUPT)
 408                hw_descriptor->desc_ctrl |= DESC_IOD;
 409
 410        /* Set source address */
 411        hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
 412        hw_descriptor->fill_pattern_src_addr[1] =
 413                upper_32_bits(src) & 0xFFFF;
 414
 415        /* Set Destination address */
 416        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
 417        hw_descriptor->fill_pattern_src_addr[3] =
 418                upper_32_bits(dest) & 0xFFFF;
 419
 420        /* Set buffers size */
 421        hw_descriptor->buff_size = len;
 422
 423        /* return the async tx descriptor */
 424        return &sw_desc->async_tx;
 425}
 426
 427/*
 428 * Prepare a HW descriptor for a XOR operation
 429 */
 430static struct dma_async_tx_descriptor *
 431mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 432                       unsigned int src_cnt, size_t len, unsigned long flags)
 433{
 434        struct mv_xor_v2_sw_desc *sw_desc;
 435        struct mv_xor_v2_descriptor *hw_descriptor;
 436        struct mv_xor_v2_device *xor_dev =
 437                container_of(chan, struct mv_xor_v2_device, dmachan);
 438        int i;
 439
 440        if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
 441                return NULL;
 442
 443        dev_dbg(xor_dev->dmadev.dev,
 444                "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
 445                __func__, src_cnt, len, &dest, flags);
 446
 447        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
 448        if (!sw_desc)
 449                return NULL;
 450
 451        sw_desc->async_tx.flags = flags;
 452
 453        /* set the HW descriptor */
 454        hw_descriptor = &sw_desc->hw_desc;
 455
 456        /* save the SW descriptor ID to restore when operation is done */
 457        hw_descriptor->desc_id = sw_desc->idx;
 458
 459        /* Set the XOR control word */
 460        hw_descriptor->desc_ctrl =
 461                DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
 462        hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
 463
 464        if (flags & DMA_PREP_INTERRUPT)
 465                hw_descriptor->desc_ctrl |= DESC_IOD;
 466
 467        /* Set the data buffers */
 468        for (i = 0; i < src_cnt; i++)
 469                mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
 470
 471        hw_descriptor->desc_ctrl |=
 472                src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
 473
 474        /* Set Destination address */
 475        hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
 476        hw_descriptor->fill_pattern_src_addr[3] =
 477                upper_32_bits(dest) & 0xFFFF;
 478
 479        /* Set buffers size */
 480        hw_descriptor->buff_size = len;
 481
 482        /* return the async tx descriptor */
 483        return &sw_desc->async_tx;
 484}
 485
 486/*
 487 * Prepare a HW descriptor for interrupt operation.
 488 */
 489static struct dma_async_tx_descriptor *
 490mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 491{
 492        struct mv_xor_v2_sw_desc *sw_desc;
 493        struct mv_xor_v2_descriptor *hw_descriptor;
 494        struct mv_xor_v2_device *xor_dev =
 495                container_of(chan, struct mv_xor_v2_device, dmachan);
 496
 497        sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
 498        if (!sw_desc)
 499                return NULL;
 500
 501        /* set the HW descriptor */
 502        hw_descriptor = &sw_desc->hw_desc;
 503
 504        /* save the SW descriptor ID to restore when operation is done */
 505        hw_descriptor->desc_id = sw_desc->idx;
 506
 507        /* Set the INTERRUPT control word */
 508        hw_descriptor->desc_ctrl =
 509                DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
 510        hw_descriptor->desc_ctrl |= DESC_IOD;
 511
 512        /* return the async tx descriptor */
 513        return &sw_desc->async_tx;
 514}
 515
 516/*
 517 * push pending transactions to hardware
 518 */
 519static void mv_xor_v2_issue_pending(struct dma_chan *chan)
 520{
 521        struct mv_xor_v2_device *xor_dev =
 522                container_of(chan, struct mv_xor_v2_device, dmachan);
 523
 524        spin_lock_bh(&xor_dev->lock);
 525
 526        /*
 527         * update the engine with the number of descriptors to
 528         * process
 529         */
 530        mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
 531        xor_dev->npendings = 0;
 532
 533        spin_unlock_bh(&xor_dev->lock);
 534}
 535
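     /*
      * Read the DESQ "done" register: return the number of completed
      * descriptors and store the index of the first one to handle in
      * *pending_ptr.
      */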
 536static inline
 537int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
 538                                 int *pending_ptr)
 539{
 540        u32 reg;
 541
 542        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
 543
 544        /* get the next pending descriptor index */
 545        *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
 546                        MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
 547
  548        /* get the number of descriptors pending handling */
 549        return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
 550                MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
 551}
 552
 553/*
 554 * handle the descriptors after HW process
 555 */
 556static void mv_xor_v2_tasklet(struct tasklet_struct *t)
 557{
 558        struct mv_xor_v2_device *xor_dev = from_tasklet(xor_dev, t,
 559                                                        irq_tasklet);
 560        int pending_ptr, num_of_pending, i;
 561        struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
 562
 563        dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
 564
 565        /* get the pending descriptors parameters */
 566        num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
 567
  568        /* loop over the completed descriptors */
 569        for (i = 0; i < num_of_pending; i++) {
 570                struct mv_xor_v2_descriptor *next_pending_hw_desc =
 571                        xor_dev->hw_desq_virt + pending_ptr;
 572
 573                /* get the SW descriptor related to the HW descriptor */
 574                next_pending_sw_desc =
 575                        &xor_dev->sw_desq[next_pending_hw_desc->desc_id];
 576
 577                /* call the callback */
 578                if (next_pending_sw_desc->async_tx.cookie > 0) {
 579                        /*
  580                 * update the channel's completed cookie - no
  581                 * lock is required; the IMSG threshold provides
  582                 * the locking
 583                         */
 584                        dma_cookie_complete(&next_pending_sw_desc->async_tx);
 585
 586                        dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
 587                        dmaengine_desc_get_callback_invoke(
 588                                        &next_pending_sw_desc->async_tx, NULL);
 589                }
 590
 591                dma_run_dependencies(&next_pending_sw_desc->async_tx);
 592
 593                /* Lock the channel */
 594                spin_lock_bh(&xor_dev->lock);
 595
 596                /* add the SW descriptor to the free descriptors list */
 597                list_add(&next_pending_sw_desc->free_list,
 598                         &xor_dev->free_sw_desc);
 599
 600                /* Release the channel */
 601                spin_unlock_bh(&xor_dev->lock);
 602
 603                /* increment the next descriptor */
 604                pending_ptr++;
 605                if (pending_ptr >= MV_XOR_V2_DESC_NUM)
 606                        pending_ptr = 0;
 607        }
 608
 609        if (num_of_pending != 0) {
  610                /* free the descriptors */
 611                mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
 612        }
 613}
 614
 615/*
  616 * Set DMA Interrupt-message (IMSG) parameters
 617 */
 618static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 619{
 620        struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
 621
 622        writel(msg->address_lo,
 623               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
 624        writel(msg->address_hi & 0xFFFF,
 625               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
 626        writel(msg->data,
 627               xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
 628}
 629
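     /*
      * Program the descriptors queue (DESQ): size and base address, AXI
      * read/write attributes, bandwidth control and AXI timer disable,
      * then clear the STOP bit so the engine starts running.
      */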
 630static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
 631{
 632        u32 reg;
 633
 634        /* write the DESQ size to the DMA engine */
 635        writel(MV_XOR_V2_DESC_NUM,
 636               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
 637
  638        /* write the DESQ address to the DMA engine */
 639        writel(lower_32_bits(xor_dev->hw_desq),
 640               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
 641        writel(upper_32_bits(xor_dev->hw_desq),
 642               xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 643
 644        /*
 645         * This is a temporary solution, until we activate the
 646         * SMMU. Set the attributes for reading & writing data buffers
 647         * & descriptors to:
 648         *
 649         *  - OuterShareable - Snoops will be performed on CPU caches
 650         *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
 651         *    and Allocate
 652         */
 653        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
 654        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
 655        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
 656                MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
 657        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
 658
 659        reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
 660        reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
 661        reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
 662                MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
 663        writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
 664
  665        /* BW CTRL - set values to optimize the XOR performance:
  666         *
  667         *  - Set WrBurstLen & RdBurstLen - the unit will issue a
  668         *    maximum of 256B write/read transactions.
  669         *  - Limit the number of outstanding write & read data
  670         *    (OBB/IBB) requests to the maximal value.
  671         */
 672        reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
 673                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
 674               (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL  <<
 675                MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
 676               (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
 677                MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
 678               (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
 679                MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
 680        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
 681
 682        /* Disable the AXI timer feature */
 683        reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 684        reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
 685        writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
 686
 687        /* enable the DMA engine */
 688        writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
 689
 690        return 0;
 691}
 692
 693static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
 694{
 695        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
 696
  697        /* Set the DESQ stop bit to halt the XOR unit. */
 698        writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
 699
 700        return 0;
 701}
 702
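     /*
      * Re-program the state lost across suspend: HW descriptor size, IMSG
      * coalescing thresholds and the DESQ registers; mv_xor_v2_descq_init()
      * restarts the engine by clearing the STOP bit.
      */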
 703static int mv_xor_v2_resume(struct platform_device *dev)
 704{
 705        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
 706
 707        mv_xor_v2_set_desc_size(xor_dev);
 708        mv_xor_v2_enable_imsg_thrd(xor_dev);
 709        mv_xor_v2_descq_init(xor_dev);
 710
 711        return 0;
 712}
 713
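     /*
      * Probe: map the DMA and global register windows, set a 40-bit DMA
      * mask, enable the register and core clocks when present, allocate a
      * single platform MSI, set up the HW/SW descriptor rings and register
      * a one-channel dmaengine device with MEMCPY, XOR and INTERRUPT
      * capabilities.
      */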
 714static int mv_xor_v2_probe(struct platform_device *pdev)
 715{
 716        struct mv_xor_v2_device *xor_dev;
 717        struct resource *res;
 718        int i, ret = 0;
 719        struct dma_device *dma_dev;
 720        struct mv_xor_v2_sw_desc *sw_desc;
 721        struct msi_desc *msi_desc;
 722
 723        BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
 724                     MV_XOR_V2_EXT_DESC_SIZE);
 725
 726        xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
 727        if (!xor_dev)
 728                return -ENOMEM;
 729
 730        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 731        xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
 732        if (IS_ERR(xor_dev->dma_base))
 733                return PTR_ERR(xor_dev->dma_base);
 734
 735        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 736        xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
 737        if (IS_ERR(xor_dev->glob_base))
 738                return PTR_ERR(xor_dev->glob_base);
 739
 740        platform_set_drvdata(pdev, xor_dev);
 741
 742        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
 743        if (ret)
 744                return ret;
 745
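             /* The "reg" clock is optional: -ENOENT means it is simply not provided */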
 746        xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
 747        if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
 748                if (!IS_ERR(xor_dev->reg_clk)) {
 749                        ret = clk_prepare_enable(xor_dev->reg_clk);
 750                        if (ret)
 751                                return ret;
 752                } else {
 753                        return PTR_ERR(xor_dev->reg_clk);
 754                }
 755        }
 756
 757        xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
 758        if (PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
  759                ret = -EPROBE_DEFER;
 760                goto disable_reg_clk;
 761        }
 762        if (!IS_ERR(xor_dev->clk)) {
 763                ret = clk_prepare_enable(xor_dev->clk);
 764                if (ret)
 765                        goto disable_reg_clk;
 766        }
 767
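             /*
              * Completion is signalled through a single platform MSI; the
              * composed message is written to the IMSG registers by
              * mv_xor_v2_set_msi_msg().
              */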
 768        ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
 769                                             mv_xor_v2_set_msi_msg);
 770        if (ret)
 771                goto disable_clk;
 772
 773        msi_desc = first_msi_entry(&pdev->dev);
 774        if (!msi_desc) {
 775                ret = -ENODEV;
 776                goto free_msi_irqs;
 777        }
 778        xor_dev->msi_desc = msi_desc;
 779
 780        ret = devm_request_irq(&pdev->dev, msi_desc->irq,
 781                               mv_xor_v2_interrupt_handler, 0,
 782                               dev_name(&pdev->dev), xor_dev);
 783        if (ret)
 784                goto free_msi_irqs;
 785
 786        tasklet_setup(&xor_dev->irq_tasklet, mv_xor_v2_tasklet);
 787
 788        xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
 789
 790        dma_cookie_init(&xor_dev->dmachan);
 791
 792        /*
 793         * allocate coherent memory for hardware descriptors
 794         * note: writecombine gives slightly better performance, but
 795         * requires that we explicitly flush the writes
 796         */
 797        xor_dev->hw_desq_virt =
 798                dma_alloc_coherent(&pdev->dev,
 799                                   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
 800                                   &xor_dev->hw_desq, GFP_KERNEL);
 801        if (!xor_dev->hw_desq_virt) {
 802                ret = -ENOMEM;
 803                goto free_msi_irqs;
 804        }
 805
 806        /* alloc memory for the SW descriptors */
 807        xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
 808                                        MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
 809                                        GFP_KERNEL);
 810        if (!xor_dev->sw_desq) {
 811                ret = -ENOMEM;
 812                goto free_hw_desq;
 813        }
 814
 815        spin_lock_init(&xor_dev->lock);
 816
 817        /* init the free SW descriptors list */
 818        INIT_LIST_HEAD(&xor_dev->free_sw_desc);
 819
 820        /* add all SW descriptors to the free list */
 821        for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
 822                struct mv_xor_v2_sw_desc *sw_desc =
 823                        xor_dev->sw_desq + i;
 824                sw_desc->idx = i;
 825                dma_async_tx_descriptor_init(&sw_desc->async_tx,
 826                                             &xor_dev->dmachan);
 827                sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
 828                async_tx_ack(&sw_desc->async_tx);
 829
 830                list_add(&sw_desc->free_list,
 831                         &xor_dev->free_sw_desc);
 832        }
 833
 834        dma_dev = &xor_dev->dmadev;
 835
 836        /* set DMA capabilities */
 837        dma_cap_zero(dma_dev->cap_mask);
 838        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
 839        dma_cap_set(DMA_XOR, dma_dev->cap_mask);
 840        dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
 841
 842        /* init dma link list */
 843        INIT_LIST_HEAD(&dma_dev->channels);
 844
 845        /* set base routines */
 846        dma_dev->device_tx_status = dma_cookie_status;
 847        dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
 848        dma_dev->dev = &pdev->dev;
 849
 850        dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
 851        dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
 852        dma_dev->max_xor = 8;
 853        dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;
 854
 855        xor_dev->dmachan.device = dma_dev;
 856
 857        list_add_tail(&xor_dev->dmachan.device_node,
 858                      &dma_dev->channels);
 859
 860        mv_xor_v2_enable_imsg_thrd(xor_dev);
 861
 862        mv_xor_v2_descq_init(xor_dev);
 863
 864        ret = dma_async_device_register(dma_dev);
 865        if (ret)
 866                goto free_hw_desq;
 867
 868        dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");
 869
 870        return 0;
 871
 872free_hw_desq:
 873        dma_free_coherent(&pdev->dev,
 874                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
 875                          xor_dev->hw_desq_virt, xor_dev->hw_desq);
 876free_msi_irqs:
 877        platform_msi_domain_free_irqs(&pdev->dev);
 878disable_clk:
 879        clk_disable_unprepare(xor_dev->clk);
 880disable_reg_clk:
 881        clk_disable_unprepare(xor_dev->reg_clk);
 882        return ret;
 883}
 884
 885static int mv_xor_v2_remove(struct platform_device *pdev)
 886{
 887        struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
 888
 889        dma_async_device_unregister(&xor_dev->dmadev);
 890
 891        dma_free_coherent(&pdev->dev,
 892                          xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
 893                          xor_dev->hw_desq_virt, xor_dev->hw_desq);
 894
 895        devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
 896
 897        platform_msi_domain_free_irqs(&pdev->dev);
 898
 899        tasklet_kill(&xor_dev->irq_tasklet);
 900
 901        clk_disable_unprepare(xor_dev->clk);
 902
 903        return 0;
 904}
 905
 906#ifdef CONFIG_OF
 907static const struct of_device_id mv_xor_v2_dt_ids[] = {
 908        { .compatible = "marvell,xor-v2", },
 909        {},
 910};
 911MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
 912#endif
 913
 914static struct platform_driver mv_xor_v2_driver = {
 915        .probe          = mv_xor_v2_probe,
 916        .suspend        = mv_xor_v2_suspend,
 917        .resume         = mv_xor_v2_resume,
 918        .remove         = mv_xor_v2_remove,
 919        .driver         = {
 920                .name   = "mv_xor_v2",
 921                .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
 922        },
 923};
 924
 925module_platform_driver(mv_xor_v2_driver);
 926
 927MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
 928MODULE_LICENSE("GPL");
 929