linux/drivers/dma/fsl-qdma.c
// SPDX-License-Identifier: GPL-2.0
// Copyright 2014-2015 Freescale
// Copyright 2018 NXP

/*
 * Driver for NXP Layerscape Queue Direct Memory Access Controller
 *
 * Author:
 *  Wen He <wen.he_1@nxp.com>
 *  Jiaheng Fan <jiaheng.fan@nxp.com>
 *
 */
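
/*
 * Minimal usage sketch of a generic dmaengine memcpy client (not part of
 * this driver; error handling omitted):
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, NULL, NULL);
 *      desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *                                       DMA_PREP_INTERRUPT);
 *      cookie = dmaengine_submit(desc);
 *      dma_async_issue_pending(chan);
 */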

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "virt-dma.h"
#include "fsldma.h"

/* Register related definition */
#define FSL_QDMA_DMR                    0x0
#define FSL_QDMA_DSR                    0x4
#define FSL_QDMA_DEIER                  0xe00
#define FSL_QDMA_DEDR                   0xe04
#define FSL_QDMA_DECFDW0R               0xe10
#define FSL_QDMA_DECFDW1R               0xe14
#define FSL_QDMA_DECFDW2R               0xe18
#define FSL_QDMA_DECFDW3R               0xe1c
#define FSL_QDMA_DECFQIDR               0xe30
#define FSL_QDMA_DECBR                  0xe34

#define FSL_QDMA_BCQMR(x)               (0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x)               (0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x)       (0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x)        (0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x)       (0xd0 + 0x100 * (x))
#define FSL_QDMA_BCQEPA_SADDR(x)        (0xd4 + 0x100 * (x))
#define FSL_QDMA_BCQIER(x)              (0xe0 + 0x100 * (x))
#define FSL_QDMA_BCQIDR(x)              (0xe4 + 0x100 * (x))

#define FSL_QDMA_SQDPAR                 0x80c
#define FSL_QDMA_SQEPAR                 0x814
#define FSL_QDMA_BSQMR                  0x800
#define FSL_QDMA_BSQSR                  0x804
#define FSL_QDMA_BSQICR                 0x828
#define FSL_QDMA_CQMR                   0xa00
#define FSL_QDMA_CQDSCR1                0xa08
#define FSL_QDMA_CQDSCR2                0xa0c
#define FSL_QDMA_CQIER                  0xa10
#define FSL_QDMA_CQEDR                  0xa14
#define FSL_QDMA_SQCCMR                 0xa20

/* Registers for bit and genmask */
#define FSL_QDMA_CQIDR_SQT              BIT(15)
#define QDMA_CCDF_FORMAT                BIT(29)
#define QDMA_CCDF_SER                   BIT(30)
#define QDMA_SG_FIN                     BIT(30)
#define QDMA_SG_LEN_MASK                GENMASK(29, 0)
#define QDMA_CCDF_MASK                  GENMASK(28, 20)

#define FSL_QDMA_DEDR_CLEAR             GENMASK(31, 0)
#define FSL_QDMA_BCQIDR_CLEAR           GENMASK(31, 0)
#define FSL_QDMA_DEIER_CLEAR            GENMASK(31, 0)

#define FSL_QDMA_BCQIER_CQTIE           BIT(15)
#define FSL_QDMA_BCQIER_CQPEIE          BIT(23)
#define FSL_QDMA_BSQICR_ICEN            BIT(31)

#define FSL_QDMA_BSQICR_ICST(x)         ((x) << 16)
#define FSL_QDMA_CQIER_MEIE             BIT(31)
#define FSL_QDMA_CQIER_TEIE             BIT(0)
#define FSL_QDMA_SQCCMR_ENTER_WM        BIT(21)

#define FSL_QDMA_BCQMR_EN               BIT(31)
#define FSL_QDMA_BCQMR_EI               BIT(30)
#define FSL_QDMA_BCQMR_CD_THLD(x)       ((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x)       ((x) << 16)

#define FSL_QDMA_BCQSR_QF               BIT(16)
#define FSL_QDMA_BCQSR_XOFF             BIT(0)

#define FSL_QDMA_BSQMR_EN               BIT(31)
#define FSL_QDMA_BSQMR_DI               BIT(30)
#define FSL_QDMA_BSQMR_CQ_SIZE(x)       ((x) << 16)

#define FSL_QDMA_BSQSR_QE               BIT(17)

#define FSL_QDMA_DMR_DQD                BIT(30)
#define FSL_QDMA_DSR_DB                 BIT(31)

/* Size related definition */
#define FSL_QDMA_QUEUE_MAX              8
#define FSL_QDMA_COMMAND_BUFFER_SIZE    64
#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
#define FSL_QDMA_QUEUE_NUM_MAX          8

/* Field definition for CMD */
#define FSL_QDMA_CMD_RWTTYPE            0x4
#define FSL_QDMA_CMD_LWC                0x2
#define FSL_QDMA_CMD_RWTTYPE_OFFSET     28
#define FSL_QDMA_CMD_NS_OFFSET          27
#define FSL_QDMA_CMD_DQOS_OFFSET        24
#define FSL_QDMA_CMD_WTHROTL_OFFSET     20
#define FSL_QDMA_CMD_DSEN_OFFSET        19
#define FSL_QDMA_CMD_LWC_OFFSET         16

/* Field definition for Descriptor offset */
#define QDMA_CCDF_STATUS                20
#define QDMA_CCDF_OFFSET                20

/* Field definition for safe loop count */
#define FSL_QDMA_HALT_COUNT             1500
#define FSL_QDMA_MAX_SIZE               16385
#define FSL_QDMA_COMP_TIMEOUT           1000
#define FSL_COMMAND_QUEUE_OVERFLOW      10

#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)                  \
        (((fsl_qdma_engine)->block_offset) * (x))
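
/*
 * Example: with a "block-offset" of 0x10000 in the device tree (an
 * illustrative value, not taken from a specific board), block 0 maps to
 * block_base + 0x0 and block 1 to block_base + 0x10000.
 */
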
 125
 126/**
 127 * struct fsl_qdma_format - This is the struct holding describing compound
 128 *                          descriptor format with qDMA.
 129 * @status:                 Command status and enqueue status notification.
 130 * @cfg:                    Frame offset and frame format.
 131 * @addr_lo:                Holding the compound descriptor of the lower
 132 *                          32-bits address in memory 40-bit address.
 133 * @addr_hi:                Same as above member, but point high 8-bits in
 134 *                          memory 40-bit address.
 135 * @__reserved1:            Reserved field.
 136 * @cfg8b_w1:               Compound descriptor command queue origin produced
 137 *                          by qDMA and dynamic debug field.
 138 * @data                    Pointer to the memory 40-bit address, describes DMA
 139 *                          source information and DMA destination information.
 140 */
struct fsl_qdma_format {
        __le32 status;
        __le32 cfg;
        union {
                struct {
                        __le32 addr_lo;
                        u8 addr_hi;
                        u8 __reserved1[2];
                        u8 cfg8b_w1;
                } __packed;
                __le64 data;
        };
} __packed;

/* qDMA status notification pre information */
struct fsl_pre_status {
        u64 addr;
        u8 queue;
};

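/*
 * Each CPU remembers the queue id and completion address of the last
 * status entry it consumed, so fsl_qdma_queue_transfer_complete() can
 * recognize a status notification it has already handled.
 */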
static DEFINE_PER_CPU(struct fsl_pre_status, pre);

struct fsl_qdma_chan {
        struct virt_dma_chan            vchan;
        struct virt_dma_desc            vdesc;
        enum dma_status                 status;
        struct fsl_qdma_engine          *qdma;
        struct fsl_qdma_queue           *queue;
};

struct fsl_qdma_queue {
        struct fsl_qdma_format  *virt_head;
        struct fsl_qdma_format  *virt_tail;
        struct list_head        comp_used;
        struct list_head        comp_free;
        struct dma_pool         *comp_pool;
        struct dma_pool         *desc_pool;
        spinlock_t              queue_lock;
        dma_addr_t              bus_addr;
        u32                     n_cq;
        u32                     id;
        struct fsl_qdma_format  *cq;
        void __iomem            *block_base;
};

struct fsl_qdma_comp {
        dma_addr_t              bus_addr;
        dma_addr_t              desc_bus_addr;
        struct fsl_qdma_format  *virt_addr;
        struct fsl_qdma_format  *desc_virt_addr;
        struct fsl_qdma_chan    *qchan;
        struct virt_dma_desc    vdesc;
        struct list_head        list;
};

struct fsl_qdma_engine {
        struct dma_device       dma_dev;
        void __iomem            *ctrl_base;
        void __iomem            *status_base;
        void __iomem            *block_base;
        u32                     n_chans;
        u32                     n_queues;
        struct mutex            fsl_qdma_mutex;
        int                     error_irq;
        int                     *queue_irq;
        u32                     feature;
        struct fsl_qdma_queue   *queue;
        struct fsl_qdma_queue   **status;
        struct fsl_qdma_chan    *chans;
        int                     block_number;
        int                     block_offset;
        int                     irq_base;
        int                     desc_allocated;
};

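/*
 * The qDMA works with 40-bit bus addresses: U64_MAX >> 24 below is simply
 * GENMASK(39, 0), keeping the low 40 address bits of the descriptor word.
 */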
static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
{
        return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
}

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
        ccdf->addr_hi = upper_32_bits(addr);
        ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
}

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
        return ccdf->cfg8b_w1 & U8_MAX;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
        return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
        ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT | offset);
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
        return (le32_to_cpu(ccdf->status) & QDMA_CCDF_MASK) >> QDMA_CCDF_STATUS;
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
        ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
}

static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
}

static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
        csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
        return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
                        void __iomem *addr)
{
        FSL_DMA_OUT(qdma, addr, val, 32);
}

static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}

static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
        return container_of(vd, struct fsl_qdma_comp, vdesc);
}

static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        struct fsl_qdma_comp *comp_temp, *_comp_temp;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

        if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
                return;

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_used, list) {
                dma_pool_free(fsl_queue->comp_pool,
                              comp_temp->virt_addr,
                              comp_temp->bus_addr);
                dma_pool_free(fsl_queue->desc_pool,
                              comp_temp->desc_virt_addr,
                              comp_temp->desc_bus_addr);
                list_del(&comp_temp->list);
                kfree(comp_temp);
        }

        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &fsl_queue->comp_free, list) {
                dma_pool_free(fsl_queue->comp_pool,
                              comp_temp->virt_addr,
                              comp_temp->bus_addr);
                dma_pool_free(fsl_queue->desc_pool,
                              comp_temp->desc_virt_addr,
                              comp_temp->desc_bus_addr);
                list_del(&comp_temp->list);
                kfree(comp_temp);
        }

        dma_pool_destroy(fsl_queue->comp_pool);
        dma_pool_destroy(fsl_queue->desc_pool);

        fsl_qdma->desc_allocated--;
        fsl_queue->comp_pool = NULL;
        fsl_queue->desc_pool = NULL;
}

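/*
 * Compound descriptor layout (each struct fsl_qdma_format is 16 bytes):
 * the 64-byte command buffer holds the frame descriptor at offset 0,
 * followed at offset 16 by a three-entry frame list (descriptor, source,
 * destination); the separate 32-byte buffer carries the source/destination
 * descriptors (SD/DD) that the engine reads through the frame list.
 */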
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
                                      dma_addr_t dst, dma_addr_t src, u32 len)
{
        struct fsl_qdma_format *sdf, *ddf;
        struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

        ccdf = fsl_comp->virt_addr;
        csgf_desc = fsl_comp->virt_addr + 1;
        csgf_src = fsl_comp->virt_addr + 2;
        csgf_dest = fsl_comp->virt_addr + 3;
        sdf = fsl_comp->desc_virt_addr;
        ddf = fsl_comp->desc_virt_addr + 1;

        memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
        memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
        /* Head Command Descriptor (Frame Descriptor) */
        qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
        qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
        qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
        /* Status notification is enqueued to the status queue. */
        /* Compound Command Descriptor (Frame List Table) */
        qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
        /* It must be 32 as a Compound S/G Descriptor */
        qdma_csgf_set_len(csgf_desc, 32);
        qdma_desc_addr_set64(csgf_src, src);
        qdma_csgf_set_len(csgf_src, len);
        qdma_desc_addr_set64(csgf_dest, dst);
        qdma_csgf_set_len(csgf_dest, len);
        /* This entry is the last entry. */
        qdma_csgf_set_f(csgf_dest, len);
        /* Descriptor Buffer */
        sdf->data =
                cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
                            FSL_QDMA_CMD_RWTTYPE_OFFSET);
        ddf->data =
                cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
                            FSL_QDMA_CMD_RWTTYPE_OFFSET);
        ddf->data |=
                cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
}

/*
 * Pre-request a full set of command descriptors for enqueue.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
        int i;
        struct fsl_qdma_comp *comp_temp, *_comp_temp;

        for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLOW; i++) {
                comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
                if (!comp_temp)
                        goto err_alloc;
                comp_temp->virt_addr =
                        dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
                                       &comp_temp->bus_addr);
                if (!comp_temp->virt_addr)
                        goto err_dma_alloc;

                comp_temp->desc_virt_addr =
                        dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
                                       &comp_temp->desc_bus_addr);
                if (!comp_temp->desc_virt_addr)
                        goto err_desc_dma_alloc;

                list_add_tail(&comp_temp->list, &queue->comp_free);
        }

        return 0;

err_desc_dma_alloc:
        dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
                      comp_temp->bus_addr);

err_dma_alloc:
        kfree(comp_temp);

err_alloc:
        list_for_each_entry_safe(comp_temp, _comp_temp,
                                 &queue->comp_free, list) {
                if (comp_temp->virt_addr)
                        dma_pool_free(queue->comp_pool,
                                      comp_temp->virt_addr,
                                      comp_temp->bus_addr);
                if (comp_temp->desc_virt_addr)
                        dma_pool_free(queue->desc_pool,
                                      comp_temp->desc_virt_addr,
                                      comp_temp->desc_bus_addr);

                list_del(&comp_temp->list);
                kfree(comp_temp);
        }

        return -ENOMEM;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp
*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
        unsigned long flags;
        struct fsl_qdma_comp *comp_temp;
        int timeout = FSL_QDMA_COMP_TIMEOUT;
        struct fsl_qdma_queue *queue = fsl_chan->queue;

        while (timeout--) {
                spin_lock_irqsave(&queue->queue_lock, flags);
                if (!list_empty(&queue->comp_free)) {
                        comp_temp = list_first_entry(&queue->comp_free,
                                                     struct fsl_qdma_comp,
                                                     list);
                        list_del(&comp_temp->list);

                        spin_unlock_irqrestore(&queue->queue_lock, flags);
                        comp_temp->qchan = fsl_chan;
                        return comp_temp;
                }
                spin_unlock_irqrestore(&queue->queue_lock, flags);
                udelay(1);
        }

        return NULL;
}

static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
                                struct fsl_qdma_engine *fsl_qdma)
{
        int ret, len, i, j;
        int queue_num, block_number;
        unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
        struct fsl_qdma_queue *queue_head, *queue_temp;

        queue_num = fsl_qdma->n_queues;
        block_number = fsl_qdma->block_number;

        if (queue_num > FSL_QDMA_QUEUE_MAX)
                queue_num = FSL_QDMA_QUEUE_MAX;
        len = sizeof(*queue_head) * queue_num * block_number;
        queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!queue_head)
                return NULL;

        ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
                                             queue_size, queue_num);
        if (ret) {
                dev_err(&pdev->dev, "Can't get queue-sizes.\n");
                return NULL;
        }
        for (j = 0; j < block_number; j++) {
                for (i = 0; i < queue_num; i++) {
                        if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
                            queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                                dev_err(&pdev->dev,
                                        "Got wrong queue-sizes.\n");
                                return NULL;
                        }
                        queue_temp = queue_head + i + (j * queue_num);

                        queue_temp->cq =
                        dma_alloc_coherent(&pdev->dev,
                                           sizeof(struct fsl_qdma_format) *
                                           queue_size[i],
                                           &queue_temp->bus_addr,
                                           GFP_KERNEL);
                        if (!queue_temp->cq)
                                return NULL;
                        queue_temp->block_base = fsl_qdma->block_base +
                                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                        queue_temp->n_cq = queue_size[i];
                        queue_temp->id = i;
                        queue_temp->virt_head = queue_temp->cq;
                        queue_temp->virt_tail = queue_temp->cq;
                        /*
                         * List for queue command buffer
                         */
                        INIT_LIST_HEAD(&queue_temp->comp_used);
                        spin_lock_init(&queue_temp->queue_lock);
                }
        }
        return queue_head;
}

static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
        int ret;
        unsigned int status_size;
        struct fsl_qdma_queue *status_head;
        struct device_node *np = pdev->dev.of_node;

        ret = of_property_read_u32(np, "status-sizes", &status_size);
        if (ret) {
                dev_err(&pdev->dev, "Can't get status-sizes.\n");
                return NULL;
        }
        if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
            status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
                dev_err(&pdev->dev, "Got wrong status_size.\n");
                return NULL;
        }
        status_head = devm_kzalloc(&pdev->dev,
                                   sizeof(*status_head), GFP_KERNEL);
        if (!status_head)
                return NULL;

        /*
         * Buffer for queue command
         */
        status_head->cq = dma_alloc_coherent(&pdev->dev,
                                             sizeof(struct fsl_qdma_format) *
                                             status_size,
                                             &status_head->bus_addr,
                                             GFP_KERNEL);
        if (!status_head->cq) {
                devm_kfree(&pdev->dev, status_head);
                return NULL;
        }
        status_head->n_cq = status_size;
        status_head->virt_head = status_head->cq;
        status_head->virt_tail = status_head->cq;
        status_head->comp_pool = NULL;

        return status_head;
}

static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
        u32 reg;
        int i, j, count = FSL_QDMA_HALT_COUNT;
        void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

        /* Disable the command queue and wait for idle state. */
        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
        reg |= FSL_QDMA_DMR_DQD;
        qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
        for (j = 0; j < fsl_qdma->block_number; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
                        qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
        }
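        /* Poll DSR[DB] until the engine reports idle (~150 ms worst case). */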
        while (1) {
                reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
                if (!(reg & FSL_QDMA_DSR_DB))
                        break;
                if (count-- < 0)
                        return -EBUSY;
                udelay(100);
        }

        for (j = 0; j < fsl_qdma->block_number; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

                /* Disable status queue. */
                qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

                /*
                 * Clear the command queue interrupt detect register for
                 * all queues.
                 */
                qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                            block + FSL_QDMA_BCQIDR(0));
        }

        return 0;
}

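/*
 * Drain the status queue of one block: pop each completion entry, match it
 * against the head of the owning command queue's comp_used list, advance
 * the status queue dequeue index (DI) and complete the virtual descriptor.
 * Entries already consumed on this CPU (tracked in the per-CPU 'pre'
 * record) are dropped as duplicates.
 */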
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
                                 void *block,
                                 int id)
{
        bool duplicate;
        u32 reg, i, count;
        struct fsl_qdma_queue *temp_queue;
        struct fsl_qdma_format *status_addr;
        struct fsl_qdma_comp *fsl_comp = NULL;
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
        struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

        count = FSL_QDMA_MAX_SIZE;

        while (count--) {
                duplicate = false;
                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
                if (reg & FSL_QDMA_BSQSR_QE)
                        return 0;

                status_addr = fsl_status->virt_head;

                if (qdma_ccdf_get_queue(status_addr) ==
                    __this_cpu_read(pre.queue) &&
                    qdma_ccdf_addr_get64(status_addr) ==
                    __this_cpu_read(pre.addr))
                        duplicate = true;
                i = qdma_ccdf_get_queue(status_addr) +
                        id * fsl_qdma->n_queues;
                __this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
                __this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
                temp_queue = fsl_queue + i;

                spin_lock(&temp_queue->queue_lock);
                if (list_empty(&temp_queue->comp_used)) {
                        if (!duplicate) {
                                spin_unlock(&temp_queue->queue_lock);
                                return -EAGAIN;
                        }
                } else {
                        fsl_comp = list_first_entry(&temp_queue->comp_used,
                                                    struct fsl_qdma_comp, list);
                        if (fsl_comp->bus_addr + 16 !=
                            __this_cpu_read(pre.addr)) {
                                if (!duplicate) {
                                        spin_unlock(&temp_queue->queue_lock);
                                        return -EAGAIN;
                                }
                        }
                }

                if (duplicate) {
                        reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
                        reg |= FSL_QDMA_BSQMR_DI;
                        qdma_desc_addr_set64(status_addr, 0x0);
                        fsl_status->virt_head++;
                        if (fsl_status->virt_head == fsl_status->cq
                                                   + fsl_status->n_cq)
                                fsl_status->virt_head = fsl_status->cq;
                        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
                        spin_unlock(&temp_queue->queue_lock);
                        continue;
                }
                list_del(&fsl_comp->list);

                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
                reg |= FSL_QDMA_BSQMR_DI;
                qdma_desc_addr_set64(status_addr, 0x0);
                fsl_status->virt_head++;
                if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
                        fsl_status->virt_head = fsl_status->cq;
                qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
                spin_unlock(&temp_queue->queue_lock);

                spin_lock(&fsl_comp->qchan->vchan.lock);
                vchan_cookie_complete(&fsl_comp->vdesc);
                fsl_comp->qchan->status = DMA_COMPLETE;
                spin_unlock(&fsl_comp->qchan->vchan.lock);
        }

        return 0;
}

static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
{
        unsigned int intr;
        struct fsl_qdma_engine *fsl_qdma = dev_id;
        void __iomem *status = fsl_qdma->status_base;

        intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

        if (!intr)
                return IRQ_NONE;

        dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");

        qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
        return IRQ_HANDLED;
}

static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
{
        int id;
        unsigned int intr, reg;
        struct fsl_qdma_engine *fsl_qdma = dev_id;
        void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

        id = irq - fsl_qdma->irq_base;
        if (id < 0 || id >= fsl_qdma->block_number) {
                dev_err(fsl_qdma->dma_dev.dev,
                        "irq %d is wrong, irq_base is %d\n",
                        irq, fsl_qdma->irq_base);
                return IRQ_NONE;
        }

        block = fsl_qdma->block_base +
                FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

        intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

        if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
                intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

        if (intr != 0) {
                reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
                reg |= FSL_QDMA_DMR_DQD;
                qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
                qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
                dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
        }

        /* Clear all detected events and interrupts. */
        qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                    block + FSL_QDMA_BCQIDR(0));

        return IRQ_HANDLED;
}

static int
fsl_qdma_irq_init(struct platform_device *pdev,
                  struct fsl_qdma_engine *fsl_qdma)
{
        int i;
        int cpu;
        int ret;
        char irq_name[20];

        fsl_qdma->error_irq =
                platform_get_irq_byname(pdev, "qdma-error");
        if (fsl_qdma->error_irq < 0) {
                dev_err(&pdev->dev, "Can't get qdma controller irq.\n");
                return fsl_qdma->error_irq;
        }

        ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
                               fsl_qdma_error_handler, 0,
                               "qDMA error", fsl_qdma);
        if (ret) {
                dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
                return ret;
        }

        for (i = 0; i < fsl_qdma->block_number; i++) {
                sprintf(irq_name, "qdma-queue%d", i);
                fsl_qdma->queue_irq[i] =
                                platform_get_irq_byname(pdev, irq_name);

                if (fsl_qdma->queue_irq[i] < 0) {
                        dev_err(&pdev->dev,
                                "Can't get qdma queue %d irq.\n", i);
                        return fsl_qdma->queue_irq[i];
                }

                ret = devm_request_irq(&pdev->dev,
                                       fsl_qdma->queue_irq[i],
                                       fsl_qdma_queue_handler,
                                       0,
                                       "qDMA queue",
                                       fsl_qdma);
                if (ret) {
                        dev_err(&pdev->dev,
                                "Can't register qDMA queue IRQ.\n");
                        return ret;
                }

                cpu = i % num_online_cpus();
                ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
                                            get_cpu_mask(cpu));
                if (ret) {
                        dev_err(&pdev->dev,
                                "Can't set cpu %d affinity to IRQ %d.\n",
                                cpu,
                                fsl_qdma->queue_irq[i]);
                        return ret;
                }
        }

        return 0;
}

static void fsl_qdma_irq_exit(struct platform_device *pdev,
                              struct fsl_qdma_engine *fsl_qdma)
{
        int i;

        devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
        for (i = 0; i < fsl_qdma->block_number; i++)
                devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
}

static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
        u32 reg;
        int i, j, ret;
        struct fsl_qdma_queue *temp;
        void __iomem *status = fsl_qdma->status_base;
        void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
        struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

        /* Try to halt the qDMA engine first. */
        ret = fsl_qdma_halt(fsl_qdma);
        if (ret) {
                dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
                return ret;
        }

        for (i = 0; i < fsl_qdma->block_number; i++) {
                /*
                 * Clear the command queue interrupt detect register for
                 * all queues.
                 */
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
                qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
                            block + FSL_QDMA_BCQIDR(0));
        }

        for (j = 0; j < fsl_qdma->block_number; j++) {
                block = fsl_qdma->block_base +
                        FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
                for (i = 0; i < fsl_qdma->n_queues; i++) {
                        temp = fsl_queue + i + (j * fsl_qdma->n_queues);
                        /*
                         * Initialize Command Queue registers to point to
                         * the first command descriptor in memory:
                         * Dequeue Pointer Address Registers,
                         * Enqueue Pointer Address Registers.
                         */
                        qdma_writel(fsl_qdma, temp->bus_addr,
                                    block + FSL_QDMA_BCQDPA_SADDR(i));
                        qdma_writel(fsl_qdma, temp->bus_addr,
                                    block + FSL_QDMA_BCQEPA_SADDR(i));

                        /* Initialize the queue mode. */
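                        /*
                         * CQ_SIZE encodes log2(entries) - 6, so the legal
                         * 64..16384 entry range maps onto 0..8; CD_THLD
                         * appears to use a similar log2-based encoding for
                         * the dequeue threshold (inferred from the size
                         * limits above, not from the reference manual).
                         */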
                        reg = FSL_QDMA_BCQMR_EN;
                        reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
                        reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
                        qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
                }

                /*
                 * Workaround for erratum ERR010812:
                 * XOFF must be enabled to avoid enqueue rejections,
                 * so set SQCCMR ENTER_WM to 0x20.
                 */
                qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
                            block + FSL_QDMA_SQCCMR);

                /*
                 * Initialize status queue registers to point to the first
                 * command descriptor in memory:
                 * Dequeue Pointer Address Registers,
                 * Enqueue Pointer Address Registers.
                 */
                qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
                            block + FSL_QDMA_SQEPAR);
                qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
                            block + FSL_QDMA_SQDPAR);
                /* Initialize status queue interrupt. */
                qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
                            block + FSL_QDMA_BCQIER(0));
                qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
                            FSL_QDMA_BSQICR_ICST(5) | 0x8000,
                            block + FSL_QDMA_BSQICR);
                qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
                            FSL_QDMA_CQIER_TEIE,
                            block + FSL_QDMA_CQIER);

                /* Initialize the status queue mode. */
                reg = FSL_QDMA_BSQMR_EN;
                reg |= FSL_QDMA_BSQMR_CQ_SIZE(
                        ilog2(fsl_qdma->status[j]->n_cq) - 6);

                qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
                reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
        }

        /* Initialize controller interrupt register. */
        qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
        qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

        reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
        reg &= ~FSL_QDMA_DMR_DQD;
        qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

        return 0;
}

static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
                     dma_addr_t src, size_t len, unsigned long flags)
{
        struct fsl_qdma_comp *fsl_comp;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

        fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
        if (!fsl_comp)
                return NULL;

        fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

        return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
}

static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
        u32 reg;
        struct virt_dma_desc *vdesc;
        struct fsl_qdma_comp *fsl_comp;
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
        void __iomem *block = fsl_queue->block_base;

        reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
        if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
                return;
        vdesc = vchan_next_desc(&fsl_chan->vchan);
        if (!vdesc)
                return;
        list_del(&vdesc->node);
        fsl_comp = to_fsl_qdma_comp(vdesc);

        memcpy(fsl_queue->virt_head++,
               fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
        if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
                fsl_queue->virt_head = fsl_queue->cq;

        list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
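        /*
         * Make sure the descriptor copy above is visible before the
         * enqueue-index (EI) doorbell below tells the hardware to fetch it.
         */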
        barrier();
        reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
        reg |= FSL_QDMA_BCQMR_EI;
        qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
        fsl_chan->status = DMA_IN_PROGRESS;
}

static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
{
        unsigned long flags;
        struct fsl_qdma_comp *fsl_comp;
        struct fsl_qdma_queue *fsl_queue;

        fsl_comp = to_fsl_qdma_comp(vdesc);
        fsl_queue = fsl_comp->qchan->queue;

        spin_lock_irqsave(&fsl_queue->queue_lock, flags);
        list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
        spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_issue_pending(struct dma_chan *chan)
{
        unsigned long flags;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        spin_lock_irqsave(&fsl_queue->queue_lock, flags);
        spin_lock(&fsl_chan->vchan.lock);
        if (vchan_issue_pending(&fsl_chan->vchan))
                fsl_qdma_enqueue_desc(fsl_chan);
        spin_unlock(&fsl_chan->vchan.lock);
        spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_synchronize(struct dma_chan *chan)
{
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

        vchan_synchronize(&fsl_chan->vchan);
}

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
        LIST_HEAD(head);
        unsigned long flags;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

        spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
        vchan_get_all_descriptors(&fsl_chan->vchan, &head);
        spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
        return 0;
}

static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
{
        int ret;
        struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
        struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
        struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

        if (fsl_queue->comp_pool && fsl_queue->desc_pool)
                return fsl_qdma->desc_allocated;

        INIT_LIST_HEAD(&fsl_queue->comp_free);

        /*
         * The dma pool for queue command buffer
         */
        fsl_queue->comp_pool =
        dma_pool_create("comp_pool",
                        chan->device->dev,
                        FSL_QDMA_COMMAND_BUFFER_SIZE,
                        64, 0);
        if (!fsl_queue->comp_pool)
                return -ENOMEM;

        /*
         * The dma pool for Descriptor (SD/DD) buffer
         */
        fsl_queue->desc_pool =
        dma_pool_create("desc_pool",
                        chan->device->dev,
                        FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
                        32, 0);
        if (!fsl_queue->desc_pool)
                goto err_desc_pool;

        ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
        if (ret) {
                dev_err(chan->device->dev,
                        "failed to alloc dma buffer for S/G descriptor\n");
                goto err_mem;
        }

        fsl_qdma->desc_allocated++;
        return fsl_qdma->desc_allocated;

err_mem:
        dma_pool_destroy(fsl_queue->desc_pool);
err_desc_pool:
        dma_pool_destroy(fsl_queue->comp_pool);
        return -ENOMEM;
}

static int fsl_qdma_probe(struct platform_device *pdev)
{
        int ret, i;
        int blk_num, blk_off;
        u32 len, chans, queues;
        struct resource *res;
        struct fsl_qdma_chan *fsl_chan;
        struct fsl_qdma_engine *fsl_qdma;
        struct device_node *np = pdev->dev.of_node;

        ret = of_property_read_u32(np, "dma-channels", &chans);
        if (ret) {
                dev_err(&pdev->dev, "Can't get dma-channels.\n");
                return ret;
        }

        ret = of_property_read_u32(np, "block-offset", &blk_off);
        if (ret) {
                dev_err(&pdev->dev, "Can't get block-offset.\n");
                return ret;
        }

        ret = of_property_read_u32(np, "block-number", &blk_num);
        if (ret) {
                dev_err(&pdev->dev, "Can't get block-number.\n");
                return ret;
        }

        blk_num = min_t(int, blk_num, num_online_cpus());

        len = sizeof(*fsl_qdma);
        fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma)
                return -ENOMEM;

        len = sizeof(*fsl_chan) * chans;
        fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma->chans)
                return -ENOMEM;

        len = sizeof(struct fsl_qdma_queue *) * blk_num;
        fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma->status)
                return -ENOMEM;

        len = sizeof(int) * blk_num;
        fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!fsl_qdma->queue_irq)
                return -ENOMEM;

        ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
        if (ret) {
                dev_err(&pdev->dev, "Can't get queues.\n");
                return ret;
        }

        fsl_qdma->desc_allocated = 0;
        fsl_qdma->n_chans = chans;
        fsl_qdma->n_queues = queues;
        fsl_qdma->block_number = blk_num;
        fsl_qdma->block_offset = blk_off;

        mutex_init(&fsl_qdma->fsl_qdma_mutex);

        for (i = 0; i < fsl_qdma->block_number; i++) {
                fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
                if (!fsl_qdma->status[i])
                        return -ENOMEM;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->ctrl_base))
                return PTR_ERR(fsl_qdma->ctrl_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->status_base))
                return PTR_ERR(fsl_qdma->status_base);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fsl_qdma->block_base))
                return PTR_ERR(fsl_qdma->block_base);
        fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
        if (!fsl_qdma->queue)
                return -ENOMEM;

        ret = fsl_qdma_irq_init(pdev, fsl_qdma);
        if (ret)
                return ret;

        fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
        fsl_qdma->feature = of_property_read_bool(np, "big-endian");
        INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

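        /*
         * Channels are spread round-robin over every queue in every block:
         * channel i is bound to queue (i % (n_queues * block_number)).
         */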
        for (i = 0; i < fsl_qdma->n_chans; i++) {
                struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

                fsl_chan->qdma = fsl_qdma;
                fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
                                                        fsl_qdma->block_number);
                fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
                vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
        }

        dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

        fsl_qdma->dma_dev.dev = &pdev->dev;
        fsl_qdma->dma_dev.device_free_chan_resources =
                fsl_qdma_free_chan_resources;
        fsl_qdma->dma_dev.device_alloc_chan_resources =
                fsl_qdma_alloc_chan_resources;
        fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
        fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
        fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
        fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
        fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

        dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));

        platform_set_drvdata(pdev, fsl_qdma);

        ret = dma_async_device_register(&fsl_qdma->dma_dev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register NXP Layerscape qDMA engine.\n");
                return ret;
        }

        ret = fsl_qdma_reg_init(fsl_qdma);
        if (ret) {
                dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
                return ret;
        }

        return 0;
}

static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
        struct fsl_qdma_chan *chan, *_chan;

        list_for_each_entry_safe(chan, _chan,
                                 &dmadev->channels, vchan.chan.device_node) {
                list_del(&chan->vchan.chan.device_node);
                tasklet_kill(&chan->vchan.task);
        }
}

static int fsl_qdma_remove(struct platform_device *pdev)
{
        int i;
        struct fsl_qdma_queue *status;
        struct device_node *np = pdev->dev.of_node;
        struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

        fsl_qdma_irq_exit(pdev, fsl_qdma);
        fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
        of_dma_controller_free(np);
        dma_async_device_unregister(&fsl_qdma->dma_dev);

        for (i = 0; i < fsl_qdma->block_number; i++) {
                status = fsl_qdma->status[i];
                dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
                                  status->n_cq, status->cq, status->bus_addr);
        }
        return 0;
}

static const struct of_device_id fsl_qdma_dt_ids[] = {
        { .compatible = "fsl,ls1021a-qdma", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
        .driver         = {
                .name   = "fsl-qdma",
                .of_match_table = fsl_qdma_dt_ids,
        },
        .probe          = fsl_qdma_probe,
        .remove         = fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");