linux/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
// SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP

#include <linux/init.h>
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/of_irq.h>
#include <linux/iommu.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>

#include "../virt-dma.h"
#include "dpdmai.h"
#include "dpaa2-qdma.h"

static bool smmu_disable = true;

static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
}

static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
        return container_of(vd, struct dpaa2_qdma_comp, vdesc);
}

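/*
 * Allocate the per-channel DMA pools: one for frame descriptors (FD),
 * one for frame lists (FL) and one for source/destination descriptors
 * (SDD). Every comp descriptor requested later draws from these pools.
 */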
static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
        struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;

        dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
                                              sizeof(struct dpaa2_fd),
                                              sizeof(struct dpaa2_fd), 0);
        if (!dpaa2_chan->fd_pool)
                goto err;

        dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
                                              sizeof(struct dpaa2_fl_entry),
                                              sizeof(struct dpaa2_fl_entry), 0);
        if (!dpaa2_chan->fl_pool)
                goto err_fd;

        dpaa2_chan->sdd_pool =
                dma_pool_create("sdd_pool", dev,
                                sizeof(struct dpaa2_qdma_sd_d),
                                sizeof(struct dpaa2_qdma_sd_d), 0);
        if (!dpaa2_chan->sdd_pool)
                goto err_fl;

        return dpaa2_qdma->desc_allocated++;
err_fl:
        dma_pool_destroy(dpaa2_chan->fl_pool);
err_fd:
        dma_pool_destroy(dpaa2_chan->fd_pool);
err:
        return -ENOMEM;
}

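/*
 * Reclaim all pending virt-dma descriptors, free the comp descriptors
 * on the used and free lists, then destroy the channel's DMA pools.
 */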
static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
        unsigned long flags;

        LIST_HEAD(head);

        spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
        vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
        spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);

        vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);

        dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
        dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);

        dma_pool_destroy(dpaa2_chan->fd_pool);
        dma_pool_destroy(dpaa2_chan->fl_pool);
        dma_pool_destroy(dpaa2_chan->sdd_pool);
        dpaa2_qdma->desc_allocated--;
}

/*
 * Request a comp descriptor for enqueue: reuse one from the channel's
 * free list when possible, otherwise allocate a fresh one along with
 * its FD, FL and SDD buffers.
 */
static struct dpaa2_qdma_comp *
dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
{
        struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
        struct device *dev = &qdma_priv->dpdmai_dev->dev;
        struct dpaa2_qdma_comp *comp_temp = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
        if (list_empty(&dpaa2_chan->comp_free)) {
                spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
                comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
                if (!comp_temp)
                        goto err;
                comp_temp->fd_virt_addr =
                        dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
                                       &comp_temp->fd_bus_addr);
                if (!comp_temp->fd_virt_addr)
                        goto err_comp;

                comp_temp->fl_virt_addr =
                        dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
                                       &comp_temp->fl_bus_addr);
                if (!comp_temp->fl_virt_addr)
                        goto err_fd_virt;

                comp_temp->desc_virt_addr =
                        dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
                                       &comp_temp->desc_bus_addr);
                if (!comp_temp->desc_virt_addr)
                        goto err_fl_virt;

                comp_temp->qchan = dpaa2_chan;
                return comp_temp;
        }

        comp_temp = list_first_entry(&dpaa2_chan->comp_free,
                                     struct dpaa2_qdma_comp, list);
        list_del(&comp_temp->list);
        spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);

        comp_temp->qchan = dpaa2_chan;

        return comp_temp;

err_fl_virt:
        dma_pool_free(dpaa2_chan->fl_pool,
                      comp_temp->fl_virt_addr,
                      comp_temp->fl_bus_addr);
err_fd_virt:
        dma_pool_free(dpaa2_chan->fd_pool,
                      comp_temp->fd_virt_addr,
                      comp_temp->fd_bus_addr);
err_comp:
        kfree(comp_temp);
err:
        dev_err(dev, "Failed to request descriptor\n");
        return NULL;
}

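/*
 * Fill in the frame descriptor: it carries the frame list address, the
 * frame format and the frame context (FRC) for this job.
 */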
static void
dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
{
        struct dpaa2_fd *fd;

        fd = dpaa2_comp->fd_virt_addr;
        memset(fd, 0, sizeof(struct dpaa2_fd));

        /* the FD points at the frame list */
        dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);

        /*
         * Bypass memory translation, frame list format, short length
         * disable; BMT must stay off when fsl-mc uses IOVA addresses.
         */
        if (smmu_disable)
                dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
        dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);

        dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
}

/*
 * First frame list entry: points at the descriptor buffer holding the
 * source and destination descriptor commands.
 */
static void
dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
                                 struct dpaa2_qdma_comp *dpaa2_comp,
                                 bool wrt_changed)
{
        struct dpaa2_qdma_sd_d *sdd;

        sdd = dpaa2_comp->desc_virt_addr;
        memset(sdd, 0, 2 * (sizeof(*sdd)));

        /* source descriptor CMD */
        sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
        sdd++;

        /* destination descriptor CMD */
        if (wrt_changed)
                sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
        else
                sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);

        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

        /* first frame list to source descriptor */
        dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
        dpaa2_fl_set_len(f_list, 0x20);
        dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);

        /* bypass memory translation */
        if (smmu_disable)
                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

/* source and destination frame list entries */
static void
dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
                           dma_addr_t dst, dma_addr_t src,
                           size_t len, uint8_t fmt)
{
        /* source frame list to source buffer */
        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

        dpaa2_fl_set_addr(f_list, src);
        dpaa2_fl_set_len(f_list, len);

        /* single buffer frame or scatter gather frame */
        dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));

        /* bypass memory translation */
        if (smmu_disable)
                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);

        f_list++;

        /* destination frame list to destination buffer */
        memset(f_list, 0, sizeof(struct dpaa2_fl_entry));

        dpaa2_fl_set_addr(f_list, dst);
        dpaa2_fl_set_len(f_list, len);
        /* single buffer frame or scatter gather frame */
        dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
        /* final entry in the frame list */
        dpaa2_fl_set_final(f_list, QDMA_FL_F);
        /* bypass memory translation */
        if (smmu_disable)
                f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
}

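/*
 * Build a long-format memcpy job: one FD pointing at a three-entry
 * frame list (descriptor buffer, source buffer, destination buffer),
 * then hand the result to virt-dma for submission.
 */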
static struct dma_async_tx_descriptor
*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
                        dma_addr_t src, size_t len, ulong flags)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_engine *dpaa2_qdma;
        struct dpaa2_qdma_comp *dpaa2_comp;
        struct dpaa2_fl_entry *f_list;
        bool wrt_changed;

        dpaa2_qdma = dpaa2_chan->qdma;
        dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
        if (!dpaa2_comp)
                return NULL;

        wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;

        /* populate frame descriptor */
        dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);

        f_list = dpaa2_comp->fl_virt_addr;

        /* first frame list for the descriptor buffer (long format) */
        dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);

        f_list++;

        dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);

        return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
}

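/*
 * Move the next pending virt-dma descriptor to the used list and
 * enqueue its FD on the channel's transmit frame queue; on enqueue
 * failure the comp descriptor is recycled onto the free list.
 */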
static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
{
        struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
        struct dpaa2_qdma_comp *dpaa2_comp;
        struct virt_dma_desc *vdesc;
        struct dpaa2_fd *fd;
        unsigned long flags;
        int err;

        spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
        spin_lock(&dpaa2_chan->vchan.lock);
        if (vchan_issue_pending(&dpaa2_chan->vchan)) {
                vdesc = vchan_next_desc(&dpaa2_chan->vchan);
                if (!vdesc)
                        goto err_enqueue;
                dpaa2_comp = to_fsl_qdma_comp(vdesc);

                fd = dpaa2_comp->fd_virt_addr;

                list_del(&vdesc->node);
                list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);

                err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
                if (err) {
                        list_move_tail(&dpaa2_comp->list,
                                       &dpaa2_chan->comp_free);
                }
        }
err_enqueue:
        spin_unlock(&dpaa2_chan->vchan.lock);
        spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
}

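/*
 * Open the DPDMAI object, validate its API version and cache the
 * rx/tx frame queue IDs of each priority pair.
 */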
static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
{
        struct dpaa2_qdma_priv_per_prio *ppriv;
        struct device *dev = &ls_dev->dev;
        struct dpaa2_qdma_priv *priv;
        u8 prio_def = DPDMAI_PRIO_NUM;
        int err = -EINVAL;
        int i;

        priv = dev_get_drvdata(dev);

        priv->dev = dev;
        priv->dpqdma_id = ls_dev->obj_desc.id;

        /* Get the handle for the DPDMAI this interface is associated with */
        err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpdmai_open() failed\n");
                return err;
        }

        dev_dbg(dev, "Opened dpdmai object successfully\n");

        err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
                                    &priv->dpdmai_attr);
        if (err) {
                dev_err(dev, "dpdmai_get_attributes() failed\n");
                goto exit;
        }

        if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
                err = -EINVAL;
                dev_err(dev, "DPDMAI major version mismatch\n"
                             "Found %u.%u, supported version is %u.%u\n",
                                priv->dpdmai_attr.version.major,
                                priv->dpdmai_attr.version.minor,
                                DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
                goto exit;
        }

        if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
                err = -EINVAL;
                dev_err(dev, "DPDMAI minor version mismatch\n"
                             "Found %u.%u, supported version is %u.%u\n",
                                priv->dpdmai_attr.version.major,
                                priv->dpdmai_attr.version.minor,
                                DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
                goto exit;
        }

        priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
        ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
        if (!ppriv) {
                err = -ENOMEM;
                goto exit;
        }
        priv->ppriv = ppriv;

        for (i = 0; i < priv->num_pairs; i++) {
                err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
                                          i, &priv->rx_queue_attr[i]);
                if (err) {
                        dev_err(dev, "dpdmai_get_rx_queue() failed\n");
                        goto exit;
                }
                ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;

                err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
                                          i, &priv->tx_fqid[i]);
                if (err) {
                        dev_err(dev, "dpdmai_get_tx_queue() failed\n");
                        goto exit;
                }
                ppriv->req_fqid = priv->tx_fqid[i];
                ppriv->prio = i;
                ppriv->priv = priv;
                ppriv++;
        }

        return 0;
exit:
        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
        return err;
}

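/*
 * Frame queue notification callback: pull completed frames from the
 * response queue, match each FD against the comp descriptors on the
 * channels' used lists (by frame list address), complete the matching
 * cookies, then rearm the notification.
 */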
static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
                        struct dpaa2_qdma_priv_per_prio, nctx);
        struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
        struct dpaa2_qdma_priv *priv = ppriv->priv;
        u32 n_chans = priv->dpaa2_qdma->n_chans;
        struct dpaa2_qdma_chan *qchan;
        const struct dpaa2_fd *fd_eq;
        const struct dpaa2_fd *fd;
        struct dpaa2_dq *dq;
        int is_last = 0;
        int found;
        u8 status;
        int err;
        int i;

        do {
                err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
                                               ppriv->store);
        } while (err);

        while (!is_last) {
                do {
                        dq = dpaa2_io_store_next(ppriv->store, &is_last);
                } while (!is_last && !dq);
                if (!dq) {
                        dev_err(priv->dev, "FQID returned no valid frames!\n");
                        continue;
                }

                /* obtain FD and process the error */
                fd = dpaa2_dq_fd(dq);

                status = dpaa2_fd_get_ctrl(fd) & 0xff;
                if (status)
                        dev_err(priv->dev, "FD error occurred\n");
                found = 0;
                for (i = 0; i < n_chans; i++) {
                        qchan = &priv->dpaa2_qdma->chans[i];
                        spin_lock(&qchan->queue_lock);
                        if (list_empty(&qchan->comp_used)) {
                                spin_unlock(&qchan->queue_lock);
                                continue;
                        }
                        list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
                                                 &qchan->comp_used, list) {
                                fd_eq = dpaa2_comp->fd_virt_addr;

                                if (le64_to_cpu(fd_eq->simple.addr) ==
                                    le64_to_cpu(fd->simple.addr)) {
                                        spin_lock(&qchan->vchan.lock);
                                        vchan_cookie_complete(&dpaa2_comp->vdesc);
                                        spin_unlock(&qchan->vchan.lock);
                                        found = 1;
                                        break;
                                }
                        }
                        spin_unlock(&qchan->queue_lock);
                        if (found)
                                break;
                }
        }

        dpaa2_io_service_rearm(NULL, ctx);
}

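/*
 * Register a DPIO notification context and allocate a dequeue store
 * for each priority pair, unwinding any partial setup on failure.
 */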
static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv;
        struct device *dev = priv->dev;
        int err = -EINVAL;
        int i, num;

        num = priv->num_pairs;
        ppriv = priv->ppriv;
        for (i = 0; i < num; i++) {
                ppriv->nctx.is_cdan = 0;
                ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
                ppriv->nctx.id = ppriv->rsp_fqid;
                ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
                err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
                if (err) {
                        dev_err(dev, "Notification register failed\n");
                        goto err_service;
                }

                ppriv->store =
                        dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
                if (!ppriv->store) {
                        err = -ENOMEM;
                        dev_err(dev, "dpaa2_io_store_create() failed\n");
                        goto err_store;
                }

                ppriv++;
        }
        return 0;

err_store:
        dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
err_service:
        ppriv--;
        while (ppriv >= priv->ppriv) {
                dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
                dpaa2_io_store_destroy(ppriv->store);
                ppriv--;
        }
        return err;
}

static void dpaa2_dpdmai_store_free(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
        int i;

        for (i = 0; i < priv->num_pairs; i++) {
                dpaa2_io_store_destroy(ppriv->store);
                ppriv++;
        }
}

static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
        struct device *dev = priv->dev;
        int i;

        for (i = 0; i < priv->num_pairs; i++) {
                dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
                ppriv++;
        }
}

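/*
 * Point each DPDMAI rx queue at the DPIO registered for its priority
 * so that completions are delivered through dpaa2_qdma_fqdan_cb().
 */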
static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
{
        struct dpdmai_rx_queue_cfg rx_queue_cfg;
        struct dpaa2_qdma_priv_per_prio *ppriv;
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev;
        int i, num;
        int err;

        ls_dev = to_fsl_mc_device(dev);
        num = priv->num_pairs;
        ppriv = priv->ppriv;
        for (i = 0; i < num; i++) {
                rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
                                        DPDMAI_QUEUE_OPT_DEST;
                rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
                rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
                rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
                rx_queue_cfg.dest_cfg.priority = ppriv->prio;
                err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
                                          rx_queue_cfg.dest_cfg.priority,
                                          &rx_queue_cfg);
                if (err) {
                        dev_err(dev, "dpdmai_set_rx_queue() failed\n");
                        return err;
                }

                ppriv++;
        }

        return 0;
}

static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
{
        struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
        struct device *dev = priv->dev;
        struct fsl_mc_device *ls_dev;
        int err = 0;
        int i;

        ls_dev = to_fsl_mc_device(dev);

        for (i = 0; i < priv->num_pairs; i++) {
                ppriv->nctx.qman64 = 0;
                ppriv->nctx.dpio_id = 0;
                ppriv++;
        }

        err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
        if (err)
                dev_err(dev, "dpdmai_reset() failed\n");

        return err;
}

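/* Return all comp descriptors on @head to the channel's DMA pools. */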
static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
                                   struct list_head *head)
{
        struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
        unsigned long flags;

        list_for_each_entry_safe(comp_tmp, _comp_tmp, head, list) {
                spin_lock_irqsave(&qchan->queue_lock, flags);
                list_del(&comp_tmp->list);
                spin_unlock_irqrestore(&qchan->queue_lock, flags);
                dma_pool_free(qchan->fd_pool,
                              comp_tmp->fd_virt_addr,
                              comp_tmp->fd_bus_addr);
                dma_pool_free(qchan->fl_pool,
                              comp_tmp->fl_virt_addr,
                              comp_tmp->fl_bus_addr);
                dma_pool_free(qchan->sdd_pool,
                              comp_tmp->desc_virt_addr,
                              comp_tmp->desc_bus_addr);
                kfree(comp_tmp);
        }
}

static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
        struct dpaa2_qdma_chan *qchan;
        int num, i;

        num = dpaa2_qdma->n_chans;
        for (i = 0; i < num; i++) {
                qchan = &dpaa2_qdma->chans[i];
                dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
                dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
                dma_pool_destroy(qchan->fd_pool);
                dma_pool_destroy(qchan->fl_pool);
                dma_pool_destroy(qchan->sdd_pool);
        }
}

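/*
 * virt-dma desc_free callback: comp descriptors are recycled onto the
 * channel's free list instead of being freed, ready for reuse by
 * dpaa2_qdma_request_desc().
 */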
static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct dpaa2_qdma_comp *dpaa2_comp;
        struct dpaa2_qdma_chan *qchan;
        unsigned long flags;

        dpaa2_comp = to_fsl_qdma_comp(vdesc);
        qchan = dpaa2_comp->qchan;
        spin_lock_irqsave(&qchan->queue_lock, flags);
        list_move_tail(&dpaa2_comp->list, &qchan->comp_free);
        spin_unlock_irqrestore(&qchan->queue_lock, flags);
}

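/*
 * Initialize the virt-dma channels and distribute the available tx
 * frame queues across them round-robin.
 */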
static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
{
        struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
        struct dpaa2_qdma_chan *dpaa2_chan;
        int num = priv->num_pairs;
        int i;

        INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
        for (i = 0; i < dpaa2_qdma->n_chans; i++) {
                dpaa2_chan = &dpaa2_qdma->chans[i];
                dpaa2_chan->qdma = dpaa2_qdma;
                dpaa2_chan->fqid = priv->tx_fqid[i % num];
                dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
                vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
                spin_lock_init(&dpaa2_chan->queue_lock);
                INIT_LIST_HEAD(&dpaa2_chan->comp_used);
                INIT_LIST_HEAD(&dpaa2_chan->comp_free);
        }
        return 0;
}

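/*
 * Probe: allocate an MC portal, set up the DPDMAI object and its DPIO
 * notification path, then register the DMA engine with the framework.
 */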
static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
{
        struct device *dev = &dpdmai_dev->dev;
        struct dpaa2_qdma_engine *dpaa2_qdma;
        struct dpaa2_qdma_priv *priv;
        int err;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        dev_set_drvdata(dev, priv);
        priv->dpdmai_dev = dpdmai_dev;

        priv->iommu_domain = iommu_get_domain_for_dev(dev);
        if (priv->iommu_domain)
                smmu_disable = false;

        /* obtain an MC portal */
        err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
        if (err) {
                if (err == -ENXIO)
                        err = -EPROBE_DEFER;
                else
                        dev_err(dev, "MC portal allocation failed\n");
                goto err_mcportal;
        }

        /* DPDMAI initialization */
        err = dpaa2_qdma_setup(dpdmai_dev);
        if (err) {
                dev_err(dev, "dpaa2_qdma_setup() failed\n");
                goto err_dpdmai_setup;
        }

        /* DPIO */
        err = dpaa2_qdma_dpio_setup(priv);
        if (err) {
                dev_err(dev, "dpaa2_qdma_dpio_setup() failed\n");
                goto err_dpio_setup;
        }

        /* DPDMAI binding to DPIO */
        err = dpaa2_dpdmai_bind(priv);
        if (err) {
                dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
                goto err_bind;
        }

        /* DPDMAI enable */
        err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpdmai_enable() failed\n");
                goto err_enable;
        }

        dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
        if (!dpaa2_qdma) {
                err = -ENOMEM;
                goto err_eng;
        }

        priv->dpaa2_qdma = dpaa2_qdma;
        dpaa2_qdma->priv = priv;

        dpaa2_qdma->desc_allocated = 0;
        dpaa2_qdma->n_chans = NUM_CH;

        dpaa2_dpdmai_init_channels(dpaa2_qdma);

        if (soc_device_match(soc_fixup_tuning))
                dpaa2_qdma->qdma_wrtype_fixup = true;
        else
                dpaa2_qdma->qdma_wrtype_fixup = false;

        dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
        dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
        dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);

        dpaa2_qdma->dma_dev.dev = dev;
        dpaa2_qdma->dma_dev.device_alloc_chan_resources =
                dpaa2_qdma_alloc_chan_resources;
        dpaa2_qdma->dma_dev.device_free_chan_resources =
                dpaa2_qdma_free_chan_resources;
        dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
        dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
        dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;

        err = dma_async_device_register(&dpaa2_qdma->dma_dev);
        if (err) {
                dev_err(dev, "Can't register NXP QDMA engine.\n");
                goto err_dpaa2_qdma;
        }

        return 0;

err_dpaa2_qdma:
        kfree(dpaa2_qdma);
err_eng:
        dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_enable:
        dpaa2_dpdmai_dpio_unbind(priv);
err_bind:
        dpaa2_dpdmai_store_free(priv);
        dpaa2_dpdmai_dpio_free(priv);
err_dpio_setup:
        kfree(priv->ppriv);
        dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
err_dpdmai_setup:
        fsl_mc_portal_free(priv->mc_io);
err_mcportal:
        kfree(priv);
        dev_set_drvdata(dev, NULL);
        return err;
}

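/* Undo everything probe set up and unregister the DMA engine. */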
static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
{
        struct dpaa2_qdma_engine *dpaa2_qdma;
        struct dpaa2_qdma_priv *priv;
        struct device *dev;

        dev = &ls_dev->dev;
        priv = dev_get_drvdata(dev);
        dpaa2_qdma = priv->dpaa2_qdma;

        dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
        dpaa2_dpdmai_dpio_unbind(priv);
        dpaa2_dpdmai_store_free(priv);
        dpaa2_dpdmai_dpio_free(priv);
        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
        fsl_mc_portal_free(priv->mc_io);
        dev_set_drvdata(dev, NULL);
        dpaa2_dpdmai_free_channels(dpaa2_qdma);

        dma_async_device_unregister(&dpaa2_qdma->dma_dev);
        kfree(priv);
        kfree(dpaa2_qdma);

        return 0;
}

static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
{
        struct dpaa2_qdma_priv *priv;
        struct device *dev;

        dev = &ls_dev->dev;
        priv = dev_get_drvdata(dev);

        dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
        dpaa2_dpdmai_dpio_unbind(priv);
        dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
        dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle);
}

static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
        {
                .vendor = FSL_MC_VENDOR_FREESCALE,
                .obj_type = "dpdmai",
        },
        { .vendor = 0x0 }
};

static struct fsl_mc_driver dpaa2_qdma_driver = {
        .driver         = {
                .name   = "dpaa2-qdma",
                .owner  = THIS_MODULE,
        },
        .probe          = dpaa2_qdma_probe,
        .remove         = dpaa2_qdma_remove,
        .shutdown       = dpaa2_qdma_shutdown,
        .match_id_table = dpaa2_qdma_id_table
};

static int __init dpaa2_qdma_driver_init(void)
{
        return fsl_mc_driver_register(&dpaa2_qdma_driver);
}
late_initcall(dpaa2_qdma_driver_init);

static void __exit fsl_qdma_exit(void)
{
        fsl_mc_driver_unregister(&dpaa2_qdma_driver);
}
module_exit(fsl_qdma_exit);

MODULE_ALIAS("platform:fsl-dpaa2-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");