dpdk/drivers/bus/dpaa/base/qbman/qman.c
<<
>>
Prefs
   1/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
   2 *
   3 * Copyright 2008-2016 Freescale Semiconductor Inc.
   4 * Copyright 2017,2019 NXP
   5 *
   6 */
   7
   8#include "qman.h"
   9#include <rte_branch_prediction.h>
  10#include <rte_dpaa_bus.h>
  11#include <rte_eventdev.h>
  12#include <rte_byteorder.h>
  13
  14#include <dpaa_bits.h>
  15
  16/* Compilation constants */
  17#define DQRR_MAXFILL    15
  18#define EQCR_ITHRESH    4       /* if EQCR congests, interrupt threshold */
  19#define IRQNAME         "QMan portal %d"
  20#define MAX_IRQNAME     16      /* big enough for "QMan portal %d" */
  21/* maximum number of DQRR entries to process in qman_poll() */
  22#define FSL_QMAN_POLL_LIMIT 8
  23
  24/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
  25 * inter-processor locking only. Note, FQLOCK() is always called either under a
  26 * local_irq_save() or from interrupt context - hence there's no need for irq
  27 * protection (and indeed, attempting to nest irq-protection doesn't work, as
  28 * the "irq en/disable" machinery isn't recursive...).
  29 */
  30#define FQLOCK(fq) \
  31        do { \
  32                struct qman_fq *__fq478 = (fq); \
  33                if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
  34                        spin_lock(&__fq478->fqlock); \
  35        } while (0)
  36#define FQUNLOCK(fq) \
  37        do { \
  38                struct qman_fq *__fq478 = (fq); \
  39                if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
  40                        spin_unlock(&__fq478->fqlock); \
  41        } while (0)
  42
  43static qman_cb_free_mbuf qman_free_mbuf_cb;
  44
  45static inline void fq_set(struct qman_fq *fq, u32 mask)
  46{
  47        dpaa_set_bits(mask, &fq->flags);
  48}
  49
  50static inline void fq_clear(struct qman_fq *fq, u32 mask)
  51{
  52        dpaa_clear_bits(mask, &fq->flags);
  53}
  54
  55static inline int fq_isset(struct qman_fq *fq, u32 mask)
  56{
  57        return fq->flags & mask;
  58}
  59
  60static inline int fq_isclear(struct qman_fq *fq, u32 mask)
  61{
  62        return !(fq->flags & mask);
  63}
  64
  65struct qman_portal {
  66        struct qm_portal p;
  67        /* PORTAL_BITS_*** - dynamic, strictly internal */
  68        unsigned long bits;
  69        /* interrupt sources processed by portal_isr(), configurable */
  70        unsigned long irq_sources;
  71        u32 use_eqcr_ci_stashing;
  72        u32 slowpoll;   /* only used when interrupts are off */
  73        /* only 1 volatile dequeue at a time */
  74        struct qman_fq *vdqcr_owned;
  75        u32 sdqcr;
  76        int dqrr_disable_ref;
  77        /* A portal-specific handler for DCP ERNs. If this is NULL, the global
  78         * handler is called instead.
  79         */
  80        qman_cb_dc_ern cb_dc_ern;
  81        /* When the cpu-affine portal is activated, this is non-NULL */
  82        const struct qm_portal_config *config;
  83        struct dpa_rbtree retire_table;
  84        char irqname[MAX_IRQNAME];
  85        /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
  86        struct qman_cgrs *cgrs;
  87        /* linked-list of CSCN handlers. */
  88        struct list_head cgr_cbs;
  89        /* list lock */
  90        spinlock_t cgr_lock;
  91        /* track if memory was allocated by the driver */
  92#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  93        /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
  94         * do byte swaps of DQRR read only memory.  First entry must be aligned
  95         * to 2 ** 10 to ensure DQRR index calculations based shadow copy
  96         * address (6 bits for address shift + 4 bits for the DQRR size).
  97         */
  98        struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE]
  99                    __rte_aligned(1024);
 100#endif
 101};
 102
 103/* Global handler for DCP ERNs. Used when the portal receiving the message does
 104 * not have a portal-specific handler.
 105 */
 106static qman_cb_dc_ern cb_dc_ern;
 107
 108static cpumask_t affine_mask;
 109static DEFINE_SPINLOCK(affine_mask_lock);
 110static u16 affine_channels[NR_CPUS];
 111static RTE_DEFINE_PER_LCORE(struct qman_portal, qman_affine_portal);
 112
 113static inline struct qman_portal *get_affine_portal(void)
 114{
 115        return &RTE_PER_LCORE(qman_affine_portal);
 116}
 117
 118/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
 119 * retirement notifications (the fact they are sometimes h/w-consumed means that
 120 * contextB isn't always a s/w demux - and as we can't know which case it is
 121 * when looking at the notification, we have to use the slow lookup for all of
 122 * them). NB, it's possible to have multiple FQ objects refer to the same FQID
 123 * (though at most one of them should be the consumer), so this table isn't for
 124 * all FQs - FQs are added when retirement commands are issued, and removed when
 125 * they complete, which also massively reduces the size of this table.
 126 */
 127IMPLEMENT_DPAA_RBTREE(fqtree, struct qman_fq, node, fqid);
 128/*
 129 * This is what everything can wait on, even if it migrates to a different cpu
 130 * to the one whose affine portal it is waiting on.
 131 */
 132static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
 133
 134static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
 135{
 136        int ret = fqtree_push(&p->retire_table, fq);
 137
 138        if (ret)
 139                pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
 140        return ret;
 141}
 142
 143static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
 144{
 145        fqtree_del(&p->retire_table, fq);
 146}
 147
 148static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
 149{
 150        return fqtree_find(&p->retire_table, fqid);
 151}
 152
 153#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 154static void **qman_fq_lookup_table;
 155static size_t qman_fq_lookup_table_size;
 156
 157int qman_setup_fq_lookup_table(size_t num_entries)
 158{
 159        num_entries++;
 160        /* Allocate 1 more entry since the first entry is not used */
 161        qman_fq_lookup_table = vmalloc((num_entries * sizeof(void *)));
 162        if (!qman_fq_lookup_table) {
 163                pr_err("QMan: Could not allocate fq lookup table\n");
 164                return -ENOMEM;
 165        }
 166        memset(qman_fq_lookup_table, 0, num_entries * sizeof(void *));
 167        qman_fq_lookup_table_size = num_entries;
 168        pr_debug("QMan: Allocated lookup table at %p, entry count %lu\n",
 169                qman_fq_lookup_table,
 170                        (unsigned long)qman_fq_lookup_table_size);
 171        return 0;
 172}
 173
 174void qman_set_fq_lookup_table(void **fq_table)
 175{
 176        qman_fq_lookup_table = fq_table;
 177}
 178
 179/* global structure that maintains fq object mapping */
 180static DEFINE_SPINLOCK(fq_hash_table_lock);
 181
 182static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
 183{
 184        u32 i;
 185
 186        spin_lock(&fq_hash_table_lock);
 187        /* Can't use index zero because this has special meaning
 188         * in context_b field.
 189         */
 190        for (i = 1; i < qman_fq_lookup_table_size; i++) {
 191                if (qman_fq_lookup_table[i] == NULL) {
 192                        *entry = i;
 193                        qman_fq_lookup_table[i] = fq;
 194                        spin_unlock(&fq_hash_table_lock);
 195                        return 0;
 196                }
 197        }
 198        spin_unlock(&fq_hash_table_lock);
 199        return -ENOMEM;
 200}
 201
 202static void clear_fq_table_entry(u32 entry)
 203{
 204        spin_lock(&fq_hash_table_lock);
 205        DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
 206        qman_fq_lookup_table[entry] = NULL;
 207        spin_unlock(&fq_hash_table_lock);
 208}
 209
 210static inline struct qman_fq *get_fq_table_entry(u32 entry)
 211{
 212        DPAA_BUG_ON(entry >= qman_fq_lookup_table_size);
 213        return qman_fq_lookup_table[entry];
 214}
 215#endif
 216
 217static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
 218{
 219        /* Byteswap the FQD to HW format */
 220        fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
 221        fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
 222        fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
 223        fqd->context_b = cpu_to_be32(fqd->context_b);
 224        fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
 225        fqd->opaque_td = cpu_to_be16(fqd->opaque_td);
 226}
 227
 228static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
 229{
 230        /* Byteswap the FQD to CPU format */
 231        fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
 232        fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
 233        fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
 234        fqd->context_b = be32_to_cpu(fqd->context_b);
 235        fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
 236}
 237
 238static inline void cpu_to_hw_fd(struct qm_fd *fd)
 239{
 240        fd->addr = cpu_to_be40(fd->addr);
 241        fd->status = cpu_to_be32(fd->status);
 242        fd->opaque = cpu_to_be32(fd->opaque);
 243}
 244
 245static inline void hw_fd_to_cpu(struct qm_fd *fd)
 246{
 247        fd->addr = be40_to_cpu(fd->addr);
 248        fd->status = be32_to_cpu(fd->status);
 249        fd->opaque = be32_to_cpu(fd->opaque);
 250}
 251
 252/* In the case that slow- and fast-path handling are both done by qman_poll()
 253 * (ie. because there is no interrupt handling), we ought to balance how often
 254 * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
 255 * sources, so we call the fast poll 'n' times before calling the slow poll
 256 * once. The idle decrementer constant is used when the last slow-poll detected
 257 * no work to do, and the busy decrementer constant when the last slow-poll had
 258 * work to do.
 259 */
 260#define SLOW_POLL_IDLE   1000
 261#define SLOW_POLL_BUSY   10
 262static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
 263static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 264                                              unsigned int poll_limit);
 265
 266/* Portal interrupt handler */
 267static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
 268{
 269        struct qman_portal *p = ptr;
 270        /*
 271         * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
 272         * it could race against a Query Congestion State command also given
 273         * as part of the handling of this interrupt source. We mustn't
 274         * clear it a second time in this top-level function.
 275         */
 276        u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
 277                ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
 278        u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
 279        /* DQRR-handling if it's interrupt-driven */
 280        if (is & QM_PIRQ_DQRI)
 281                __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
 282        /* Handling of anything else that's interrupt-driven */
 283        clear |= __poll_portal_slow(p, is);
 284        qm_isr_status_clear(&p->p, clear);
 285        return IRQ_HANDLED;
 286}
 287
 288/* This inner version is used privately by qman_create_affine_portal(), as well
 289 * as by the exported qman_stop_dequeues().
 290 */
 291static inline void qman_stop_dequeues_ex(struct qman_portal *p)
 292{
 293        if (!(p->dqrr_disable_ref++))
 294                qm_dqrr_set_maxfill(&p->p, 0);
 295}
 296
 297static int drain_mr_fqrni(struct qm_portal *p)
 298{
 299        const struct qm_mr_entry *msg;
 300loop:
 301        msg = qm_mr_current(p);
 302        if (!msg) {
 303                /*
 304                 * if MR was full and h/w had other FQRNI entries to produce, we
 305                 * need to allow it time to produce those entries once the
 306                 * existing entries are consumed. A worst-case situation
 307                 * (fully-loaded system) means h/w sequencers may have to do 3-4
 308                 * other things before servicing the portal's MR pump, each of
 309                 * which (if slow) may take ~50 qman cycles (which is ~200
 310                 * processor cycles). So rounding up and then multiplying this
 311                 * worst-case estimate by a factor of 10, just to be
 312                 * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
 313                 * one entry at a time, so h/w has an opportunity to produce new
 314                 * entries well before the ring has been fully consumed, so
 315                 * we're being *really* paranoid here.
 316                 */
 317                u64 now, then = mfatb();
 318
 319                do {
 320                        now = mfatb();
 321                } while ((then + 10000) > now);
 322                msg = qm_mr_current(p);
 323                if (!msg)
 324                        return 0;
 325        }
 326        if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
 327                /* We aren't draining anything but FQRNIs */
 328                pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
 329                return -1;
 330        }
 331        qm_mr_next(p);
 332        qm_mr_cci_consume(p, 1);
 333        goto loop;
 334}
 335
 336static inline int qm_eqcr_init(struct qm_portal *portal,
 337                               enum qm_eqcr_pmode pmode,
 338                               unsigned int eq_stash_thresh,
 339                               int eq_stash_prio)
 340{
 341        /* This use of 'register', as well as all other occurrences, is because
 342         * it has been observed to generate much faster code with gcc than is
 343         * otherwise the case.
 344         */
 345        register struct qm_eqcr *eqcr = &portal->eqcr;
 346        u32 cfg;
 347        u8 pi;
 348
 349        eqcr->ring = portal->addr.ce + QM_CL_EQCR;
 350        eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
 351        qm_cl_invalidate(EQCR_CI);
 352        pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
 353        eqcr->cursor = eqcr->ring + pi;
 354        eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
 355                        QM_EQCR_VERB_VBIT : 0;
 356        eqcr->available = QM_EQCR_SIZE - 1 -
 357                        qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
 358        eqcr->ithresh = qm_in(EQCR_ITR);
 359#ifdef RTE_LIBRTE_DPAA_HWDEBUG
 360        eqcr->busy = 0;
 361        eqcr->pmode = pmode;
 362#endif
 363        cfg = (qm_in(CFG) & 0x00ffffff) |
 364                (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
 365                (eq_stash_prio << 26)   | /* QCSP_CFG: EP */
 366                ((pmode & 0x3) << 24);  /* QCSP_CFG::EPM */
 367        qm_out(CFG, cfg);
 368        return 0;
 369}
 370
 371static inline void qm_eqcr_finish(struct qm_portal *portal)
 372{
 373        register struct qm_eqcr *eqcr = &portal->eqcr;
 374        u8 pi, ci;
 375        u32 cfg;
 376
 377        /*
 378         * Disable EQCI stashing because the QMan only
 379         * presents the value it previously stashed to
 380         * maintain coherency.  Setting the stash threshold
 381         * to 1 then 0 ensures that QMan has resyncronized
 382         * its internal copy so that the portal is clean
 383         * when it is reinitialized in the future
 384         */
 385        cfg = (qm_in(CFG) & 0x0fffffff) |
 386                (1 << 28); /* QCSP_CFG: EST */
 387        qm_out(CFG, cfg);
 388        cfg &= 0x0fffffff; /* stash threshold = 0 */
 389        qm_out(CFG, cfg);
 390
 391        pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
 392        ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
 393
 394        /* Refresh EQCR CI cache value */
 395        qm_cl_invalidate(EQCR_CI);
 396        eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
 397
 398#ifdef RTE_LIBRTE_DPAA_HWDEBUG
 399        DPAA_ASSERT(!eqcr->busy);
 400#endif
 401        if (pi != EQCR_PTR2IDX(eqcr->cursor))
 402                pr_crit("losing uncommitted EQCR entries\n");
 403        if (ci != eqcr->ci)
 404                pr_crit("missing existing EQCR completions\n");
 405        if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
 406                pr_crit("EQCR destroyed unquiesced\n");
 407}
 408
 409static inline int qm_dqrr_init(struct qm_portal *portal,
 410                        __maybe_unused const struct qm_portal_config *config,
 411                        enum qm_dqrr_dmode dmode,
 412                        __maybe_unused enum qm_dqrr_pmode pmode,
 413                        enum qm_dqrr_cmode cmode, u8 max_fill)
 414{
 415        register struct qm_dqrr *dqrr = &portal->dqrr;
 416        u32 cfg;
 417
 418        /* Make sure the DQRR will be idle when we enable */
 419        qm_out(DQRR_SDQCR, 0);
 420        qm_out(DQRR_VDQCR, 0);
 421        qm_out(DQRR_PDQCR, 0);
 422        dqrr->ring = portal->addr.ce + QM_CL_DQRR;
 423        dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
 424        dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
 425        dqrr->cursor = dqrr->ring + dqrr->ci;
 426        dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
 427        dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
 428                        QM_DQRR_VERB_VBIT : 0;
 429        dqrr->ithresh = qm_in(DQRR_ITR);
 430#ifdef RTE_LIBRTE_DPAA_HWDEBUG
 431        dqrr->dmode = dmode;
 432        dqrr->pmode = pmode;
 433        dqrr->cmode = cmode;
 434#endif
 435        /* Invalidate every ring entry before beginning */
 436        for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
 437                dccivac(qm_cl(dqrr->ring, cfg));
 438        cfg = (qm_in(CFG) & 0xff000f00) |
 439                ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
 440                ((dmode & 1) << 18) |                   /* DP */
 441                ((cmode & 3) << 16) |                   /* DCM */
 442                0xa0 |                                  /* RE+SE */
 443                (0 ? 0x40 : 0) |                        /* Ignore RP */
 444                (0 ? 0x10 : 0);                         /* Ignore SP */
 445        qm_out(CFG, cfg);
 446        qm_dqrr_set_maxfill(portal, max_fill);
 447        return 0;
 448}
 449
 450static inline void qm_dqrr_finish(struct qm_portal *portal)
 451{
 452        __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
 453#ifdef RTE_LIBRTE_DPAA_HWDEBUG
 454        if ((dqrr->cmode != qm_dqrr_cdc) &&
 455            (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
 456                pr_crit("Ignoring completed DQRR entries\n");
 457#endif
 458}
 459
 460static inline int qm_mr_init(struct qm_portal *portal,
 461                             __maybe_unused enum qm_mr_pmode pmode,
 462                             enum qm_mr_cmode cmode)
 463{
 464        register struct qm_mr *mr = &portal->mr;
 465        u32 cfg;
 466
 467        mr->ring = portal->addr.ce + QM_CL_MR;
 468        mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
 469        mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
 470        mr->cursor = mr->ring + mr->ci;
 471        mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
 472        mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
 473        mr->ithresh = qm_in(MR_ITR);
 474#ifdef RTE_LIBRTE_DPAA_HWDEBUG
 475        mr->pmode = pmode;
 476        mr->cmode = cmode;
 477#endif
 478        cfg = (qm_in(CFG) & 0xfffff0ff) |
 479                ((cmode & 1) << 8);             /* QCSP_CFG:MM */
 480        qm_out(CFG, cfg);
 481        return 0;
 482}
 483
 484static inline void qm_mr_pvb_update(struct qm_portal *portal)
 485{
 486        register struct qm_mr *mr = &portal->mr;
 487        const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
 488
 489#ifdef RTE_LIBRTE_DPAA_HWDEBUG
 490        DPAA_ASSERT(mr->pmode == qm_mr_pvb);
 491#endif
 492        /* when accessing 'verb', use __raw_readb() to ensure that compiler
 493         * inlining doesn't try to optimise out "excess reads".
 494         */
 495        if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
 496                mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
 497                if (!mr->pi)
 498                        mr->vbit ^= QM_MR_VERB_VBIT;
 499                mr->fill++;
 500                res = MR_INC(res);
 501        }
 502        dcbit_ro(res);
 503}
 504
 505struct qman_portal *
 506qman_init_portal(struct qman_portal *portal,
 507                   const struct qm_portal_config *c,
 508                   const struct qman_cgrs *cgrs)
 509{
 510        struct qm_portal *p;
 511        char buf[16];
 512        int ret;
 513        u32 isdr;
 514
 515        p = &portal->p;
 516
 517        if (!c)
 518                c = portal->config;
 519
 520        if (dpaa_svr_family == SVR_LS1043A_FAMILY)
 521                portal->use_eqcr_ci_stashing = 3;
 522        else
 523                portal->use_eqcr_ci_stashing =
 524                                        ((qman_ip_rev >= QMAN_REV30) ? 1 : 0);
 525
 526        /*
 527         * prep the low-level portal struct with the mapped addresses from the
 528         * config, everything that follows depends on it and "config" is more
 529         * for (de)reference
 530         */
 531        p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
 532        p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
 533        /*
 534         * If CI-stashing is used, the current defaults use a threshold of 3,
 535         * and stash with high-than-DQRR priority.
 536         */
 537        if (qm_eqcr_init(p, qm_eqcr_pvb,
 538                         portal->use_eqcr_ci_stashing, 1)) {
 539                pr_err("Qman EQCR initialisation failed\n");
 540                goto fail_eqcr;
 541        }
 542        if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
 543                         qm_dqrr_cdc, DQRR_MAXFILL)) {
 544                pr_err("Qman DQRR initialisation failed\n");
 545                goto fail_dqrr;
 546        }
 547        if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
 548                pr_err("Qman MR initialisation failed\n");
 549                goto fail_mr;
 550        }
 551        if (qm_mc_init(p)) {
 552                pr_err("Qman MC initialisation failed\n");
 553                goto fail_mc;
 554        }
 555
 556        /* static interrupt-gating controls */
 557        qm_dqrr_set_ithresh(p, 0);
 558        qm_mr_set_ithresh(p, 0);
 559        qm_isr_set_iperiod(p, 0);
 560        portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
 561        if (!portal->cgrs)
 562                goto fail_cgrs;
 563        /* initial snapshot is no-depletion */
 564        qman_cgrs_init(&portal->cgrs[1]);
 565        if (cgrs)
 566                portal->cgrs[0] = *cgrs;
 567        else
 568                /* if the given mask is NULL, assume all CGRs can be seen */
 569                qman_cgrs_fill(&portal->cgrs[0]);
 570        INIT_LIST_HEAD(&portal->cgr_cbs);
 571        spin_lock_init(&portal->cgr_lock);
 572        portal->bits = 0;
 573        portal->slowpoll = 0;
 574        portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
 575                        QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
 576                        QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
 577        portal->dqrr_disable_ref = 0;
 578        portal->cb_dc_ern = NULL;
 579        sprintf(buf, "qportal-%d", c->channel);
 580        dpa_rbtree_init(&portal->retire_table);
 581        isdr = 0xffffffff;
 582        qm_isr_disable_write(p, isdr);
 583        portal->irq_sources = 0;
 584        qm_isr_enable_write(p, portal->irq_sources);
 585        qm_isr_status_clear(p, 0xffffffff);
 586        snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
 587        if (request_irq(c->irq, portal_isr, 0, portal->irqname,
 588                        portal)) {
 589                pr_err("request_irq() failed\n");
 590                goto fail_irq;
 591        }
 592
 593        /* Need EQCR to be empty before continuing */
 594        isdr &= ~QM_PIRQ_EQCI;
 595        qm_isr_disable_write(p, isdr);
 596        ret = qm_eqcr_get_fill(p);
 597        if (ret) {
 598                pr_err("Qman EQCR unclean\n");
 599                goto fail_eqcr_empty;
 600        }
 601        isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
 602        qm_isr_disable_write(p, isdr);
 603        if (qm_dqrr_current(p)) {
 604                pr_err("Qman DQRR unclean\n");
 605                qm_dqrr_cdc_consume_n(p, 0xffff);
 606        }
 607        if (qm_mr_current(p) && drain_mr_fqrni(p)) {
 608                /* special handling, drain just in case it's a few FQRNIs */
 609                if (drain_mr_fqrni(p))
 610                        goto fail_dqrr_mr_empty;
 611        }
 612        /* Success */
 613        portal->config = c;
 614        qm_isr_disable_write(p, 0);
 615        qm_isr_uninhibit(p);
 616        /* Write a sane SDQCR */
 617        qm_dqrr_sdqcr_set(p, portal->sdqcr);
 618        return portal;
 619fail_dqrr_mr_empty:
 620fail_eqcr_empty:
 621        free_irq(c->irq, portal);
 622fail_irq:
 623        kfree(portal->cgrs);
 624        spin_lock_destroy(&portal->cgr_lock);
 625fail_cgrs:
 626        qm_mc_finish(p);
 627fail_mc:
 628        qm_mr_finish(p);
 629fail_mr:
 630        qm_dqrr_finish(p);
 631fail_dqrr:
 632        qm_eqcr_finish(p);
 633fail_eqcr:
 634        return NULL;
 635}
 636
 637#define MAX_GLOBAL_PORTALS 8
 638static struct qman_portal global_portals[MAX_GLOBAL_PORTALS];
 639static rte_atomic16_t global_portals_used[MAX_GLOBAL_PORTALS];
 640
 641struct qman_portal *
 642qman_alloc_global_portal(struct qm_portal_config *q_pcfg)
 643{
 644        unsigned int i;
 645
 646        for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
 647                if (rte_atomic16_test_and_set(&global_portals_used[i])) {
 648                        global_portals[i].config = q_pcfg;
 649                        return &global_portals[i];
 650                }
 651        }
 652        pr_err("No portal available (%x)\n", MAX_GLOBAL_PORTALS);
 653
 654        return NULL;
 655}
 656
 657int
 658qman_free_global_portal(struct qman_portal *portal)
 659{
 660        unsigned int i;
 661
 662        for (i = 0; i < MAX_GLOBAL_PORTALS; i++) {
 663                if (&global_portals[i] == portal) {
 664                        rte_atomic16_clear(&global_portals_used[i]);
 665                        return 0;
 666                }
 667        }
 668        return -1;
 669}
 670
 671void
 672qman_portal_uninhibit_isr(struct qman_portal *portal)
 673{
 674        qm_isr_uninhibit(&portal->p);
 675}
 676
 677struct qman_portal *qman_create_affine_portal(const struct qm_portal_config *c,
 678                                              const struct qman_cgrs *cgrs)
 679{
 680        struct qman_portal *res;
 681        struct qman_portal *portal = get_affine_portal();
 682
 683        /* A criteria for calling this function (from qman_driver.c) is that
 684         * we're already affine to the cpu and won't schedule onto another cpu.
 685         */
 686        res = qman_init_portal(portal, c, cgrs);
 687        if (res) {
 688                spin_lock(&affine_mask_lock);
 689                CPU_SET(c->cpu, &affine_mask);
 690                affine_channels[c->cpu] =
 691                        c->channel;
 692                spin_unlock(&affine_mask_lock);
 693        }
 694        return res;
 695}
 696
 697static inline
 698void qman_destroy_portal(struct qman_portal *qm)
 699{
 700        const struct qm_portal_config *pcfg;
 701
 702        /* Stop dequeues on the portal */
 703        qm_dqrr_sdqcr_set(&qm->p, 0);
 704
 705        /*
 706         * NB we do this to "quiesce" EQCR. If we add enqueue-completions or
 707         * something related to QM_PIRQ_EQCI, this may need fixing.
 708         * Also, due to the prefetching model used for CI updates in the enqueue
 709         * path, this update will only invalidate the CI cacheline *after*
 710         * working on it, so we need to call this twice to ensure a full update
 711         * irrespective of where the enqueue processing was at when the teardown
 712         * began.
 713         */
 714        qm_eqcr_cce_update(&qm->p);
 715        qm_eqcr_cce_update(&qm->p);
 716        pcfg = qm->config;
 717
 718        free_irq(pcfg->irq, qm);
 719
 720        kfree(qm->cgrs);
 721        qm_mc_finish(&qm->p);
 722        qm_mr_finish(&qm->p);
 723        qm_dqrr_finish(&qm->p);
 724        qm_eqcr_finish(&qm->p);
 725
 726        qm->config = NULL;
 727
 728        spin_lock_destroy(&qm->cgr_lock);
 729}
 730
 731const struct qm_portal_config *
 732qman_destroy_affine_portal(struct qman_portal *qp)
 733{
 734        /* We don't want to redirect if we're a slave, use "raw" */
 735        struct qman_portal *qm;
 736        const struct qm_portal_config *pcfg;
 737        int cpu;
 738
 739        if (qp == NULL)
 740                qm = get_affine_portal();
 741        else
 742                qm = qp;
 743        pcfg = qm->config;
 744        cpu = pcfg->cpu;
 745
 746        qman_destroy_portal(qm);
 747
 748        spin_lock(&affine_mask_lock);
 749        CPU_CLR(cpu, &affine_mask);
 750        spin_unlock(&affine_mask_lock);
 751
 752        qman_free_global_portal(qm);
 753
 754        return pcfg;
 755}
 756
 757int qman_get_portal_index(void)
 758{
 759        struct qman_portal *p = get_affine_portal();
 760        return p->config->index;
 761}
 762
 763/* Inline helper to reduce nesting in __poll_portal_slow() */
 764static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
 765                                   const struct qm_mr_entry *msg, u8 verb)
 766{
 767        FQLOCK(fq);
 768        switch (verb) {
 769        case QM_MR_VERB_FQRL:
 770                DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
 771                fq_clear(fq, QMAN_FQ_STATE_ORL);
 772                table_del_fq(p, fq);
 773                break;
 774        case QM_MR_VERB_FQRN:
 775                DPAA_ASSERT((fq->state == qman_fq_state_parked) ||
 776                            (fq->state == qman_fq_state_sched));
 777                DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
 778                fq_clear(fq, QMAN_FQ_STATE_CHANGING);
 779                if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
 780                        fq_set(fq, QMAN_FQ_STATE_NE);
 781                if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
 782                        fq_set(fq, QMAN_FQ_STATE_ORL);
 783                else
 784                        table_del_fq(p, fq);
 785                fq->state = qman_fq_state_retired;
 786                break;
 787        case QM_MR_VERB_FQPN:
 788                DPAA_ASSERT(fq->state == qman_fq_state_sched);
 789                DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
 790                fq->state = qman_fq_state_parked;
 791        }
 792        FQUNLOCK(fq);
 793}
 794
 795void
 796qman_ern_register_cb(qman_cb_free_mbuf cb)
 797{
 798        qman_free_mbuf_cb = cb;
 799}
 800
 801
 802void
 803qman_ern_poll_free(void)
 804{
 805        struct qman_portal *p = get_affine_portal();
 806        u8 verb, num = 0;
 807        const struct qm_mr_entry *msg;
 808        const struct qm_fd *fd;
 809        struct qm_mr_entry swapped_msg;
 810
 811        qm_mr_pvb_update(&p->p);
 812        msg = qm_mr_current(&p->p);
 813
 814        while (msg != NULL) {
 815                swapped_msg = *msg;
 816                hw_fd_to_cpu(&swapped_msg.ern.fd);
 817                verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
 818                fd = &swapped_msg.ern.fd;
 819
 820                if (unlikely(verb & 0x20)) {
 821                        printf("HW ERN notification, Nothing to do\n");
 822                } else {
 823                        if ((fd->bpid & 0xff) != 0xff)
 824                                qman_free_mbuf_cb(fd);
 825                }
 826
 827                num++;
 828                qm_mr_next(&p->p);
 829                qm_mr_pvb_update(&p->p);
 830                msg = qm_mr_current(&p->p);
 831        }
 832
 833        qm_mr_cci_consume(&p->p, num);
 834}
 835
 836static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
 837{
 838        const struct qm_mr_entry *msg;
 839        struct qm_mr_entry swapped_msg;
 840
 841        if (is & QM_PIRQ_CSCI) {
 842                struct qman_cgrs rr, c;
 843                struct qm_mc_result *mcr;
 844                struct qman_cgr *cgr;
 845
 846                spin_lock(&p->cgr_lock);
 847                /*
 848                 * The CSCI bit must be cleared _before_ issuing the
 849                 * Query Congestion State command, to ensure that a long
 850                 * CGR State Change callback cannot miss an intervening
 851                 * state change.
 852                 */
 853                qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
 854                qm_mc_start(&p->p);
 855                qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
 856                while (!(mcr = qm_mc_result(&p->p)))
 857                        cpu_relax();
 858                /* mask out the ones I'm not interested in */
 859                qman_cgrs_and(&rr, (const struct qman_cgrs *)
 860                        &mcr->querycongestion.state, &p->cgrs[0]);
 861                /* check previous snapshot for delta, enter/exit congestion */
 862                qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
 863                /* update snapshot */
 864                qman_cgrs_cp(&p->cgrs[1], &rr);
 865                /* Invoke callback */
 866                list_for_each_entry(cgr, &p->cgr_cbs, node)
 867                        if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
 868                                cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
 869                spin_unlock(&p->cgr_lock);
 870        }
 871
 872        if (is & QM_PIRQ_EQRI) {
 873                qm_eqcr_cce_update(&p->p);
 874                qm_eqcr_set_ithresh(&p->p, 0);
 875                wake_up(&affine_queue);
 876        }
 877
 878        if (is & QM_PIRQ_MRI) {
 879                struct qman_fq *fq;
 880                u8 verb, num = 0;
 881mr_loop:
 882                qm_mr_pvb_update(&p->p);
 883                msg = qm_mr_current(&p->p);
 884                if (!msg)
 885                        goto mr_done;
 886                swapped_msg = *msg;
 887                hw_fd_to_cpu(&swapped_msg.ern.fd);
 888                verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
 889                /* The message is a software ERN iff the 0x20 bit is set */
 890                if (verb & 0x20) {
 891                        switch (verb) {
 892                        case QM_MR_VERB_FQRNI:
 893                                /* nada, we drop FQRNIs on the floor */
 894                                break;
 895                        case QM_MR_VERB_FQRN:
 896                        case QM_MR_VERB_FQRL:
 897                                /* Lookup in the retirement table */
 898                                fq = table_find_fq(p,
 899                                                   be32_to_cpu(msg->fq.fqid));
 900                                DPAA_BUG_ON(!fq);
 901                                fq_state_change(p, fq, &swapped_msg, verb);
 902                                if (fq->cb.fqs)
 903                                        fq->cb.fqs(p, fq, &swapped_msg);
 904                                break;
 905                        case QM_MR_VERB_FQPN:
 906                                /* Parked */
 907#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 908                                fq = get_fq_table_entry(msg->fq.contextB);
 909#else
 910                                fq = (void *)(uintptr_t)msg->fq.contextB;
 911#endif
 912                                fq_state_change(p, fq, msg, verb);
 913                                if (fq->cb.fqs)
 914                                        fq->cb.fqs(p, fq, &swapped_msg);
 915                                break;
 916                        case QM_MR_VERB_DC_ERN:
 917                                /* DCP ERN */
 918                                if (p->cb_dc_ern)
 919                                        p->cb_dc_ern(p, msg);
 920                                else if (cb_dc_ern)
 921                                        cb_dc_ern(p, msg);
 922                                else {
 923                                        static int warn_once;
 924
 925                                        if (!warn_once) {
 926                                                pr_crit("Leaking DCP ERNs!\n");
 927                                                warn_once = 1;
 928                                        }
 929                                }
 930                                break;
 931                        default:
 932                                pr_crit("Invalid MR verb 0x%02x\n", verb);
 933                        }
 934                } else {
 935                        /* Its a software ERN */
 936#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
 937                        fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
 938#else
 939                        fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
 940#endif
 941                        fq->cb.ern(p, fq, &swapped_msg);
 942                }
 943                num++;
 944                qm_mr_next(&p->p);
 945                goto mr_loop;
 946mr_done:
 947                qm_mr_cci_consume(&p->p, num);
 948        }
 949        /*
 950         * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
 951         * processing. If that interrupt source has meanwhile been re-asserted,
 952         * we mustn't clear it here (or in the top-level interrupt handler).
 953         */
 954        return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
 955}
 956
 957/*
 958 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 959 * inlined.
 960 */
 961static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
 962{
 963        p->vdqcr_owned = NULL;
 964        FQLOCK(fq);
 965        fq_clear(fq, QMAN_FQ_STATE_VDQCR);
 966        FQUNLOCK(fq);
 967        wake_up(&affine_queue);
 968}
 969
 970/*
 971 * The only states that would conflict with other things if they ran at the
 972 * same time on the same cpu are:
 973 *
 974 *   (i) setting/clearing vdqcr_owned, and
 975 *  (ii) clearing the NE (Not Empty) flag.
 976 *
 977 * Both are safe. Because;
 978 *
 979 *   (i) this clearing can only occur after qman_set_vdq() has set the
 980 *       vdqcr_owned field (which it does before setting VDQCR), and
 981 *       qman_volatile_dequeue() blocks interrupts and preemption while this is
 982 *       done so that we can't interfere.
 983 *  (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
 984 *       with (i) that API prevents us from interfering until it's safe.
 985 *
 986 * The good thing is that qman_set_vdq() and qman_retire_fq() run far
 987 * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett
 988 * advantage comes from this function not having to "lock" anything at all.
 989 *
 990 * Note also that the callbacks are invoked at points which are safe against the
 991 * above potential conflicts, but that this function itself is not re-entrant
 992 * (this is because the function tracks one end of each FIFO in the portal and
 993 * we do *not* want to lock that). So the consequence is that it is safe for
 994 * user callbacks to call into any QMan API.
 995 */
 996static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 997                                              unsigned int poll_limit)
 998{
 999        const struct qm_dqrr_entry *dq;
1000        struct qman_fq *fq;
1001        enum qman_cb_dqrr_result res;
1002        unsigned int limit = 0;
1003#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1004        struct qm_dqrr_entry *shadow;
1005#endif
1006        do {
1007                qm_dqrr_pvb_update(&p->p);
1008                dq = qm_dqrr_current(&p->p);
1009                if (unlikely(!dq))
1010                        break;
1011#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1012        /* If running on an LE system the fields of the
1013         * dequeue entry must be swapper.  Because the
1014         * QMan HW will ignore writes the DQRR entry is
1015         * copied and the index stored within the copy
1016         */
1017                shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1018                *shadow = *dq;
1019                dq = shadow;
1020                shadow->fqid = be32_to_cpu(shadow->fqid);
1021                shadow->seqnum = be16_to_cpu(shadow->seqnum);
1022                hw_fd_to_cpu(&shadow->fd);
1023#endif
1024
1025                if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
1026                        /*
1027                         * VDQCR: don't trust context_b as the FQ may have
1028                         * been configured for h/w consumption and we're
1029                         * draining it post-retirement.
1030                         */
1031                        fq = p->vdqcr_owned;
1032                        /*
1033                         * We only set QMAN_FQ_STATE_NE when retiring, so we
1034                         * only need to check for clearing it when doing
1035                         * volatile dequeues.  It's one less thing to check
1036                         * in the critical path (SDQCR).
1037                         */
1038                        if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1039                                fq_clear(fq, QMAN_FQ_STATE_NE);
1040                        /*
1041                         * This is duplicated from the SDQCR code, but we
1042                         * have stuff to do before *and* after this callback,
1043                         * and we don't want multiple if()s in the critical
1044                         * path (SDQCR).
1045                         */
1046                        res = fq->cb.dqrr(p, fq, dq);
1047                        if (res == qman_cb_dqrr_stop)
1048                                break;
1049                        /* Check for VDQCR completion */
1050                        if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1051                                clear_vdqcr(p, fq);
1052                } else {
1053                        /* SDQCR: context_b points to the FQ */
1054#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1055                        fq = get_fq_table_entry(dq->contextB);
1056#else
1057                        fq = (void *)(uintptr_t)dq->contextB;
1058#endif
1059                        /* Now let the callback do its stuff */
1060                        res = fq->cb.dqrr(p, fq, dq);
1061                        /*
1062                         * The callback can request that we exit without
1063                         * consuming this entry nor advancing;
1064                         */
1065                        if (res == qman_cb_dqrr_stop)
1066                                break;
1067                }
1068                /* Interpret 'dq' from a driver perspective. */
1069                /*
1070                 * Parking isn't possible unless HELDACTIVE was set. NB,
1071                 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1072                 * check for HELDACTIVE to cover both.
1073                 */
1074                DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1075                            (res != qman_cb_dqrr_park));
1076                /* just means "skip it, I'll consume it myself later on" */
1077                if (res != qman_cb_dqrr_defer)
1078                        qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1079                                                 res == qman_cb_dqrr_park);
1080                /* Move forward */
1081                qm_dqrr_next(&p->p);
1082                /*
1083                 * Entry processed and consumed, increment our counter.  The
1084                 * callback can request that we exit after consuming the
1085                 * entry, and we also exit if we reach our processing limit,
1086                 * so loop back only if neither of these conditions is met.
1087                 */
1088        } while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
1089
1090        return limit;
1091}
1092
1093int qman_irqsource_add(u32 bits)
1094{
1095        struct qman_portal *p = get_affine_portal();
1096
1097        bits = bits & QM_PIRQ_VISIBLE;
1098
1099        /* Clear any previously remaining interrupt conditions in
1100         * QCSP_ISR. This prevents raising a false interrupt when
1101         * interrupt conditions are enabled in QCSP_IER.
1102         */
1103        qm_isr_status_clear(&p->p, bits);
1104        dpaa_set_bits(bits, &p->irq_sources);
1105        qm_isr_enable_write(&p->p, p->irq_sources);
1106
1107        return 0;
1108}
1109
1110int qman_fq_portal_irqsource_add(struct qman_portal *p, u32 bits)
1111{
1112        bits = bits & QM_PIRQ_VISIBLE;
1113
1114        /* Clear any previously remaining interrupt conditions in
1115         * QCSP_ISR. This prevents raising a false interrupt when
1116         * interrupt conditions are enabled in QCSP_IER.
1117         */
1118        qm_isr_status_clear(&p->p, bits);
1119        dpaa_set_bits(bits, &p->irq_sources);
1120        qm_isr_enable_write(&p->p, p->irq_sources);
1121
1122        return 0;
1123}
1124
1125int qman_irqsource_remove(u32 bits)
1126{
1127        struct qman_portal *p = get_affine_portal();
1128        u32 ier;
1129
1130        /* Our interrupt handler only processes+clears status register bits that
1131         * are in p->irq_sources. As we're trimming that mask, if one of them
1132         * were to assert in the status register just before we remove it from
1133         * the enable register, there would be an interrupt-storm when we
1134         * release the IRQ lock. So we wait for the enable register update to
1135         * take effect in h/w (by reading it back) and then clear all other bits
1136         * in the status register. Ie. we clear them from ISR once it's certain
1137         * IER won't allow them to reassert.
1138         */
1139
1140        bits &= QM_PIRQ_VISIBLE;
1141        dpaa_clear_bits(bits, &p->irq_sources);
1142        qm_isr_enable_write(&p->p, p->irq_sources);
1143        ier = qm_isr_enable_read(&p->p);
1144        /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1145         * data-dependency, ie. to protect against re-ordering.
1146         */
1147        qm_isr_status_clear(&p->p, ~ier);
1148        return 0;
1149}
1150
1151int qman_fq_portal_irqsource_remove(struct qman_portal *p, u32 bits)
1152{
1153        u32 ier;
1154
1155        /* Our interrupt handler only processes+clears status register bits that
1156         * are in p->irq_sources. As we're trimming that mask, if one of them
1157         * were to assert in the status register just before we remove it from
1158         * the enable register, there would be an interrupt-storm when we
1159         * release the IRQ lock. So we wait for the enable register update to
1160         * take effect in h/w (by reading it back) and then clear all other bits
1161         * in the status register. Ie. we clear them from ISR once it's certain
1162         * IER won't allow them to reassert.
1163         */
1164
1165        bits &= QM_PIRQ_VISIBLE;
1166        dpaa_clear_bits(bits, &p->irq_sources);
1167        qm_isr_enable_write(&p->p, p->irq_sources);
1168        ier = qm_isr_enable_read(&p->p);
1169        /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
1170         * data-dependency, ie. to protect against re-ordering.
1171         */
1172        qm_isr_status_clear(&p->p, ~ier);
1173        return 0;
1174}
1175
1176u16 qman_affine_channel(int cpu)
1177{
1178        if (cpu < 0) {
1179                struct qman_portal *portal = get_affine_portal();
1180
1181                cpu = portal->config->cpu;
1182        }
1183        DPAA_BUG_ON(!CPU_ISSET(cpu, &affine_mask));
1184        return affine_channels[cpu];
1185}
1186
1187unsigned int qman_portal_poll_rx(unsigned int poll_limit,
1188                                 void **bufs,
1189                                 struct qman_portal *p)
1190{
1191        struct qm_portal *portal = &p->p;
1192        register struct qm_dqrr *dqrr = &portal->dqrr;
1193        struct qm_dqrr_entry *dq[QM_DQRR_SIZE], *shadow[QM_DQRR_SIZE];
1194        struct qman_fq *fq;
1195        unsigned int limit = 0, rx_number = 0;
1196        uint32_t consume = 0;
1197
1198        do {
1199                qm_dqrr_pvb_update(&p->p);
1200                if (!dqrr->fill)
1201                        break;
1202
1203                dq[rx_number] = dqrr->cursor;
1204                dqrr->cursor = DQRR_CARRYCLEAR(dqrr->cursor + 1);
1205                /* Prefetch the next DQRR entry */
1206                rte_prefetch0(dqrr->cursor);
1207
1208#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1209                /* If running on an LE system the fields of the
1210                 * dequeue entry must be swapper.  Because the
1211                 * QMan HW will ignore writes the DQRR entry is
1212                 * copied and the index stored within the copy
1213                 */
1214                shadow[rx_number] =
1215                        &p->shadow_dqrr[DQRR_PTR2IDX(dq[rx_number])];
1216                shadow[rx_number]->fd.opaque_addr =
1217                        dq[rx_number]->fd.opaque_addr;
1218                shadow[rx_number]->fd.addr =
1219                        be40_to_cpu(dq[rx_number]->fd.addr);
1220                shadow[rx_number]->fd.opaque =
1221                        be32_to_cpu(dq[rx_number]->fd.opaque);
1222#else
1223                shadow[rx_number] = dq[rx_number];
1224#endif
1225
1226                /* SDQCR: context_b points to the FQ */
1227#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1228                fq = qman_fq_lookup_table[dq[rx_number]->contextB];
1229#else
1230                fq = (void *)dq[rx_number]->contextB;
1231#endif
1232                if (fq->cb.dqrr_prepare)
1233                        fq->cb.dqrr_prepare(shadow[rx_number],
1234                                            &bufs[rx_number]);
1235
1236                consume |= (1 << (31 - DQRR_PTR2IDX(shadow[rx_number])));
1237                rx_number++;
1238                --dqrr->fill;
1239        } while (++limit < poll_limit);
1240
1241        if (rx_number)
1242                fq->cb.dqrr_dpdk_pull_cb(&fq, shadow, bufs, rx_number);
1243
1244        /* Consume all the DQRR enries together */
1245        qm_out(DQRR_DCAP, (1 << 8) | consume);
1246
1247        return rx_number;
1248}
1249
1250void qman_clear_irq(void)
1251{
1252        struct qman_portal *p = get_affine_portal();
1253        u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
1254                ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
1255        qm_isr_status_clear(&p->p, clear);
1256}
1257
1258u32 qman_portal_dequeue(struct rte_event ev[], unsigned int poll_limit,
1259                        void **bufs)
1260{
1261        const struct qm_dqrr_entry *dq;
1262        struct qman_fq *fq;
1263        enum qman_cb_dqrr_result res;
1264        unsigned int limit = 0;
1265        struct qman_portal *p = get_affine_portal();
1266#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1267        struct qm_dqrr_entry *shadow;
1268#endif
1269        unsigned int rx_number = 0;
1270
1271        do {
1272                qm_dqrr_pvb_update(&p->p);
1273                dq = qm_dqrr_current(&p->p);
1274                if (!dq)
1275                        break;
1276#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
1277                /*
1278                 * If running on an LE system the fields of the
1279                 * dequeue entry must be swapper.  Because the
1280                 * QMan HW will ignore writes the DQRR entry is
1281                 * copied and the index stored within the copy
1282                 */
1283                shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1284                *shadow = *dq;
1285                dq = shadow;
1286                shadow->fqid = be32_to_cpu(shadow->fqid);
1287                shadow->seqnum = be16_to_cpu(shadow->seqnum);
1288                hw_fd_to_cpu(&shadow->fd);
1289#endif
1290
1291               /* SDQCR: context_b points to the FQ */
1292#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1293                fq = get_fq_table_entry(dq->contextB);
1294#else
1295                fq = (void *)(uintptr_t)dq->contextB;
1296#endif
1297                /* Now let the callback do its stuff */
1298                res = fq->cb.dqrr_dpdk_cb(&ev[rx_number], p, fq,
1299                                         dq, &bufs[rx_number]);
1300                rx_number++;
1301                /* Interpret 'dq' from a driver perspective. */
1302                /*
1303                 * Parking isn't possible unless HELDACTIVE was set. NB,
1304                 * FORCEELIGIBLE implies HELDACTIVE, so we only need to
1305                 * check for HELDACTIVE to cover both.
1306                 */
1307                DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
1308                            (res != qman_cb_dqrr_park));
1309                if (res != qman_cb_dqrr_defer)
1310                        qm_dqrr_cdc_consume_1ptr(&p->p, dq,
1311                                                 res == qman_cb_dqrr_park);
1312                /* Move forward */
1313                qm_dqrr_next(&p->p);
1314                /*
1315                 * Entry processed and consumed, increment our counter.  The
1316                 * callback can request that we exit after consuming the
1317                 * entry, and we also exit if we reach our processing limit,
1318                 * so loop back only if neither of these conditions is met.
1319                 */
1320        } while (++limit < poll_limit);
1321
1322        return limit;
1323}
1324
1325struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq)
1326{
1327        struct qman_portal *p = get_affine_portal();
1328        const struct qm_dqrr_entry *dq;
1329#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1330        struct qm_dqrr_entry *shadow;
1331#endif
1332
1333        qm_dqrr_pvb_update(&p->p);
1334        dq = qm_dqrr_current(&p->p);
1335        if (!dq)
1336                return NULL;
1337
1338        if (!(dq->stat & QM_DQRR_STAT_FD_VALID)) {
1339                /* Invalid DQRR - put the portal and consume the DQRR.
1340                 * Return NULL to user as no packet is seen.
1341                 */
1342                qman_dqrr_consume(fq, (struct qm_dqrr_entry *)dq);
1343                return NULL;
1344        }
1345
1346#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1347        shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
1348        *shadow = *dq;
1349        dq = shadow;
1350        shadow->fqid = be32_to_cpu(shadow->fqid);
1351        shadow->seqnum = be16_to_cpu(shadow->seqnum);
1352        hw_fd_to_cpu(&shadow->fd);
1353#endif
1354
1355        if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
1356                fq_clear(fq, QMAN_FQ_STATE_NE);
1357
1358        return (struct qm_dqrr_entry *)dq;
1359}
1360
1361void qman_dqrr_consume(struct qman_fq *fq,
1362                       struct qm_dqrr_entry *dq)
1363{
1364        struct qman_portal *p = get_affine_portal();
1365
1366        if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
1367                clear_vdqcr(p, fq);
1368
1369        qm_dqrr_cdc_consume_1ptr(&p->p, dq, 0);
1370        qm_dqrr_next(&p->p);
1371}
1372
1373int qman_poll_dqrr(unsigned int limit)
1374{
1375        struct qman_portal *p = get_affine_portal();
1376        int ret;
1377
1378        ret = __poll_portal_fast(p, limit);
1379        return ret;
1380}
1381
1382void qman_poll(void)
1383{
1384        struct qman_portal *p = get_affine_portal();
1385
1386        if ((~p->irq_sources) & QM_PIRQ_SLOW) {
1387                if (!(p->slowpoll--)) {
1388                        u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
1389                        u32 active = __poll_portal_slow(p, is);
1390
1391                        if (active) {
1392                                qm_isr_status_clear(&p->p, active);
1393                                p->slowpoll = SLOW_POLL_BUSY;
1394                        } else
1395                                p->slowpoll = SLOW_POLL_IDLE;
1396                }
1397        }
1398        if ((~p->irq_sources) & QM_PIRQ_DQRI)
1399                __poll_portal_fast(p, FSL_QMAN_POLL_LIMIT);
1400}
1401
1402void qman_stop_dequeues(void)
1403{
1404        struct qman_portal *p = get_affine_portal();
1405
1406        qman_stop_dequeues_ex(p);
1407}
1408
1409void qman_start_dequeues(void)
1410{
1411        struct qman_portal *p = get_affine_portal();
1412
1413        DPAA_ASSERT(p->dqrr_disable_ref > 0);
1414        if (!(--p->dqrr_disable_ref))
1415                qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
1416}
1417
1418void qman_static_dequeue_add(u32 pools, struct qman_portal *qp)
1419{
1420        struct qman_portal *p = qp ? qp : get_affine_portal();
1421
1422        pools &= p->config->pools;
1423        p->sdqcr |= pools;
1424        qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1425}
1426
1427void qman_static_dequeue_del(u32 pools, struct qman_portal *qp)
1428{
1429        struct qman_portal *p = qp ? qp : get_affine_portal();
1430
1431        pools &= p->config->pools;
1432        p->sdqcr &= ~pools;
1433        qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
1434}
1435
1436u32 qman_static_dequeue_get(struct qman_portal *qp)
1437{
1438        struct qman_portal *p = qp ? qp : get_affine_portal();
1439        return p->sdqcr;
1440}
1441
1442void qman_dca(const struct qm_dqrr_entry *dq, int park_request)
1443{
1444        struct qman_portal *p = get_affine_portal();
1445
1446        qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
1447}
1448
1449void qman_dca_index(u8 index, int park_request)
1450{
1451        struct qman_portal *p = get_affine_portal();
1452
1453        qm_dqrr_cdc_consume_1(&p->p, index, park_request);
1454}
1455
1456/* Frame queue API */
1457static const char *mcr_result_str(u8 result)
1458{
1459        switch (result) {
1460        case QM_MCR_RESULT_NULL:
1461                return "QM_MCR_RESULT_NULL";
1462        case QM_MCR_RESULT_OK:
1463                return "QM_MCR_RESULT_OK";
1464        case QM_MCR_RESULT_ERR_FQID:
1465                return "QM_MCR_RESULT_ERR_FQID";
1466        case QM_MCR_RESULT_ERR_FQSTATE:
1467                return "QM_MCR_RESULT_ERR_FQSTATE";
1468        case QM_MCR_RESULT_ERR_NOTEMPTY:
1469                return "QM_MCR_RESULT_ERR_NOTEMPTY";
1470        case QM_MCR_RESULT_PENDING:
1471                return "QM_MCR_RESULT_PENDING";
1472        case QM_MCR_RESULT_ERR_BADCOMMAND:
1473                return "QM_MCR_RESULT_ERR_BADCOMMAND";
1474        }
1475        return "<unknown MCR result>";
1476}
1477
1478int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
1479{
1480        struct qm_fqd fqd;
1481        struct qm_mcr_queryfq_np np;
1482        struct qm_mc_command *mcc;
1483        struct qm_mc_result *mcr;
1484        struct qman_portal *p;
1485
1486        if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
1487                int ret = qman_alloc_fqid(&fqid);
1488
1489                if (ret)
1490                        return ret;
1491        }
1492        spin_lock_init(&fq->fqlock);
1493        fq->fqid = fqid;
1494        fq->fqid_le = cpu_to_be32(fqid);
1495        fq->flags = flags;
1496        fq->state = qman_fq_state_oos;
1497        fq->cgr_groupid = 0;
1498#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1499        if (unlikely(find_empty_fq_table_entry(&fq->key, fq))) {
1500                pr_info("Find empty table entry failed\n");
1501                return -ENOMEM;
1502        }
1503        fq->qman_fq_lookup_table = qman_fq_lookup_table;
1504#endif
1505        if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
1506                return 0;
1507        /* Everything else is AS_IS support */
1508        p = get_affine_portal();
1509        mcc = qm_mc_start(&p->p);
1510        mcc->queryfq.fqid = cpu_to_be32(fqid);
1511        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1512        while (!(mcr = qm_mc_result(&p->p)))
1513                cpu_relax();
1514        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
1515        if (mcr->result != QM_MCR_RESULT_OK) {
1516                pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
1517                goto err;
1518        }
1519        fqd = mcr->queryfq.fqd;
1520        hw_fqd_to_cpu(&fqd);
1521        mcc = qm_mc_start(&p->p);
1522        mcc->queryfq_np.fqid = cpu_to_be32(fqid);
1523        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1524        while (!(mcr = qm_mc_result(&p->p)))
1525                cpu_relax();
1526        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
1527        if (mcr->result != QM_MCR_RESULT_OK) {
1528                pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
1529                goto err;
1530        }
1531        np = mcr->queryfq_np;
1532        /* Phew, have queryfq and queryfq_np results, stitch together
1533         * the FQ object from those.
1534         */
1535        fq->cgr_groupid = fqd.cgid;
1536        switch (np.state & QM_MCR_NP_STATE_MASK) {
1537        case QM_MCR_NP_STATE_OOS:
1538                break;
1539        case QM_MCR_NP_STATE_RETIRED:
1540                fq->state = qman_fq_state_retired;
1541                if (np.frm_cnt)
1542                        fq_set(fq, QMAN_FQ_STATE_NE);
1543                break;
1544        case QM_MCR_NP_STATE_TEN_SCHED:
1545        case QM_MCR_NP_STATE_TRU_SCHED:
1546        case QM_MCR_NP_STATE_ACTIVE:
1547                fq->state = qman_fq_state_sched;
1548                if (np.state & QM_MCR_NP_STATE_R)
1549                        fq_set(fq, QMAN_FQ_STATE_CHANGING);
1550                break;
1551        case QM_MCR_NP_STATE_PARKED:
1552                fq->state = qman_fq_state_parked;
1553                break;
1554        default:
1555                DPAA_ASSERT(NULL == "invalid FQ state");
1556        }
1557        if (fqd.fq_ctrl & QM_FQCTRL_CGE)
1558                fq->state |= QMAN_FQ_STATE_CGR_EN;
1559        return 0;
1560err:
1561        if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
1562                qman_release_fqid(fqid);
1563        return -EIO;
1564}
1565
1566void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
1567{
1568        /*
1569         * We don't need to lock the FQ as it is a pre-condition that the FQ be
1570         * quiesced. Instead, run some checks.
1571         */
1572        switch (fq->state) {
1573        case qman_fq_state_parked:
1574                DPAA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
1575                /* Fallthrough */
1576        case qman_fq_state_oos:
1577                if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
1578                        qman_release_fqid(fq->fqid);
1579#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1580                clear_fq_table_entry(fq->key);
1581#endif
1582                return;
1583        default:
1584                break;
1585        }
1586        DPAA_ASSERT(NULL == "qman_free_fq() on unquiesced FQ!");
1587}
1588
1589u32 qman_fq_fqid(struct qman_fq *fq)
1590{
1591        return fq->fqid;
1592}
1593
1594void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
1595{
1596        if (state)
1597                *state = fq->state;
1598        if (flags)
1599                *flags = fq->flags;
1600}
1601
1602int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
1603{
1604        struct qm_mc_command *mcc;
1605        struct qm_mc_result *mcr;
1606        struct qman_portal *p;
1607
1608        u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1609                QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
1610
1611        if ((fq->state != qman_fq_state_oos) &&
1612            (fq->state != qman_fq_state_parked))
1613                return -EINVAL;
1614#ifdef RTE_LIBRTE_DPAA_HWDEBUG
1615        if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1616                return -EINVAL;
1617#endif
1618        if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
1619                /* And can't be set at the same time as TDTHRESH */
1620                if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
1621                        return -EINVAL;
1622        }
1623        /* Issue an INITFQ_[PARKED|SCHED] management command */
1624        p = get_affine_portal();
1625        FQLOCK(fq);
1626        if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1627                     ((fq->state != qman_fq_state_oos) &&
1628                                (fq->state != qman_fq_state_parked)))) {
1629                FQUNLOCK(fq);
1630                return -EBUSY;
1631        }
1632        mcc = qm_mc_start(&p->p);
1633        if (opts)
1634                mcc->initfq = *opts;
1635        mcc->initfq.fqid = cpu_to_be32(fq->fqid);
1636        mcc->initfq.count = 0;
1637        /*
1638         * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a
1639         * demux pointer. Otherwise, the caller-provided value is allowed to
1640         * stand, don't overwrite it.
1641         */
1642        if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
1643                dma_addr_t phys_fq;
1644
1645                mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
1646#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1647                mcc->initfq.fqd.context_b = cpu_to_be32(fq->key);
1648#else
1649                mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
1650#endif
1651                /*
1652                 *  and the physical address - NB, if the user wasn't trying to
1653                 * set CONTEXTA, clear the stashing settings.
1654                 */
1655                if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
1656                        mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
1657                        memset(&mcc->initfq.fqd.context_a, 0,
1658                               sizeof(mcc->initfq.fqd.context_a));
1659                } else {
1660                        phys_fq = rte_mem_virt2iova(fq);
1661                        qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
1662                }
1663        }
1664        if (flags & QMAN_INITFQ_FLAG_LOCAL) {
1665                mcc->initfq.fqd.dest.channel = p->config->channel;
1666                if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
1667                        mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
1668                        mcc->initfq.fqd.dest.wq = 4;
1669                }
1670        }
1671        mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
1672        cpu_to_hw_fqd(&mcc->initfq.fqd);
1673        qm_mc_commit(&p->p, myverb);
1674        while (!(mcr = qm_mc_result(&p->p)))
1675                cpu_relax();
1676        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1677        res = mcr->result;
1678        if (res != QM_MCR_RESULT_OK) {
1679                FQUNLOCK(fq);
1680                return -EIO;
1681        }
1682        if (opts) {
1683                if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
1684                        if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
1685                                fq_set(fq, QMAN_FQ_STATE_CGR_EN);
1686                        else
1687                                fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
1688                }
1689                if (opts->we_mask & QM_INITFQ_WE_CGID)
1690                        fq->cgr_groupid = opts->fqd.cgid;
1691        }
1692        fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
1693                qman_fq_state_sched : qman_fq_state_parked;
1694        FQUNLOCK(fq);
1695        return 0;
1696}
1697
1698int qman_schedule_fq(struct qman_fq *fq)
1699{
1700        struct qm_mc_command *mcc;
1701        struct qm_mc_result *mcr;
1702        struct qman_portal *p;
1703
1704        int ret = 0;
1705        u8 res;
1706
1707        if (fq->state != qman_fq_state_parked)
1708                return -EINVAL;
1709#ifdef RTE_LIBRTE_DPAA_HWDEBUG
1710        if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1711                return -EINVAL;
1712#endif
1713        /* Issue a ALTERFQ_SCHED management command */
1714        p = get_affine_portal();
1715
1716        FQLOCK(fq);
1717        if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1718                     (fq->state != qman_fq_state_parked))) {
1719                ret = -EBUSY;
1720                goto out;
1721        }
1722        mcc = qm_mc_start(&p->p);
1723        mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1724        qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
1725        while (!(mcr = qm_mc_result(&p->p)))
1726                cpu_relax();
1727        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
1728        res = mcr->result;
1729        if (res != QM_MCR_RESULT_OK) {
1730                ret = -EIO;
1731                goto out;
1732        }
1733        fq->state = qman_fq_state_sched;
1734out:
1735        FQUNLOCK(fq);
1736
1737        return ret;
1738}
1739
1740int qman_retire_fq(struct qman_fq *fq, u32 *flags)
1741{
1742        struct qm_mc_command *mcc;
1743        struct qm_mc_result *mcr;
1744        struct qman_portal *p;
1745
1746        int rval;
1747        u8 res;
1748
1749        if ((fq->state != qman_fq_state_parked) &&
1750            (fq->state != qman_fq_state_sched))
1751                return -EINVAL;
1752#ifdef RTE_LIBRTE_DPAA_HWDEBUG
1753        if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1754                return -EINVAL;
1755#endif
1756        p = get_affine_portal();
1757
1758        FQLOCK(fq);
1759        if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1760                     (fq->state == qman_fq_state_retired) ||
1761                                (fq->state == qman_fq_state_oos))) {
1762                rval = -EBUSY;
1763                goto out;
1764        }
1765        rval = table_push_fq(p, fq);
1766        if (rval)
1767                goto out;
1768        mcc = qm_mc_start(&p->p);
1769        mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1770        qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
1771        while (!(mcr = qm_mc_result(&p->p)))
1772                cpu_relax();
1773        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
1774        res = mcr->result;
1775        /*
1776         * "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
1777         * and defer the flags until FQRNI or FQRN (respectively) show up. But
1778         * "Friendly" is to process OK immediately, and not set CHANGING. We do
1779         * friendly, otherwise the caller doesn't necessarily have a fully
1780         * "retired" FQ on return even if the retirement was immediate. However
1781         * this does mean some code duplication between here and
1782         * fq_state_change().
1783         */
1784        if (likely(res == QM_MCR_RESULT_OK)) {
1785                rval = 0;
1786                /* Process 'fq' right away, we'll ignore FQRNI */
1787                if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
1788                        fq_set(fq, QMAN_FQ_STATE_NE);
1789                if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
1790                        fq_set(fq, QMAN_FQ_STATE_ORL);
1791                else
1792                        table_del_fq(p, fq);
1793                if (flags)
1794                        *flags = fq->flags;
1795                fq->state = qman_fq_state_retired;
1796                if (fq->cb.fqs) {
1797                        /*
1798                         * Another issue with supporting "immediate" retirement
1799                         * is that we're forced to drop FQRNIs, because by the
1800                         * time they're seen it may already be "too late" (the
1801                         * fq may have been OOS'd and free()'d already). But if
1802                         * the upper layer wants a callback whether it's
1803                         * immediate or not, we have to fake a "MR" entry to
1804                         * look like an FQRNI...
1805                         */
1806                        struct qm_mr_entry msg;
1807
1808                        msg.ern.verb = QM_MR_VERB_FQRNI;
1809                        msg.fq.fqs = mcr->alterfq.fqs;
1810                        msg.fq.fqid = fq->fqid;
1811#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
1812                        msg.fq.contextB = fq->key;
1813#else
1814                        msg.fq.contextB = (u32)(uintptr_t)fq;
1815#endif
1816                        fq->cb.fqs(p, fq, &msg);
1817                }
1818        } else if (res == QM_MCR_RESULT_PENDING) {
1819                rval = 1;
1820                fq_set(fq, QMAN_FQ_STATE_CHANGING);
1821        } else {
1822                rval = -EIO;
1823                table_del_fq(p, fq);
1824        }
1825out:
1826        FQUNLOCK(fq);
1827        return rval;
1828}
1829
1830int qman_oos_fq(struct qman_fq *fq)
1831{
1832        struct qm_mc_command *mcc;
1833        struct qm_mc_result *mcr;
1834        struct qman_portal *p;
1835
1836        int ret = 0;
1837        u8 res;
1838
1839        if (fq->state != qman_fq_state_retired)
1840                return -EINVAL;
1841#ifdef RTE_LIBRTE_DPAA_HWDEBUG
1842        if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1843                return -EINVAL;
1844#endif
1845        p = get_affine_portal();
1846        FQLOCK(fq);
1847        if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
1848                     (fq->state != qman_fq_state_retired))) {
1849                ret = -EBUSY;
1850                goto out;
1851        }
1852        mcc = qm_mc_start(&p->p);
1853        mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
1854        qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
1855        while (!(mcr = qm_mc_result(&p->p)))
1856                cpu_relax();
1857        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
1858        res = mcr->result;
1859        if (res != QM_MCR_RESULT_OK) {
1860                ret = -EIO;
1861                goto out;
1862        }
1863        fq->state = qman_fq_state_oos;
1864out:
1865        FQUNLOCK(fq);
1866        return ret;
1867}
1868
1869int qman_fq_flow_control(struct qman_fq *fq, int xon)
1870{
1871        struct qm_mc_command *mcc;
1872        struct qm_mc_result *mcr;
1873        struct qman_portal *p;
1874
1875        int ret = 0;
1876        u8 res;
1877        u8 myverb;
1878
1879        if ((fq->state == qman_fq_state_oos) ||
1880            (fq->state == qman_fq_state_retired) ||
1881                (fq->state == qman_fq_state_parked))
1882                return -EINVAL;
1883
1884#ifdef RTE_LIBRTE_DPAA_HWDEBUG
1885        if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
1886                return -EINVAL;
1887#endif
1888        /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
1889        p = get_affine_portal();
1890        FQLOCK(fq);
1891        if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
1892                     (fq->state == qman_fq_state_parked) ||
1893                        (fq->state == qman_fq_state_oos) ||
1894                        (fq->state == qman_fq_state_retired))) {
1895                ret = -EBUSY;
1896                goto out;
1897        }
1898        mcc = qm_mc_start(&p->p);
1899        mcc->alterfq.fqid = fq->fqid;
1900        mcc->alterfq.count = 0;
1901        myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
1902
1903        qm_mc_commit(&p->p, myverb);
1904        while (!(mcr = qm_mc_result(&p->p)))
1905                cpu_relax();
1906        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
1907
1908        res = mcr->result;
1909        if (res != QM_MCR_RESULT_OK) {
1910                ret = -EIO;
1911                goto out;
1912        }
1913out:
1914        FQUNLOCK(fq);
1915        return ret;
1916}
1917
1918int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
1919{
1920        struct qm_mc_command *mcc;
1921        struct qm_mc_result *mcr;
1922        struct qman_portal *p = get_affine_portal();
1923
1924        u8 res;
1925
1926        mcc = qm_mc_start(&p->p);
1927        mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1928        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
1929        while (!(mcr = qm_mc_result(&p->p)))
1930                cpu_relax();
1931        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
1932        res = mcr->result;
1933        if (res == QM_MCR_RESULT_OK)
1934                *fqd = mcr->queryfq.fqd;
1935        hw_fqd_to_cpu(fqd);
1936        if (res != QM_MCR_RESULT_OK)
1937                return -EIO;
1938        return 0;
1939}
1940
1941int qman_query_fq_has_pkts(struct qman_fq *fq)
1942{
1943        struct qm_mc_command *mcc;
1944        struct qm_mc_result *mcr;
1945        struct qman_portal *p = get_affine_portal();
1946
1947        int ret = 0;
1948        u8 res;
1949
1950        mcc = qm_mc_start(&p->p);
1951        mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1952        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1953        while (!(mcr = qm_mc_result(&p->p)))
1954                cpu_relax();
1955        res = mcr->result;
1956        if (res == QM_MCR_RESULT_OK)
1957                ret = !!mcr->queryfq_np.frm_cnt;
1958        return ret;
1959}
1960
1961int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
1962{
1963        struct qm_mc_command *mcc;
1964        struct qm_mc_result *mcr;
1965        struct qman_portal *p = get_affine_portal();
1966
1967        u8 res;
1968
1969        mcc = qm_mc_start(&p->p);
1970        mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
1971        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
1972        while (!(mcr = qm_mc_result(&p->p)))
1973                cpu_relax();
1974        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
1975        res = mcr->result;
1976        if (res == QM_MCR_RESULT_OK) {
1977                *np = mcr->queryfq_np;
1978                np->fqd_link = be24_to_cpu(np->fqd_link);
1979                np->odp_seq = be16_to_cpu(np->odp_seq);
1980                np->orp_nesn = be16_to_cpu(np->orp_nesn);
1981                np->orp_ea_hseq  = be16_to_cpu(np->orp_ea_hseq);
1982                np->orp_ea_tseq  = be16_to_cpu(np->orp_ea_tseq);
1983                np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
1984                np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
1985                np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
1986                np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
1987                np->ics_surp = be16_to_cpu(np->ics_surp);
1988                np->byte_cnt = be32_to_cpu(np->byte_cnt);
1989                np->frm_cnt = be24_to_cpu(np->frm_cnt);
1990                np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
1991                np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
1992                np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
1993                np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
1994                np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
1995        }
1996        if (res == QM_MCR_RESULT_ERR_FQID)
1997                return -ERANGE;
1998        else if (res != QM_MCR_RESULT_OK)
1999                return -EIO;
2000        return 0;
2001}
2002
2003int qman_query_fq_frm_cnt(struct qman_fq *fq, u32 *frm_cnt)
2004{
2005        struct qm_mc_command *mcc;
2006        struct qm_mc_result *mcr;
2007        struct qman_portal *p = get_affine_portal();
2008
2009        mcc = qm_mc_start(&p->p);
2010        mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
2011        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
2012        while (!(mcr = qm_mc_result(&p->p)))
2013                cpu_relax();
2014        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2015
2016        if (mcr->result == QM_MCR_RESULT_OK)
2017                *frm_cnt = be24_to_cpu(mcr->queryfq_np.frm_cnt);
2018        else if (mcr->result == QM_MCR_RESULT_ERR_FQID)
2019                return -ERANGE;
2020        else if (mcr->result != QM_MCR_RESULT_OK)
2021                return -EIO;
2022        return 0;
2023}
2024
2025int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
2026{
2027        struct qm_mc_command *mcc;
2028        struct qm_mc_result *mcr;
2029        struct qman_portal *p = get_affine_portal();
2030
2031        u8 res, myverb;
2032
2033        myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
2034                                 QM_MCR_VERB_QUERYWQ;
2035        mcc = qm_mc_start(&p->p);
2036        mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
2037        qm_mc_commit(&p->p, myverb);
2038        while (!(mcr = qm_mc_result(&p->p)))
2039                cpu_relax();
2040        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
2041        res = mcr->result;
2042        if (res == QM_MCR_RESULT_OK) {
2043                int i, array_len;
2044
2045                wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
2046                array_len = ARRAY_SIZE(mcr->querywq.wq_len);
2047                for (i = 0; i < array_len; i++)
2048                        wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
2049        }
2050        if (res != QM_MCR_RESULT_OK) {
2051                pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
2052                return -EIO;
2053        }
2054        return 0;
2055}
2056
2057int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
2058                       struct qm_mcr_cgrtestwrite *result)
2059{
2060        struct qm_mc_command *mcc;
2061        struct qm_mc_result *mcr;
2062        struct qman_portal *p = get_affine_portal();
2063
2064        u8 res;
2065
2066        mcc = qm_mc_start(&p->p);
2067        mcc->cgrtestwrite.cgid = cgr->cgrid;
2068        mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
2069        mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
2070        qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
2071        while (!(mcr = qm_mc_result(&p->p)))
2072                cpu_relax();
2073        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
2074        res = mcr->result;
2075        if (res == QM_MCR_RESULT_OK)
2076                *result = mcr->cgrtestwrite;
2077        if (res != QM_MCR_RESULT_OK) {
2078                pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
2079                return -EIO;
2080        }
2081        return 0;
2082}
2083
2084int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
2085{
2086        struct qm_mc_command *mcc;
2087        struct qm_mc_result *mcr;
2088        struct qman_portal *p = get_affine_portal();
2089        u8 res;
2090        unsigned int i;
2091
2092        mcc = qm_mc_start(&p->p);
2093        mcc->querycgr.cgid = cgr->cgrid;
2094        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
2095        while (!(mcr = qm_mc_result(&p->p)))
2096                cpu_relax();
2097        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
2098        res = mcr->result;
2099        if (res == QM_MCR_RESULT_OK)
2100                *cgrd = mcr->querycgr;
2101        if (res != QM_MCR_RESULT_OK) {
2102                pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
2103                return -EIO;
2104        }
2105        cgrd->cgr.wr_parm_g.word =
2106                be32_to_cpu(cgrd->cgr.wr_parm_g.word);
2107        cgrd->cgr.wr_parm_y.word =
2108                be32_to_cpu(cgrd->cgr.wr_parm_y.word);
2109        cgrd->cgr.wr_parm_r.word =
2110                be32_to_cpu(cgrd->cgr.wr_parm_r.word);
2111        cgrd->cgr.cscn_targ =  be32_to_cpu(cgrd->cgr.cscn_targ);
2112        cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
2113        for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
2114                cgrd->cscn_targ_swp[i] =
2115                        be32_to_cpu(cgrd->cscn_targ_swp[i]);
2116        return 0;
2117}
2118
2119int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
2120{
2121        struct qm_mc_result *mcr;
2122        struct qman_portal *p = get_affine_portal();
2123        u8 res;
2124        unsigned int i;
2125
2126        qm_mc_start(&p->p);
2127        qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
2128        while (!(mcr = qm_mc_result(&p->p)))
2129                cpu_relax();
2130        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2131                        QM_MCC_VERB_QUERYCONGESTION);
2132        res = mcr->result;
2133        if (res == QM_MCR_RESULT_OK)
2134                *congestion = mcr->querycongestion;
2135        if (res != QM_MCR_RESULT_OK) {
2136                pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
2137                return -EIO;
2138        }
2139        for (i = 0; i < ARRAY_SIZE(congestion->state.state); i++)
2140                congestion->state.state[i] =
2141                        be32_to_cpu(congestion->state.state[i]);
2142        return 0;
2143}
2144
2145int qman_set_vdq(struct qman_fq *fq, u16 num, uint32_t vdqcr_flags)
2146{
2147        struct qman_portal *p = get_affine_portal();
2148        uint32_t vdqcr;
2149        int ret = -EBUSY;
2150
2151        vdqcr = vdqcr_flags;
2152        vdqcr |= QM_VDQCR_NUMFRAMES_SET(num);
2153
2154        if ((fq->state != qman_fq_state_parked) &&
2155            (fq->state != qman_fq_state_retired)) {
2156                ret = -EINVAL;
2157                goto out;
2158        }
2159        if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) {
2160                ret = -EBUSY;
2161                goto out;
2162        }
2163        vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2164
2165        if (!p->vdqcr_owned) {
2166                FQLOCK(fq);
2167                if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2168                        goto escape;
2169                fq_set(fq, QMAN_FQ_STATE_VDQCR);
2170                FQUNLOCK(fq);
2171                p->vdqcr_owned = fq;
2172                ret = 0;
2173        }
2174escape:
2175        if (!ret)
2176                qm_dqrr_vdqcr_set(&p->p, vdqcr);
2177
2178out:
2179        return ret;
2180}
2181
2182int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
2183                          u32 vdqcr)
2184{
2185        struct qman_portal *p;
2186        int ret = -EBUSY;
2187
2188        if ((fq->state != qman_fq_state_parked) &&
2189            (fq->state != qman_fq_state_retired))
2190                return -EINVAL;
2191        if (vdqcr & QM_VDQCR_FQID_MASK)
2192                return -EINVAL;
2193        if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2194                return -EBUSY;
2195        vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
2196
2197        p = get_affine_portal();
2198
2199        if (!p->vdqcr_owned) {
2200                FQLOCK(fq);
2201                if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
2202                        goto escape;
2203                fq_set(fq, QMAN_FQ_STATE_VDQCR);
2204                FQUNLOCK(fq);
2205                p->vdqcr_owned = fq;
2206                ret = 0;
2207        }
2208escape:
2209        if (ret)
2210                return ret;
2211
2212        /* VDQCR is set */
2213        qm_dqrr_vdqcr_set(&p->p, vdqcr);
2214        return 0;
2215}
2216
2217static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
2218{
2219        if (avail)
2220                qm_eqcr_cce_prefetch(&p->p);
2221        else
2222                qm_eqcr_cce_update(&p->p);
2223}
2224
2225int qman_eqcr_is_empty(void)
2226{
2227        struct qman_portal *p = get_affine_portal();
2228        u8 avail;
2229
2230        update_eqcr_ci(p, 0);
2231        avail = qm_eqcr_get_fill(&p->p);
2232        return (avail == 0);
2233}
2234
2235void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
2236{
2237        if (affine) {
2238                struct qman_portal *p = get_affine_portal();
2239
2240                p->cb_dc_ern = handler;
2241        } else
2242                cb_dc_ern = handler;
2243}
2244
2245static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
2246                                        struct qman_fq *fq,
2247                                        const struct qm_fd *fd,
2248                                        u32 flags)
2249{
2250        struct qm_eqcr_entry *eq;
2251        u8 avail;
2252
2253        if (p->use_eqcr_ci_stashing) {
2254                /*
2255                 * The stashing case is easy, only update if we need to in
2256                 * order to try and liberate ring entries.
2257                 */
2258                eq = qm_eqcr_start_stash(&p->p);
2259        } else {
2260                /*
2261                 * The non-stashing case is harder, need to prefetch ahead of
2262                 * time.
2263                 */
2264                avail = qm_eqcr_get_avail(&p->p);
2265                if (avail < 2)
2266                        update_eqcr_ci(p, avail);
2267                eq = qm_eqcr_start_no_stash(&p->p);
2268        }
2269
2270        if (unlikely(!eq))
2271                return NULL;
2272
2273        if (flags & QMAN_ENQUEUE_FLAG_DCA)
2274                eq->dca = QM_EQCR_DCA_ENABLE |
2275                        ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
2276                                        QM_EQCR_DCA_PARK : 0) |
2277                        ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
2278        eq->fqid = cpu_to_be32(fq->fqid);
2279#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
2280        eq->tag = cpu_to_be32(fq->key);
2281#else
2282        eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
2283#endif
2284        eq->fd = *fd;
2285        cpu_to_hw_fd(&eq->fd);
2286        return eq;
2287}
2288
2289int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
2290{
2291        struct qman_portal *p = get_affine_portal();
2292        struct qm_eqcr_entry *eq;
2293
2294        eq = try_p_eq_start(p, fq, fd, flags);
2295        if (!eq)
2296                return -EBUSY;
2297        /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2298        qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
2299                (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2300        /* Factor the below out, it's used from qman_enqueue_orp() too */
2301        return 0;
2302}
2303
2304int qman_enqueue_multi(struct qman_fq *fq,
2305                       const struct qm_fd *fd, u32 *flags,
2306                int frames_to_send)
2307{
2308        struct qman_portal *p = get_affine_portal();
2309        struct qm_portal *portal = &p->p;
2310
2311        register struct qm_eqcr *eqcr = &portal->eqcr;
2312        struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2313
2314        u8 i = 0, diff, old_ci, sent = 0;
2315
2316        /* Update the available entries if no entry is free */
2317        if (!eqcr->available) {
2318                old_ci = eqcr->ci;
2319                eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2320                diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2321                eqcr->available += diff;
2322                if (!diff)
2323                        return 0;
2324        }
2325
2326        /* try to send as many frames as possible */
2327        while (eqcr->available && frames_to_send--) {
2328                eq->fqid = fq->fqid_le;
2329                eq->fd.opaque_addr = fd->opaque_addr;
2330                eq->fd.addr = cpu_to_be40(fd->addr);
2331                eq->fd.status = cpu_to_be32(fd->status);
2332                eq->fd.opaque = cpu_to_be32(fd->opaque);
2333                if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2334                        eq->dca = QM_EQCR_DCA_ENABLE |
2335                                ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2336                }
2337                i++;
2338                eq = (void *)((unsigned long)(eq + 1) &
2339                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
2340                eqcr->available--;
2341                sent++;
2342                fd++;
2343        }
2344        lwsync();
2345
2346        /* In order for flushes to complete faster, all lines are recorded in
2347         * 32 bit word.
2348         */
2349        eq = eqcr->cursor;
2350        for (i = 0; i < sent; i++) {
2351                eq->__dont_write_directly__verb =
2352                        QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2353                prev_eq = eq;
2354                eq = (void *)((unsigned long)(eq + 1) &
2355                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
2356                if (unlikely((prev_eq + 1) != eq))
2357                        eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2358        }
2359
2360        /* We need  to flush all the lines but without load/store operations
2361         * between them
2362         */
2363        eq = eqcr->cursor;
2364        for (i = 0; i < sent; i++) {
2365                dcbf(eq);
2366                eq = (void *)((unsigned long)(eq + 1) &
2367                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
2368        }
2369        /* Update cursor for the next call */
2370        eqcr->cursor = eq;
2371        return sent;
2372}
2373
2374int
2375qman_enqueue_multi_fq(struct qman_fq *fq[], const struct qm_fd *fd,
2376                      u32 *flags, int frames_to_send)
2377{
2378        struct qman_portal *p = get_affine_portal();
2379        struct qm_portal *portal = &p->p;
2380
2381        register struct qm_eqcr *eqcr = &portal->eqcr;
2382        struct qm_eqcr_entry *eq = eqcr->cursor, *prev_eq;
2383
2384        u8 i = 0, diff, old_ci, sent = 0;
2385
2386        /* Update the available entries if no entry is free */
2387        if (!eqcr->available) {
2388                old_ci = eqcr->ci;
2389                eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
2390                diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
2391                eqcr->available += diff;
2392                if (!diff)
2393                        return 0;
2394        }
2395
2396        /* try to send as many frames as possible */
2397        while (eqcr->available && frames_to_send--) {
2398                eq->fqid = fq[sent]->fqid_le;
2399                eq->fd.opaque_addr = fd->opaque_addr;
2400                eq->fd.addr = cpu_to_be40(fd->addr);
2401                eq->fd.status = cpu_to_be32(fd->status);
2402                eq->fd.opaque = cpu_to_be32(fd->opaque);
2403                if (flags && (flags[i] & QMAN_ENQUEUE_FLAG_DCA)) {
2404                        eq->dca = QM_EQCR_DCA_ENABLE |
2405                                ((flags[i] >> 8) & QM_EQCR_DCA_IDXMASK);
2406                }
2407                i++;
2408
2409                eq = (void *)((unsigned long)(eq + 1) &
2410                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
2411                eqcr->available--;
2412                sent++;
2413                fd++;
2414        }
2415        lwsync();
2416
2417        /* In order for flushes to complete faster, all lines are recorded in
2418         * 32 bit word.
2419         */
2420        eq = eqcr->cursor;
2421        for (i = 0; i < sent; i++) {
2422                eq->__dont_write_directly__verb =
2423                        QM_EQCR_VERB_CMD_ENQUEUE | eqcr->vbit;
2424                prev_eq = eq;
2425                eq = (void *)((unsigned long)(eq + 1) &
2426                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
2427                if (unlikely((prev_eq + 1) != eq))
2428                        eqcr->vbit ^= QM_EQCR_VERB_VBIT;
2429        }
2430
2431        /* We need  to flush all the lines but without load/store operations
2432         * between them
2433         */
2434        eq = eqcr->cursor;
2435        for (i = 0; i < sent; i++) {
2436                dcbf(eq);
2437                eq = (void *)((unsigned long)(eq + 1) &
2438                        (~(unsigned long)(QM_EQCR_SIZE << 6)));
2439        }
2440        /* Update cursor for the next call */
2441        eqcr->cursor = eq;
2442        return sent;
2443}
2444
2445int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
2446                     struct qman_fq *orp, u16 orp_seqnum)
2447{
2448        struct qman_portal *p  = get_affine_portal();
2449        struct qm_eqcr_entry *eq;
2450
2451        eq = try_p_eq_start(p, fq, fd, flags);
2452        if (!eq)
2453                return -EBUSY;
2454        /* Process ORP-specifics here */
2455        if (flags & QMAN_ENQUEUE_FLAG_NLIS)
2456                orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
2457        else {
2458                orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
2459                if (flags & QMAN_ENQUEUE_FLAG_NESN)
2460                        orp_seqnum |= QM_EQCR_SEQNUM_NESN;
2461                else
2462                        /* No need to check 4 QMAN_ENQUEUE_FLAG_HOLE */
2463                        orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
2464        }
2465        eq->seqnum = cpu_to_be16(orp_seqnum);
2466        eq->orp = cpu_to_be32(orp->fqid);
2467        /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
2468        qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
2469                ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
2470                                0 : QM_EQCR_VERB_CMD_ENQUEUE) |
2471                (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
2472
2473        return 0;
2474}
2475
2476int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
2477                    struct qm_mcc_initcgr *opts)
2478{
2479        struct qm_mc_command *mcc;
2480        struct qm_mc_result *mcr;
2481        struct qman_portal *p = get_affine_portal();
2482
2483        u8 res;
2484        u8 verb = QM_MCC_VERB_MODIFYCGR;
2485
2486        mcc = qm_mc_start(&p->p);
2487        if (opts)
2488                mcc->initcgr = *opts;
2489        mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
2490        mcc->initcgr.cgr.wr_parm_g.word =
2491                cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
2492        mcc->initcgr.cgr.wr_parm_y.word =
2493                cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
2494        mcc->initcgr.cgr.wr_parm_r.word =
2495                cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
2496        mcc->initcgr.cgr.cscn_targ =  cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
2497        mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
2498
2499        mcc->initcgr.cgid = cgr->cgrid;
2500        if (flags & QMAN_CGR_FLAG_USE_INIT)
2501                verb = QM_MCC_VERB_INITCGR;
2502        qm_mc_commit(&p->p, verb);
2503        while (!(mcr = qm_mc_result(&p->p)))
2504                cpu_relax();
2505
2506        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
2507        res = mcr->result;
2508        return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
2509}
2510
2511#define TARG_MASK(n) (0x80000000 >> (n->config->channel - \
2512                                        QM_CHANNEL_SWPORTAL0))
2513#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
2514#define PORTAL_IDX(n) (n->config->channel - QM_CHANNEL_SWPORTAL0)
2515
2516int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
2517                    struct qm_mcc_initcgr *opts)
2518{
2519        struct qm_mcr_querycgr cgr_state;
2520        struct qm_mcc_initcgr local_opts;
2521        int ret;
2522        struct qman_portal *p;
2523
2524        /* We have to check that the provided CGRID is within the limits of the
2525         * data-structures, for obvious reasons. However we'll let h/w take
2526         * care of determining whether it's within the limits of what exists on
2527         * the SoC.
2528         */
2529        if (cgr->cgrid >= __CGR_NUM)
2530                return -EINVAL;
2531
2532        p = get_affine_portal();
2533
2534        memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2535        cgr->chan = p->config->channel;
2536        spin_lock(&p->cgr_lock);
2537
2538        /* if no opts specified, just add it to the list */
2539        if (!opts)
2540                goto add_list;
2541
2542        ret = qman_query_cgr(cgr, &cgr_state);
2543        if (ret)
2544                goto release_lock;
2545        if (opts)
2546                local_opts = *opts;
2547        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2548                local_opts.cgr.cscn_targ_upd_ctrl =
2549                        QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
2550        else
2551                /* Overwrite TARG */
2552                local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2553                                                        TARG_MASK(p);
2554        local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2555
2556        /* send init if flags indicate so */
2557        if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2558                ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
2559        else
2560                ret = qman_modify_cgr(cgr, 0, &local_opts);
2561        if (ret)
2562                goto release_lock;
2563add_list:
2564        list_add(&cgr->node, &p->cgr_cbs);
2565
2566        /* Determine if newly added object requires its callback to be called */
2567        ret = qman_query_cgr(cgr, &cgr_state);
2568        if (ret) {
2569                /* we can't go back, so proceed and return success, but screen
2570                 * and wail to the log file.
2571                 */
2572                pr_crit("CGR HW state partially modified\n");
2573                ret = 0;
2574                goto release_lock;
2575        }
2576        if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
2577                                                              cgr->cgrid))
2578                cgr->cb(p, cgr, 1);
2579release_lock:
2580        spin_unlock(&p->cgr_lock);
2581        return ret;
2582}
2583
2584int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
2585                           struct qm_mcc_initcgr *opts)
2586{
2587        struct qm_mcc_initcgr local_opts;
2588        struct qm_mcr_querycgr cgr_state;
2589        int ret;
2590
2591        if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
2592                pr_warn("QMan version doesn't support CSCN => DCP portal\n");
2593                return -EINVAL;
2594        }
2595        /* We have to check that the provided CGRID is within the limits of the
2596         * data-structures, for obvious reasons. However we'll let h/w take
2597         * care of determining whether it's within the limits of what exists on
2598         * the SoC.
2599         */
2600        if (cgr->cgrid >= __CGR_NUM)
2601                return -EINVAL;
2602
2603        ret = qman_query_cgr(cgr, &cgr_state);
2604        if (ret)
2605                return ret;
2606
2607        memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2608        if (opts)
2609                local_opts = *opts;
2610
2611        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2612                local_opts.cgr.cscn_targ_upd_ctrl =
2613                                QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
2614                                QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
2615        else
2616                local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
2617                                        TARG_DCP_MASK(dcp_portal);
2618        local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
2619
2620        /* send init if flags indicate so */
2621        if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
2622                ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
2623                                      &local_opts);
2624        else
2625                ret = qman_modify_cgr(cgr, 0, &local_opts);
2626
2627        return ret;
2628}
2629
2630int qman_delete_cgr(struct qman_cgr *cgr)
2631{
2632        struct qm_mcr_querycgr cgr_state;
2633        struct qm_mcc_initcgr local_opts;
2634        int ret = 0;
2635        struct qman_cgr *i;
2636        struct qman_portal *p = get_affine_portal();
2637
2638        if (cgr->chan != p->config->channel) {
2639                pr_crit("Attempting to delete cgr from different portal than"
2640                        " it was create: create 0x%x, delete 0x%x\n",
2641                        cgr->chan, p->config->channel);
2642                ret = -EINVAL;
2643                goto put_portal;
2644        }
2645        memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
2646        spin_lock(&p->cgr_lock);
2647        list_del(&cgr->node);
2648        /*
2649         * If there are no other CGR objects for this CGRID in the list,
2650         * update CSCN_TARG accordingly
2651         */
2652        list_for_each_entry(i, &p->cgr_cbs, node)
2653                if ((i->cgrid == cgr->cgrid) && i->cb)
2654                        goto release_lock;
2655        ret = qman_query_cgr(cgr, &cgr_state);
2656        if (ret)  {
2657                /* add back to the list */
2658                list_add(&cgr->node, &p->cgr_cbs);
2659                goto release_lock;
2660        }
2661        /* Overwrite TARG */
2662        local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
2663        if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
2664                local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
2665        else
2666                local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
2667                                                         ~(TARG_MASK(p));
2668        ret = qman_modify_cgr(cgr, 0, &local_opts);
2669        if (ret)
2670                /* add back to the list */
2671                list_add(&cgr->node, &p->cgr_cbs);
2672release_lock:
2673        spin_unlock(&p->cgr_lock);
2674put_portal:
2675        return ret;
2676}
2677
2678int qman_shutdown_fq(u32 fqid)
2679{
2680        struct qman_portal *p;
2681        struct qm_portal *low_p;
2682        struct qm_mc_command *mcc;
2683        struct qm_mc_result *mcr;
2684        u8 state;
2685        int orl_empty, fq_empty, drain = 0;
2686        u32 result;
2687        u32 channel, wq;
2688        u16 dest_wq;
2689
2690        p = get_affine_portal();
2691        low_p = &p->p;
2692
2693        /* Determine the state of the FQID */
2694        mcc = qm_mc_start(low_p);
2695        mcc->queryfq_np.fqid = cpu_to_be32(fqid);
2696        qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ_NP);
2697        while (!(mcr = qm_mc_result(low_p)))
2698                cpu_relax();
2699        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
2700        state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
2701        if (state == QM_MCR_NP_STATE_OOS)
2702                return 0; /* Already OOS, no need to do anymore checks */
2703
2704        /* Query which channel the FQ is using */
2705        mcc = qm_mc_start(low_p);
2706        mcc->queryfq.fqid = cpu_to_be32(fqid);
2707        qm_mc_commit(low_p, QM_MCC_VERB_QUERYFQ);
2708        while (!(mcr = qm_mc_result(low_p)))
2709                cpu_relax();
2710        DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
2711
2712        /* Need to store these since the MCR gets reused */
2713        dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
2714        channel = dest_wq & 0x7;
2715        wq = dest_wq >> 3;
2716
2717        switch (state) {
2718        case QM_MCR_NP_STATE_TEN_SCHED:
2719        case QM_MCR_NP_STATE_TRU_SCHED:
2720        case QM_MCR_NP_STATE_ACTIVE:
2721        case QM_MCR_NP_STATE_PARKED:
2722                orl_empty = 0;
2723                mcc = qm_mc_start(low_p);
2724                mcc->alterfq.fqid = cpu_to_be32(fqid);
2725                qm_mc_commit(low_p, QM_MCC_VERB_ALTER_RETIRE);
2726                while (!(mcr = qm_mc_result(low_p)))
2727                        cpu_relax();
2728                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2729                           QM_MCR_VERB_ALTER_RETIRE);
2730                result = mcr->result; /* Make a copy as we reuse MCR below */
2731
2732                if (result == QM_MCR_RESULT_PENDING) {
2733                        /* Need to wait for the FQRN in the message ring, which
2734                         * will only occur once the FQ has been drained.  In
2735                         * order for the FQ to drain the portal needs to be set
2736                         * to dequeue from the channel the FQ is scheduled on
2737                         */
2738                        const struct qm_mr_entry *msg;
2739                        const struct qm_dqrr_entry *dqrr = NULL;
2740                        int found_fqrn = 0;
2741                        __maybe_unused u16 dequeue_wq = 0;
2742
2743                        /* Flag that we need to drain FQ */
2744                        drain = 1;
2745
2746                        if (channel >= qm_channel_pool1 &&
2747                            channel < (u16)(qm_channel_pool1 + 15)) {
2748                                /* Pool channel, enable the bit in the portal */
2749                                dequeue_wq = (channel -
2750                                              qm_channel_pool1 + 1) << 4 | wq;
2751                        } else if (channel < qm_channel_pool1) {
2752                                /* Dedicated channel */
2753                                dequeue_wq = wq;
2754                        } else {
2755                                pr_info("Cannot recover FQ 0x%x,"
2756                                        " it is scheduled on channel 0x%x",
2757                                        fqid, channel);
2758                                return -EBUSY;
2759                        }
2760                        /* Set the sdqcr to drain this channel */
2761                        if (channel < qm_channel_pool1)
2762                                qm_dqrr_sdqcr_set(low_p,
2763                                                  QM_SDQCR_TYPE_ACTIVE |
2764                                          QM_SDQCR_CHANNELS_DEDICATED);
2765                        else
2766                                qm_dqrr_sdqcr_set(low_p,
2767                                                  QM_SDQCR_TYPE_ACTIVE |
2768                                                  QM_SDQCR_CHANNELS_POOL_CONV
2769                                                  (channel));
2770                        while (!found_fqrn) {
2771                                /* Keep draining DQRR while checking the MR*/
2772                                qm_dqrr_pvb_update(low_p);
2773                                dqrr = qm_dqrr_current(low_p);
2774                                while (dqrr) {
2775                                        qm_dqrr_cdc_consume_1ptr(
2776                                                low_p, dqrr, 0);
2777                                        qm_dqrr_pvb_update(low_p);
2778                                        qm_dqrr_next(low_p);
2779                                        dqrr = qm_dqrr_current(low_p);
2780                                }
2781                                /* Process message ring too */
2782                                qm_mr_pvb_update(low_p);
2783                                msg = qm_mr_current(low_p);
2784                                while (msg) {
2785                                        if ((msg->ern.verb &
2786                                             QM_MR_VERB_TYPE_MASK)
2787                                            == QM_MR_VERB_FQRN)
2788                                                found_fqrn = 1;
2789                                        qm_mr_next(low_p);
2790                                        qm_mr_cci_consume_to_current(low_p);
2791                                        qm_mr_pvb_update(low_p);
2792                                        msg = qm_mr_current(low_p);
2793                                }
2794                                cpu_relax();
2795                        }
2796                }
2797                if (result != QM_MCR_RESULT_OK &&
2798                    result !=  QM_MCR_RESULT_PENDING) {
2799                        /* error */
2800                        pr_err("qman_retire_fq failed on FQ 0x%x,"
2801                               " result=0x%x\n", fqid, result);
2802                        return -1;
2803                }
2804                if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
2805                        /* ORL had no entries, no need to wait until the
2806                         * ERNs come in.
2807                         */
2808                        orl_empty = 1;
2809                }
2810                /* Retirement succeeded, check to see if FQ needs
2811                 * to be drained.
2812                 */
2813                if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
2814                        /* FQ is Not Empty, drain using volatile DQ commands */
2815                        fq_empty = 0;
2816                        do {
2817                                const struct qm_dqrr_entry *dqrr = NULL;
2818                                u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
2819
2820                                qm_dqrr_vdqcr_set(low_p, vdqcr);
2821
2822                                /* Wait for a dequeue to occur */
2823                                while (dqrr == NULL) {
2824                                        qm_dqrr_pvb_update(low_p);
2825                                        dqrr = qm_dqrr_current(low_p);
2826                                        if (!dqrr)
2827                                                cpu_relax();
2828                                }
2829                                /* Process the dequeues, making sure to
2830                                 * empty the ring completely.
2831                                 */
2832                                while (dqrr) {
2833                                        if (dqrr->fqid == fqid &&
2834                                            dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
2835                                                fq_empty = 1;
2836                                        qm_dqrr_cdc_consume_1ptr(low_p,
2837                                                                 dqrr, 0);
2838                                        qm_dqrr_pvb_update(low_p);
2839                                        qm_dqrr_next(low_p);
2840                                        dqrr = qm_dqrr_current(low_p);
2841                                }
2842                        } while (fq_empty == 0);
2843                }
2844                qm_dqrr_sdqcr_set(low_p, 0);
2845
2846                /* Wait for the ORL to have been completely drained */
2847                while (orl_empty == 0) {
2848                        const struct qm_mr_entry *msg;
2849
2850                        qm_mr_pvb_update(low_p);
2851                        msg = qm_mr_current(low_p);
2852                        while (msg) {
2853                                if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
2854                                    QM_MR_VERB_FQRL)
2855                                        orl_empty = 1;
2856                                qm_mr_next(low_p);
2857                                qm_mr_cci_consume_to_current(low_p);
2858                                qm_mr_pvb_update(low_p);
2859                                msg = qm_mr_current(low_p);
2860                        }
2861                        cpu_relax();
2862                }
2863                mcc = qm_mc_start(low_p);
2864                mcc->alterfq.fqid = cpu_to_be32(fqid);
2865                qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2866                while (!(mcr = qm_mc_result(low_p)))
2867                        cpu_relax();
2868                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2869                           QM_MCR_VERB_ALTER_OOS);
2870                if (mcr->result != QM_MCR_RESULT_OK) {
2871                        pr_err(
2872                        "OOS after drain Failed on FQID 0x%x, result 0x%x\n",
2873                               fqid, mcr->result);
2874                        return -1;
2875                }
2876                return 0;
2877
2878        case QM_MCR_NP_STATE_RETIRED:
2879                /* Send OOS Command */
2880                mcc = qm_mc_start(low_p);
2881                mcc->alterfq.fqid = cpu_to_be32(fqid);
2882                qm_mc_commit(low_p, QM_MCC_VERB_ALTER_OOS);
2883                while (!(mcr = qm_mc_result(low_p)))
2884                        cpu_relax();
2885                DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
2886                           QM_MCR_VERB_ALTER_OOS);
2887                if (mcr->result) {
2888                        pr_err("OOS Failed on FQID 0x%x\n", fqid);
2889                        return -1;
2890                }
2891                return 0;
2892
2893        }
2894        return -1;
2895}
2896