linux/drivers/soc/fsl/dpio/qbman-portal.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
   4 * Copyright 2016-2019 NXP
   5 *
   6 */
   7
   8#include <asm/cacheflush.h>
   9#include <linux/io.h>
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <soc/fsl/dpaa2-global.h>
  13
  14#include "qbman-portal.h"
  15
  16/* All QBMan command and result structures use this "valid bit" encoding */
  17#define QB_VALID_BIT ((u32)0x80)
  18
  19/* QBMan portal management command codes */
  20#define QBMAN_MC_ACQUIRE       0x30
  21#define QBMAN_WQCHAN_CONFIGURE 0x46
  22
  23/* CINH register offsets */
  24#define QBMAN_CINH_SWP_EQCR_PI      0x800
  25#define QBMAN_CINH_SWP_EQCR_CI      0x840
  26#define QBMAN_CINH_SWP_EQAR    0x8c0
  27#define QBMAN_CINH_SWP_CR_RT        0x900
  28#define QBMAN_CINH_SWP_VDQCR_RT     0x940
  29#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
  30#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
  31#define QBMAN_CINH_SWP_DQPI    0xa00
  32#define QBMAN_CINH_SWP_DCAP    0xac0
  33#define QBMAN_CINH_SWP_SDQCR   0xb00
  34#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
  35#define QBMAN_CINH_SWP_RCR_PI       0xc00
  36#define QBMAN_CINH_SWP_RAR     0xcc0
  37#define QBMAN_CINH_SWP_ISR     0xe00
  38#define QBMAN_CINH_SWP_IER     0xe40
  39#define QBMAN_CINH_SWP_ISDR    0xe80
  40#define QBMAN_CINH_SWP_IIR     0xec0
  41
  42/* CENA register offsets */
  43#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
  44#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
  45#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
  46#define QBMAN_CENA_SWP_CR      0x600
  47#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
  48#define QBMAN_CENA_SWP_VDQCR   0x780
  49#define QBMAN_CENA_SWP_EQCR_CI 0x840
  50#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
  51
  52/* CENA register offsets in memory-backed mode */
  53#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
  54#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
  55#define QBMAN_CENA_SWP_CR_MEM       0x1600
  56#define QBMAN_CENA_SWP_RR_MEM       0x1680
  57#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780
  58
  59/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
  60#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
  61
  62/* Define token used to determine if response written to memory is valid */
  63#define QMAN_DQ_TOKEN_VALID 1
  64
  65/* SDQCR attribute codes */
  66#define QB_SDQCR_FC_SHIFT   29
  67#define QB_SDQCR_FC_MASK    0x1
  68#define QB_SDQCR_DCT_SHIFT  24
  69#define QB_SDQCR_DCT_MASK   0x3
  70#define QB_SDQCR_TOK_SHIFT  16
  71#define QB_SDQCR_TOK_MASK   0xff
  72#define QB_SDQCR_SRC_SHIFT  0
  73#define QB_SDQCR_SRC_MASK   0xffff
  74
  75/* opaque token for static dequeues */
  76#define QMAN_SDQCR_TOKEN    0xbb
  77
  78#define QBMAN_EQCR_DCA_IDXMASK          0x0f
  79#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)
  80
  81#define EQ_DESC_SIZE_WITHOUT_FD 29
  82#define EQ_DESC_SIZE_FD_START 32
  83
  84enum qbman_sdqcr_dct {
  85        qbman_sdqcr_dct_null = 0,
  86        qbman_sdqcr_dct_prio_ics,
  87        qbman_sdqcr_dct_active_ics,
  88        qbman_sdqcr_dct_active
  89};
  90
  91enum qbman_sdqcr_fc {
  92        qbman_sdqcr_fc_one = 0,
  93        qbman_sdqcr_fc_up_to_3 = 1
  94};
  95
   96/* Internal function declarations */
  97static int qbman_swp_enqueue_direct(struct qbman_swp *s,
  98                                    const struct qbman_eq_desc *d,
  99                                    const struct dpaa2_fd *fd);
 100static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
 101                                      const struct qbman_eq_desc *d,
 102                                      const struct dpaa2_fd *fd);
 103static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
 104                                             const struct qbman_eq_desc *d,
 105                                             const struct dpaa2_fd *fd,
 106                                             uint32_t *flags,
 107                                             int num_frames);
 108static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 109                                               const struct qbman_eq_desc *d,
 110                                               const struct dpaa2_fd *fd,
 111                                               uint32_t *flags,
 112                                               int num_frames);
 113static int
 114qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
 115                                       const struct qbman_eq_desc *d,
 116                                       const struct dpaa2_fd *fd,
 117                                       int num_frames);
 118static
 119int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
 120                                             const struct qbman_eq_desc *d,
 121                                             const struct dpaa2_fd *fd,
 122                                             int num_frames);
 123static int qbman_swp_pull_direct(struct qbman_swp *s,
 124                                 struct qbman_pull_desc *d);
 125static int qbman_swp_pull_mem_back(struct qbman_swp *s,
 126                                   struct qbman_pull_desc *d);
 127
 128const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
 129const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
 130
 131static int qbman_swp_release_direct(struct qbman_swp *s,
 132                                    const struct qbman_release_desc *d,
 133                                    const u64 *buffers,
 134                                    unsigned int num_buffers);
 135static int qbman_swp_release_mem_back(struct qbman_swp *s,
 136                                      const struct qbman_release_desc *d,
 137                                      const u64 *buffers,
 138                                      unsigned int num_buffers);
 139
 140/* Function pointers */
 141int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
 142                             const struct qbman_eq_desc *d,
 143                             const struct dpaa2_fd *fd)
 144        = qbman_swp_enqueue_direct;
 145
 146int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
 147                                      const struct qbman_eq_desc *d,
 148                                      const struct dpaa2_fd *fd,
 149                                      uint32_t *flags,
  150                                      int num_frames)
 151        = qbman_swp_enqueue_multiple_direct;
 152
 153int
 154(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
 155                                       const struct qbman_eq_desc *d,
 156                                       const struct dpaa2_fd *fd,
 157                                       int num_frames)
 158        = qbman_swp_enqueue_multiple_desc_direct;
 159
 160int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
 161                        = qbman_swp_pull_direct;
 162
 163const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
 164                        = qbman_swp_dqrr_next_direct;
 165
 166int (*qbman_swp_release_ptr)(struct qbman_swp *s,
 167                             const struct qbman_release_desc *d,
 168                             const u64 *buffers,
 169                             unsigned int num_buffers)
 170                        = qbman_swp_release_direct;
 171
 172/* Portal Access */
 173
 174static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
 175{
 176        return readl_relaxed(p->addr_cinh + offset);
 177}
 178
 179static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
 180                                        u32 value)
 181{
 182        writel_relaxed(value, p->addr_cinh + offset);
 183}
 184
 185static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
 186{
 187        return p->addr_cena + offset;
 188}
 189
 190#define QBMAN_CINH_SWP_CFG   0xd00
 191
 192#define SWP_CFG_DQRR_MF_SHIFT 20
 193#define SWP_CFG_EST_SHIFT     16
 194#define SWP_CFG_CPBS_SHIFT    15
 195#define SWP_CFG_WN_SHIFT      14
 196#define SWP_CFG_RPM_SHIFT     12
 197#define SWP_CFG_DCM_SHIFT     10
 198#define SWP_CFG_EPM_SHIFT     8
 199#define SWP_CFG_VPM_SHIFT     7
 200#define SWP_CFG_CPM_SHIFT     6
 201#define SWP_CFG_SD_SHIFT      5
 202#define SWP_CFG_SP_SHIFT      4
 203#define SWP_CFG_SE_SHIFT      3
 204#define SWP_CFG_DP_SHIFT      2
 205#define SWP_CFG_DE_SHIFT      1
 206#define SWP_CFG_EP_SHIFT      0
 207
 208static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
 209                                    u8 epm, int sd, int sp, int se,
 210                                    int dp, int de, int ep)
 211{
 212        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
 213                est << SWP_CFG_EST_SHIFT |
 214                wn << SWP_CFG_WN_SHIFT |
 215                rpm << SWP_CFG_RPM_SHIFT |
 216                dcm << SWP_CFG_DCM_SHIFT |
 217                epm << SWP_CFG_EPM_SHIFT |
 218                sd << SWP_CFG_SD_SHIFT |
 219                sp << SWP_CFG_SP_SHIFT |
 220                se << SWP_CFG_SE_SHIFT |
 221                dp << SWP_CFG_DP_SHIFT |
 222                de << SWP_CFG_DE_SHIFT |
 223                ep << SWP_CFG_EP_SHIFT);
 224}
 225
 226#define QMAN_RT_MODE       0x00000100
 227
 228static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
 229{
 230        /* 'first' is included, 'last' is excluded */
 231        if (first <= last)
 232                return last - first;
 233        else
 234                return (2 * ringsize) - (first - last);
 235}
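
/*
 * Illustrative worked example (editor's note, not part of the driver):
 * the EQCR producer/consumer indices carry one extra wrap bit on top of
 * the ring index, so they count modulo 2 * ringsize and qm_cyc_diff()
 * subtracts from 2 * ringsize when 'first' has wrapped past 'last'.
 * For an 8-entry ring (indices 0..15):
 *
 *   qm_cyc_diff(8, 2, 6)  == 4    distance from 2 (incl.) to 6 (excl.)
 *   qm_cyc_diff(8, 14, 2) == 4    wrap case: 16 - (14 - 2)
 */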
 236
 237/**
 238 * qbman_swp_init() - Create a functional object representing the given
 239 *                    QBMan portal descriptor.
 240 * @d: the given qbman swp descriptor
 241 *
 242 * Return qbman_swp portal for success, NULL if the object cannot
 243 * be created.
 244 */
 245struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
 246{
 247        struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
 248        u32 reg;
 249        u32 mask_size;
 250        u32 eqcr_pi;
 251
 252        if (!p)
 253                return NULL;
 254
 255        spin_lock_init(&p->access_spinlock);
 256
 257        p->desc = d;
 258        p->mc.valid_bit = QB_VALID_BIT;
 259        p->sdq = 0;
 260        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
 261        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
 262        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
 263        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
 264                p->mr.valid_bit = QB_VALID_BIT;
 265
 266        atomic_set(&p->vdq.available, 1);
 267        p->vdq.valid_bit = QB_VALID_BIT;
 268        p->dqrr.next_idx = 0;
 269        p->dqrr.valid_bit = QB_VALID_BIT;
 270
 271        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
 272                p->dqrr.dqrr_size = 4;
 273                p->dqrr.reset_bug = 1;
 274        } else {
 275                p->dqrr.dqrr_size = 8;
 276                p->dqrr.reset_bug = 0;
 277        }
 278
 279        p->addr_cena = d->cena_bar;
 280        p->addr_cinh = d->cinh_bar;
 281
 282        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 283
 284                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
 285                        1, /* Writes Non-cacheable */
 286                        0, /* EQCR_CI stashing threshold */
 287                        3, /* RPM: RCR in array mode */
 288                        2, /* DCM: Discrete consumption ack */
 289                        2, /* EPM: EQCR in ring mode */
  290                        1, /* mem stashing drop enable */
 291                        1, /* mem stashing priority enable */
 292                        1, /* mem stashing enable */
 293                        1, /* dequeue stashing priority enable */
  294                        0, /* dequeue stashing enable */
 295                        0); /* EQCR_CI stashing priority enable */
 296        } else {
 297                memset(p->addr_cena, 0, 64 * 1024);
 298                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
 299                        1, /* Writes Non-cacheable */
 300                        1, /* EQCR_CI stashing threshold */
 301                        3, /* RPM: RCR in array mode */
 302                        2, /* DCM: Discrete consumption ack */
 303                        0, /* EPM: EQCR in ring mode */
 304                        1, /* mem stashing drop enable */
 305                        1, /* mem stashing priority enable */
 306                        1, /* mem stashing enable */
 307                        1, /* dequeue stashing priority enable */
 308                        0, /* dequeue stashing enable */
 309                        0); /* EQCR_CI stashing priority enable */
 310                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
 311                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
 312                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
 313        }
 314
 315        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
 316        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
 317        if (!reg) {
 318                pr_err("qbman: the portal is not enabled!\n");
 319                kfree(p);
 320                return NULL;
 321        }
 322
 323        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
 324                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
 325                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
 326        }
 327        /*
 328         * SDQCR needs to be initialized to 0 when no channels are
 329         * being dequeued from or else the QMan HW will indicate an
 330         * error.  The values that were calculated above will be
 331         * applied when dequeues from a specific channel are enabled.
 332         */
 333        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
 334
 335        p->eqcr.pi_ring_size = 8;
 336        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
 337                p->eqcr.pi_ring_size = 32;
 338                qbman_swp_enqueue_ptr =
 339                        qbman_swp_enqueue_mem_back;
 340                qbman_swp_enqueue_multiple_ptr =
 341                        qbman_swp_enqueue_multiple_mem_back;
 342                qbman_swp_enqueue_multiple_desc_ptr =
 343                        qbman_swp_enqueue_multiple_desc_mem_back;
 344                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
 345                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
 346                qbman_swp_release_ptr = qbman_swp_release_mem_back;
 347        }
 348
 349        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
 350                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
 351        eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
 352        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
 353        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
 354        p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
 355                        & p->eqcr.pi_ci_mask;
 356        p->eqcr.available = p->eqcr.pi_ring_size;
 357
 358        return p;
 359}
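
/*
 * Illustrative sketch (editor's example, not part of the driver): typical
 * portal bring-up. The caller (e.g. the dpio driver) owns a filled-in
 * qbman_swp_desc that must stay valid for the whole lifetime of the portal,
 * because qbman_swp_init() stores a pointer to it rather than copying it.
 * qbman_swp_interrupt_set_inhibit() is the helper defined later in this
 * file; error handling beyond the NULL check is left to the caller.
 */
static struct qbman_swp * __maybe_unused
example_portal_bringup(const struct qbman_swp_desc *desc)
{
        struct qbman_swp *swp = qbman_swp_init(desc);

        if (!swp)
                return NULL;

        /* Keep portal interrupts masked until an IRQ handler is wired up */
        qbman_swp_interrupt_set_inhibit(swp, 1);

        return swp;     /* torn down later with qbman_swp_finish(swp) */
}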
 360
 361/**
  362 * qbman_swp_finish() - Destroy the functional object representing the given
  363 *                      QBMan portal descriptor.
 364 * @p: the qbman_swp object to be destroyed
 365 */
 366void qbman_swp_finish(struct qbman_swp *p)
 367{
 368        kfree(p);
 369}
 370
 371/**
 372 * qbman_swp_interrupt_read_status()
 373 * @p: the given software portal
 374 *
 375 * Return the value in the SWP_ISR register.
 376 */
 377u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
 378{
 379        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
 380}
 381
 382/**
 383 * qbman_swp_interrupt_clear_status()
 384 * @p: the given software portal
 385 * @mask: The mask to clear in SWP_ISR register
 386 */
 387void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
 388{
 389        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
 390}
 391
 392/**
 393 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 394 * @p: the given software portal
 395 *
 396 * Return the value in the SWP_IER register.
 397 */
 398u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
 399{
 400        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
 401}
 402
 403/**
 404 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 405 * @p: the given software portal
 406 * @mask: The mask of bits to enable in SWP_IER
 407 */
 408void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
 409{
 410        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
 411}
 412
 413/**
 414 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 415 * @p: the given software portal object
 416 *
 417 * Return the value in the SWP_IIR register.
 418 */
 419int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
 420{
 421        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
 422}
 423
 424/**
 425 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 426 * @p: the given software portal object
  427 * @inhibit: whether to inhibit the IRQs
 428 */
 429void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
 430{
 431        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
 432}
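
/*
 * Illustrative sketch (editor's example, not part of the driver): the
 * interrupt helpers above are normally used together - read SWP_ISR to see
 * which sources fired, handle them, then write the same bits back to clear
 * them. The EXAMPLE_SWP_IRQ_DQRI bit value is hypothetical; real source
 * bits are defined by the portal's users.
 */
#define EXAMPLE_SWP_IRQ_DQRI 0x01       /* hypothetical "DQRR not empty" source */

static void __maybe_unused example_portal_irq(struct qbman_swp *swp)
{
        u32 status = qbman_swp_interrupt_read_status(swp);

        if (status & EXAMPLE_SWP_IRQ_DQRI) {
                /* ... defer DQRR processing to NAPI/a workqueue ... */
        }

        /* Acknowledge the sources that have been handled */
        qbman_swp_interrupt_clear_status(swp, status);
}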
 433
 434/*
 435 * Different management commands all use this common base layer of code to issue
 436 * commands and poll for results.
 437 */
 438
 439/*
 440 * Returns a pointer to where the caller should fill in their management command
 441 * (caller should ignore the verb byte)
 442 */
 443void *qbman_swp_mc_start(struct qbman_swp *p)
 444{
 445        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
 446                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
 447        else
 448                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
 449}
 450
 451/*
  452 * Merges in the caller-supplied command verb (which should not include the
  453 * valid-bit) and submits the command to hardware
 454 */
 455void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
 456{
 457        u8 *v = cmd;
 458
 459        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 460                dma_wmb();
 461                *v = cmd_verb | p->mc.valid_bit;
 462        } else {
 463                *v = cmd_verb | p->mc.valid_bit;
 464                dma_wmb();
 465                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
 466        }
 467}
 468
 469/*
  470 * Checks for a completed response (returns non-NULL only if the response
 471 * is complete).
 472 */
 473void *qbman_swp_mc_result(struct qbman_swp *p)
 474{
 475        u32 *ret, verb;
 476
 477        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 478                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
 479                /* Remove the valid-bit - command completed if the rest
 480                 * is non-zero.
 481                 */
 482                verb = ret[0] & ~QB_VALID_BIT;
 483                if (!verb)
 484                        return NULL;
 485                p->mc.valid_bit ^= QB_VALID_BIT;
 486        } else {
 487                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
 488                /* Command completed if the valid bit is toggled */
 489                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
 490                        return NULL;
 491                /* Command completed if the rest is non-zero */
 492                verb = ret[0] & ~QB_VALID_BIT;
 493                if (!verb)
 494                        return NULL;
 495                p->mr.valid_bit ^= QB_VALID_BIT;
 496        }
 497
 498        return ret;
 499}
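
/*
 * Illustrative sketch (editor's example, not part of the driver): how the
 * start/submit/result triplet above is used. The command layout and verb
 * below are placeholders (real users define per-command structures, e.g.
 * for buffer acquire); a production caller would also bound the polling
 * loop with a retry limit.
 */
struct example_mc_cmd {
        u8 verb;                /* filled in by qbman_swp_mc_submit() */
        u8 reserved;
        __le16 param;           /* hypothetical command parameter */
        u8 padding[60];
};

static void * __maybe_unused example_mc_command(struct qbman_swp *swp,
                                                u8 cmd_verb, u16 param)
{
        struct example_mc_cmd *cmd;
        void *rsp;

        cmd = qbman_swp_mc_start(swp);
        if (!cmd)
                return NULL;

        /* Fill in everything except the verb byte */
        cmd->param = cpu_to_le16(param);

        qbman_swp_mc_submit(swp, cmd, cmd_verb);

        /* Poll until the hardware writes the response */
        do {
                rsp = qbman_swp_mc_result(swp);
        } while (!rsp);

        return rsp;
}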
 500
 501#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
 502enum qb_enqueue_commands {
 503        enqueue_empty = 0,
 504        enqueue_response_always = 1,
 505        enqueue_rejects_to_fq = 2
 506};
 507
 508#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
 509#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
 510#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
 511#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
 512
 513/**
 514 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 515 *                         default/starting state.
 516 */
 517void qbman_eq_desc_clear(struct qbman_eq_desc *d)
 518{
 519        memset(d, 0, sizeof(*d));
 520}
 521
 522/**
 523 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 524 * @d:                the enqueue descriptor.
  525 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 526 *                    rejections returned on a FQ.
 527 */
 528void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
 529{
 530        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
 531        if (respond_success)
 532                d->verb |= enqueue_response_always;
 533        else
 534                d->verb |= enqueue_rejects_to_fq;
 535}
 536
 537/*
 538 * Exactly one of the following descriptor "targets" should be set. (Calling any
 539 * one of these will replace the effect of any prior call to one of these.)
 540 *   -enqueue to a frame queue
 541 *   -enqueue to a queuing destination
 542 */
 543
 544/**
 545 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 546 * @d:    the enqueue descriptor
 547 * @fqid: the id of the frame queue to be enqueued
 548 */
 549void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
 550{
 551        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
 552        d->tgtid = cpu_to_le32(fqid);
 553}
 554
 555/**
 556 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 557 * @d:       the enqueue descriptor
 558 * @qdid:    the id of the queuing destination to be enqueued
 559 * @qd_bin:  the queuing destination bin
 560 * @qd_prio: the queuing destination priority
 561 */
 562void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
 563                          u32 qd_bin, u32 qd_prio)
 564{
 565        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
 566        d->tgtid = cpu_to_le32(qdid);
 567        d->qdbin = cpu_to_le16(qd_bin);
 568        d->qpri = qd_prio;
 569}
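
/*
 * Illustrative sketch (editor's example, not part of the driver): build an
 * enqueue descriptor and enqueue one frame to a frame queue.
 * qbman_swp_enqueue() is assumed to be the wrapper declared in
 * qbman-portal.h that dispatches through qbman_swp_enqueue_ptr; the frame
 * descriptor is assumed to have been prepared by the caller (for instance
 * with the dpaa2_fd_set_*() helpers).
 */
static int __maybe_unused example_enqueue_to_fq(struct qbman_swp *swp,
                                                u32 fqid,
                                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);       /* rejections go back to the FQ */
        qbman_eq_desc_set_fq(&ed, fqid);

        /* 0 on success, -EBUSY if no EQCR entry is currently free */
        return qbman_swp_enqueue(swp, &ed, fd);
}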
 570
 571#define EQAR_IDX(eqar)     ((eqar) & 0x7)
 572#define EQAR_VB(eqar)      ((eqar) & 0x80)
 573#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
 574
 575#define QB_RT_BIT ((u32)0x100)
 576/**
 577 * qbman_swp_enqueue_direct() - Issue an enqueue command
 578 * @s:  the software portal used for enqueue
 579 * @d:  the enqueue descriptor
 580 * @fd: the frame descriptor to be enqueued
 581 *
 582 * Please note that 'fd' should only be NULL if the "action" of the
 583 * descriptor is "orp_hole" or "orp_nesn".
 584 *
 585 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 586 */
 587static
 588int qbman_swp_enqueue_direct(struct qbman_swp *s,
 589                             const struct qbman_eq_desc *d,
 590                             const struct dpaa2_fd *fd)
 591{
 592        int flags = 0;
 593        int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
 594
 595        if (ret >= 0)
 596                ret = 0;
 597        else
 598                ret = -EBUSY;
 599        return  ret;
 600}
 601
 602/**
 603 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 604 * @s:  the software portal used for enqueue
 605 * @d:  the enqueue descriptor
 606 * @fd: the frame descriptor to be enqueued
 607 *
 608 * Please note that 'fd' should only be NULL if the "action" of the
 609 * descriptor is "orp_hole" or "orp_nesn".
 610 *
 611 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 612 */
 613static
 614int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
 615                               const struct qbman_eq_desc *d,
 616                               const struct dpaa2_fd *fd)
 617{
 618        int flags = 0;
 619        int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
 620
 621        if (ret >= 0)
 622                ret = 0;
 623        else
 624                ret = -EBUSY;
 625        return  ret;
 626}
 627
 628/**
 629 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 630 * using one enqueue descriptor
 631 * @s:  the software portal used for enqueue
 632 * @d:  the enqueue descriptor
  633 * @fd: pointer to the table of frame descriptors to be enqueued
  634 * @flags: table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
  635 * @num_frames: number of frames to be enqueued
 636 *
 637 * Return the number of fd enqueued, or a negative error number.
 638 */
 639static
 640int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
 641                                      const struct qbman_eq_desc *d,
 642                                      const struct dpaa2_fd *fd,
 643                                      uint32_t *flags,
 644                                      int num_frames)
 645{
 646        uint32_t *p = NULL;
 647        const uint32_t *cl = (uint32_t *)d;
 648        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 649        int i, num_enqueued = 0;
 650
 651        spin_lock(&s->access_spinlock);
 652        half_mask = (s->eqcr.pi_ci_mask>>1);
 653        full_mask = s->eqcr.pi_ci_mask;
 654
 655        if (!s->eqcr.available) {
 656                eqcr_ci = s->eqcr.ci;
 657                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
 658                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
 659                s->eqcr.ci &= full_mask;
 660
 661                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 662                                        eqcr_ci, s->eqcr.ci);
 663                if (!s->eqcr.available) {
 664                        spin_unlock(&s->access_spinlock);
 665                        return 0;
 666                }
 667        }
 668
 669        eqcr_pi = s->eqcr.pi;
 670        num_enqueued = (s->eqcr.available < num_frames) ?
 671                        s->eqcr.available : num_frames;
 672        s->eqcr.available -= num_enqueued;
 673        /* Fill in the EQCR ring */
 674        for (i = 0; i < num_enqueued; i++) {
 675                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 676                /* Skip copying the verb */
 677                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 678                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 679                       &fd[i], sizeof(*fd));
 680                eqcr_pi++;
 681        }
 682
 683        dma_wmb();
 684
 685        /* Set the verb byte, have to substitute in the valid-bit */
 686        eqcr_pi = s->eqcr.pi;
 687        for (i = 0; i < num_enqueued; i++) {
 688                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 689                p[0] = cl[0] | s->eqcr.pi_vb;
 690                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
 691                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
 692
 693                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
 694                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
 695                }
 696                eqcr_pi++;
 697                if (!(eqcr_pi & half_mask))
 698                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 699        }
 700
  701        /* Advance the producer index over the entries just written */
 702        eqcr_pi = s->eqcr.pi;
 703        for (i = 0; i < num_enqueued; i++)
 704                eqcr_pi++;
 705        s->eqcr.pi = eqcr_pi & full_mask;
 706        spin_unlock(&s->access_spinlock);
 707
 708        return num_enqueued;
 709}
 710
 711/**
 712 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 713 * using one enqueue descriptor
 714 * @s:  the software portal used for enqueue
 715 * @d:  the enqueue descriptor
  716 * @fd: pointer to the table of frame descriptors to be enqueued
  717 * @flags: table of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
  718 * @num_frames: number of frames to be enqueued
 719 *
 720 * Return the number of fd enqueued, or a negative error number.
 721 */
 722static
 723int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 724                                        const struct qbman_eq_desc *d,
 725                                        const struct dpaa2_fd *fd,
 726                                        uint32_t *flags,
 727                                        int num_frames)
 728{
 729        uint32_t *p = NULL;
 730        const uint32_t *cl = (uint32_t *)(d);
 731        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 732        int i, num_enqueued = 0;
 733        unsigned long irq_flags;
 734
 735        spin_lock(&s->access_spinlock);
 736        local_irq_save(irq_flags);
 737
 738        half_mask = (s->eqcr.pi_ci_mask>>1);
 739        full_mask = s->eqcr.pi_ci_mask;
 740        if (!s->eqcr.available) {
 741                eqcr_ci = s->eqcr.ci;
 742                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
 743                s->eqcr.ci = *p & full_mask;
 744                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 745                                        eqcr_ci, s->eqcr.ci);
 746                if (!s->eqcr.available) {
 747                        local_irq_restore(irq_flags);
 748                        spin_unlock(&s->access_spinlock);
 749                        return 0;
 750                }
 751        }
 752
 753        eqcr_pi = s->eqcr.pi;
 754        num_enqueued = (s->eqcr.available < num_frames) ?
 755                        s->eqcr.available : num_frames;
 756        s->eqcr.available -= num_enqueued;
 757        /* Fill in the EQCR ring */
 758        for (i = 0; i < num_enqueued; i++) {
 759                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 760                /* Skip copying the verb */
 761                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 762                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 763                       &fd[i], sizeof(*fd));
 764                eqcr_pi++;
 765        }
 766
 767        /* Set the verb byte, have to substitute in the valid-bit */
 768        eqcr_pi = s->eqcr.pi;
 769        for (i = 0; i < num_enqueued; i++) {
 770                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 771                p[0] = cl[0] | s->eqcr.pi_vb;
 772                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
 773                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
 774
 775                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
 776                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
 777                }
 778                eqcr_pi++;
 779                if (!(eqcr_pi & half_mask))
 780                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 781        }
 782        s->eqcr.pi = eqcr_pi & full_mask;
 783
 784        dma_wmb();
 785        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
 786                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
 787        local_irq_restore(irq_flags);
 788        spin_unlock(&s->access_spinlock);
 789
 790        return num_enqueued;
 791}
 792
 793/**
 794 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
  795 * using multiple enqueue descriptors
  796 * @s:  the software portal used for enqueue
  797 * @d:  table of minimal enqueue descriptors
  798 * @fd: pointer to the table of frame descriptors to be enqueued
  799 * @num_frames: number of frames to be enqueued
 800 *
 801 * Return the number of fd enqueued, or a negative error number.
 802 */
 803static
 804int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
 805                                           const struct qbman_eq_desc *d,
 806                                           const struct dpaa2_fd *fd,
 807                                           int num_frames)
 808{
 809        uint32_t *p;
 810        const uint32_t *cl;
 811        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 812        int i, num_enqueued = 0;
 813
 814        half_mask = (s->eqcr.pi_ci_mask>>1);
 815        full_mask = s->eqcr.pi_ci_mask;
 816        if (!s->eqcr.available) {
 817                eqcr_ci = s->eqcr.ci;
 818                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
 819                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
 820                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 821                                        eqcr_ci, s->eqcr.ci);
 822                if (!s->eqcr.available)
 823                        return 0;
 824        }
 825
 826        eqcr_pi = s->eqcr.pi;
 827        num_enqueued = (s->eqcr.available < num_frames) ?
 828                        s->eqcr.available : num_frames;
 829        s->eqcr.available -= num_enqueued;
 830        /* Fill in the EQCR ring */
 831        for (i = 0; i < num_enqueued; i++) {
 832                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 833                cl = (uint32_t *)(&d[i]);
 834                /* Skip copying the verb */
 835                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 836                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 837                       &fd[i], sizeof(*fd));
 838                eqcr_pi++;
 839        }
 840
 841        dma_wmb();
 842
 843        /* Set the verb byte, have to substitute in the valid-bit */
 844        eqcr_pi = s->eqcr.pi;
 845        for (i = 0; i < num_enqueued; i++) {
 846                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 847                cl = (uint32_t *)(&d[i]);
 848                p[0] = cl[0] | s->eqcr.pi_vb;
 849                eqcr_pi++;
 850                if (!(eqcr_pi & half_mask))
 851                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 852        }
 853
  854        /* Advance the producer index over the entries just written */
 855        eqcr_pi = s->eqcr.pi;
 856        for (i = 0; i < num_enqueued; i++)
 857                eqcr_pi++;
 858        s->eqcr.pi = eqcr_pi & full_mask;
 859
 860        return num_enqueued;
 861}
 862
 863/**
 864 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
  865 * using multiple enqueue descriptors
  866 * @s:  the software portal used for enqueue
  867 * @d:  table of minimal enqueue descriptors
  868 * @fd: pointer to the table of frame descriptors to be enqueued
  869 * @num_frames: number of frames to be enqueued
 870 *
 871 * Return the number of fd enqueued, or a negative error number.
 872 */
 873static
 874int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
 875                                           const struct qbman_eq_desc *d,
 876                                           const struct dpaa2_fd *fd,
 877                                           int num_frames)
 878{
 879        uint32_t *p;
 880        const uint32_t *cl;
 881        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 882        int i, num_enqueued = 0;
 883
 884        half_mask = (s->eqcr.pi_ci_mask>>1);
 885        full_mask = s->eqcr.pi_ci_mask;
 886        if (!s->eqcr.available) {
 887                eqcr_ci = s->eqcr.ci;
 888                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
 889                s->eqcr.ci = *p & full_mask;
 890                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 891                                        eqcr_ci, s->eqcr.ci);
 892                if (!s->eqcr.available)
 893                        return 0;
 894        }
 895
 896        eqcr_pi = s->eqcr.pi;
 897        num_enqueued = (s->eqcr.available < num_frames) ?
 898                        s->eqcr.available : num_frames;
 899        s->eqcr.available -= num_enqueued;
 900        /* Fill in the EQCR ring */
 901        for (i = 0; i < num_enqueued; i++) {
 902                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 903                cl = (uint32_t *)(&d[i]);
 904                /* Skip copying the verb */
 905                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 906                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 907                       &fd[i], sizeof(*fd));
 908                eqcr_pi++;
 909        }
 910
 911        /* Set the verb byte, have to substitute in the valid-bit */
 912        eqcr_pi = s->eqcr.pi;
 913        for (i = 0; i < num_enqueued; i++) {
 914                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 915                cl = (uint32_t *)(&d[i]);
 916                p[0] = cl[0] | s->eqcr.pi_vb;
 917                eqcr_pi++;
 918                if (!(eqcr_pi & half_mask))
 919                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 920        }
 921
 922        s->eqcr.pi = eqcr_pi & full_mask;
 923
 924        dma_wmb();
 925        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
 926                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
 927
 928        return num_enqueued;
 929}
 930
 931/* Static (push) dequeue */
 932
 933/**
 934 * qbman_swp_push_get() - Get the push dequeue setup
  935 * @s:           the software portal object
 936 * @channel_idx: the channel index to query
 937 * @enabled:     returned boolean to show whether the push dequeue is enabled
 938 *               for the given channel
 939 */
 940void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
 941{
 942        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
 943
 944        WARN_ON(channel_idx > 15);
  945        *enabled = src & (1 << channel_idx);
 946}
 947
 948/**
 949 * qbman_swp_push_set() - Enable or disable push dequeue
  950 * @s:           the software portal object
 951 * @channel_idx: the channel index (0 to 15)
 952 * @enable:      enable or disable push dequeue
 953 */
 954void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
 955{
 956        u16 dqsrc;
 957
 958        WARN_ON(channel_idx > 15);
 959        if (enable)
 960                s->sdq |= 1 << channel_idx;
 961        else
 962                s->sdq &= ~(1 << channel_idx);
 963
  964        /* Read back the complete src map.  If no channels are enabled
 965         * the SDQCR must be 0 or else QMan will assert errors
 966         */
 967        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
 968        if (dqsrc != 0)
 969                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
 970        else
 971                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
 972}
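
/*
 * Illustrative sketch (editor's example, not part of the driver): enable
 * push dequeue from a single channel. The channel index is whatever the
 * caller owns (a DPCON/DPIO channel); index 3 below is arbitrary.
 */
static void __maybe_unused example_enable_push_dequeue(struct qbman_swp *swp)
{
        int enabled;

        qbman_swp_push_set(swp, 3, 1);          /* start dequeuing channel 3 */

        qbman_swp_push_get(swp, 3, &enabled);   /* 'enabled' is now non-zero */
}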
 973
 974#define QB_VDQCR_VERB_DCT_SHIFT    0
 975#define QB_VDQCR_VERB_DT_SHIFT     2
 976#define QB_VDQCR_VERB_RLS_SHIFT    4
 977#define QB_VDQCR_VERB_WAE_SHIFT    5
 978
 979enum qb_pull_dt_e {
 980        qb_pull_dt_channel,
 981        qb_pull_dt_workqueue,
 982        qb_pull_dt_framequeue
 983};
 984
 985/**
 986 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 987 *                           default/starting state
 988 * @d: the pull dequeue descriptor to be cleared
 989 */
 990void qbman_pull_desc_clear(struct qbman_pull_desc *d)
 991{
 992        memset(d, 0, sizeof(*d));
 993}
 994
 995/**
 996 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 997 * @d:            the pull dequeue descriptor to be set
 998 * @storage:      the pointer of the memory to store the dequeue result
 999 * @storage_phys: the physical address of the storage memory
1000 * @stash:        to indicate whether write allocate is enabled
1001 *
 1002 * If not called, or called with 'storage' as NULL, the resulting pull dequeues
1003 * will produce results to DQRR. If 'storage' is non-NULL, then results are
1004 * produced to the given memory location (using the DMA address which
1005 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
1006 * those writes to main-memory express a cache-warming attribute.
1007 */
1008void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1009                                 struct dpaa2_dq *storage,
1010                                 dma_addr_t storage_phys,
1011                                 int stash)
1012{
1013        /* save the virtual address */
1014        d->rsp_addr_virt = (u64)(uintptr_t)storage;
1015
1016        if (!storage) {
1017                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1018                return;
1019        }
1020        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1021        if (stash)
1022                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1023        else
1024                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1025
1026        d->rsp_addr = cpu_to_le64(storage_phys);
1027}
1028
1029/**
1030 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
1031 * @d:         the pull dequeue descriptor to be set
1032 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
1033 */
1034void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
1035{
1036        d->numf = numframes - 1;
1037}
1038
1039/*
1040 * Exactly one of the following descriptor "actions" should be set. (Calling any
1041 * one of these will replace the effect of any prior call to one of these.)
1042 * - pull dequeue from the given frame queue (FQ)
1043 * - pull dequeue from any FQ in the given work queue (WQ)
1044 * - pull dequeue from any FQ in any WQ in the given channel
1045 */
1046
1047/**
1048 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
1049 * @fqid: the frame queue index of the given FQ
1050 */
1051void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
1052{
1053        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1054        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1055        d->dq_src = cpu_to_le32(fqid);
1056}
1057
1058/**
1059 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
1060 * @wqid: composed of channel id and wqid within the channel
1061 * @dct:  the dequeue command type
1062 */
1063void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
1064                            enum qbman_pull_type_e dct)
1065{
1066        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1067        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1068        d->dq_src = cpu_to_le32(wqid);
1069}
1070
1071/**
1072 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
1073 *                                 dequeues
1074 * @chid: the channel id to be dequeued
1075 * @dct:  the dequeue command type
1076 */
1077void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
1078                                 enum qbman_pull_type_e dct)
1079{
1080        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1081        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1082        d->dq_src = cpu_to_le32(chid);
1083}
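
/*
 * Illustrative sketch (editor's example, not part of the driver): issue a
 * volatile (pull) dequeue of up to 16 frames from a frame queue into
 * caller-provided storage. 'storage'/'storage_phys' are assumed to be a
 * DMA-mapped array of 16 struct dpaa2_dq entries, and qbman_swp_pull() the
 * qbman-portal.h wrapper around qbman_swp_pull_ptr.
 */
static int __maybe_unused example_pull_from_fq(struct qbman_swp *swp, u32 fqid,
                                               struct dpaa2_dq *storage,
                                               dma_addr_t storage_phys)
{
        struct qbman_pull_desc pd;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_numframes(&pd, 16);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
        qbman_pull_desc_set_fq(&pd, fqid);

        /* 0 on success, -EBUSY while a previous pull is still in flight */
        return qbman_swp_pull(swp, &pd);
}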
1084
1085/**
1086 * qbman_swp_pull_direct() - Issue the pull dequeue command
1087 * @s: the software portal object
1088 * @d: the software portal descriptor which has been configured with
1089 *     the set of qbman_pull_desc_set_*() calls
1090 *
1091 * Return 0 for success, and -EBUSY if the software portal is not ready
1092 * to do pull dequeue.
1093 */
1094static
1095int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
1096{
1097        struct qbman_pull_desc *p;
1098
1099        if (!atomic_dec_and_test(&s->vdq.available)) {
1100                atomic_inc(&s->vdq.available);
1101                return -EBUSY;
1102        }
1103        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
1104        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
1105                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
1106        else
1107                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
1108        p->numf = d->numf;
1109        p->tok = QMAN_DQ_TOKEN_VALID;
1110        p->dq_src = d->dq_src;
1111        p->rsp_addr = d->rsp_addr;
1112        p->rsp_addr_virt = d->rsp_addr_virt;
1113        dma_wmb();
1114        /* Set the verb byte, have to substitute in the valid-bit */
1115        p->verb = d->verb | s->vdq.valid_bit;
1116        s->vdq.valid_bit ^= QB_VALID_BIT;
1117
1118        return 0;
1119}
1120
1121/**
1122 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
1123 * @s: the software portal object
1124 * @d: the software portal descriptor which has been configured with
1125 *     the set of qbman_pull_desc_set_*() calls
1126 *
1127 * Return 0 for success, and -EBUSY if the software portal is not ready
1128 * to do pull dequeue.
1129 */
1130static
1131int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
1132{
1133        struct qbman_pull_desc *p;
1134
1135        if (!atomic_dec_and_test(&s->vdq.available)) {
1136                atomic_inc(&s->vdq.available);
1137                return -EBUSY;
1138        }
1139        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
1140        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
1141                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
1142        else
1143                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
1144        p->numf = d->numf;
1145        p->tok = QMAN_DQ_TOKEN_VALID;
1146        p->dq_src = d->dq_src;
1147        p->rsp_addr = d->rsp_addr;
1148        p->rsp_addr_virt = d->rsp_addr_virt;
1149
1150        /* Set the verb byte, have to substitute in the valid-bit */
1151        p->verb = d->verb | s->vdq.valid_bit;
1152        s->vdq.valid_bit ^= QB_VALID_BIT;
1153        dma_wmb();
1154        qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1155
1156        return 0;
1157}
1158
1159#define QMAN_DQRR_PI_MASK   0xf
1160
1161/**
 1162 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
1163 * @s: the software portal object
1164 *
1165 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
1166 * only once, so repeated calls can return a sequence of DQRR entries, without
1167 * requiring they be consumed immediately or in any particular order.
1168 */
1169const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1170{
1171        u32 verb;
1172        u32 response_verb;
1173        u32 flags;
1174        struct dpaa2_dq *p;
1175
1176        /* Before using valid-bit to detect if something is there, we have to
1177         * handle the case of the DQRR reset bug...
1178         */
1179        if (unlikely(s->dqrr.reset_bug)) {
1180                /*
1181                 * We pick up new entries by cache-inhibited producer index,
1182                 * which means that a non-coherent mapping would require us to
1183                 * invalidate and read *only* once that PI has indicated that
1184                 * there's an entry here. The first trip around the DQRR ring
1185                 * will be much less efficient than all subsequent trips around
1186                 * it...
1187                 */
1188                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
1189                        QMAN_DQRR_PI_MASK;
1190
1191                /* there are new entries if pi != next_idx */
1192                if (pi == s->dqrr.next_idx)
1193                        return NULL;
1194
1195                /*
1196                 * if next_idx is/was the last ring index, and 'pi' is
1197                 * different, we can disable the workaround as all the ring
1198                 * entries have now been DMA'd to so valid-bit checking is
1199                 * repaired. Note: this logic needs to be based on next_idx
1200                 * (which increments one at a time), rather than on pi (which
1201                 * can burst and wrap-around between our snapshots of it).
1202                 */
1203                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
1204                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
1205                                 s->dqrr.next_idx, pi);
1206                        s->dqrr.reset_bug = 0;
1207                }
1208                prefetch(qbman_get_cmd(s,
1209                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1210        }
1211
1212        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1213        verb = p->dq.verb;
1214
1215        /*
1216         * If the valid-bit isn't of the expected polarity, nothing there. Note,
 1217 * in the DQRR reset bug workaround, we shouldn't need to skip this
 1218 * check, because we've already determined that a new entry is available
1219         * and we've invalidated the cacheline before reading it, so the
1220         * valid-bit behaviour is repaired and should tell us what we already
1221         * knew from reading PI.
1222         */
1223        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
1224                prefetch(qbman_get_cmd(s,
1225                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1226                return NULL;
1227        }
1228        /*
1229         * There's something there. Move "next_idx" attention to the next ring
1230         * entry (and prefetch it) before returning what we found.
1231         */
1232        s->dqrr.next_idx++;
1233        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
1234        if (!s->dqrr.next_idx)
1235                s->dqrr.valid_bit ^= QB_VALID_BIT;
1236
1237        /*
1238         * If this is the final response to a volatile dequeue command
1239         * indicate that the vdq is available
1240         */
1241        flags = p->dq.stat;
1242        response_verb = verb & QBMAN_RESULT_MASK;
1243        if ((response_verb == QBMAN_RESULT_DQ) &&
1244            (flags & DPAA2_DQ_STAT_VOLATILE) &&
1245            (flags & DPAA2_DQ_STAT_EXPIRED))
1246                atomic_inc(&s->vdq.available);
1247
1248        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1249
1250        return p;
1251}
1252
1253/**
 1254 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
1255 * @s: the software portal object
1256 *
1257 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
1258 * only once, so repeated calls can return a sequence of DQRR entries, without
1259 * requiring they be consumed immediately or in any particular order.
1260 */
1261const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1262{
1263        u32 verb;
1264        u32 response_verb;
1265        u32 flags;
1266        struct dpaa2_dq *p;
1267
1268        /* Before using valid-bit to detect if something is there, we have to
1269         * handle the case of the DQRR reset bug...
1270         */
1271        if (unlikely(s->dqrr.reset_bug)) {
1272                /*
1273                 * We pick up new entries by cache-inhibited producer index,
1274                 * which means that a non-coherent mapping would require us to
1275                 * invalidate and read *only* once that PI has indicated that
1276                 * there's an entry here. The first trip around the DQRR ring
1277                 * will be much less efficient than all subsequent trips around
1278                 * it...
1279                 */
1280                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
1281                        QMAN_DQRR_PI_MASK;
1282
1283                /* there are new entries if pi != next_idx */
1284                if (pi == s->dqrr.next_idx)
1285                        return NULL;
1286
1287                /*
1288                 * if next_idx is/was the last ring index, and 'pi' is
1289                 * different, we can disable the workaround as all the ring
1290                 * entries have now been DMA'd to so valid-bit checking is
1291                 * repaired. Note: this logic needs to be based on next_idx
1292                 * (which increments one at a time), rather than on pi (which
1293                 * can burst and wrap-around between our snapshots of it).
1294                 */
1295                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
1296                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
1297                                 s->dqrr.next_idx, pi);
1298                        s->dqrr.reset_bug = 0;
1299                }
1300                prefetch(qbman_get_cmd(s,
1301                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1302        }
1303
1304        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1305        verb = p->dq.verb;
1306
1307        /*
1308         * If the valid-bit isn't of the expected polarity, nothing there. Note,
 1309 * in the DQRR reset bug workaround, we shouldn't need to skip this
 1310 * check, because we've already determined that a new entry is available
1311         * and we've invalidated the cacheline before reading it, so the
1312         * valid-bit behaviour is repaired and should tell us what we already
1313         * knew from reading PI.
1314         */
1315        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
1316                prefetch(qbman_get_cmd(s,
1317                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1318                return NULL;
1319        }
1320        /*
1321         * There's something there. Move "next_idx" attention to the next ring
1322         * entry (and prefetch it) before returning what we found.
1323         */
1324        s->dqrr.next_idx++;
1325        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
1326        if (!s->dqrr.next_idx)
1327                s->dqrr.valid_bit ^= QB_VALID_BIT;
1328
1329        /*
1330         * If this is the final response to a volatile dequeue command
1331         * indicate that the vdq is available
1332         */
1333        flags = p->dq.stat;
1334        response_verb = verb & QBMAN_RESULT_MASK;
1335        if ((response_verb == QBMAN_RESULT_DQ) &&
1336            (flags & DPAA2_DQ_STAT_VOLATILE) &&
1337            (flags & DPAA2_DQ_STAT_EXPIRED))
1338                atomic_inc(&s->vdq.available);
1339
1340        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1341
1342        return p;
1343}
1344
1345/**
1346 * qbman_swp_dqrr_consume() -  Consume DQRR entries previously returned from
1347 *                             qbman_swp_dqrr_next().
1348 * @s: the software portal object
1349 * @dq: the DQRR entry to be consumed
1350 */
1351void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
1352{
1353        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1354}
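
/*
 * Illustrative sketch (editor's example, not part of the driver): drain the
 * DQRR with the next/consume pair. qbman_swp_dqrr_next() is assumed to be
 * the qbman-portal.h wrapper around qbman_swp_dqrr_next_ptr, and
 * dpaa2_dq_fd() the accessor from dpaa2-global.h; per-frame handling is
 * left to the caller.
 */
static int __maybe_unused example_drain_dqrr(struct qbman_swp *swp)
{
        const struct dpaa2_dq *dq;
        int cleaned = 0;

        while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
                const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);

                /* ... hand 'fd' to the frame consumer here ... */
                (void)fd;

                /* Tell QBMan this DQRR entry has been handled */
                qbman_swp_dqrr_consume(swp, dq);
                cleaned++;
        }

        return cleaned;
}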
1355
1356/**
1357 * qbman_result_has_new_result() - Check and get the dequeue response from the
1358 *                                 dq storage memory set in pull dequeue command
1359 * @s: the software portal object
1360 * @dq: the dequeue result read from the memory
1361 *
1362 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
1363 * dequeue result.
1364 *
1365 * Only used for user-provided storage of dequeue results, not DQRR. For
1366 * efficiency purposes, the driver will perform any required endianness
1367 * conversion to ensure that the user's dequeue result storage is in host-endian
1368 * format. As such, once the user has called qbman_result_has_new_result() and
1369 * been returned a valid dequeue result, they should not call it again on
1370 * the same memory location (except of course if another dequeue command has
1371 * been executed to produce a new result to that location).
1372 */
1373int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
1374{
1375        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
1376                return 0;
1377
1378        /*
1379         * Set token to be 0 so we will detect change back to 1
1380         * next time the looping is traversed. Const is cast away here
1381         * as we want users to treat the dequeue responses as read only.
1382         */
1383        ((struct dpaa2_dq *)dq)->dq.tok = 0;
1384
1385        /*
1386         * Determine whether VDQCR is available based on whether the
1387         * current result is sitting in the first storage location of
1388         * the busy command.
1389         */
1390        if (s->vdq.storage == dq) {
1391                s->vdq.storage = NULL;
1392                atomic_inc(&s->vdq.available);
1393        }
1394
1395        return 1;
1396}
1397
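/*
 * Example (illustrative sketch, not compiled as part of this file): polling
 * a caller-provided dequeue storage after a pull (volatile) dequeue command
 * has been issued; "storage" is assumed to be the dpaa2_dq array that was
 * programmed into the pull descriptor:
 *
 *	const struct dpaa2_dq *dq = &storage[i];
 *
 *	while (!qbman_result_has_new_result(swp, dq))
 *		cpu_relax();
 *
 * Once 1 is returned, dq holds a host-endian result and must not be polled
 * again until a new dequeue command targets the same location.
 */
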
1398/**
1399 * qbman_release_desc_clear() - Clear a release descriptor to its default state
1400 * @d: the release descriptor to be cleared
1401 */
1402void qbman_release_desc_clear(struct qbman_release_desc *d)
1403{
1404        memset(d, 0, sizeof(*d));
1405        d->verb = 1 << 5; /* Release Command Valid */
1406}
1407
1408/**
1409 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
1410 */
1411void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
1412{
1413        d->bpid = cpu_to_le16(bpid);
1414}
1415
1416/**
1417 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
1418 * interrupt source should be asserted after the release command is completed.
1419 */
1420void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1421{
1422        if (enable)
1423                d->verb |= 1 << 6;
1424        else
1425                d->verb &= ~(1 << 6);
1426}
1427
1428#define RAR_IDX(rar)     ((rar) & 0x7)
1429#define RAR_VB(rar)      ((rar) & 0x80)
1430#define RAR_SUCCESS(rar) ((rar) & 0x100)
1431
1432/**
1433 * qbman_swp_release_direct() - Issue a buffer release command
1434 * @s:           the software portal object
1435 * @d:           the release descriptor
1436 * @buffers:     a pointer to the array of buffer addresses to be released
1437 * @num_buffers: number of buffers to be released, must be less than 8
1438 *
1439 * Return 0 for success, -EBUSY if the release command ring is not ready.
1440 */
1441int qbman_swp_release_direct(struct qbman_swp *s,
1442                             const struct qbman_release_desc *d,
1443                             const u64 *buffers, unsigned int num_buffers)
1444{
1445        int i;
1446        struct qbman_release_desc *p;
1447        u32 rar;
1448
1449        if (!num_buffers || (num_buffers > 7))
1450                return -EINVAL;
1451
1452        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1453        if (!RAR_SUCCESS(rar))
1454                return -EBUSY;
1455
1456        /* Start the release command */
1457        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1458
1459        /* Copy the caller's buffer pointers to the command */
1460        for (i = 0; i < num_buffers; i++)
1461                p->buf[i] = cpu_to_le64(buffers[i]);
1462        p->bpid = d->bpid;
1463
1464        /*
1465         * Set the verb byte, have to substitute in the valid-bit
1466         * and the number of buffers.
1467         */
1468        dma_wmb();
1469        p->verb = d->verb | RAR_VB(rar) | num_buffers;
1470
1471        return 0;
1472}
1473
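/*
 * Example (illustrative sketch, not compiled as part of this file): building
 * a release descriptor with the helpers above and issuing the release,
 * retrying on -EBUSY; the retry policy is the caller's choice, not something
 * this function implements:
 *
 *	struct qbman_release_desc rd;
 *	int ret;
 *
 *	qbman_release_desc_clear(&rd);
 *	qbman_release_desc_set_bpid(&rd, bpid);
 *	do {
 *		ret = qbman_swp_release_direct(swp, &rd, buffers, num);
 *	} while (ret == -EBUSY);
 */
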
1474/**
1475 * qbman_swp_release_mem_back() - Issue a buffer release command
1476 * @s:           the software portal object
1477 * @d:           the release descriptor
1478 * @buffers:     a pointer to the array of buffer addresses to be released
1479 * @num_buffers: number of buffers to be released, must be less than 8
1480 *
1481 * Return 0 for success, -EBUSY if the release command ring is not ready.
1482 */
1483int qbman_swp_release_mem_back(struct qbman_swp *s,
1484                               const struct qbman_release_desc *d,
1485                               const u64 *buffers, unsigned int num_buffers)
1486{
1487        int i;
1488        struct qbman_release_desc *p;
1489        u32 rar;
1490
1491        if (!num_buffers || (num_buffers > 7))
1492                return -EINVAL;
1493
1494        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1495        if (!RAR_SUCCESS(rar))
1496                return -EBUSY;
1497
1498        /* Start the release command */
1499        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1500
1501        /* Copy the caller's buffer pointers to the command */
1502        for (i = 0; i < num_buffers; i++)
1503                p->buf[i] = cpu_to_le64(buffers[i]);
1504        p->bpid = d->bpid;
1505
1506        p->verb = d->verb | RAR_VB(rar) | num_buffers;
1507        dma_wmb();
1508        qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
1509                             RAR_IDX(rar)  * 4, QMAN_RT_MODE);
1510
1511        return 0;
1512}
1513
1514struct qbman_acquire_desc {
1515        u8 verb;
1516        u8 reserved;
1517        __le16 bpid;
1518        u8 num;
1519        u8 reserved2[59];
1520};
1521
1522struct qbman_acquire_rslt {
1523        u8 verb;
1524        u8 rslt;
1525        __le16 reserved;
1526        u8 num;
1527        u8 reserved2[3];
1528        __le64 buf[7];
1529};
1530
1531/**
1532 * qbman_swp_acquire() - Issue a buffer acquire command
1533 * @s:           the software portal object
1534 * @bpid:        the buffer pool index
1535 * @buffers:     a pointer to the array that receives the acquired buffer addresses
1536 * @num_buffers: number of buffers to be acquired, must be less than 8
1537 *
1538 * Return the number of buffers acquired (possibly fewer than requested) on
1539 * success, or a negative error code if the acquire command fails.
1540 */
1541int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
1542                      unsigned int num_buffers)
1543{
1544        struct qbman_acquire_desc *p;
1545        struct qbman_acquire_rslt *r;
1546        int i;
1547
1548        if (!num_buffers || (num_buffers > 7))
1549                return -EINVAL;
1550
1551        /* Start the management command */
1552        p = qbman_swp_mc_start(s);
1553
1554        if (!p)
1555                return -EBUSY;
1556
1557        /* Encode the caller-provided attributes */
1558        p->bpid = cpu_to_le16(bpid);
1559        p->num = num_buffers;
1560
1561        /* Complete the management command */
1562        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1563        if (unlikely(!r)) {
1564                pr_err("qbman: acquire from BPID %d failed, no response\n",
1565                       bpid);
1566                return -EIO;
1567        }
1568
1569        /* Decode the outcome */
1570        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
1571
1572        /* Determine success or failure */
1573        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1574                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
1575                       bpid, r->rslt);
1576                return -EIO;
1577        }
1578
1579        WARN_ON(r->num > num_buffers);
1580
1581        /* Copy the acquired buffers to the caller's array */
1582        for (i = 0; i < r->num; i++)
1583                buffers[i] = le64_to_cpu(r->buf[i]);
1584
1585        return (int)r->num;
1586}
1587
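/*
 * Example (illustrative sketch, not compiled as part of this file): because
 * qbman_swp_acquire() returns the number of buffers actually obtained, which
 * may be fewer than requested, callers should check the count rather than
 * testing for zero:
 *
 *	u64 bufs[7];
 *	int n;
 *
 *	n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *	if (n < 0)
 *		return n;
 *
 * n buffers (possibly zero, if the pool is empty) are then valid in
 * bufs[0..n-1].
 */
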
1588struct qbman_alt_fq_state_desc {
1589        u8 verb;
1590        u8 reserved[3];
1591        __le32 fqid;
1592        u8 reserved2[56];
1593};
1594
1595struct qbman_alt_fq_state_rslt {
1596        u8 verb;
1597        u8 rslt;
1598        u8 reserved[62];
1599};
1600
1601#define ALT_FQ_FQID_MASK 0x00FFFFFF
1602
1603int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
1604                           u8 alt_fq_verb)
1605{
1606        struct qbman_alt_fq_state_desc *p;
1607        struct qbman_alt_fq_state_rslt *r;
1608
1609        /* Start the management command */
1610        p = qbman_swp_mc_start(s);
1611        if (!p)
1612                return -EBUSY;
1613
1614        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
1615
1616        /* Complete the management command */
1617        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1618        if (unlikely(!r)) {
1619                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1620                       alt_fq_verb);
1621                return -EIO;
1622        }
1623
1624        /* Decode the outcome */
1625        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
1626
1627        /* Determine success or failure */
1628        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1629                pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
1630                       fqid, r->verb, r->rslt);
1631                return -EIO;
1632        }
1633
1634        return 0;
1635}
1636
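/*
 * Example (illustrative sketch, not compiled as part of this file): the
 * alt_fq_verb argument selects which state-change command is issued; the
 * verb codes below are assumed to be the QBMAN_FQ_XOFF/QBMAN_FQ_XON
 * definitions from qbman-portal.h:
 *
 *	ret = qbman_swp_alt_fq_state(swp, fqid, QBMAN_FQ_XOFF);
 *	...
 *	ret = qbman_swp_alt_fq_state(swp, fqid, QBMAN_FQ_XON);
 */
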
1637struct qbman_cdan_ctrl_desc {
1638        u8 verb;
1639        u8 reserved;
1640        __le16 ch;
1641        u8 we;
1642        u8 ctrl;
1643        __le16 reserved2;
1644        __le64 cdan_ctx;
1645        u8 reserved3[48];
1646
1647};
1648
1649struct qbman_cdan_ctrl_rslt {
1650        u8 verb;
1651        u8 rslt;
1652        __le16 ch;
1653        u8 reserved[60];
1654};
1655
1656int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
1657                       u8 we_mask, u8 cdan_en,
1658                       u64 ctx)
1659{
1660        struct qbman_cdan_ctrl_desc *p = NULL;
1661        struct qbman_cdan_ctrl_rslt *r = NULL;
1662
1663        /* Start the management command */
1664        p = qbman_swp_mc_start(s);
1665        if (!p)
1666                return -EBUSY;
1667
1668        /* Encode the caller-provided attributes */
1669        p->ch = cpu_to_le16(channelid);
1670        p->we = we_mask;
1671        if (cdan_en)
1672                p->ctrl = 1;
1673        else
1674                p->ctrl = 0;
1675        p->cdan_ctx = cpu_to_le64(ctx);
1676
1677        /* Complete the management command */
1678        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1679        if (unlikely(!r)) {
1680                pr_err("qbman: wqchan config failed, no response\n");
1681                return -EIO;
1682        }
1683
1684        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
1685
1686        /* Determine success or failure */
1687        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1688                pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
1689                       channelid, r->rslt);
1690                return -EIO;
1691        }
1692
1693        return 0;
1694}
1695
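/*
 * Example (illustrative sketch, not compiled as part of this file): enabling
 * channel dequeue availability notifications (CDAN) with a caller context;
 * the CDAN_WE_EN/CDAN_WE_CTX write-enable bits are assumed to be the
 * definitions from qbman-portal.h:
 *
 *	ret = qbman_swp_CDAN_set(swp, channel_id,
 *				 CDAN_WE_EN | CDAN_WE_CTX,
 *				 1, ctx);
 */
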
1696#define QBMAN_RESPONSE_VERB_MASK        0x7f
1697#define QBMAN_FQ_QUERY_NP               0x45
1698#define QBMAN_BP_QUERY                  0x32
1699
1700struct qbman_fq_query_desc {
1701        u8 verb;
1702        u8 reserved[3];
1703        __le32 fqid;
1704        u8 reserved2[56];
1705};
1706
1707int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
1708                         struct qbman_fq_query_np_rslt *r)
1709{
1710        struct qbman_fq_query_desc *p;
1711        void *resp;
1712
1713        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
1714        if (!p)
1715                return -EBUSY;
1716
1717        /* FQID is a 24 bit value */
1718        p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
1719        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
1720        if (!resp) {
1721                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
1722                       fqid);
1723                return -EIO;
1724        }
1725        *r = *(struct qbman_fq_query_np_rslt *)resp;
1726        /* Decode the outcome */
1727        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
1728
1729        /* Determine success or failure */
1730        if (r->rslt != QBMAN_MC_RSLT_OK) {
1731                pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
1732                       fqid, r->rslt);
1733                return -EIO;
1734        }
1735
1736        return 0;
1737}
1738
1739u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
1740{
1741        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
1742}
1743
1744u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
1745{
1746        return le32_to_cpu(r->byte_cnt);
1747}
1748
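/*
 * Example (illustrative sketch, not compiled as part of this file): querying
 * the non-programmable state of a frame queue and reading back its fill
 * level with the accessors above:
 *
 *	struct qbman_fq_query_np_rslt state;
 *	u32 frames, bytes;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &state)) {
 *		frames = qbman_fq_state_frame_count(&state);
 *		bytes = qbman_fq_state_byte_count(&state);
 *	}
 */
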
1749struct qbman_bp_query_desc {
1750        u8 verb;
1751        u8 reserved;
1752        __le16 bpid;
1753        u8 reserved2[60];
1754};
1755
1756int qbman_bp_query(struct qbman_swp *s, u16 bpid,
1757                   struct qbman_bp_query_rslt *r)
1758{
1759        struct qbman_bp_query_desc *p;
1760        void *resp;
1761
1762        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
1763        if (!p)
1764                return -EBUSY;
1765
1766        p->bpid = cpu_to_le16(bpid);
1767        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
1768        if (!resp) {
1769                pr_err("qbman: Query BPID %d fields failed, no response\n",
1770                       bpid);
1771                return -EIO;
1772        }
1773        *r = *(struct qbman_bp_query_rslt *)resp;
1774        /* Decode the outcome */
1775        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
1776
1777        /* Determine success or failure */
1778        if (r->rslt != QBMAN_MC_RSLT_OK) {
1779                pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
1780                       bpid, r->rslt);
1781                return -EIO;
1782        }
1783
1784        return 0;
1785}
1786
1787u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
1788{
1789        return le32_to_cpu(a->fill);
1790}
1791
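/*
 * Example (illustrative sketch, not compiled as part of this file): reading
 * the current fill level of a buffer pool:
 *
 *	struct qbman_bp_query_rslt bp;
 *
 *	if (!qbman_bp_query(swp, bpid, &bp))
 *		pr_debug("BPID %u has %u free buffers\n",
 *			 bpid, qbman_bp_info_num_free_bufs(&bp));
 */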