linux/drivers/soc/fsl/dpio/qbman-portal.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
   4 * Copyright 2016-2019 NXP
   5 *
   6 */
   7
   8#include <asm/cacheflush.h>
   9#include <linux/io.h>
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <soc/fsl/dpaa2-global.h>
  13
  14#include "qbman-portal.h"
  15
  16/* All QBMan command and result structures use this "valid bit" encoding */
  17#define QB_VALID_BIT ((u32)0x80)
  18
  19/* QBMan portal management command codes */
  20#define QBMAN_MC_ACQUIRE       0x30
  21#define QBMAN_WQCHAN_CONFIGURE 0x46
  22
  23/* CINH register offsets */
  24#define QBMAN_CINH_SWP_EQCR_PI      0x800
  25#define QBMAN_CINH_SWP_EQCR_CI      0x840
  26#define QBMAN_CINH_SWP_EQAR    0x8c0
  27#define QBMAN_CINH_SWP_CR_RT        0x900
  28#define QBMAN_CINH_SWP_VDQCR_RT     0x940
  29#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
  30#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
  31#define QBMAN_CINH_SWP_DQPI    0xa00
  32#define QBMAN_CINH_SWP_DCAP    0xac0
  33#define QBMAN_CINH_SWP_SDQCR   0xb00
  34#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
  35#define QBMAN_CINH_SWP_RCR_PI       0xc00
  36#define QBMAN_CINH_SWP_RAR     0xcc0
  37#define QBMAN_CINH_SWP_ISR     0xe00
  38#define QBMAN_CINH_SWP_IER     0xe40
  39#define QBMAN_CINH_SWP_ISDR    0xe80
  40#define QBMAN_CINH_SWP_IIR     0xec0
  41
  42/* CENA register offsets */
  43#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
  44#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
  45#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
  46#define QBMAN_CENA_SWP_CR      0x600
  47#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
  48#define QBMAN_CENA_SWP_VDQCR   0x780
  49#define QBMAN_CENA_SWP_EQCR_CI 0x840
  50#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840
  51
  52/* CENA register offsets in memory-backed mode */
  53#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
  54#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
  55#define QBMAN_CENA_SWP_CR_MEM       0x1600
  56#define QBMAN_CENA_SWP_RR_MEM       0x1680
  57#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780
  58
  59/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
  60#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
  61
  62/* Define token used to determine if response written to memory is valid */
  63#define QMAN_DQ_TOKEN_VALID 1
  64
  65/* SDQCR attribute codes */
  66#define QB_SDQCR_FC_SHIFT   29
  67#define QB_SDQCR_FC_MASK    0x1
  68#define QB_SDQCR_DCT_SHIFT  24
  69#define QB_SDQCR_DCT_MASK   0x3
  70#define QB_SDQCR_TOK_SHIFT  16
  71#define QB_SDQCR_TOK_MASK   0xff
  72#define QB_SDQCR_SRC_SHIFT  0
  73#define QB_SDQCR_SRC_MASK   0xffff
  74
  75/* opaque token for static dequeues */
  76#define QMAN_SDQCR_TOKEN    0xbb
  77
  78#define QBMAN_EQCR_DCA_IDXMASK          0x0f
  79#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)
  80
  81#define EQ_DESC_SIZE_WITHOUT_FD 29
  82#define EQ_DESC_SIZE_FD_START 32
  83
  84enum qbman_sdqcr_dct {
  85        qbman_sdqcr_dct_null = 0,
  86        qbman_sdqcr_dct_prio_ics,
  87        qbman_sdqcr_dct_active_ics,
  88        qbman_sdqcr_dct_active
  89};
  90
  91enum qbman_sdqcr_fc {
  92        qbman_sdqcr_fc_one = 0,
  93        qbman_sdqcr_fc_up_to_3 = 1
  94};
  95
  96/* Internal Function declaration */
  97static int qbman_swp_enqueue_direct(struct qbman_swp *s,
  98                                    const struct qbman_eq_desc *d,
  99                                    const struct dpaa2_fd *fd);
 100static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
 101                                      const struct qbman_eq_desc *d,
 102                                      const struct dpaa2_fd *fd);
 103static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
 104                                             const struct qbman_eq_desc *d,
 105                                             const struct dpaa2_fd *fd,
 106                                             uint32_t *flags,
 107                                             int num_frames);
 108static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 109                                               const struct qbman_eq_desc *d,
 110                                               const struct dpaa2_fd *fd,
 111                                               uint32_t *flags,
 112                                               int num_frames);
 113static int
 114qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
 115                                       const struct qbman_eq_desc *d,
 116                                       const struct dpaa2_fd *fd,
 117                                       int num_frames);
 118static
 119int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
 120                                             const struct qbman_eq_desc *d,
 121                                             const struct dpaa2_fd *fd,
 122                                             int num_frames);
 123static int qbman_swp_pull_direct(struct qbman_swp *s,
 124                                 struct qbman_pull_desc *d);
 125static int qbman_swp_pull_mem_back(struct qbman_swp *s,
 126                                   struct qbman_pull_desc *d);
 127
 128const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
 129const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);
 130
 131static int qbman_swp_release_direct(struct qbman_swp *s,
 132                                    const struct qbman_release_desc *d,
 133                                    const u64 *buffers,
 134                                    unsigned int num_buffers);
 135static int qbman_swp_release_mem_back(struct qbman_swp *s,
 136                                      const struct qbman_release_desc *d,
 137                                      const u64 *buffers,
 138                                      unsigned int num_buffers);
 139
 140/* Function pointers */
 141int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
 142                             const struct qbman_eq_desc *d,
 143                             const struct dpaa2_fd *fd)
 144        = qbman_swp_enqueue_direct;
 145
 146int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
 147                                      const struct qbman_eq_desc *d,
 148                                      const struct dpaa2_fd *fd,
 149                                      uint32_t *flags,
  150                                      int num_frames)
 151        = qbman_swp_enqueue_multiple_direct;
 152
 153int
 154(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
 155                                       const struct qbman_eq_desc *d,
 156                                       const struct dpaa2_fd *fd,
 157                                       int num_frames)
 158        = qbman_swp_enqueue_multiple_desc_direct;
 159
 160int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
 161                        = qbman_swp_pull_direct;
 162
 163const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
 164                        = qbman_swp_dqrr_next_direct;
 165
 166int (*qbman_swp_release_ptr)(struct qbman_swp *s,
 167                             const struct qbman_release_desc *d,
 168                             const u64 *buffers,
 169                             unsigned int num_buffers)
 170                        = qbman_swp_release_direct;
 171
 172/* Portal Access */
 173
 174static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
 175{
 176        return readl_relaxed(p->addr_cinh + offset);
 177}
 178
 179static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
 180                                        u32 value)
 181{
 182        writel_relaxed(value, p->addr_cinh + offset);
 183}
 184
 185static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
 186{
 187        return p->addr_cena + offset;
 188}
 189
 190#define QBMAN_CINH_SWP_CFG   0xd00
 191
 192#define SWP_CFG_DQRR_MF_SHIFT 20
 193#define SWP_CFG_EST_SHIFT     16
 194#define SWP_CFG_CPBS_SHIFT    15
 195#define SWP_CFG_WN_SHIFT      14
 196#define SWP_CFG_RPM_SHIFT     12
 197#define SWP_CFG_DCM_SHIFT     10
 198#define SWP_CFG_EPM_SHIFT     8
 199#define SWP_CFG_VPM_SHIFT     7
 200#define SWP_CFG_CPM_SHIFT     6
 201#define SWP_CFG_SD_SHIFT      5
 202#define SWP_CFG_SP_SHIFT      4
 203#define SWP_CFG_SE_SHIFT      3
 204#define SWP_CFG_DP_SHIFT      2
 205#define SWP_CFG_DE_SHIFT      1
 206#define SWP_CFG_EP_SHIFT      0
 207
 208static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
 209                                    u8 epm, int sd, int sp, int se,
 210                                    int dp, int de, int ep)
 211{
 212        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
 213                est << SWP_CFG_EST_SHIFT |
 214                wn << SWP_CFG_WN_SHIFT |
 215                rpm << SWP_CFG_RPM_SHIFT |
 216                dcm << SWP_CFG_DCM_SHIFT |
 217                epm << SWP_CFG_EPM_SHIFT |
 218                sd << SWP_CFG_SD_SHIFT |
 219                sp << SWP_CFG_SP_SHIFT |
 220                se << SWP_CFG_SE_SHIFT |
 221                dp << SWP_CFG_DP_SHIFT |
 222                de << SWP_CFG_DE_SHIFT |
 223                ep << SWP_CFG_EP_SHIFT);
 224}
 225
 226#define QMAN_RT_MODE       0x00000100
 227
 228static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
 229{
 230        /* 'first' is included, 'last' is excluded */
 231        if (first <= last)
 232                return last - first;
 233        else
 234                return (2 * ringsize) - (first - last);
 235}
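
/*
 * Editor's note, an illustrative worked example (not part of the driver):
 * the EQCR producer/consumer indices carry one extra wrap bit (see how
 * pi_ci_mask is built in qbman_swp_init() below), so the two counters run
 * modulo 2 * ringsize and the wrapped case subtracts from twice the ring
 * size:
 *
 *        qm_cyc_diff(8,  2,  6) == 6 - 2              == 4
 *        qm_cyc_diff(8, 14,  3) == (2 * 8) - (14 - 3) == 5
 */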
 236
 237/**
 238 * qbman_swp_init() - Create a functional object representing the given
 239 *                    QBMan portal descriptor.
 240 * @d: the given qbman swp descriptor
 241 *
 242 * Return qbman_swp portal for success, NULL if the object cannot
 243 * be created.
 244 */
 245struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
 246{
 247        struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
 248        u32 reg;
 249        u32 mask_size;
 250        u32 eqcr_pi;
 251
 252        if (!p)
 253                return NULL;
 254
 255        spin_lock_init(&p->access_spinlock);
 256
 257        p->desc = d;
 258        p->mc.valid_bit = QB_VALID_BIT;
 259        p->sdq = 0;
 260        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
 261        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
 262        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
 263        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
 264                p->mr.valid_bit = QB_VALID_BIT;
 265
 266        atomic_set(&p->vdq.available, 1);
 267        p->vdq.valid_bit = QB_VALID_BIT;
 268        p->dqrr.next_idx = 0;
 269        p->dqrr.valid_bit = QB_VALID_BIT;
 270
 271        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
 272                p->dqrr.dqrr_size = 4;
 273                p->dqrr.reset_bug = 1;
 274        } else {
 275                p->dqrr.dqrr_size = 8;
 276                p->dqrr.reset_bug = 0;
 277        }
 278
 279        p->addr_cena = d->cena_bar;
 280        p->addr_cinh = d->cinh_bar;
 281
 282        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 283
 284                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
 285                        1, /* Writes Non-cacheable */
 286                        0, /* EQCR_CI stashing threshold */
 287                        3, /* RPM: RCR in array mode */
 288                        2, /* DCM: Discrete consumption ack */
 289                        2, /* EPM: EQCR in ring mode */
  290                        1, /* mem stashing drop enable */
 291                        1, /* mem stashing priority enable */
 292                        1, /* mem stashing enable */
 293                        1, /* dequeue stashing priority enable */
  294                        0, /* dequeue stashing enable */
 295                        0); /* EQCR_CI stashing priority enable */
 296        } else {
 297                memset(p->addr_cena, 0, 64 * 1024);
 298                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
 299                        1, /* Writes Non-cacheable */
 300                        1, /* EQCR_CI stashing threshold */
 301                        3, /* RPM: RCR in array mode */
 302                        2, /* DCM: Discrete consumption ack */
 303                        0, /* EPM: EQCR in ring mode */
 304                        1, /* mem stashing drop enable */
 305                        1, /* mem stashing priority enable */
 306                        1, /* mem stashing enable */
 307                        1, /* dequeue stashing priority enable */
 308                        0, /* dequeue stashing enable */
 309                        0); /* EQCR_CI stashing priority enable */
 310                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
 311                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
 312                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
 313        }
 314
 315        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
 316        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
 317        if (!reg) {
 318                pr_err("qbman: the portal is not enabled!\n");
 319                kfree(p);
 320                return NULL;
 321        }
 322
 323        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
 324                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
 325                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
 326        }
 327        /*
 328         * SDQCR needs to be initialized to 0 when no channels are
 329         * being dequeued from or else the QMan HW will indicate an
 330         * error.  The values that were calculated above will be
 331         * applied when dequeues from a specific channel are enabled.
 332         */
 333        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
 334
 335        p->eqcr.pi_ring_size = 8;
 336        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
 337                p->eqcr.pi_ring_size = 32;
 338                qbman_swp_enqueue_ptr =
 339                        qbman_swp_enqueue_mem_back;
 340                qbman_swp_enqueue_multiple_ptr =
 341                        qbman_swp_enqueue_multiple_mem_back;
 342                qbman_swp_enqueue_multiple_desc_ptr =
 343                        qbman_swp_enqueue_multiple_desc_mem_back;
 344                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
 345                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
 346                qbman_swp_release_ptr = qbman_swp_release_mem_back;
 347        }
 348
 349        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
 350                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
 351        eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
 352        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
 353        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
 354        p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
 355                        & p->eqcr.pi_ci_mask;
 356        p->eqcr.available = p->eqcr.pi_ring_size;
 357
 358        return p;
 359}
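
/*
 * Editor's sketch of typical portal bring-up and tear-down (illustrative
 * only, not part of the driver). 'cena_va' and 'cinh_va' stand for the
 * ioremapped cache-enabled and cache-inhibited regions of one software
 * portal; QMAN_REV_5000 comes from qbman-portal.h. The descriptor must
 * outlive the portal object because qbman_swp_init() stores the pointer
 * rather than copying it.
 *
 *        static struct qbman_swp_desc desc;
 *        struct qbman_swp *swp;
 *
 *        desc.cena_bar = cena_va;
 *        desc.cinh_bar = cinh_va;
 *        desc.qman_version = QMAN_REV_5000;
 *
 *        swp = qbman_swp_init(&desc);
 *        if (!swp)
 *                return -ENODEV;
 *        ...
 *        qbman_swp_finish(swp);
 */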
 360
 361/**
  362 * qbman_swp_finish() - Destroy the functional object representing the given
  363 *                      QBMan portal descriptor.
 364 * @p: the qbman_swp object to be destroyed
 365 */
 366void qbman_swp_finish(struct qbman_swp *p)
 367{
 368        kfree(p);
 369}
 370
 371/**
 372 * qbman_swp_interrupt_read_status()
 373 * @p: the given software portal
 374 *
 375 * Return the value in the SWP_ISR register.
 376 */
 377u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
 378{
 379        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
 380}
 381
 382/**
 383 * qbman_swp_interrupt_clear_status()
 384 * @p: the given software portal
 385 * @mask: The mask to clear in SWP_ISR register
 386 */
 387void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
 388{
 389        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
 390}
 391
 392/**
 393 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 394 * @p: the given software portal
 395 *
 396 * Return the value in the SWP_IER register.
 397 */
 398u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
 399{
 400        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
 401}
 402
 403/**
 404 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 405 * @p: the given software portal
 406 * @mask: The mask of bits to enable in SWP_IER
 407 */
 408void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
 409{
 410        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
 411}
 412
 413/**
 414 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 415 * @p: the given software portal object
 416 *
 417 * Return the value in the SWP_IIR register.
 418 */
 419int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
 420{
 421        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
 422}
 423
 424/**
 425 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 426 * @p: the given software portal object
 427 * @inhibit: whether to inhibit the IRQs
 428 */
 429void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
 430{
 431        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
 432}
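
/*
 * Editor's sketch of a typical portal interrupt service sequence using the
 * helpers above (illustrative only): mask the portal, acknowledge the
 * sources that fired, then unmask. 'swp' is the portal object returned by
 * qbman_swp_init() and 'irq_mask' stands for whichever SWP_IER bits the
 * caller has enabled; the bit definitions live in qbman-portal.h, not in
 * this file.
 *
 *        u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *        qbman_swp_interrupt_set_inhibit(swp, 1);
 *        ...service the work indicated by 'status'...
 *        qbman_swp_interrupt_clear_status(swp, status);
 *        qbman_swp_interrupt_set_trigger(swp, irq_mask);
 *        qbman_swp_interrupt_set_inhibit(swp, 0);
 */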
 433
 434/*
 435 * Different management commands all use this common base layer of code to issue
 436 * commands and poll for results.
 437 */
 438
 439/*
 440 * Returns a pointer to where the caller should fill in their management command
 441 * (caller should ignore the verb byte)
 442 */
 443void *qbman_swp_mc_start(struct qbman_swp *p)
 444{
 445        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
 446                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
 447        else
 448                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
 449}
 450
 451/*
  452 * Merges in the caller-supplied command verb (which should not include the
  453 * valid-bit) and submits the command to hardware.
 454 */
 455void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
 456{
 457        u8 *v = cmd;
 458
 459        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 460                dma_wmb();
 461                *v = cmd_verb | p->mc.valid_bit;
 462        } else {
 463                *v = cmd_verb | p->mc.valid_bit;
 464                dma_wmb();
 465                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
 466        }
 467}
 468
 469/*
  470 * Checks for a completed response (returns non-NULL if and only if the response
 471 * is complete).
 472 */
 473void *qbman_swp_mc_result(struct qbman_swp *p)
 474{
 475        u32 *ret, verb;
 476
 477        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 478                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
 479                /* Remove the valid-bit - command completed if the rest
 480                 * is non-zero.
 481                 */
 482                verb = ret[0] & ~QB_VALID_BIT;
 483                if (!verb)
 484                        return NULL;
 485                p->mc.valid_bit ^= QB_VALID_BIT;
 486        } else {
 487                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
 488                /* Command completed if the valid bit is toggled */
 489                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
 490                        return NULL;
 491                /* Command completed if the rest is non-zero */
 492                verb = ret[0] & ~QB_VALID_BIT;
 493                if (!verb)
 494                        return NULL;
 495                p->mr.valid_bit ^= QB_VALID_BIT;
 496        }
 497
 498        return ret;
 499}
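
/*
 * Editor's sketch of the management-command flow built from the three
 * helpers above (illustrative only). The caller fills in the command body
 * but leaves byte 0 for the verb; qbman_swp_mc_submit() writes the verb
 * together with the valid bit, and completion is detected by polling
 * qbman_swp_mc_result(). QBMAN_MC_ACQUIRE is the acquire verb defined at
 * the top of this file; a real caller bounds the polling loop with a
 * timeout.
 *
 *        u8 *cmd = qbman_swp_mc_start(swp);
 *        void *rsp;
 *
 *        ...fill in cmd[1..], leaving cmd[0] untouched...
 *        qbman_swp_mc_submit(swp, cmd, QBMAN_MC_ACQUIRE);
 *        do {
 *                rsp = qbman_swp_mc_result(swp);
 *        } while (!rsp);
 */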
 500
 501#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
 502enum qb_enqueue_commands {
 503        enqueue_empty = 0,
 504        enqueue_response_always = 1,
 505        enqueue_rejects_to_fq = 2
 506};
 507
 508#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
 509#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
 510#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
 511#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7
 512
  513/**
 514 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 515 *                         default/starting state.
 516 */
 517void qbman_eq_desc_clear(struct qbman_eq_desc *d)
 518{
 519        memset(d, 0, sizeof(*d));
 520}
 521
 522/**
 523 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 524 * @d:                the enqueue descriptor.
 525 * @respond_success:  1 = enqueue with response always; 0 = enqueue with
  526 *                    rejections returned to a FQ.
 527 */
 528void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
 529{
 530        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
 531        if (respond_success)
 532                d->verb |= enqueue_response_always;
 533        else
 534                d->verb |= enqueue_rejects_to_fq;
 535}
 536
 537/*
 538 * Exactly one of the following descriptor "targets" should be set. (Calling any
 539 * one of these will replace the effect of any prior call to one of these.)
 540 *   -enqueue to a frame queue
 541 *   -enqueue to a queuing destination
 542 */
 543
 544/**
 545 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 546 * @d:    the enqueue descriptor
 547 * @fqid: the id of the frame queue to be enqueued
 548 */
 549void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
 550{
 551        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
 552        d->tgtid = cpu_to_le32(fqid);
 553}
 554
 555/**
 556 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 557 * @d:       the enqueue descriptor
 558 * @qdid:    the id of the queuing destination to be enqueued
 559 * @qd_bin:  the queuing destination bin
 560 * @qd_prio: the queuing destination priority
 561 */
 562void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
 563                          u32 qd_bin, u32 qd_prio)
 564{
 565        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
 566        d->tgtid = cpu_to_le32(qdid);
 567        d->qdbin = cpu_to_le16(qd_bin);
 568        d->qpri = qd_prio;
 569}
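
/*
 * Editor's sketch of building an enqueue descriptor and issuing a single
 * enqueue (illustrative only). 'fqid' is a frame queue id obtained from the
 * MC firmware and 'fd' a prepared struct dpaa2_fd. Passing 0 to
 * qbman_eq_desc_set_no_orp() asks for rejections to be returned to the FQ
 * rather than for a response. The call goes through the
 * qbman_swp_enqueue_ptr hook installed by qbman_swp_init(); callers
 * normally use a qbman_swp_enqueue() wrapper from qbman-portal.h (an
 * assumption here).
 *
 *        struct qbman_eq_desc ed;
 *        int ret;
 *
 *        qbman_eq_desc_clear(&ed);
 *        qbman_eq_desc_set_no_orp(&ed, 0);
 *        qbman_eq_desc_set_fq(&ed, fqid);
 *        ret = qbman_swp_enqueue_ptr(swp, &ed, fd);
 *        if (ret == -EBUSY)
 *                ...EQCR full, try again later...
 */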
 570
 571#define EQAR_IDX(eqar)     ((eqar) & 0x7)
 572#define EQAR_VB(eqar)      ((eqar) & 0x80)
 573#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
 574
 575#define QB_RT_BIT ((u32)0x100)
 576/**
 577 * qbman_swp_enqueue_direct() - Issue an enqueue command
 578 * @s:  the software portal used for enqueue
 579 * @d:  the enqueue descriptor
 580 * @fd: the frame descriptor to be enqueued
 581 *
 582 * Please note that 'fd' should only be NULL if the "action" of the
 583 * descriptor is "orp_hole" or "orp_nesn".
 584 *
 585 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 586 */
 587static
 588int qbman_swp_enqueue_direct(struct qbman_swp *s,
 589                             const struct qbman_eq_desc *d,
 590                             const struct dpaa2_fd *fd)
 591{
 592        int flags = 0;
 593        int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
 594
 595        if (ret >= 0)
 596                ret = 0;
 597        else
 598                ret = -EBUSY;
 599        return  ret;
 600}
 601
 602/**
 603 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 604 * @s:  the software portal used for enqueue
 605 * @d:  the enqueue descriptor
 606 * @fd: the frame descriptor to be enqueued
 607 *
 608 * Please note that 'fd' should only be NULL if the "action" of the
 609 * descriptor is "orp_hole" or "orp_nesn".
 610 *
 611 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 612 */
 613static
 614int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
 615                               const struct qbman_eq_desc *d,
 616                               const struct dpaa2_fd *fd)
 617{
 618        int flags = 0;
 619        int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
 620
 621        if (ret >= 0)
 622                ret = 0;
 623        else
 624                ret = -EBUSY;
 625        return  ret;
 626}
 627
 628/**
 629 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 630 * using one enqueue descriptor
 631 * @s:  the software portal used for enqueue
 632 * @d:  the enqueue descriptor
 633 * @fd: table pointer of frame descriptor table to be enqueued
 634 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 635 * @num_frames: number of fd to be enqueued
 636 *
 637 * Return the number of fd enqueued, or a negative error number.
 638 */
 639static
 640int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
 641                                      const struct qbman_eq_desc *d,
 642                                      const struct dpaa2_fd *fd,
 643                                      uint32_t *flags,
 644                                      int num_frames)
 645{
 646        uint32_t *p = NULL;
 647        const uint32_t *cl = (uint32_t *)d;
 648        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 649        int i, num_enqueued = 0;
 650
 651        spin_lock(&s->access_spinlock);
 652        half_mask = (s->eqcr.pi_ci_mask>>1);
 653        full_mask = s->eqcr.pi_ci_mask;
 654
 655        if (!s->eqcr.available) {
 656                eqcr_ci = s->eqcr.ci;
 657                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
 658                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
 659                s->eqcr.ci &= full_mask;
 660
 661                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 662                                        eqcr_ci, s->eqcr.ci);
 663                if (!s->eqcr.available) {
 664                        spin_unlock(&s->access_spinlock);
 665                        return 0;
 666                }
 667        }
 668
 669        eqcr_pi = s->eqcr.pi;
 670        num_enqueued = (s->eqcr.available < num_frames) ?
 671                        s->eqcr.available : num_frames;
 672        s->eqcr.available -= num_enqueued;
 673        /* Fill in the EQCR ring */
 674        for (i = 0; i < num_enqueued; i++) {
 675                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 676                /* Skip copying the verb */
 677                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 678                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 679                       &fd[i], sizeof(*fd));
 680                eqcr_pi++;
 681        }
 682
 683        dma_wmb();
 684
 685        /* Set the verb byte, have to substitute in the valid-bit */
 686        eqcr_pi = s->eqcr.pi;
 687        for (i = 0; i < num_enqueued; i++) {
 688                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 689                p[0] = cl[0] | s->eqcr.pi_vb;
 690                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
 691                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
 692
 693                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
 694                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
 695                }
 696                eqcr_pi++;
 697                if (!(eqcr_pi & half_mask))
 698                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 699        }
 700
  701        /* Advance the EQCR producer index over the entries just written */
 702        eqcr_pi = s->eqcr.pi;
 703        for (i = 0; i < num_enqueued; i++)
 704                eqcr_pi++;
 705        s->eqcr.pi = eqcr_pi & full_mask;
 706        spin_unlock(&s->access_spinlock);
 707
 708        return num_enqueued;
 709}
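
/*
 * Editor's sketch of how a caller handles the partial-completion semantics
 * of the multi-enqueue path (illustrative only): the return value is the
 * number of frames accepted, possibly 0 when the EQCR is full, so the
 * submit loop advances by that count and backs off when nothing was taken.
 * 'ed', 'fds' and 'count' are caller-owned.
 *
 *        int sent = 0;
 *
 *        while (sent < count) {
 *                int done = qbman_swp_enqueue_multiple_ptr(swp, &ed,
 *                                                          &fds[sent], NULL,
 *                                                          count - sent);
 *                if (done < 0)
 *                        break;
 *                if (!done) {
 *                        cpu_relax();
 *                        continue;
 *                }
 *                sent += done;
 *        }
 */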
 710
 711/**
 712 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 713 * using one enqueue descriptor
 714 * @s:  the software portal used for enqueue
 715 * @d:  the enqueue descriptor
 716 * @fd: table pointer of frame descriptor table to be enqueued
 717 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 718 * @num_frames: number of fd to be enqueued
 719 *
 720 * Return the number of fd enqueued, or a negative error number.
 721 */
 722static
 723int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
 724                                        const struct qbman_eq_desc *d,
 725                                        const struct dpaa2_fd *fd,
 726                                        uint32_t *flags,
 727                                        int num_frames)
 728{
 729        uint32_t *p = NULL;
 730        const uint32_t *cl = (uint32_t *)(d);
 731        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 732        int i, num_enqueued = 0;
 733        unsigned long irq_flags;
 734
 735        spin_lock(&s->access_spinlock);
 736        local_irq_save(irq_flags);
 737
 738        half_mask = (s->eqcr.pi_ci_mask>>1);
 739        full_mask = s->eqcr.pi_ci_mask;
 740        if (!s->eqcr.available) {
 741                eqcr_ci = s->eqcr.ci;
 742                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
 743                s->eqcr.ci = *p & full_mask;
 744                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 745                                        eqcr_ci, s->eqcr.ci);
 746                if (!s->eqcr.available) {
 747                        local_irq_restore(irq_flags);
 748                        spin_unlock(&s->access_spinlock);
 749                        return 0;
 750                }
 751        }
 752
 753        eqcr_pi = s->eqcr.pi;
 754        num_enqueued = (s->eqcr.available < num_frames) ?
 755                        s->eqcr.available : num_frames;
 756        s->eqcr.available -= num_enqueued;
 757        /* Fill in the EQCR ring */
 758        for (i = 0; i < num_enqueued; i++) {
 759                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 760                /* Skip copying the verb */
 761                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 762                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 763                       &fd[i], sizeof(*fd));
 764                eqcr_pi++;
 765        }
 766
 767        /* Set the verb byte, have to substitute in the valid-bit */
 768        eqcr_pi = s->eqcr.pi;
 769        for (i = 0; i < num_enqueued; i++) {
 770                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 771                p[0] = cl[0] | s->eqcr.pi_vb;
 772                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
 773                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
 774
 775                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
 776                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
 777                }
 778                eqcr_pi++;
 779                if (!(eqcr_pi & half_mask))
 780                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 781        }
 782        s->eqcr.pi = eqcr_pi & full_mask;
 783
 784        dma_wmb();
 785        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
 786                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
 787        local_irq_restore(irq_flags);
 788        spin_unlock(&s->access_spinlock);
 789
 790        return num_enqueued;
 791}
 792
 793/**
 794 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 795 * using multiple enqueue descriptor
 796 * @s:  the software portal used for enqueue
 797 * @d:  table of minimal enqueue descriptor
 798 * @fd: table pointer of frame descriptor table to be enqueued
 799 * @num_frames: number of fd to be enqueued
 800 *
 801 * Return the number of fd enqueued, or a negative error number.
 802 */
 803static
 804int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
 805                                           const struct qbman_eq_desc *d,
 806                                           const struct dpaa2_fd *fd,
 807                                           int num_frames)
 808{
 809        uint32_t *p;
 810        const uint32_t *cl;
 811        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 812        int i, num_enqueued = 0;
 813
 814        half_mask = (s->eqcr.pi_ci_mask>>1);
 815        full_mask = s->eqcr.pi_ci_mask;
 816        if (!s->eqcr.available) {
 817                eqcr_ci = s->eqcr.ci;
 818                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
 819                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
 820                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 821                                        eqcr_ci, s->eqcr.ci);
 822                if (!s->eqcr.available)
 823                        return 0;
 824        }
 825
 826        eqcr_pi = s->eqcr.pi;
 827        num_enqueued = (s->eqcr.available < num_frames) ?
 828                        s->eqcr.available : num_frames;
 829        s->eqcr.available -= num_enqueued;
 830        /* Fill in the EQCR ring */
 831        for (i = 0; i < num_enqueued; i++) {
 832                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 833                cl = (uint32_t *)(&d[i]);
 834                /* Skip copying the verb */
 835                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 836                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 837                       &fd[i], sizeof(*fd));
 838                eqcr_pi++;
 839        }
 840
 841        dma_wmb();
 842
 843        /* Set the verb byte, have to substitute in the valid-bit */
 844        eqcr_pi = s->eqcr.pi;
 845        for (i = 0; i < num_enqueued; i++) {
 846                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 847                cl = (uint32_t *)(&d[i]);
 848                p[0] = cl[0] | s->eqcr.pi_vb;
 849                eqcr_pi++;
 850                if (!(eqcr_pi & half_mask))
 851                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 852        }
 853
  854        /* Advance the EQCR producer index over the entries just written */
 855        eqcr_pi = s->eqcr.pi;
 856        for (i = 0; i < num_enqueued; i++)
 857                eqcr_pi++;
 858        s->eqcr.pi = eqcr_pi & full_mask;
 859
 860        return num_enqueued;
 861}
 862
 863/**
 864 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 865 * using multiple enqueue descriptor
 866 * @s:  the software portal used for enqueue
 867 * @d:  table of minimal enqueue descriptor
 868 * @fd: table pointer of frame descriptor table to be enqueued
 869 * @num_frames: number of fd to be enqueued
 870 *
 871 * Return the number of fd enqueued, or a negative error number.
 872 */
 873static
 874int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
 875                                           const struct qbman_eq_desc *d,
 876                                           const struct dpaa2_fd *fd,
 877                                           int num_frames)
 878{
 879        uint32_t *p;
 880        const uint32_t *cl;
 881        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
 882        int i, num_enqueued = 0;
 883
 884        half_mask = (s->eqcr.pi_ci_mask>>1);
 885        full_mask = s->eqcr.pi_ci_mask;
 886        if (!s->eqcr.available) {
 887                eqcr_ci = s->eqcr.ci;
 888                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
 889                s->eqcr.ci = *p & full_mask;
 890                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
 891                                        eqcr_ci, s->eqcr.ci);
 892                if (!s->eqcr.available)
 893                        return 0;
 894        }
 895
 896        eqcr_pi = s->eqcr.pi;
 897        num_enqueued = (s->eqcr.available < num_frames) ?
 898                        s->eqcr.available : num_frames;
 899        s->eqcr.available -= num_enqueued;
 900        /* Fill in the EQCR ring */
 901        for (i = 0; i < num_enqueued; i++) {
 902                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 903                cl = (uint32_t *)(&d[i]);
 904                /* Skip copying the verb */
 905                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
 906                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
 907                       &fd[i], sizeof(*fd));
 908                eqcr_pi++;
 909        }
 910
 911        /* Set the verb byte, have to substitute in the valid-bit */
 912        eqcr_pi = s->eqcr.pi;
 913        for (i = 0; i < num_enqueued; i++) {
 914                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
 915                cl = (uint32_t *)(&d[i]);
 916                p[0] = cl[0] | s->eqcr.pi_vb;
 917                eqcr_pi++;
 918                if (!(eqcr_pi & half_mask))
 919                        s->eqcr.pi_vb ^= QB_VALID_BIT;
 920        }
 921
 922        s->eqcr.pi = eqcr_pi & full_mask;
 923
 924        dma_wmb();
 925        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
 926                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
 927
 928        return num_enqueued;
 929}
 930
 931/* Static (push) dequeue */
 932
 933/**
 934 * qbman_swp_push_get() - Get the push dequeue setup
 935 * @s:           the software portal object
 936 * @channel_idx: the channel index to query
 937 * @enabled:     returned boolean to show whether the push dequeue is enabled
 938 *               for the given channel
 939 */
 940void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
 941{
 942        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
 943
 944        WARN_ON(channel_idx > 15);
  945        *enabled = !!(src & (1 << channel_idx));
 946}
 947
 948/**
 949 * qbman_swp_push_set() - Enable or disable push dequeue
 950 * @s:           the software portal object
 951 * @channel_idx: the channel index (0 to 15)
 952 * @enable:      enable or disable push dequeue
 953 */
 954void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
 955{
 956        u16 dqsrc;
 957
 958        WARN_ON(channel_idx > 15);
 959        if (enable)
 960                s->sdq |= 1 << channel_idx;
 961        else
 962                s->sdq &= ~(1 << channel_idx);
 963
  964        /* Read back the complete src map. If no channels are enabled
 965         * the SDQCR must be 0 or else QMan will assert errors
 966         */
 967        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
 968        if (dqsrc != 0)
 969                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
 970        else
 971                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
 972}
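
/*
 * Editor's sketch of enabling push (static) dequeue on one channel and
 * reading the setting back (illustrative only); 'channel_idx' is the 0-15
 * index of a channel mapped to this portal.
 *
 *        int enabled;
 *
 *        qbman_swp_push_set(swp, channel_idx, 1);
 *        qbman_swp_push_get(swp, channel_idx, &enabled);
 */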
 973
 974#define QB_VDQCR_VERB_DCT_SHIFT    0
 975#define QB_VDQCR_VERB_DT_SHIFT     2
 976#define QB_VDQCR_VERB_RLS_SHIFT    4
 977#define QB_VDQCR_VERB_WAE_SHIFT    5
 978
 979enum qb_pull_dt_e {
 980        qb_pull_dt_channel,
 981        qb_pull_dt_workqueue,
 982        qb_pull_dt_framequeue
 983};
 984
 985/**
 986 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 987 *                           default/starting state
 988 * @d: the pull dequeue descriptor to be cleared
 989 */
 990void qbman_pull_desc_clear(struct qbman_pull_desc *d)
 991{
 992        memset(d, 0, sizeof(*d));
 993}
 994
 995/**
 996 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 997 * @d:            the pull dequeue descriptor to be set
 998 * @storage:      the pointer of the memory to store the dequeue result
 999 * @storage_phys: the physical address of the storage memory
1000 * @stash:        to indicate whether write allocate is enabled
1001 *
 1002 * If not called, or if called with 'storage' as NULL, the resulting pull dequeues
1003 * will produce results to DQRR. If 'storage' is non-NULL, then results are
1004 * produced to the given memory location (using the DMA address which
1005 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
1006 * those writes to main-memory express a cache-warming attribute.
1007 */
1008void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
1009                                 struct dpaa2_dq *storage,
1010                                 dma_addr_t storage_phys,
1011                                 int stash)
1012{
1013        /* save the virtual address */
1014        d->rsp_addr_virt = (u64)(uintptr_t)storage;
1015
1016        if (!storage) {
1017                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
1018                return;
1019        }
1020        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
1021        if (stash)
1022                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
1023        else
1024                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
1025
1026        d->rsp_addr = cpu_to_le64(storage_phys);
1027}
1028
1029/**
1030 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
1031 * @d:         the pull dequeue descriptor to be set
1032 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
1033 */
1034void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
1035{
1036        d->numf = numframes - 1;
1037}
1038
1039/*
1040 * Exactly one of the following descriptor "actions" should be set. (Calling any
1041 * one of these will replace the effect of any prior call to one of these.)
1042 * - pull dequeue from the given frame queue (FQ)
1043 * - pull dequeue from any FQ in the given work queue (WQ)
1044 * - pull dequeue from any FQ in any WQ in the given channel
1045 */
1046
1047/**
1048 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
1049 * @d:    the pull dequeue descriptor to be set
1050 * @fqid: the frame queue index of the given FQ
1051 */
1052void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
1053{
1054        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
1055        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
1056        d->dq_src = cpu_to_le32(fqid);
1057}
1058
1059/**
1060 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
1061 * @d:    the pull dequeue descriptor to be set
1062 * @wqid: composed of channel id and wqid within the channel
1063 * @dct:  the dequeue command type
1064 */
1065void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
1066                            enum qbman_pull_type_e dct)
1067{
1068        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1069        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
1070        d->dq_src = cpu_to_le32(wqid);
1071}
1072
1073/**
1074 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
1075 *                                 dequeues
1076 * @d:    the pull dequeue descriptor to be set
1077 * @chid: the channel id to be dequeued
1078 * @dct:  the dequeue command type
1079 */
1080void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
1081                                 enum qbman_pull_type_e dct)
1082{
1083        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
1084        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
1085        d->dq_src = cpu_to_le32(chid);
1086}
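
/*
 * Editor's sketch of preparing a volatile (pull) dequeue of up to 16 frames
 * from one FQ into caller-provided storage (illustrative only). 'store' is
 * a DMA-mapped array of 16 struct dpaa2_dq and 'store_iova' its bus
 * address. The descriptor is handed to the portal through the
 * qbman_swp_pull_ptr hook, normally via a qbman_swp_pull() wrapper in
 * qbman-portal.h (an assumption here).
 *
 *        struct qbman_pull_desc pd;
 *        int ret;
 *
 *        qbman_pull_desc_clear(&pd);
 *        qbman_pull_desc_set_numframes(&pd, 16);
 *        qbman_pull_desc_set_fq(&pd, fqid);
 *        qbman_pull_desc_set_storage(&pd, store, store_iova, 1);
 *        ret = qbman_swp_pull_ptr(swp, &pd);
 *        if (ret == -EBUSY)
 *                ...a previous volatile dequeue is still in flight...
 */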
1087
1088/**
1089 * qbman_swp_pull_direct() - Issue the pull dequeue command
1090 * @s: the software portal object
 1091 * @d: the pull dequeue descriptor which has been configured with
1092 *     the set of qbman_pull_desc_set_*() calls
1093 *
1094 * Return 0 for success, and -EBUSY if the software portal is not ready
1095 * to do pull dequeue.
1096 */
1097static
1098int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
1099{
1100        struct qbman_pull_desc *p;
1101
1102        if (!atomic_dec_and_test(&s->vdq.available)) {
1103                atomic_inc(&s->vdq.available);
1104                return -EBUSY;
1105        }
1106        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
1107        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
1108                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
1109        else
1110                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
1111        p->numf = d->numf;
1112        p->tok = QMAN_DQ_TOKEN_VALID;
1113        p->dq_src = d->dq_src;
1114        p->rsp_addr = d->rsp_addr;
1115        p->rsp_addr_virt = d->rsp_addr_virt;
1116        dma_wmb();
1117        /* Set the verb byte, have to substitute in the valid-bit */
1118        p->verb = d->verb | s->vdq.valid_bit;
1119        s->vdq.valid_bit ^= QB_VALID_BIT;
1120
1121        return 0;
1122}
1123
1124/**
1125 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
1126 * @s: the software portal object
 1127 * @d: the pull dequeue descriptor which has been configured with
1128 *     the set of qbman_pull_desc_set_*() calls
1129 *
1130 * Return 0 for success, and -EBUSY if the software portal is not ready
1131 * to do pull dequeue.
1132 */
1133static
1134int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
1135{
1136        struct qbman_pull_desc *p;
1137
1138        if (!atomic_dec_and_test(&s->vdq.available)) {
1139                atomic_inc(&s->vdq.available);
1140                return -EBUSY;
1141        }
1142        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
1143        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
1144                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
1145        else
1146                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
1147        p->numf = d->numf;
1148        p->tok = QMAN_DQ_TOKEN_VALID;
1149        p->dq_src = d->dq_src;
1150        p->rsp_addr = d->rsp_addr;
1151        p->rsp_addr_virt = d->rsp_addr_virt;
1152
1153        /* Set the verb byte, have to substitute in the valid-bit */
1154        p->verb = d->verb | s->vdq.valid_bit;
1155        s->vdq.valid_bit ^= QB_VALID_BIT;
1156        dma_wmb();
1157        qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
1158
1159        return 0;
1160}
1161
1162#define QMAN_DQRR_PI_MASK   0xf
1163
1164/**
 1165 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
1166 * @s: the software portal object
1167 *
1168 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
1169 * only once, so repeated calls can return a sequence of DQRR entries, without
1170 * requiring they be consumed immediately or in any particular order.
1171 */
1172const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
1173{
1174        u32 verb;
1175        u32 response_verb;
1176        u32 flags;
1177        struct dpaa2_dq *p;
1178
1179        /* Before using valid-bit to detect if something is there, we have to
1180         * handle the case of the DQRR reset bug...
1181         */
1182        if (unlikely(s->dqrr.reset_bug)) {
1183                /*
1184                 * We pick up new entries by cache-inhibited producer index,
1185                 * which means that a non-coherent mapping would require us to
1186                 * invalidate and read *only* once that PI has indicated that
1187                 * there's an entry here. The first trip around the DQRR ring
1188                 * will be much less efficient than all subsequent trips around
1189                 * it...
1190                 */
1191                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
1192                        QMAN_DQRR_PI_MASK;
1193
1194                /* there are new entries if pi != next_idx */
1195                if (pi == s->dqrr.next_idx)
1196                        return NULL;
1197
1198                /*
1199                 * if next_idx is/was the last ring index, and 'pi' is
1200                 * different, we can disable the workaround as all the ring
1201                 * entries have now been DMA'd to so valid-bit checking is
1202                 * repaired. Note: this logic needs to be based on next_idx
1203                 * (which increments one at a time), rather than on pi (which
1204                 * can burst and wrap-around between our snapshots of it).
1205                 */
1206                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
1207                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
1208                                 s->dqrr.next_idx, pi);
1209                        s->dqrr.reset_bug = 0;
1210                }
1211                prefetch(qbman_get_cmd(s,
1212                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1213        }
1214
1215        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
1216        verb = p->dq.verb;
1217
1218        /*
1219         * If the valid-bit isn't of the expected polarity, nothing there. Note,
 1220 * in the DQRR reset bug workaround, we shouldn't need to skip this
1221         * check, because we've already determined that a new entry is available
1222         * and we've invalidated the cacheline before reading it, so the
1223         * valid-bit behaviour is repaired and should tell us what we already
1224         * knew from reading PI.
1225         */
1226        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
1227                prefetch(qbman_get_cmd(s,
1228                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1229                return NULL;
1230        }
1231        /*
1232         * There's something there. Move "next_idx" attention to the next ring
1233         * entry (and prefetch it) before returning what we found.
1234         */
1235        s->dqrr.next_idx++;
1236        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
1237        if (!s->dqrr.next_idx)
1238                s->dqrr.valid_bit ^= QB_VALID_BIT;
1239
1240        /*
1241         * If this is the final response to a volatile dequeue command
1242         * indicate that the vdq is available
1243         */
1244        flags = p->dq.stat;
1245        response_verb = verb & QBMAN_RESULT_MASK;
1246        if ((response_verb == QBMAN_RESULT_DQ) &&
1247            (flags & DPAA2_DQ_STAT_VOLATILE) &&
1248            (flags & DPAA2_DQ_STAT_EXPIRED))
1249                atomic_inc(&s->vdq.available);
1250
1251        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1252
1253        return p;
1254}
1255
1256/**
 1257 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
1258 * @s: the software portal object
1259 *
1260 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
1261 * only once, so repeated calls can return a sequence of DQRR entries, without
1262 * requiring they be consumed immediately or in any particular order.
1263 */
1264const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
1265{
1266        u32 verb;
1267        u32 response_verb;
1268        u32 flags;
1269        struct dpaa2_dq *p;
1270
1271        /* Before using valid-bit to detect if something is there, we have to
1272         * handle the case of the DQRR reset bug...
1273         */
1274        if (unlikely(s->dqrr.reset_bug)) {
1275                /*
1276                 * We pick up new entries by cache-inhibited producer index,
1277                 * which means that a non-coherent mapping would require us to
1278                 * invalidate and read *only* once that PI has indicated that
1279                 * there's an entry here. The first trip around the DQRR ring
1280                 * will be much less efficient than all subsequent trips around
1281                 * it...
1282                 */
1283                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
1284                        QMAN_DQRR_PI_MASK;
1285
1286                /* there are new entries if pi != next_idx */
1287                if (pi == s->dqrr.next_idx)
1288                        return NULL;
1289
1290                /*
1291                 * if next_idx is/was the last ring index, and 'pi' is
1292                 * different, we can disable the workaround as all the ring
1293                 * entries have now been DMA'd to so valid-bit checking is
1294                 * repaired. Note: this logic needs to be based on next_idx
1295                 * (which increments one at a time), rather than on pi (which
1296                 * can burst and wrap-around between our snapshots of it).
1297                 */
1298                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
1299                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
1300                                 s->dqrr.next_idx, pi);
1301                        s->dqrr.reset_bug = 0;
1302                }
1303                prefetch(qbman_get_cmd(s,
1304                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1305        }
1306
1307        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
1308        verb = p->dq.verb;
1309
1310        /*
1311         * If the valid-bit isn't of the expected polarity, nothing there. Note,
 1312 * in the DQRR reset bug workaround, we shouldn't need to skip this
1313         * check, because we've already determined that a new entry is available
1314         * and we've invalidated the cacheline before reading it, so the
1315         * valid-bit behaviour is repaired and should tell us what we already
1316         * knew from reading PI.
1317         */
1318        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
1319                prefetch(qbman_get_cmd(s,
1320                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1321                return NULL;
1322        }
1323        /*
1324         * There's something there. Move "next_idx" attention to the next ring
1325         * entry (and prefetch it) before returning what we found.
1326         */
1327        s->dqrr.next_idx++;
1328        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
1329        if (!s->dqrr.next_idx)
1330                s->dqrr.valid_bit ^= QB_VALID_BIT;
1331
1332        /*
1333         * If this is the final response to a volatile dequeue command
1334         * indicate that the vdq is available
1335         */
1336        flags = p->dq.stat;
1337        response_verb = verb & QBMAN_RESULT_MASK;
1338        if ((response_verb == QBMAN_RESULT_DQ) &&
1339            (flags & DPAA2_DQ_STAT_VOLATILE) &&
1340            (flags & DPAA2_DQ_STAT_EXPIRED))
1341                atomic_inc(&s->vdq.available);
1342
1343        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
1344
1345        return p;
1346}
1347
1348/**
1349 * qbman_swp_dqrr_consume() -  Consume DQRR entries previously returned from
1350 *                             qbman_swp_dqrr_next().
1351 * @s: the software portal object
1352 * @dq: the DQRR entry to be consumed
1353 */
1354void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
1355{
1356        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
1357}
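
/*
 * Editor's sketch of a DQRR polling loop built on the calls above
 * (illustrative only): entries are fetched through the
 * qbman_swp_dqrr_next_ptr hook (usually behind a qbman_swp_dqrr_next()
 * wrapper, an assumption here) and must be handed back with
 * qbman_swp_dqrr_consume() once processed; dpaa2_dq_fd() from
 * soc/fsl/dpaa2-global.h extracts the frame descriptor.
 *
 *        const struct dpaa2_dq *dq;
 *
 *        while ((dq = qbman_swp_dqrr_next_ptr(swp)) != NULL) {
 *                const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);
 *
 *                ...process fd...
 *                qbman_swp_dqrr_consume(swp, dq);
 *        }
 */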
1358
1359/**
1360 * qbman_result_has_new_result() - Check and get the dequeue response from the
1361 *                                 dq storage memory set in pull dequeue command
1362 * @s: the software portal object
1363 * @dq: the dequeue result read from the memory
1364 *
1365 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
1366 * dequeue result.
1367 *
1368 * Only used for user-provided storage of dequeue results, not DQRR. For
1369 * efficiency purposes, the driver will perform any required endianness
1370 * conversion to ensure that the user's dequeue result storage is in host-endian
1371 * format. As such, once the user has called qbman_result_has_new_result() and
1372 * been returned a valid dequeue result, they should not call it again on
1373 * the same memory location (except of course if another dequeue command has
1374 * been executed to produce a new result to that location).
1375 */
1376int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
1377{
1378        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
1379                return 0;
1380
1381        /*
1382         * Set token to be 0 so we will detect change back to 1
1383         * next time the looping is traversed. Const is cast away here
1384         * as we want users to treat the dequeue responses as read only.
1385         */
1386        ((struct dpaa2_dq *)dq)->dq.tok = 0;
1387
1388        /*
1389         * Determine whether VDQCR is available based on whether the
1390         * current result is sitting in the first storage location of
1391         * the busy command.
1392         */
1393        if (s->vdq.storage == dq) {
1394                s->vdq.storage = NULL;
1395                atomic_inc(&s->vdq.available);
1396        }
1397
1398        return 1;
1399}
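/*
 * Usage sketch for polling a user-provided dequeue storage (illustrative
 * only). "storage" is assumed to be the DMA-mapped result array that was
 * attached to the earlier pull command with qbman_pull_desc_set_storage()
 * before qbman_swp_pull() was issued; "idx" and process_fd() are assumptions
 * of the sketch:
 *
 *	const struct dpaa2_dq *dq = &storage[idx];
 *
 *	if (!qbman_result_has_new_result(swp, dq))
 *		return;		// nothing written back yet, poll again later
 *	// dq is now valid and host-endian; hand it to the consumer once only
 *	process_fd(dpaa2_dq_fd(dq));
 */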
1400
1401/**
1402 * qbman_release_desc_clear() - Clear the contents of a descriptor to
1403 *                              default/starting state.
1404 * @d: the release descriptor to be cleared
1405 */
1406void qbman_release_desc_clear(struct qbman_release_desc *d)
1407{
1408        memset(d, 0, sizeof(*d));
1409        d->verb = 1 << 5; /* Release Command Valid */
1410}
1411
1412/**
1413 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
1414 * @d:    the release descriptor to be set
1415 * @bpid: the bpid value to be set
1416 */
1417void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
1418{
1419        d->bpid = cpu_to_le16(bpid);
1420}
1421
1422/**
1423 * qbman_release_desc_set_rcdi() - Set whether the portal's RCDI interrupt
1424 * source is asserted after the release command completes.
1425 * @d:      the release descriptor to be set
1426 * @enable: enable (1) or disable (0) value
1427 */
1428void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1429{
1430        if (enable)
1431                d->verb |= 1 << 6;
1432        else
1433                d->verb &= ~(1 << 6);
1434}
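/*
 * Descriptor-setup sketch for a buffer release (illustrative only). The
 * three helpers above are the complete interface for building a release
 * descriptor; "bpid" is an assumed buffer-pool id obtained elsewhere (e.g.
 * from the DPBP object):
 *
 *	struct qbman_release_desc rd;
 *
 *	qbman_release_desc_clear(&rd);		// verb = "release command valid"
 *	qbman_release_desc_set_bpid(&rd, bpid);	// target buffer pool
 *	qbman_release_desc_set_rcdi(&rd, 0);	// no RCDI interrupt on completion
 */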
1435
1436#define RAR_IDX(rar)     ((rar) & 0x7)
1437#define RAR_VB(rar)      ((rar) & 0x80)
1438#define RAR_SUCCESS(rar) ((rar) & 0x100)
1439
1440/**
1441 * qbman_swp_release_direct() - Issue a buffer release command
1442 * @s:           the software portal object
1443 * @d:           the release descriptor
1444 * @buffers:     pointer to an array of buffer addresses to be released
1445 * @num_buffers: number of buffers to be released; must be between 1 and 7
1446 *
1447 * Return 0 for success, -EBUSY if the release command ring is not ready.
1448 */
1449int qbman_swp_release_direct(struct qbman_swp *s,
1450                             const struct qbman_release_desc *d,
1451                             const u64 *buffers, unsigned int num_buffers)
1452{
1453        int i;
1454        struct qbman_release_desc *p;
1455        u32 rar;
1456
1457        if (!num_buffers || (num_buffers > 7))
1458                return -EINVAL;
1459
1460        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1461        if (!RAR_SUCCESS(rar))
1462                return -EBUSY;
1463
1464        /* Start the release command */
1465        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1466
1467        /* Copy the caller's buffer pointers to the command */
1468        for (i = 0; i < num_buffers; i++)
1469                p->buf[i] = cpu_to_le64(buffers[i]);
1470        p->bpid = d->bpid;
1471
1472        /*
1473         * Set the verb byte, have to substitute in the valid-bit
1474         * and the number of buffers.
1475         */
1476        dma_wmb();
1477        p->verb = d->verb | RAR_VB(rar) | num_buffers;
1478
1479        return 0;
1480}
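/*
 * Call-site sketch for issuing the release (illustrative only). Callers are
 * assumed to go through the qbman_swp_release() wrapper declared in
 * qbman-portal.h, which selects the direct or memory-backed variant; since
 * the release ring can be momentarily full, a bounded -EBUSY retry loop of
 * this shape is the usual pattern:
 *
 *	u64 bufs[7];		// at most 7 addresses per command
 *	int err, retries = 0;
 *
 *	do {
 *		err = qbman_swp_release(swp, &rd, bufs, num);
 *	} while (err == -EBUSY && ++retries < 100);
 */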
1481
1482/**
1483 * qbman_swp_release_mem_back() - Issue a buffer release command
1484 * @s:           the software portal object
1485 * @d:           the release descriptor
1486 * @buffers:     pointer to an array of buffer addresses to be released
1487 * @num_buffers: number of buffers to be released; must be between 1 and 7
1488 *
1489 * Return 0 for success, -EBUSY if the release command ring is not ready.
1490 */
1491int qbman_swp_release_mem_back(struct qbman_swp *s,
1492                               const struct qbman_release_desc *d,
1493                               const u64 *buffers, unsigned int num_buffers)
1494{
1495        int i;
1496        struct qbman_release_desc *p;
1497        u32 rar;
1498
1499        if (!num_buffers || (num_buffers > 7))
1500                return -EINVAL;
1501
1502        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1503        if (!RAR_SUCCESS(rar))
1504                return -EBUSY;
1505
1506        /* Start the release command */
1507        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1508
1509        /* Copy the caller's buffer pointers to the command */
1510        for (i = 0; i < num_buffers; i++)
1511                p->buf[i] = cpu_to_le64(buffers[i]);
1512        p->bpid = d->bpid;
1513
1514        p->verb = d->verb | RAR_VB(rar) | num_buffers;
1515        dma_wmb();
1516        qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
1517                             RAR_IDX(rar)  * 4, QMAN_RT_MODE);
1518
1519        return 0;
1520}
1521
1522struct qbman_acquire_desc {
1523        u8 verb;
1524        u8 reserved;
1525        __le16 bpid;
1526        u8 num;
1527        u8 reserved2[59];
1528};
1529
1530struct qbman_acquire_rslt {
1531        u8 verb;
1532        u8 rslt;
1533        __le16 reserved;
1534        u8 num;
1535        u8 reserved2[3];
1536        __le64 buf[7];
1537};
1538
1539/**
1540 * qbman_swp_acquire() - Issue a buffer acquire command
1541 * @s:           the software portal object
1542 * @bpid:        the buffer pool index
1543 * @buffers:     pointer to the array to receive the acquired buffer addresses
1544 * @num_buffers: number of buffers to be acquired; must be between 1 and 7
1545 *
1546 * Return the number of buffers acquired (which may be fewer than requested),
1547 * or a negative error code if the acquire command fails.
1548 */
1549int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
1550                      unsigned int num_buffers)
1551{
1552        struct qbman_acquire_desc *p;
1553        struct qbman_acquire_rslt *r;
1554        int i;
1555
1556        if (!num_buffers || (num_buffers > 7))
1557                return -EINVAL;
1558
1559        /* Start the management command */
1560        p = qbman_swp_mc_start(s);
1561
1562        if (!p)
1563                return -EBUSY;
1564
1565        /* Encode the caller-provided attributes */
1566        p->bpid = cpu_to_le16(bpid);
1567        p->num = num_buffers;
1568
1569        /* Complete the management command */
1570        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1571        if (unlikely(!r)) {
1572                pr_err("qbman: acquire from BPID %d failed, no response\n",
1573                       bpid);
1574                return -EIO;
1575        }
1576
1577        /* Decode the outcome */
1578        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
1579
1580        /* Determine success or failure */
1581        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1582                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
1583                       bpid, r->rslt);
1584                return -EIO;
1585        }
1586
1587        WARN_ON(r->num > num_buffers);
1588
1589        /* Copy the acquired buffers to the caller's array */
1590        for (i = 0; i < r->num; i++)
1591                buffers[i] = le64_to_cpu(r->buf[i]);
1592
1593        return (int)r->num;
1594}
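/*
 * Usage sketch for draining buffers from a pool (illustrative only). On
 * success the function returns how many buffers were actually acquired,
 * which may be fewer than requested when the pool is nearly empty; "bpid"
 * and free_buf() are assumptions of the sketch:
 *
 *	u64 bufs[7];
 *	int i, n;
 *
 *	do {
 *		n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *		for (i = 0; i < n; i++)
 *			free_buf(bufs[i]);	// hand each address back to its owner
 *	} while (n > 0);
 */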
1595
1596struct qbman_alt_fq_state_desc {
1597        u8 verb;
1598        u8 reserved[3];
1599        __le32 fqid;
1600        u8 reserved2[56];
1601};
1602
1603struct qbman_alt_fq_state_rslt {
1604        u8 verb;
1605        u8 rslt;
1606        u8 reserved[62];
1607};
1608
1609#define ALT_FQ_FQID_MASK 0x00FFFFFF
1610
1611int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
1612                           u8 alt_fq_verb)
1613{
1614        struct qbman_alt_fq_state_desc *p;
1615        struct qbman_alt_fq_state_rslt *r;
1616
1617        /* Start the management command */
1618        p = qbman_swp_mc_start(s);
1619        if (!p)
1620                return -EBUSY;
1621
1622        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
1623
1624        /* Complete the management command */
1625        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1626        if (unlikely(!r)) {
1627                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1628                       alt_fq_verb);
1629                return -EIO;
1630        }
1631
1632        /* Decode the outcome */
1633        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
1634
1635        /* Determine success or failure */
1636        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1637                pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
1638                       fqid, r->verb, r->rslt);
1639                return -EIO;
1640        }
1641
1642        return 0;
1643}
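/*
 * Usage sketch (illustrative only). The alt_fq_verb values and the thin
 * wrappers that pass them - qbman_swp_fq_schedule(), qbman_swp_fq_force(),
 * qbman_swp_fq_xon() and qbman_swp_fq_xoff() - are assumed to come from
 * qbman-portal.h; "fqid" is the caller's 24-bit frame queue id:
 *
 *	// force-reschedule a frame queue, then flow-control it off
 *	err = qbman_swp_fq_force(swp, fqid);
 *	if (!err)
 *		err = qbman_swp_fq_xoff(swp, fqid);
 */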
1644
1645struct qbman_cdan_ctrl_desc {
1646        u8 verb;
1647        u8 reserved;
1648        __le16 ch;
1649        u8 we;
1650        u8 ctrl;
1651        __le16 reserved2;
1652        __le64 cdan_ctx;
1653        u8 reserved3[48];
1654
1655};
1656
1657struct qbman_cdan_ctrl_rslt {
1658        u8 verb;
1659        u8 rslt;
1660        __le16 ch;
1661        u8 reserved[60];
1662};
1663
1664int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
1665                       u8 we_mask, u8 cdan_en,
1666                       u64 ctx)
1667{
1668        struct qbman_cdan_ctrl_desc *p = NULL;
1669        struct qbman_cdan_ctrl_rslt *r = NULL;
1670
1671        /* Start the management command */
1672        p = qbman_swp_mc_start(s);
1673        if (!p)
1674                return -EBUSY;
1675
1676        /* Encode the caller-provided attributes */
1677        p->ch = cpu_to_le16(channelid);
1678        p->we = we_mask;
1679        if (cdan_en)
1680                p->ctrl = 1;
1681        else
1682                p->ctrl = 0;
1683        p->cdan_ctx = cpu_to_le64(ctx);
1684
1685        /* Complete the management command */
1686        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1687        if (unlikely(!r)) {
1688                pr_err("qbman: wqchan config failed, no response\n");
1689                return -EIO;
1690        }
1691
1692        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
1693
1694        /* Determine success or failure */
1695        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1696                pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
1697                       channelid, r->rslt);
1698                return -EIO;
1699        }
1700
1701        return 0;
1702}
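/*
 * Usage sketch (illustrative only). Callers are assumed to use the wrappers
 * in qbman-portal.h, which fill in the write-enable mask (context and/or
 * enable bits) before calling qbman_swp_CDAN_set(); "ctx" is an opaque
 * 64-bit cookie returned in the channel's data-availability notifications:
 *
 *	// program the notification context and enable CDAN in one command
 *	err = qbman_swp_CDAN_set_context_enable(swp, channelid, ctx);
 *
 *	// later, turn notifications off again
 *	err = qbman_swp_CDAN_disable(swp, channelid);
 */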
1703
1704#define QBMAN_RESPONSE_VERB_MASK        0x7f
1705#define QBMAN_FQ_QUERY_NP               0x45
1706#define QBMAN_BP_QUERY                  0x32
1707
1708struct qbman_fq_query_desc {
1709        u8 verb;
1710        u8 reserved[3];
1711        __le32 fqid;
1712        u8 reserved2[56];
1713};
1714
1715int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
1716                         struct qbman_fq_query_np_rslt *r)
1717{
1718        struct qbman_fq_query_desc *p;
1719        void *resp;
1720
1721        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
1722        if (!p)
1723                return -EBUSY;
1724
1725        /* FQID is a 24 bit value */
1726        p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
1727        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
1728        if (!resp) {
1729                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
1730                       fqid);
1731                return -EIO;
1732        }
1733        *r = *(struct qbman_fq_query_np_rslt *)resp;
1734        /* Decode the outcome */
1735        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
1736
1737        /* Determine success or failure */
1738        if (r->rslt != QBMAN_MC_RSLT_OK) {
1739                pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
1740                       fqid, r->rslt);
1741                return -EIO;
1742        }
1743
1744        return 0;
1745}
1746
1747u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
1748{
1749        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
1750}
1751
1752u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
1753{
1754        return le32_to_cpu(r->byte_cnt);
1755}
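/*
 * Usage sketch (illustrative only): read back the instantaneous fill level
 * of a frame queue. The result struct can live on the caller's stack since
 * qbman_fq_query_state() copies the response out of the portal; "fqid" is
 * an assumed frame queue id:
 *
 *	struct qbman_fq_query_np_rslt state;
 *	u32 frames, bytes;
 *
 *	if (!qbman_fq_query_state(swp, fqid, &state)) {
 *		frames = qbman_fq_state_frame_count(&state);
 *		bytes  = qbman_fq_state_byte_count(&state);
 *	}
 */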
1756
1757struct qbman_bp_query_desc {
1758        u8 verb;
1759        u8 reserved;
1760        __le16 bpid;
1761        u8 reserved2[60];
1762};
1763
1764int qbman_bp_query(struct qbman_swp *s, u16 bpid,
1765                   struct qbman_bp_query_rslt *r)
1766{
1767        struct qbman_bp_query_desc *p;
1768        void *resp;
1769
1770        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
1771        if (!p)
1772                return -EBUSY;
1773
1774        p->bpid = cpu_to_le16(bpid);
1775        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
1776        if (!resp) {
1777                pr_err("qbman: Query BPID %d fields failed, no response\n",
1778                       bpid);
1779                return -EIO;
1780        }
1781        *r = *(struct qbman_bp_query_rslt *)resp;
1782        /* Decode the outcome */
1783        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
1784
1785        /* Determine success or failure */
1786        if (r->rslt != QBMAN_MC_RSLT_OK) {
1787                pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
1788                       bpid, r->rslt);
1789                return -EIO;
1790        }
1791
1792        return 0;
1793}
1794
1795u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
1796{
1797        return le32_to_cpu(a->fill);
1798}
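/*
 * Usage sketch (illustrative only): check how many free buffers a pool
 * currently holds, e.g. to decide whether it needs reseeding; "bpid" is an
 * assumed buffer pool id:
 *
 *	struct qbman_bp_query_rslt state;
 *	u32 num_free = 0;
 *
 *	if (!qbman_bp_query(swp, bpid, &state))
 *		num_free = qbman_bp_info_num_free_bufs(&state);
 */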
1799