linux/drivers/soc/fsl/dpio/qbman-portal.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI      0x800
#define QBMAN_CINH_SWP_EQCR_CI      0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_CR_RT        0x900
#define QBMAN_CINH_SWP_VDQCR_RT     0x940
#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
#define QBMAN_CINH_SWP_RCR_PI       0xc00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR_MEM       0x1600
#define QBMAN_CENA_SWP_RR_MEM       0x1680
#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK          0x0f
#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal function declarations */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
                                    const struct qbman_eq_desc *d,
                                    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             uint32_t *flags,
                                             int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct dpaa2_fd *fd,
                                               uint32_t *flags,
                                               int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
                                    const struct qbman_release_desc *d,
                                    const u64 *buffers,
                                    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
                                      const struct qbman_release_desc *d,
                                      const u64 *buffers,
                                      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
        = qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
        = qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
                        = qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
                        = qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const u64 *buffers,
                             unsigned int num_buffers)
                        = qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
        return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
                                        u32 value)
{
        writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
        return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_CPBS_SHIFT    15
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_VPM_SHIFT     7
#define SWP_CFG_CPM_SHIFT     6
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
                                    u8 epm, int sd, int sp, int se,
                                    int dp, int de, int ep)
{
        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
                est << SWP_CFG_EST_SHIFT |
                wn << SWP_CFG_WN_SHIFT |
                rpm << SWP_CFG_RPM_SHIFT |
                dcm << SWP_CFG_DCM_SHIFT |
                epm << SWP_CFG_EPM_SHIFT |
                sd << SWP_CFG_SD_SHIFT |
                sp << SWP_CFG_SP_SHIFT |
                se << SWP_CFG_SE_SHIFT |
                dp << SWP_CFG_DP_SHIFT |
                de << SWP_CFG_DE_SHIFT |
                ep << SWP_CFG_EP_SHIFT);
}

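/*
 * Worked example (illustrative): for a portal with an 8-entry DQRR (QMan
 * rev >= 4100, < 5000), qbman_swp_init() below calls
 * qbman_set_swp_cfg(8, 1, 0, 3, 2, 2, 1, 1, 1, 1, 0, 0), which composes to
 *   (8 << 20) | (1 << 14) | (3 << 12) | (2 << 10) | (2 << 8) |
 *   (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2) = 0x00807a3c.
 */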
#define QMAN_RT_MODE       0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
        /* 'first' is included, 'last' is excluded */
        if (first <= last)
                return last - first;
        else
                return (2 * ringsize) - (first - last);
}

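/*
 * Worked example (illustrative): the EQCR producer/consumer indices carry an
 * extra wrap bit, so they count modulo 2 * ringsize. With ringsize = 8,
 * first = 14 and last = 2, the distance is (2 * 8) - (14 - 2) = 4 entries.
 */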
/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return qbman_swp portal for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
        u32 reg;
        u32 mask_size;
        u32 eqcr_pi;

        if (!p)
                return NULL;

        spin_lock_init(&p->access_spinlock);

        p->desc = d;
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
                p->mr.valid_bit = QB_VALID_BIT;

        atomic_set(&p->vdq.available, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        p->addr_cena = d->cena_bar;
        p->addr_cinh = d->cinh_bar;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                        1, /* Writes Non-cacheable */
                        0, /* EQCR_CI stashing threshold */
                        3, /* RPM: RCR in array mode */
                        2, /* DCM: Discrete consumption ack */
                        2, /* EPM: EQCR in ring mode */
                        1, /* mem stashing drop enable */
                        1, /* mem stashing priority enable */
                        1, /* mem stashing enable */
                        1, /* dequeue stashing priority enable */
                        0, /* dequeue stashing enable */
                        0); /* EQCR_CI stashing priority enable */
        } else {
                memset(p->addr_cena, 0, 64 * 1024);
                reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                        1, /* Writes Non-cacheable */
                        1, /* EQCR_CI stashing threshold */
                        3, /* RPM: RCR in array mode */
                        2, /* DCM: Discrete consumption ack */
                        0, /* EPM: EQCR in ring mode */
                        1, /* mem stashing drop enable */
                        1, /* mem stashing priority enable */
                        1, /* mem stashing enable */
                        1, /* dequeue stashing priority enable */
                        0, /* dequeue stashing enable */
                        0); /* EQCR_CI stashing priority enable */
                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
        }

        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("qbman: the portal is not enabled!\n");
                kfree(p);
                return NULL;
        }

        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
        }
        /*
         * SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);

        p->eqcr.pi_ring_size = 8;
        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
                p->eqcr.pi_ring_size = 32;
                qbman_swp_enqueue_ptr =
                        qbman_swp_enqueue_mem_back;
                qbman_swp_enqueue_multiple_ptr =
                        qbman_swp_enqueue_multiple_mem_back;
                qbman_swp_enqueue_multiple_desc_ptr =
                        qbman_swp_enqueue_multiple_desc_mem_back;
                qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
                qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
                qbman_swp_release_ptr = qbman_swp_release_mem_back;
        }

        for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
                p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
        eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
        p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
        p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
        p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
                        & p->eqcr.pi_ci_mask;
        p->eqcr.available = p->eqcr.pi_ring_size;

        return p;
}

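/*
 * Usage sketch (illustrative only, not part of the driver API): bringing a
 * portal up and down again. In the real driver the descriptor is filled in
 * by the DPIO layer before qbman_swp_init() is called.
 */
static void __maybe_unused qbman_example_portal_lifetime(
					const struct qbman_swp_desc *d)
{
	struct qbman_swp *swp = qbman_swp_init(d);

	if (!swp)
		return;		/* the portal could not be enabled */

	/* ... issue enqueue/dequeue/release commands through 'swp' ... */

	qbman_swp_finish(swp);	/* frees the object; the BARs stay mapped */
}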
/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
        kfree(p);
}

/**
 * qbman_swp_interrupt_read_status()
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status()
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: whether to inhibit (mask) all interrupt sources in SWP_IIR
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}

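/*
 * Usage sketch (illustrative only): a typical interrupt flow built on the
 * helpers above: read the cause, handle it, then acknowledge it. Which
 * status bits mean what is defined by the hardware and is not shown here.
 */
static void __maybe_unused qbman_example_irq_flow(struct qbman_swp *p)
{
	u32 status = qbman_swp_interrupt_read_status(p);

	if (!status)
		return;		/* no source asserted, not our interrupt */

	/* ... process the sources indicated by 'status' ... */

	/* Writing the handled bits back to SWP_ISR acknowledges them */
	qbman_swp_interrupt_clear_status(p, status);
}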
/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
        else
                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}

/*
 * Merges in the caller-supplied command verb (which should not include the
 * valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
        u8 *v = cmd;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                dma_wmb();
                *v = cmd_verb | p->mc.valid_bit;
        } else {
                *v = cmd_verb | p->mc.valid_bit;
                dma_wmb();
                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
        }
}

/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
        u32 *ret, verb;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
                /* Remove the valid-bit - command completed if the rest
                 * is non-zero.
                 */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mc.valid_bit ^= QB_VALID_BIT;
        } else {
                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
                /* Command completed if the valid bit is toggled */
                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
                        return NULL;
                /* Command completed if the rest is non-zero */
                verb = ret[0] & ~QB_VALID_BIT;
                if (!verb)
                        return NULL;
                p->mr.valid_bit ^= QB_VALID_BIT;
        }

        return ret;
}

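/*
 * Usage sketch (illustrative only): the canonical management-command flow on
 * top of the three helpers above. The caller fills in the command body; only
 * the verb byte is owned by qbman_swp_mc_submit(). The unbounded poll loop is
 * a simplification; real callers should bound it with a timeout.
 */
static void * __maybe_unused qbman_example_mc_command(struct qbman_swp *p,
						      u8 cmd_verb)
{
	void *cmd = qbman_swp_mc_start(p);
	void *rsp;

	/* ... fill in the command body here (everything but the verb) ... */

	qbman_swp_mc_submit(p, cmd, cmd_verb);

	/* Poll until the hardware publishes a completed response */
	do {
		rsp = qbman_swp_mc_result(p);
	} while (!rsp);

	return rsp;
}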
#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT          7

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->verb |= enqueue_response_always;
        else
                d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 *  - enqueue to a frame queue
 *  - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
                          u32 qd_bin, u32 qd_prio)
{
        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->tgtid = cpu_to_le32(qdid);
        d->qdbin = cpu_to_le16(qd_bin);
        d->qpri = qd_prio;
}

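/*
 * Usage sketch (illustrative only): enqueuing a single frame to a frame
 * queue. 'fqid' and 'fd' are assumed to come from elsewhere (the MC firmware
 * and a DMA-mapped buffer, respectively).
 */
static int __maybe_unused qbman_example_enqueue_one(struct qbman_swp *s,
						    u32 fqid,
						    const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc d;

	qbman_eq_desc_clear(&d);
	qbman_eq_desc_set_no_orp(&d, 0);	/* rejections go back to a FQ */
	qbman_eq_desc_set_fq(&d, fqid);

	/* Dispatches to the direct or memory-backed implementation below */
	return qbman_swp_enqueue_ptr(s, &d, fd);
}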
#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

#define QB_RT_BIT ((u32)0x100)
/**
 * qbman_swp_enqueue_direct() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_direct(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
{
        u32 flags = 0;
        int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

        /* The multi-enqueue helper returns the number of frames accepted;
         * exactly one must have been accepted for the enqueue to succeed.
         */
        if (ret == 1)
                ret = 0;
        else
                ret = -EBUSY;
        return ret;
}

/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                               const struct qbman_eq_desc *d,
                               const struct dpaa2_fd *fd)
{
        u32 flags = 0;
        int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

        /* As above: exactly one accepted frame means success */
        if (ret == 1)
                ret = 0;
        else
                ret = -EBUSY;
        return ret;
}

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: table of per-frame QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = (uint32_t *)d;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        spin_lock(&s->access_spinlock);
        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;

        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
                s->eqcr.ci &= full_mask;

                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
                        spin_unlock(&s->access_spinlock);
                        return 0;
                }
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        dma_wmb();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Advance the producer index */
        s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;
        spin_unlock(&s->access_spinlock);

        return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @flags: table of per-frame QBMAN_ENQUEUE_FLAG_DCA flags, ignored if NULL
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                        const struct qbman_eq_desc *d,
                                        const struct dpaa2_fd *fd,
                                        uint32_t *flags,
                                        int num_frames)
{
        uint32_t *p = NULL;
        const uint32_t *cl = (uint32_t *)(d);
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;
        unsigned long irq_flags;

        spin_lock_irqsave(&s->access_spinlock, irq_flags);

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
                s->eqcr.ci = *p & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available) {
                        spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
                        return 0;
                }
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                p[0] = cl[0] | s->eqcr.pi_vb;
                if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
                        struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

                        d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
                                ((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
                }
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }
        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
        spin_unlock_irqrestore(&s->access_spinlock, irq_flags);

        return num_enqueued;
}

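/*
 * Usage sketch (illustrative only): enqueuing a burst of frames with a single
 * descriptor and handling partial progress. Both multi-enqueue variants
 * return how many frames were accepted, which can be fewer than requested
 * (including 0) when the EQCR is nearly full; they never block.
 */
static int __maybe_unused qbman_example_enqueue_burst(struct qbman_swp *s,
						      const struct qbman_eq_desc *d,
						      const struct dpaa2_fd *fds,
						      int num_frames)
{
	int done = 0;

	while (done < num_frames) {
		int ret = qbman_swp_enqueue_multiple_ptr(s, d, fds + done,
							 NULL, num_frames - done);
		if (ret < 0)
			return ret;	/* hard error */
		if (!ret)
			break;		/* ring full; the caller may retry later */
		done += ret;
	}
	return done;
}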
/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                           const struct qbman_eq_desc *d,
                                           const struct dpaa2_fd *fd,
                                           int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
                s->eqcr.ci &= full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        dma_wmb();

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        /* Advance the producer index */
        s->eqcr.pi = (s->eqcr.pi + num_enqueued) & full_mask;

        return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptors
 * @s:  the software portal used for enqueue
 * @d:  table of minimal enqueue descriptors
 * @fd: pointer to the table of frame descriptors to be enqueued
 * @num_frames: number of frames to be enqueued
 *
 * Return the number of frames enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames)
{
        uint32_t *p;
        const uint32_t *cl;
        uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
        int i, num_enqueued = 0;

        half_mask = (s->eqcr.pi_ci_mask>>1);
        full_mask = s->eqcr.pi_ci_mask;
        if (!s->eqcr.available) {
                eqcr_ci = s->eqcr.ci;
                p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
                s->eqcr.ci = *p & full_mask;
                s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
                                        eqcr_ci, s->eqcr.ci);
                if (!s->eqcr.available)
                        return 0;
        }

        eqcr_pi = s->eqcr.pi;
        num_enqueued = (s->eqcr.available < num_frames) ?
                        s->eqcr.available : num_frames;
        s->eqcr.available -= num_enqueued;
        /* Fill in the EQCR ring */
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                /* Skip copying the verb */
                memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
                memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
                       &fd[i], sizeof(*fd));
                eqcr_pi++;
        }

        /* Set the verb byte, have to substitute in the valid-bit */
        eqcr_pi = s->eqcr.pi;
        for (i = 0; i < num_enqueued; i++) {
                p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
                cl = (uint32_t *)(&d[i]);
                p[0] = cl[0] | s->eqcr.pi_vb;
                eqcr_pi++;
                if (!(eqcr_pi & half_mask))
                        s->eqcr.pi_vb ^= QB_VALID_BIT;
        }

        s->eqcr.pi = eqcr_pi & full_mask;

        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
                                (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

        return num_enqueued;
}

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        WARN_ON(channel_idx > 15);
        *enabled = !!(src & (1 << channel_idx));
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
        u16 dqsrc;

        WARN_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Read back the complete src map. If no channels are enabled,
         * the SDQCR must be 0 or else QMan will assert errors.
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}

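/*
 * Usage sketch (illustrative only): enabling push dequeue on one channel and
 * reading the setting back. Channel index 0 is an arbitrary example value.
 */
static void __maybe_unused qbman_example_push_dequeue(struct qbman_swp *s)
{
	int enabled;

	qbman_swp_push_set(s, 0, 1);	/* writes a non-zero SDQCR */
	qbman_swp_push_get(s, 0, &enabled);
	WARN_ON(!enabled);
}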
#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, pull dequeues will
 * produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct dpaa2_dq *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        /* save the virtual address */
        d->rsp_addr_virt = (u64)(uintptr_t)storage;

        if (!storage) {
                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
        d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                            enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                                 enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(chid);
}

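/*
 * Usage sketch (illustrative only): issuing a volatile dequeue of up to 16
 * frames from a FQ into caller-provided storage. 'storage' must remain valid
 * until the result has been consumed, and 'storage_phys' is its DMA address,
 * mapped by the caller beforehand.
 */
static int __maybe_unused qbman_example_pull(struct qbman_swp *s, u32 fqid,
					     struct dpaa2_dq *storage,
					     dma_addr_t storage_phys)
{
	struct qbman_pull_desc pd;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_numframes(&pd, 16);
	qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
	qbman_pull_desc_set_fq(&pd, fqid);

	/* -EBUSY means an earlier volatile dequeue is still in flight */
	return qbman_swp_pull_ptr(s, &pd);
}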
/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        else
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;
        dma_wmb();
        /* Set the verb byte, have to substitute in the valid-bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;

        return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        else
                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;

        /* Set the verb byte, have to substitute in the valid-bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;
        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

        return 0;
}

#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next_direct() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /*
                 * We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /*
                 * if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        verb = p->dq.verb;

        /*
         * If the valid-bit isn't of the expected polarity, nothing is there.
         * Note that in the DQRR reset bug workaround we shouldn't need to
         * skip this check, because we've already determined that a new entry
         * is available and we've invalidated the cacheline before reading it,
         * so the valid-bit behaviour is repaired and should tell us what we
         * already knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
                return NULL;
        }
        /*
         * There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        /*
         * If this is the final response to a volatile dequeue command
         * indicate that the vdq is available
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

        return p;
}

/**
 * qbman_swp_dqrr_next_mem_back() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /*
                 * We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /*
                 * if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
        verb = p->dq.verb;

        /*
         * If the valid-bit isn't of the expected polarity, nothing is there.
         * Note that in the DQRR reset bug workaround we shouldn't need to
         * skip this check, because we've already determined that a new entry
         * is available and we've invalidated the cacheline before reading it,
         * so the valid-bit behaviour is repaired and should tell us what we
         * already knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));
                return NULL;
        }
        /*
         * There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        /*
         * If this is the final response to a volatile dequeue command
         * indicate that the vdq is available
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx)));

        return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s: the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}

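/*
 * Usage sketch (illustrative only): draining the DQRR. Every entry returned
 * by the next() helper must eventually be handed back with consume(), though
 * not necessarily before the following entry is fetched.
 */
static void __maybe_unused qbman_example_drain_dqrr(struct qbman_swp *s)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next_ptr(s)) != NULL) {
		/* ... inspect the entry (verb/stat/FD) and process it ... */
		qbman_swp_dqrr_consume(s, dq);
	}
}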
1358/**
1359 * qbman_result_has_new_result() - Check and get the dequeue response from the
1360 *                                 dq storage memory set in pull dequeue command
1361 * @s: the software portal object
1362 * @dq: the dequeue result read from the memory
1363 *
1364 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
1365 * dequeue result.
1366 *
1367 * Only used for user-provided storage of dequeue results, not DQRR. For
1368 * efficiency purposes, the driver will perform any required endianness
1369 * conversion to ensure that the user's dequeue result storage is in host-endian
1370 * format. As such, once the user has called qbman_result_has_new_result() and
1371 * been returned a valid dequeue result, they should not call it again on
1372 * the same memory location (except of course if another dequeue command has
1373 * been executed to produce a new result to that location).
1374 */
1375int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
1376{
1377        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
1378                return 0;
1379
1380        /*
1381         * Set token to be 0 so we will detect change back to 1
1382         * next time the looping is traversed. Const is cast away here
1383         * as we want users to treat the dequeue responses as read only.
1384         */
1385        ((struct dpaa2_dq *)dq)->dq.tok = 0;
1386
1387        /*
1388         * Determine whether VDQCR is available based on whether the
1389         * current result is sitting in the first storage location of
1390         * the busy command.
1391         */
1392        if (s->vdq.storage == dq) {
1393                s->vdq.storage = NULL;
1394                atomic_inc(&s->vdq.available);
1395        }
1396
1397        return 1;
1398}
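
/*
 * Usage sketch (editorial illustration, not part of the driver): busy-wait
 * for the result of a volatile (pull) dequeue that was issued earlier with
 * "storage" as its target. A real caller would bound the wait or sleep
 * rather than spin indefinitely.
 */
static const struct dpaa2_dq *example_wait_pull_result(struct qbman_swp *swp,
                                                       struct dpaa2_dq *storage)
{
        /* poll until QBMan writes the valid token back to the storage */
        while (!qbman_result_has_new_result(swp, storage))
                cpu_relax();

        return storage;
}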
1399
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state
 * @d: the release descriptor to be cleared
 */
1404void qbman_release_desc_clear(struct qbman_release_desc *d)
1405{
1406        memset(d, 0, sizeof(*d));
1407        d->verb = 1 << 5; /* Release Command Valid */
1408}
1409
/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d: the release descriptor
 * @bpid: the buffer pool ID
 */
1413void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
1414{
1415        d->bpid = cpu_to_le16(bpid);
1416}
1417
/**
 * qbman_release_desc_set_rcdi() - Determine whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed
 * @d: the release descriptor
 * @enable: nonzero to assert RCDI on completion, zero to leave it deasserted
 */
1422void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
1423{
1424        if (enable)
1425                d->verb |= 1 << 6;
1426        else
1427                d->verb &= ~(1 << 6);
1428}
1429
1430#define RAR_IDX(rar)     ((rar) & 0x7)
1431#define RAR_VB(rar)      ((rar) & 0x80)
1432#define RAR_SUCCESS(rar) ((rar) & 0x100)
1433
/**
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be from 1 to 7
 *
 * Return 0 for success, -EINVAL if the number of buffers is out of range,
 * or -EBUSY if the release command ring is not ready.
 */
1443int qbman_swp_release_direct(struct qbman_swp *s,
1444                             const struct qbman_release_desc *d,
1445                             const u64 *buffers, unsigned int num_buffers)
1446{
1447        int i;
1448        struct qbman_release_desc *p;
1449        u32 rar;
1450
1451        if (!num_buffers || (num_buffers > 7))
1452                return -EINVAL;
1453
1454        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1455        if (!RAR_SUCCESS(rar))
1456                return -EBUSY;
1457
1458        /* Start the release command */
1459        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
1460
1461        /* Copy the caller's buffer pointers to the command */
1462        for (i = 0; i < num_buffers; i++)
1463                p->buf[i] = cpu_to_le64(buffers[i]);
1464        p->bpid = d->bpid;
1465
1466        /*
1467         * Set the verb byte, have to substitute in the valid-bit
1468         * and the number of buffers.
1469         */
1470        dma_wmb();
1471        p->verb = d->verb | RAR_VB(rar) | num_buffers;
1472
1473        return 0;
1474}
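
/*
 * Usage sketch (editorial illustration, not part of the driver): build a
 * release descriptor and free a small batch of buffers back to a pool,
 * retrying while the release ring is busy. A real caller would normally go
 * through the qbman_swp_release() wrapper in qbman-portal.h instead of
 * naming the direct variant.
 */
static int example_release_buffers(struct qbman_swp *swp, u16 bpid,
                                   const u64 *bufs, unsigned int num)
{
        struct qbman_release_desc d;
        int ret;

        qbman_release_desc_clear(&d);
        qbman_release_desc_set_bpid(&d, bpid);
        qbman_release_desc_set_rcdi(&d, 0);     /* no RCDI interrupt */

        /* -EBUSY only means the release ring is momentarily full */
        do {
                ret = qbman_swp_release_direct(swp, &d, bufs, num);
        } while (ret == -EBUSY);

        return ret;
}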
1475
/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be from 1 to 7
 *
 * Return 0 for success, -EINVAL if the number of buffers is out of range,
 * or -EBUSY if the release command ring is not ready.
 */
1485int qbman_swp_release_mem_back(struct qbman_swp *s,
1486                               const struct qbman_release_desc *d,
1487                               const u64 *buffers, unsigned int num_buffers)
1488{
1489        int i;
1490        struct qbman_release_desc *p;
1491        u32 rar;
1492
1493        if (!num_buffers || (num_buffers > 7))
1494                return -EINVAL;
1495
1496        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
1497        if (!RAR_SUCCESS(rar))
1498                return -EBUSY;
1499
1500        /* Start the release command */
1501        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
1502
1503        /* Copy the caller's buffer pointers to the command */
1504        for (i = 0; i < num_buffers; i++)
1505                p->buf[i] = cpu_to_le64(buffers[i]);
1506        p->bpid = d->bpid;
1507
1508        p->verb = d->verb | RAR_VB(rar) | num_buffers;
1509        dma_wmb();
        qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
                             RAR_IDX(rar) * 4, QMAN_RT_MODE);
1512
1513        return 0;
1514}
1515
1516struct qbman_acquire_desc {
1517        u8 verb;
1518        u8 reserved;
1519        __le16 bpid;
1520        u8 num;
1521        u8 reserved2[59];
1522};
1523
1524struct qbman_acquire_rslt {
1525        u8 verb;
1526        u8 rslt;
1527        __le16 reserved;
1528        u8 num;
1529        u8 reserved2[3];
1530        __le64 buf[7];
1531};
1532
/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool ID
 * @buffers:     a pointer to the array where the acquired buffer addresses
 *               will be stored
 * @num_buffers: number of buffers to be acquired, must be from 1 to 7
 *
 * Return the number of buffers acquired (which may be fewer than requested)
 * on success, or a negative error code if the acquire command fails.
 */
1543int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
1544                      unsigned int num_buffers)
1545{
1546        struct qbman_acquire_desc *p;
1547        struct qbman_acquire_rslt *r;
1548        int i;
1549
1550        if (!num_buffers || (num_buffers > 7))
1551                return -EINVAL;
1552
1553        /* Start the management command */
1554        p = qbman_swp_mc_start(s);
1555
1556        if (!p)
1557                return -EBUSY;
1558
1559        /* Encode the caller-provided attributes */
1560        p->bpid = cpu_to_le16(bpid);
1561        p->num = num_buffers;
1562
1563        /* Complete the management command */
1564        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
1565        if (unlikely(!r)) {
1566                pr_err("qbman: acquire from BPID %d failed, no response\n",
1567                       bpid);
1568                return -EIO;
1569        }
1570
1571        /* Decode the outcome */
1572        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
1573
1574        /* Determine success or failure */
1575        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1576                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
1577                       bpid, r->rslt);
1578                return -EIO;
1579        }
1580
1581        WARN_ON(r->num > num_buffers);
1582
1583        /* Copy the acquired buffers to the caller's array */
1584        for (i = 0; i < r->num; i++)
1585                buffers[i] = le64_to_cpu(r->buf[i]);
1586
1587        return (int)r->num;
1588}
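
/*
 * Usage sketch (editorial illustration, not part of the driver): refill a
 * software cache of buffers from a pool. A successful acquire may return
 * fewer buffers than requested, so the caller must use the return value,
 * not the requested count.
 */
static int example_acquire_buffers(struct qbman_swp *swp, u16 bpid,
                                   u64 *addrs, unsigned int wanted)
{
        int n = qbman_swp_acquire(swp, bpid, addrs, wanted);

        if (n < 0)
                return n;       /* -EINVAL, -EBUSY or -EIO */

        /* only addrs[0..n-1] are valid; n < wanted means the pool is low */
        return n;
}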
1589
1590struct qbman_alt_fq_state_desc {
1591        u8 verb;
1592        u8 reserved[3];
1593        __le32 fqid;
1594        u8 reserved2[56];
1595};
1596
1597struct qbman_alt_fq_state_rslt {
1598        u8 verb;
1599        u8 rslt;
1600        u8 reserved[62];
1601};
1602
1603#define ALT_FQ_FQID_MASK 0x00FFFFFF
1604
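/**
 * qbman_swp_alt_fq_state() - Issue a management command that alters the state
 *                            of a frame queue (e.g. schedule, force, XON/XOFF)
 * @s:           the software portal object
 * @fqid:        the ID of the frame queue to be altered (24-bit)
 * @alt_fq_verb: the management command verb selecting the state change
 *
 * Return 0 for success, -EBUSY if the command ring is not ready, or -EIO if
 * the command fails.
 */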
1605int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
1606                           u8 alt_fq_verb)
1607{
1608        struct qbman_alt_fq_state_desc *p;
1609        struct qbman_alt_fq_state_rslt *r;
1610
1611        /* Start the management command */
1612        p = qbman_swp_mc_start(s);
1613        if (!p)
1614                return -EBUSY;
1615
1616        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
1617
1618        /* Complete the management command */
1619        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1620        if (unlikely(!r)) {
1621                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1622                       alt_fq_verb);
1623                return -EIO;
1624        }
1625
1626        /* Decode the outcome */
1627        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
1628
1629        /* Determine success or failure */
1630        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: ALT FQID %u failed: verb = 0x%08x code = 0x%02x\n",
                       fqid, r->verb, r->rslt);
1633                return -EIO;
1634        }
1635
1636        return 0;
1637}
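
/*
 * Usage sketch (editorial illustration, not part of the driver): pause and
 * resume scheduling of a frame queue. The QBMAN_FQ_XOFF/QBMAN_FQ_XON verb
 * codes are assumed to be available from qbman-portal.h.
 */
static int example_fq_pause(struct qbman_swp *swp, u32 fqid, bool pause)
{
        return qbman_swp_alt_fq_state(swp, fqid,
                                      pause ? QBMAN_FQ_XOFF : QBMAN_FQ_XON);
}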
1638
1639struct qbman_cdan_ctrl_desc {
1640        u8 verb;
1641        u8 reserved;
1642        __le16 ch;
1643        u8 we;
1644        u8 ctrl;
1645        __le16 reserved2;
1646        __le64 cdan_ctx;
        u8 reserved3[48];
};
1650
1651struct qbman_cdan_ctrl_rslt {
1652        u8 verb;
1653        u8 rslt;
1654        __le16 ch;
1655        u8 reserved[60];
1656};
1657
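/**
 * qbman_swp_CDAN_set() - Configure Channel Data Availability Notification
 *                        (CDAN) for a channel
 * @s:         the software portal object
 * @channelid: the channel to be configured
 * @we_mask:   write-enable mask selecting which of the fields below to update
 * @cdan_en:   nonzero to enable CDAN generation, zero to disable it
 * @ctx:       the 64-bit context delivered with the notification
 *
 * Return 0 for success, -EBUSY if the command ring is not ready, or -EIO if
 * the command fails.
 */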
1658int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
1659                       u8 we_mask, u8 cdan_en,
1660                       u64 ctx)
1661{
        struct qbman_cdan_ctrl_desc *p;
        struct qbman_cdan_ctrl_rslt *r;
1664
1665        /* Start the management command */
1666        p = qbman_swp_mc_start(s);
1667        if (!p)
1668                return -EBUSY;
1669
1670        /* Encode the caller-provided attributes */
1671        p->ch = cpu_to_le16(channelid);
1672        p->we = we_mask;
1673        if (cdan_en)
1674                p->ctrl = 1;
1675        else
1676                p->ctrl = 0;
1677        p->cdan_ctx = cpu_to_le64(ctx);
1678
1679        /* Complete the management command */
1680        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1681        if (unlikely(!r)) {
1682                pr_err("qbman: wqchan config failed, no response\n");
1683                return -EIO;
1684        }
1685
1686        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
1687
1688        /* Determine success or failure */
1689        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: CDAN for channel %d failed: code = 0x%02x\n",
                       channelid, r->rslt);
1692                return -EIO;
1693        }
1694
1695        return 0;
1696}
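
/*
 * Usage sketch (editorial illustration, not part of the driver): arm data
 * availability notifications on a channel with a caller-chosen context. The
 * CODE_CDAN_WE_EN/CODE_CDAN_WE_CTX write-enable bits are assumed to come
 * from qbman-portal.h.
 */
static int example_enable_cdan(struct qbman_swp *swp, u16 ch, u64 ctx)
{
        /* update the enable bit and the context in a single command */
        return qbman_swp_CDAN_set(swp, ch,
                                  CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
                                  1, ctx);
}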
1697
1698#define QBMAN_RESPONSE_VERB_MASK        0x7f
1699#define QBMAN_FQ_QUERY_NP               0x45
1700#define QBMAN_BP_QUERY                  0x32
1701
1702struct qbman_fq_query_desc {
1703        u8 verb;
1704        u8 reserved[3];
1705        __le32 fqid;
1706        u8 reserved2[56];
1707};
1708
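/**
 * qbman_fq_query_state() - Query the "non-programmable" (state) fields of a
 *                          frame queue
 * @s:    the software portal object
 * @fqid: the ID of the frame queue to be queried (24-bit)
 * @r:    storage for the query result
 *
 * Return 0 for success, -EBUSY if the command ring is not ready, or -EIO if
 * the query fails.
 */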
1709int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
1710                         struct qbman_fq_query_np_rslt *r)
1711{
1712        struct qbman_fq_query_desc *p;
1713        void *resp;
1714
1715        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
1716        if (!p)
1717                return -EBUSY;
1718
        /* FQID is a 24-bit value */
        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
1721        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
1722        if (!resp) {
1723                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
1724                       fqid);
1725                return -EIO;
1726        }
1727        *r = *(struct qbman_fq_query_np_rslt *)resp;
1728        /* Decode the outcome */
1729        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
1730
1731        /* Determine success or failure */
1732        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("qbman: Query NP fields of FQID 0x%x failed, code=0x%02x\n",
                       fqid, r->rslt);
1735                return -EIO;
1736        }
1737
1738        return 0;
1739}
1740
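/**
 * qbman_fq_state_frame_count() - Get the number of frames currently on the
 *                                queue from a previous state query result
 * @r: the query result
 */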
1741u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
1742{
1743        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
1744}
1745
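/**
 * qbman_fq_state_byte_count() - Get the number of bytes currently on the
 *                               queue from a previous state query result
 * @r: the query result
 */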
1746u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
1747{
1748        return le32_to_cpu(r->byte_cnt);
1749}
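
/*
 * Usage sketch (editorial illustration, not part of the driver): check
 * whether a frame queue has fully drained, as a driver might do before
 * tearing the queue down.
 */
static bool example_fq_is_empty(struct qbman_swp *swp, u32 fqid)
{
        struct qbman_fq_query_np_rslt state;

        if (qbman_fq_query_state(swp, fqid, &state))
                return false;   /* query failed; assume frames remain */

        return qbman_fq_state_frame_count(&state) == 0;
}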
1750
1751struct qbman_bp_query_desc {
1752        u8 verb;
1753        u8 reserved;
1754        __le16 bpid;
1755        u8 reserved2[60];
1756};
1757
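/**
 * qbman_bp_query() - Query the state of a buffer pool
 * @s:    the software portal object
 * @bpid: the ID of the buffer pool to be queried
 * @r:    storage for the query result
 *
 * Return 0 for success, -EBUSY if the command ring is not ready, or -EIO if
 * the query fails.
 */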
1758int qbman_bp_query(struct qbman_swp *s, u16 bpid,
1759                   struct qbman_bp_query_rslt *r)
1760{
1761        struct qbman_bp_query_desc *p;
1762        void *resp;
1763
1764        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
1765        if (!p)
1766                return -EBUSY;
1767
1768        p->bpid = cpu_to_le16(bpid);
1769        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
1770        if (!resp) {
1771                pr_err("qbman: Query BPID %d fields failed, no response\n",
1772                       bpid);
1773                return -EIO;
1774        }
1775        *r = *(struct qbman_bp_query_rslt *)resp;
1776        /* Decode the outcome */
1777        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
1778
1779        /* Determine success or failure */
1780        if (r->rslt != QBMAN_MC_RSLT_OK) {
                pr_err("qbman: Query fields of BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
1783                return -EIO;
1784        }
1785
1786        return 0;
1787}
1788
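/**
 * qbman_bp_info_num_free_bufs() - Get the number of free buffers in the pool
 *                                 from a previous query result
 * @a: the query result
 */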
1789u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
1790{
1791        return le32_to_cpu(a->fill);
1792}
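
/*
 * Usage sketch (editorial illustration, not part of the driver): decide
 * whether a buffer pool needs seeding by comparing its fill level against a
 * caller-chosen threshold.
 */
static bool example_pool_needs_refill(struct qbman_swp *swp, u16 bpid,
                                      u32 threshold)
{
        struct qbman_bp_query_rslt state;

        if (qbman_bp_query(swp, bpid, &state))
                return true;    /* treat a failed query as "needs refill" */

        return qbman_bp_info_num_free_bufs(&state) < threshold;
}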
1793