linux/drivers/soc/fsl/dpio/qbman-portal.c
   1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
   2/*
   3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
   4 * Copyright 2016 NXP
   5 *
   6 */
   7
   8#include <asm/cacheflush.h>
   9#include <linux/io.h>
  10#include <linux/slab.h>
  11#include <soc/fsl/dpaa2-global.h>
  12
  13#include "qbman-portal.h"
  14
  15#define QMAN_REV_4000   0x04000000
  16#define QMAN_REV_4100   0x04010000
  17#define QMAN_REV_4101   0x04010001
  18#define QMAN_REV_5000   0x05000000
  19
  20#define QMAN_REV_MASK   0xffff0000
  21
  22/* All QBMan command and result structures use this "valid bit" encoding */
  23#define QB_VALID_BIT ((u32)0x80)
  24
  25/* QBMan portal management command codes */
  26#define QBMAN_MC_ACQUIRE       0x30
  27#define QBMAN_WQCHAN_CONFIGURE 0x46
  28
  29/* CINH register offsets */
  30#define QBMAN_CINH_SWP_EQCR_PI      0x800
  31#define QBMAN_CINH_SWP_EQAR    0x8c0
  32#define QBMAN_CINH_SWP_CR_RT        0x900
  33#define QBMAN_CINH_SWP_VDQCR_RT     0x940
  34#define QBMAN_CINH_SWP_EQCR_AM_RT   0x980
  35#define QBMAN_CINH_SWP_RCR_AM_RT    0x9c0
  36#define QBMAN_CINH_SWP_DQPI    0xa00
  37#define QBMAN_CINH_SWP_DCAP    0xac0
  38#define QBMAN_CINH_SWP_SDQCR   0xb00
  39#define QBMAN_CINH_SWP_EQCR_AM_RT2  0xb40
  40#define QBMAN_CINH_SWP_RCR_PI       0xc00
  41#define QBMAN_CINH_SWP_RAR     0xcc0
  42#define QBMAN_CINH_SWP_ISR     0xe00
  43#define QBMAN_CINH_SWP_IER     0xe40
  44#define QBMAN_CINH_SWP_ISDR    0xe80
  45#define QBMAN_CINH_SWP_IIR     0xec0
  46
  47/* CENA register offsets */
  48#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
  49#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
  50#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
  51#define QBMAN_CENA_SWP_CR      0x600
  52#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
  53#define QBMAN_CENA_SWP_VDQCR   0x780
  54
  55/* CENA register offsets in memory-backed mode */
  56#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
  57#define QBMAN_CENA_SWP_RCR_MEM(n)   (0x1400 + ((u32)(n) << 6))
  58#define QBMAN_CENA_SWP_CR_MEM       0x1600
  59#define QBMAN_CENA_SWP_RR_MEM       0x1680
  60#define QBMAN_CENA_SWP_VDQCR_MEM    0x1780
  61
  62/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
  63#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
  64
  65/* Define token used to determine if response written to memory is valid */
  66#define QMAN_DQ_TOKEN_VALID 1
  67
  68/* SDQCR attribute codes */
  69#define QB_SDQCR_FC_SHIFT   29
  70#define QB_SDQCR_FC_MASK    0x1
  71#define QB_SDQCR_DCT_SHIFT  24
  72#define QB_SDQCR_DCT_MASK   0x3
  73#define QB_SDQCR_TOK_SHIFT  16
  74#define QB_SDQCR_TOK_MASK   0xff
  75#define QB_SDQCR_SRC_SHIFT  0
  76#define QB_SDQCR_SRC_MASK   0xffff
  77
  78/* opaque token for static dequeues */
  79#define QMAN_SDQCR_TOKEN    0xbb
  80
  81enum qbman_sdqcr_dct {
  82        qbman_sdqcr_dct_null = 0,
  83        qbman_sdqcr_dct_prio_ics,
  84        qbman_sdqcr_dct_active_ics,
  85        qbman_sdqcr_dct_active
  86};
  87
  88enum qbman_sdqcr_fc {
  89        qbman_sdqcr_fc_one = 0,
  90        qbman_sdqcr_fc_up_to_3 = 1
  91};
  92
  93/* Portal Access */
  94
  95static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
  96{
  97        return readl_relaxed(p->addr_cinh + offset);
  98}
  99
 100static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
 101                                        u32 value)
 102{
 103        writel_relaxed(value, p->addr_cinh + offset);
 104}
 105
 106static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
 107{
 108        return p->addr_cena + offset;
 109}
 110
 111#define QBMAN_CINH_SWP_CFG   0xd00
 112
 113#define SWP_CFG_DQRR_MF_SHIFT 20
 114#define SWP_CFG_EST_SHIFT     16
 115#define SWP_CFG_CPBS_SHIFT    15
 116#define SWP_CFG_WN_SHIFT      14
 117#define SWP_CFG_RPM_SHIFT     12
 118#define SWP_CFG_DCM_SHIFT     10
 119#define SWP_CFG_EPM_SHIFT     8
 120#define SWP_CFG_VPM_SHIFT     7
 121#define SWP_CFG_CPM_SHIFT     6
 122#define SWP_CFG_SD_SHIFT      5
 123#define SWP_CFG_SP_SHIFT      4
 124#define SWP_CFG_SE_SHIFT      3
 125#define SWP_CFG_DP_SHIFT      2
 126#define SWP_CFG_DE_SHIFT      1
 127#define SWP_CFG_EP_SHIFT      0
 128
 129static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
 130                                    u8 epm, int sd, int sp, int se,
 131                                    int dp, int de, int ep)
 132{
 133        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
 134                est << SWP_CFG_EST_SHIFT |
 135                wn << SWP_CFG_WN_SHIFT |
 136                rpm << SWP_CFG_RPM_SHIFT |
 137                dcm << SWP_CFG_DCM_SHIFT |
 138                epm << SWP_CFG_EPM_SHIFT |
 139                sd << SWP_CFG_SD_SHIFT |
 140                sp << SWP_CFG_SP_SHIFT |
 141                se << SWP_CFG_SE_SHIFT |
 142                dp << SWP_CFG_DP_SHIFT |
 143                de << SWP_CFG_DE_SHIFT |
 144                ep << SWP_CFG_EP_SHIFT);
 145}
 146
 147#define QMAN_RT_MODE       0x00000100
 148
 149/**
 150 * qbman_swp_init() - Create a functional object representing the given
 151 *                    QBMan portal descriptor.
 152 * @d: the given qbman swp descriptor
 153 *
 154 * Return qbman_swp portal for success, NULL if the object cannot
 155 * be created.
 156 */
 157struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
 158{
 159        struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
 160        u32 reg;
 161
 162        if (!p)
 163                return NULL;
 164        p->desc = d;
 165        p->mc.valid_bit = QB_VALID_BIT;
 166        p->sdq = 0;
 167        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
 168        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
 169        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
 170        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
 171                p->mr.valid_bit = QB_VALID_BIT;
 172
 173        atomic_set(&p->vdq.available, 1);
 174        p->vdq.valid_bit = QB_VALID_BIT;
 175        p->dqrr.next_idx = 0;
 176        p->dqrr.valid_bit = QB_VALID_BIT;
 177
 178        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
 179                p->dqrr.dqrr_size = 4;
 180                p->dqrr.reset_bug = 1;
 181        } else {
 182                p->dqrr.dqrr_size = 8;
 183                p->dqrr.reset_bug = 0;
 184        }
 185
 186        p->addr_cena = d->cena_bar;
 187        p->addr_cinh = d->cinh_bar;
 188
 189        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
 190                memset(p->addr_cena, 0, 64 * 1024);
 191
 192        reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
 193                                1, /* Writes Non-cacheable */
 194                                0, /* EQCR_CI stashing threshold */
 195                                3, /* RPM: Valid bit mode, RCR in array mode */
 196                                2, /* DCM: Discrete consumption ack mode */
 197                                3, /* EPM: Valid bit mode, EQCR in array mode */
 198                                1, /* mem stashing drop enable == TRUE */
 199                                1, /* mem stashing priority == TRUE */
 200                                1, /* mem stashing enable == TRUE */
 201                                1, /* dequeue stashing priority == TRUE */
 202                                0, /* dequeue stashing enable == FALSE */
 203                                0); /* EQCR_CI stashing priority == FALSE */
 204        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
 205                reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
 206                       1 << SWP_CFG_VPM_SHIFT |  /* VDQCR read triggered mode */
 207                       1 << SWP_CFG_CPM_SHIFT;   /* CR read triggered mode */
 208
 209        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
 210        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
 211        if (!reg) {
 212                pr_err("qbman: the portal is not enabled!\n");
 213                kfree(p);
 214                return NULL;
 215        }
 216
 217        if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
 218                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
 219                qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
 220        }
 221        /*
 222         * SDQCR needs to be initialized to 0 when no channels are
 223         * being dequeued from or else the QMan HW will indicate an
 224         * error.  The values that were calculated above will be
 225         * applied when dequeues from a specific channel are enabled.
 226         */
 227        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
 228        return p;
 229}
 230
 231/**
  232 * qbman_swp_finish() - Destroy the functional object representing the given
  233 *                      QBMan portal descriptor.
 234 * @p: the qbman_swp object to be destroyed
 235 */
 236void qbman_swp_finish(struct qbman_swp *p)
 237{
 238        kfree(p);
 239}
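
/*
 * Illustrative sketch (not part of the original driver): bringing up and
 * tearing down a portal with the two functions above.  The caller is assumed
 * to have populated a struct qbman_swp_desc (declared in qbman-portal.h) with
 * at least the fields consumed by qbman_swp_init(): cena_bar, cinh_bar and
 * qman_version.  Note that qbman_swp_init() keeps the descriptor pointer, so
 * the descriptor must stay valid for the lifetime of the portal.
 */
static __maybe_unused int example_portal_bringup(struct qbman_swp_desc *d)
{
        struct qbman_swp *swp;

        swp = qbman_swp_init(d);
        if (!swp)
                return -ENODEV;

        /* ... enqueue/dequeue through the portal here ... */

        qbman_swp_finish(swp);
        return 0;
}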
 240
 241/**
 242 * qbman_swp_interrupt_read_status()
 243 * @p: the given software portal
 244 *
 245 * Return the value in the SWP_ISR register.
 246 */
 247u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
 248{
 249        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
 250}
 251
 252/**
 253 * qbman_swp_interrupt_clear_status()
 254 * @p: the given software portal
 255 * @mask: The mask to clear in SWP_ISR register
 256 */
 257void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
 258{
 259        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
 260}
 261
 262/**
 263 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 264 * @p: the given software portal
 265 *
 266 * Return the value in the SWP_IER register.
 267 */
 268u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
 269{
 270        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
 271}
 272
 273/**
 274 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 275 * @p: the given software portal
 276 * @mask: The mask of bits to enable in SWP_IER
 277 */
 278void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
 279{
 280        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
 281}
 282
 283/**
 284 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 285 * @p: the given software portal object
 286 *
 287 * Return the value in the SWP_IIR register.
 288 */
 289int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
 290{
 291        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
 292}
 293
 294/**
 295 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 296 * @p: the given software portal object
  297 * @inhibit: set non-zero to inhibit (mask) all interrupts from this portal
 298 */
 299void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
 300{
 301        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
 302}
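
/*
 * Illustrative sketch (not part of the original driver): the usual shape of
 * portal interrupt handling built on the four accessors above.  Returns
 * non-zero if this portal had raised the interrupt.  How the dequeue work is
 * then scheduled is left out; the real DPIO driver does that elsewhere.
 */
static __maybe_unused int example_swp_handle_irq(struct qbman_swp *p)
{
        u32 status = qbman_swp_interrupt_read_status(p);

        if (!status)
                return 0;       /* not this portal */

        /* Mask further portal interrupts while the causes are processed */
        qbman_swp_interrupt_set_inhibit(p, 1);

        /* Acknowledge the causes that were observed */
        qbman_swp_interrupt_clear_status(p, status);

        /* ... drain the DQRR / handle notifications here ... */

        /* Allow portal interrupts again */
        qbman_swp_interrupt_set_inhibit(p, 0);

        return 1;
}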
 303
 304/*
 305 * Different management commands all use this common base layer of code to issue
 306 * commands and poll for results.
 307 */
 308
 309/*
 310 * Returns a pointer to where the caller should fill in their management command
 311 * (caller should ignore the verb byte)
 312 */
 313void *qbman_swp_mc_start(struct qbman_swp *p)
 314{
 315        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
 316                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
 317        else
 318                return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
 319}
 320
 321/*
  322 * Merges in the caller-supplied command verb (which should not include
 323 * the valid-bit) and submits the command to hardware
 324 */
 325void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
 326{
 327        u8 *v = cmd;
 328
 329        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 330                dma_wmb();
 331                *v = cmd_verb | p->mc.valid_bit;
 332        } else {
 333                *v = cmd_verb | p->mc.valid_bit;
 334                dma_wmb();
 335                qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
 336        }
 337}
 338
 339/*
  340 * Checks for a completed response (returns non-NULL if and only if the response
 341 * is complete).
 342 */
 343void *qbman_swp_mc_result(struct qbman_swp *p)
 344{
 345        u32 *ret, verb;
 346
 347        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 348                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
 349                /* Remove the valid-bit - command completed if the rest
 350                 * is non-zero.
 351                 */
 352                verb = ret[0] & ~QB_VALID_BIT;
 353                if (!verb)
 354                        return NULL;
 355                p->mc.valid_bit ^= QB_VALID_BIT;
 356        } else {
 357                ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
 358                /* Command completed if the valid bit is toggled */
 359                if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
 360                        return NULL;
 361                /* Command completed if the rest is non-zero */
 362                verb = ret[0] & ~QB_VALID_BIT;
 363                if (!verb)
 364                        return NULL;
 365                p->mr.valid_bit ^= QB_VALID_BIT;
 366        }
 367
 368        return ret;
 369}
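
/*
 * Illustrative sketch (not part of the original driver): how the three
 * primitives above are typically combined into a complete management command.
 * The qbman_swp_mc_complete() helper used by the commands later in this file
 * follows the same start / fill / submit / poll shape; the retry bound below
 * is an arbitrary value for the sketch, not that helper's actual timeout.
 */
static __maybe_unused void *example_mc_command(struct qbman_swp *p, u8 cmd_verb)
{
        void *cmd, *rslt;
        int retries = 1000;

        cmd = qbman_swp_mc_start(p);
        if (!cmd)
                return NULL;

        /* ... fill in the command body here, leaving the verb byte alone ... */

        qbman_swp_mc_submit(p, cmd, cmd_verb);

        /* Poll until the hardware writes back a completed response */
        do {
                rslt = qbman_swp_mc_result(p);
        } while (!rslt && --retries);

        return rslt;
}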
 370
 371#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
 372enum qb_enqueue_commands {
 373        enqueue_empty = 0,
 374        enqueue_response_always = 1,
 375        enqueue_rejects_to_fq = 2
 376};
 377
 378#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
 379#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
 380#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4
 381
 382/**
 383 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 384 *                         default/starting state.
 385 */
 386void qbman_eq_desc_clear(struct qbman_eq_desc *d)
 387{
 388        memset(d, 0, sizeof(*d));
 389}
 390
 391/**
 392 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 393 * @d:                the enqueue descriptor.
  394 * @respond_success:  1 = enqueue with response always; 0 = enqueue with
 395 *                    rejections returned on a FQ.
 396 */
 397void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
 398{
 399        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
 400        if (respond_success)
 401                d->verb |= enqueue_response_always;
 402        else
 403                d->verb |= enqueue_rejects_to_fq;
 404}
 405
 406/*
 407 * Exactly one of the following descriptor "targets" should be set. (Calling any
 408 * one of these will replace the effect of any prior call to one of these.)
 409 *   -enqueue to a frame queue
 410 *   -enqueue to a queuing destination
 411 */
 412
 413/**
 414 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 415 * @d:    the enqueue descriptor
 416 * @fqid: the id of the frame queue to be enqueued
 417 */
 418void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
 419{
 420        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
 421        d->tgtid = cpu_to_le32(fqid);
 422}
 423
 424/**
 425 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 426 * @d:       the enqueue descriptor
 427 * @qdid:    the id of the queuing destination to be enqueued
 428 * @qd_bin:  the queuing destination bin
 429 * @qd_prio: the queuing destination priority
 430 */
 431void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
 432                          u32 qd_bin, u32 qd_prio)
 433{
 434        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
 435        d->tgtid = cpu_to_le32(qdid);
 436        d->qdbin = cpu_to_le16(qd_bin);
 437        d->qpri = qd_prio;
 438}
 439
 440#define EQAR_IDX(eqar)     ((eqar) & 0x7)
 441#define EQAR_VB(eqar)      ((eqar) & 0x80)
 442#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
 443
 444static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
 445                                                   u8 idx)
 446{
 447        if (idx < 16)
 448                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
 449                                     QMAN_RT_MODE);
 450        else
 451                qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
 452                                     (idx - 16) * 4,
 453                                     QMAN_RT_MODE);
 454}
 455
 456/**
 457 * qbman_swp_enqueue() - Issue an enqueue command
 458 * @s:  the software portal used for enqueue
 459 * @d:  the enqueue descriptor
 460 * @fd: the frame descriptor to be enqueued
 461 *
 462 * Please note that 'fd' should only be NULL if the "action" of the
 463 * descriptor is "orp_hole" or "orp_nesn".
 464 *
 465 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 466 */
 467int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
 468                      const struct dpaa2_fd *fd)
 469{
 470        struct qbman_eq_desc *p;
 471        u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
 472
 473        if (!EQAR_SUCCESS(eqar))
 474                return -EBUSY;
 475
 476        p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
 477        memcpy(&p->dca, &d->dca, 31);
 478        memcpy(&p->fd, fd, sizeof(*fd));
 479
 480        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 481                /* Set the verb byte, have to substitute in the valid-bit */
 482                dma_wmb();
 483                p->verb = d->verb | EQAR_VB(eqar);
 484        } else {
 485                p->verb = d->verb | EQAR_VB(eqar);
 486                dma_wmb();
 487                qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
 488        }
 489
 490        return 0;
 491}
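
/*
 * Illustrative sketch (not part of the original driver): enqueuing one frame
 * descriptor to a frame queue using the descriptor helpers above.  The bounded
 * retry on -EBUSY is an assumption about how a caller might handle a full
 * EQCR; real users may prefer to back off rather than spin.
 */
static __maybe_unused int example_enqueue_to_fq(struct qbman_swp *s, u32 fqid,
                                                const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc ed;
        int retries = 1000;
        int ret;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);       /* rejections go back on the FQ */
        qbman_eq_desc_set_fq(&ed, fqid);

        do {
                ret = qbman_swp_enqueue(s, &ed, fd);
        } while (ret == -EBUSY && --retries);

        return ret;
}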
 492
 493/* Static (push) dequeue */
 494
 495/**
 496 * qbman_swp_push_get() - Get the push dequeue setup
  497 * @s:           the software portal object
 498 * @channel_idx: the channel index to query
 499 * @enabled:     returned boolean to show whether the push dequeue is enabled
 500 *               for the given channel
 501 */
 502void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
 503{
 504        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
 505
 506        WARN_ON(channel_idx > 15);
  507        *enabled = !!(src & (1 << channel_idx));
 508}
 509
 510/**
 511 * qbman_swp_push_set() - Enable or disable push dequeue
  512 * @s:           the software portal object
 513 * @channel_idx: the channel index (0 to 15)
 514 * @enable:      enable or disable push dequeue
 515 */
 516void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
 517{
 518        u16 dqsrc;
 519
 520        WARN_ON(channel_idx > 15);
 521        if (enable)
 522                s->sdq |= 1 << channel_idx;
 523        else
 524                s->sdq &= ~(1 << channel_idx);
 525
  526        /* Read back the complete src map.  If no channels are enabled
 527         * the SDQCR must be 0 or else QMan will assert errors
 528         */
 529        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
 530        if (dqsrc != 0)
 531                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
 532        else
 533                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
 534}
 535
 536#define QB_VDQCR_VERB_DCT_SHIFT    0
 537#define QB_VDQCR_VERB_DT_SHIFT     2
 538#define QB_VDQCR_VERB_RLS_SHIFT    4
 539#define QB_VDQCR_VERB_WAE_SHIFT    5
 540
 541enum qb_pull_dt_e {
 542        qb_pull_dt_channel,
 543        qb_pull_dt_workqueue,
 544        qb_pull_dt_framequeue
 545};
 546
 547/**
 548 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 549 *                           default/starting state
 550 * @d: the pull dequeue descriptor to be cleared
 551 */
 552void qbman_pull_desc_clear(struct qbman_pull_desc *d)
 553{
 554        memset(d, 0, sizeof(*d));
 555}
 556
 557/**
  558 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 559 * @d:            the pull dequeue descriptor to be set
 560 * @storage:      the pointer of the memory to store the dequeue result
 561 * @storage_phys: the physical address of the storage memory
 562 * @stash:        to indicate whether write allocate is enabled
 563 *
  564 * If not called, or if called with 'storage' as NULL, the resulting pull dequeues
 565 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 566 * produced to the given memory location (using the DMA address which
 567 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 568 * those writes to main-memory express a cache-warming attribute.
 569 */
 570void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
 571                                 struct dpaa2_dq *storage,
 572                                 dma_addr_t storage_phys,
 573                                 int stash)
 574{
 575        /* save the virtual address */
 576        d->rsp_addr_virt = (u64)(uintptr_t)storage;
 577
 578        if (!storage) {
 579                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
 580                return;
 581        }
 582        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
 583        if (stash)
 584                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
 585        else
 586                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);
 587
 588        d->rsp_addr = cpu_to_le64(storage_phys);
 589}
 590
 591/**
 592 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 593 * @d:         the pull dequeue descriptor to be set
 594 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 595 */
 596void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
 597{
 598        d->numf = numframes - 1;
 599}
 600
 601/*
 602 * Exactly one of the following descriptor "actions" should be set. (Calling any
 603 * one of these will replace the effect of any prior call to one of these.)
 604 * - pull dequeue from the given frame queue (FQ)
 605 * - pull dequeue from any FQ in the given work queue (WQ)
 606 * - pull dequeue from any FQ in any WQ in the given channel
 607 */
 608
 609/**
 610 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 611 * @fqid: the frame queue index of the given FQ
 612 */
 613void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
 614{
 615        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
 616        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
 617        d->dq_src = cpu_to_le32(fqid);
 618}
 619
 620/**
 621 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 622 * @wqid: composed of channel id and wqid within the channel
 623 * @dct:  the dequeue command type
 624 */
 625void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
 626                            enum qbman_pull_type_e dct)
 627{
 628        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
 629        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
 630        d->dq_src = cpu_to_le32(wqid);
 631}
 632
 633/**
 634 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 635 *                                 dequeues
 636 * @chid: the channel id to be dequeued
 637 * @dct:  the dequeue command type
 638 */
 639void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
 640                                 enum qbman_pull_type_e dct)
 641{
 642        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
 643        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
 644        d->dq_src = cpu_to_le32(chid);
 645}
 646
 647/**
 648 * qbman_swp_pull() - Issue the pull dequeue command
 649 * @s: the software portal object
 650 * @d: the software portal descriptor which has been configured with
 651 *     the set of qbman_pull_desc_set_*() calls
 652 *
 653 * Return 0 for success, and -EBUSY if the software portal is not ready
 654 * to do pull dequeue.
 655 */
 656int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
 657{
 658        struct qbman_pull_desc *p;
 659
 660        if (!atomic_dec_and_test(&s->vdq.available)) {
 661                atomic_inc(&s->vdq.available);
 662                return -EBUSY;
 663        }
 664        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
 665        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
 666                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
 667        else
 668                p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
 669        p->numf = d->numf;
 670        p->tok = QMAN_DQ_TOKEN_VALID;
 671        p->dq_src = d->dq_src;
 672        p->rsp_addr = d->rsp_addr;
 673        p->rsp_addr_virt = d->rsp_addr_virt;
 674
 675        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 676                dma_wmb();
 677                /* Set the verb byte, have to substitute in the valid-bit */
 678                p->verb = d->verb | s->vdq.valid_bit;
 679                s->vdq.valid_bit ^= QB_VALID_BIT;
 680        } else {
 681                p->verb = d->verb | s->vdq.valid_bit;
 682                s->vdq.valid_bit ^= QB_VALID_BIT;
 683                dma_wmb();
 684                qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
 685        }
 686
 687        return 0;
 688}
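
/*
 * Illustrative sketch (not part of the original driver): issuing a volatile
 * (pull) dequeue of up to 8 frames from one frame queue into caller-provided
 * storage.  'storage' and 'storage_phys' are assumed to describe the same
 * DMA-able buffer of at least 8 struct dpaa2_dq entries; see the sketch after
 * qbman_result_has_new_result() below for how the results are then consumed.
 */
static __maybe_unused int example_pull_from_fq(struct qbman_swp *s, u32 fqid,
                                               struct dpaa2_dq *storage,
                                               dma_addr_t storage_phys)
{
        struct qbman_pull_desc pd;

        qbman_pull_desc_clear(&pd);
        qbman_pull_desc_set_storage(&pd, storage, storage_phys, 1);
        qbman_pull_desc_set_numframes(&pd, 8);
        qbman_pull_desc_set_fq(&pd, fqid);

        /* -EBUSY means a previous volatile dequeue is still in flight */
        return qbman_swp_pull(s, &pd);
}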
 689
 690#define QMAN_DQRR_PI_MASK   0xf
 691
 692/**
  693 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 694 * @s: the software portal object
 695 *
 696 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 697 * only once, so repeated calls can return a sequence of DQRR entries, without
 698 * requiring they be consumed immediately or in any particular order.
 699 */
 700const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
 701{
 702        u32 verb;
 703        u32 response_verb;
 704        u32 flags;
 705        struct dpaa2_dq *p;
 706
 707        /* Before using valid-bit to detect if something is there, we have to
 708         * handle the case of the DQRR reset bug...
 709         */
 710        if (unlikely(s->dqrr.reset_bug)) {
 711                /*
 712                 * We pick up new entries by cache-inhibited producer index,
 713                 * which means that a non-coherent mapping would require us to
 714                 * invalidate and read *only* once that PI has indicated that
 715                 * there's an entry here. The first trip around the DQRR ring
 716                 * will be much less efficient than all subsequent trips around
 717                 * it...
 718                 */
 719                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
 720                        QMAN_DQRR_PI_MASK;
 721
 722                /* there are new entries if pi != next_idx */
 723                if (pi == s->dqrr.next_idx)
 724                        return NULL;
 725
 726                /*
 727                 * if next_idx is/was the last ring index, and 'pi' is
 728                 * different, we can disable the workaround as all the ring
 729                 * entries have now been DMA'd to so valid-bit checking is
 730                 * repaired. Note: this logic needs to be based on next_idx
 731                 * (which increments one at a time), rather than on pi (which
 732                 * can burst and wrap-around between our snapshots of it).
 733                 */
 734                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
 735                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
 736                                 s->dqrr.next_idx, pi);
 737                        s->dqrr.reset_bug = 0;
 738                }
 739                prefetch(qbman_get_cmd(s,
 740                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
 741        }
 742
 743        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
 744                p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
 745        else
 746                p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
 747        verb = p->dq.verb;
 748
 749        /*
 750         * If the valid-bit isn't of the expected polarity, nothing there. Note,
  751 * in the DQRR reset bug workaround, we shouldn't need to skip this
 752         * check, because we've already determined that a new entry is available
 753         * and we've invalidated the cacheline before reading it, so the
 754         * valid-bit behaviour is repaired and should tell us what we already
 755         * knew from reading PI.
 756         */
 757        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
 758                prefetch(qbman_get_cmd(s,
 759                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
 760                return NULL;
 761        }
 762        /*
 763         * There's something there. Move "next_idx" attention to the next ring
 764         * entry (and prefetch it) before returning what we found.
 765         */
 766        s->dqrr.next_idx++;
 767        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
 768        if (!s->dqrr.next_idx)
 769                s->dqrr.valid_bit ^= QB_VALID_BIT;
 770
 771        /*
 772         * If this is the final response to a volatile dequeue command
 773         * indicate that the vdq is available
 774         */
 775        flags = p->dq.stat;
 776        response_verb = verb & QBMAN_RESULT_MASK;
 777        if ((response_verb == QBMAN_RESULT_DQ) &&
 778            (flags & DPAA2_DQ_STAT_VOLATILE) &&
 779            (flags & DPAA2_DQ_STAT_EXPIRED))
 780                atomic_inc(&s->vdq.available);
 781
 782        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
 783
 784        return p;
 785}
 786
 787/**
 788 * qbman_swp_dqrr_consume() -  Consume DQRR entries previously returned from
 789 *                             qbman_swp_dqrr_next().
 790 * @s: the software portal object
 791 * @dq: the DQRR entry to be consumed
 792 */
 793void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
 794{
 795        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
 796}
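
/*
 * Illustrative sketch (not part of the original driver): draining the DQRR
 * once push dequeue has been enabled on a channel with qbman_swp_push_set().
 * Frame handling itself is left as a comment; dpaa2_dq_fd() is the accessor
 * provided by soc/fsl/dpaa2-global.h.
 */
static __maybe_unused int example_poll_dqrr(struct qbman_swp *s)
{
        const struct dpaa2_dq *dq;
        int cleaned = 0;

        while ((dq = qbman_swp_dqrr_next(s)) != NULL) {
                /* ... hand dpaa2_dq_fd(dq) to the frame consumer here ... */

                /* Tell QBMan the entry has been seen so the slot can be reused */
                qbman_swp_dqrr_consume(s, dq);
                cleaned++;
        }

        return cleaned;
}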
 797
 798/**
 799 * qbman_result_has_new_result() - Check and get the dequeue response from the
  800 *                                 dq storage memory set in the pull dequeue command
 801 * @s: the software portal object
 802 * @dq: the dequeue result read from the memory
 803 *
 804 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 805 * dequeue result.
 806 *
 807 * Only used for user-provided storage of dequeue results, not DQRR. For
 808 * efficiency purposes, the driver will perform any required endianness
 809 * conversion to ensure that the user's dequeue result storage is in host-endian
 810 * format. As such, once the user has called qbman_result_has_new_result() and
 811 * been returned a valid dequeue result, they should not call it again on
 812 * the same memory location (except of course if another dequeue command has
 813 * been executed to produce a new result to that location).
 814 */
 815int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
 816{
 817        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
 818                return 0;
 819
 820        /*
 821         * Set token to be 0 so we will detect change back to 1
 822         * next time the looping is traversed. Const is cast away here
 823         * as we want users to treat the dequeue responses as read only.
 824         */
 825        ((struct dpaa2_dq *)dq)->dq.tok = 0;
 826
 827        /*
 828         * Determine whether VDQCR is available based on whether the
 829         * current result is sitting in the first storage location of
 830         * the busy command.
 831         */
 832        if (s->vdq.storage == dq) {
 833                s->vdq.storage = NULL;
 834                atomic_inc(&s->vdq.available);
 835        }
 836
 837        return 1;
 838}
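
/*
 * Illustrative sketch (not part of the original driver): consuming the results
 * of a volatile dequeue that was issued into user storage with qbman_swp_pull()
 * (see the sketch after that function above).  The flag and accessor names
 * (DPAA2_DQ_STAT_VALIDFRAME, dpaa2_dq_flags(), dpaa2_dq_is_pull_complete())
 * come from soc/fsl/dpaa2-global.h.
 */
static __maybe_unused int example_drain_pull_storage(struct qbman_swp *s,
                                                     struct dpaa2_dq *storage)
{
        struct dpaa2_dq *dq = storage;
        int cleaned = 0;

        for (;;) {
                /* Wait until QBMan has written this response back to memory */
                while (!qbman_result_has_new_result(s, dq))
                        ;       /* a real caller would bound this or sleep */

                /* Responses that carry a frame go to the consumer */
                if (dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VALIDFRAME) {
                        /* ... process dpaa2_dq_fd(dq) here ... */
                        cleaned++;
                }

                /* The final response of the command is flagged as complete */
                if (dpaa2_dq_is_pull_complete(dq))
                        break;

                dq++;
        }

        return cleaned;
}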
 839
 840/**
 841 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 842 *                              default/starting state.
 843 */
 844void qbman_release_desc_clear(struct qbman_release_desc *d)
 845{
 846        memset(d, 0, sizeof(*d));
 847        d->verb = 1 << 5; /* Release Command Valid */
 848}
 849
 850/**
 851 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 852 */
 853void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
 854{
 855        d->bpid = cpu_to_le16(bpid);
 856}
 857
 858/**
 859 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 860 * interrupt source should be asserted after the release command is completed.
 861 */
 862void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
 863{
 864        if (enable)
 865                d->verb |= 1 << 6;
 866        else
 867                d->verb &= ~(1 << 6);
 868}
 869
 870#define RAR_IDX(rar)     ((rar) & 0x7)
 871#define RAR_VB(rar)      ((rar) & 0x80)
 872#define RAR_SUCCESS(rar) ((rar) & 0x100)
 873
 874/**
 875 * qbman_swp_release() - Issue a buffer release command
 876 * @s:           the software portal object
 877 * @d:           the release descriptor
  878 * @buffers:     a pointer to the buffer addresses to be released
  879 * @num_buffers: number of buffers to be released, must be less than 8
 880 *
 881 * Return 0 for success, -EBUSY if the release command ring is not ready.
 882 */
 883int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
 884                      const u64 *buffers, unsigned int num_buffers)
 885{
 886        int i;
 887        struct qbman_release_desc *p;
 888        u32 rar;
 889
 890        if (!num_buffers || (num_buffers > 7))
 891                return -EINVAL;
 892
 893        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
 894        if (!RAR_SUCCESS(rar))
 895                return -EBUSY;
 896
 897        /* Start the release command */
 898        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
 899                p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
 900        else
 901                p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
 902        /* Copy the caller's buffer pointers to the command */
 903        for (i = 0; i < num_buffers; i++)
 904                p->buf[i] = cpu_to_le64(buffers[i]);
 905        p->bpid = d->bpid;
 906
 907        if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
 908                /*
 909                 * Set the verb byte, have to substitute in the valid-bit
 910                 * and the number of buffers.
 911                 */
 912                dma_wmb();
 913                p->verb = d->verb | RAR_VB(rar) | num_buffers;
 914        } else {
 915                p->verb = d->verb | RAR_VB(rar) | num_buffers;
 916                dma_wmb();
 917                qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
 918                                     RAR_IDX(rar)  * 4, QMAN_RT_MODE);
 919        }
 920
 921        return 0;
 922}
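
/*
 * Illustrative sketch (not part of the original driver): returning a small
 * batch of buffers (1 to 7 addresses) to a buffer pool, retrying while the
 * release command ring is busy.  The bounded spin is an assumption; callers
 * may prefer to defer the release instead.
 */
static __maybe_unused int example_release_buffers(struct qbman_swp *s, u16 bpid,
                                                  const u64 *buffers,
                                                  unsigned int num)
{
        struct qbman_release_desc rd;
        int retries = 1000;
        int ret;

        qbman_release_desc_clear(&rd);
        qbman_release_desc_set_bpid(&rd, bpid);
        qbman_release_desc_set_rcdi(&rd, 0);    /* no RCDI interrupt */

        do {
                ret = qbman_swp_release(s, &rd, buffers, num);
        } while (ret == -EBUSY && --retries);

        return ret;
}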
 923
 924struct qbman_acquire_desc {
 925        u8 verb;
 926        u8 reserved;
 927        __le16 bpid;
 928        u8 num;
 929        u8 reserved2[59];
 930};
 931
 932struct qbman_acquire_rslt {
 933        u8 verb;
 934        u8 rslt;
 935        __le16 reserved;
 936        u8 num;
 937        u8 reserved2[3];
 938        __le64 buf[7];
 939};
 940
 941/**
 942 * qbman_swp_acquire() - Issue a buffer acquire command
 943 * @s:           the software portal object
 944 * @bpid:        the buffer pool index
  945 * @buffers:     a pointer to the array that receives the acquired buffer addresses
 946 * @num_buffers: number of buffers to be acquired, must be less than 8
 947 *
  948 * Return the number of buffers acquired if the command succeeds, or a
  949 * negative error code if the acquire command fails.
 950 */
 951int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
 952                      unsigned int num_buffers)
 953{
 954        struct qbman_acquire_desc *p;
 955        struct qbman_acquire_rslt *r;
 956        int i;
 957
 958        if (!num_buffers || (num_buffers > 7))
 959                return -EINVAL;
 960
 961        /* Start the management command */
 962        p = qbman_swp_mc_start(s);
 963
 964        if (!p)
 965                return -EBUSY;
 966
 967        /* Encode the caller-provided attributes */
 968        p->bpid = cpu_to_le16(bpid);
 969        p->num = num_buffers;
 970
 971        /* Complete the management command */
 972        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
 973        if (unlikely(!r)) {
 974                pr_err("qbman: acquire from BPID %d failed, no response\n",
 975                       bpid);
 976                return -EIO;
 977        }
 978
 979        /* Decode the outcome */
 980        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);
 981
 982        /* Determine success or failure */
 983        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
 984                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
 985                       bpid, r->rslt);
 986                return -EIO;
 987        }
 988
 989        WARN_ON(r->num > num_buffers);
 990
 991        /* Copy the acquired buffers to the caller's array */
 992        for (i = 0; i < r->num; i++)
 993                buffers[i] = le64_to_cpu(r->buf[i]);
 994
 995        return (int)r->num;
 996}
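
/*
 * Illustrative sketch (not part of the original driver): pulling up to seven
 * buffer addresses back out of a pool.  On success the function above returns
 * how many buffers were actually acquired, which may be fewer than requested.
 */
static __maybe_unused int example_acquire_buffers(struct qbman_swp *s, u16 bpid)
{
        u64 bufs[7];
        int n;

        n = qbman_swp_acquire(s, bpid, bufs, 7);
        if (n <= 0)
                return n;       /* error, or nothing left in the pool */

        /* ... hand bufs[0..n-1] over to the caller's buffer management ... */

        return n;
}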
 997
 998struct qbman_alt_fq_state_desc {
 999        u8 verb;
1000        u8 reserved[3];
1001        __le32 fqid;
1002        u8 reserved2[56];
1003};
1004
1005struct qbman_alt_fq_state_rslt {
1006        u8 verb;
1007        u8 rslt;
1008        u8 reserved[62];
1009};
1010
1011#define ALT_FQ_FQID_MASK 0x00FFFFFF
1012
1013int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
1014                           u8 alt_fq_verb)
1015{
1016        struct qbman_alt_fq_state_desc *p;
1017        struct qbman_alt_fq_state_rslt *r;
1018
1019        /* Start the management command */
1020        p = qbman_swp_mc_start(s);
1021        if (!p)
1022                return -EBUSY;
1023
1024        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);
1025
1026        /* Complete the management command */
1027        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
1028        if (unlikely(!r)) {
1029                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
1030                       alt_fq_verb);
1031                return -EIO;
1032        }
1033
1034        /* Decode the outcome */
1035        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);
1036
1037        /* Determine success or failure */
1038        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1039                pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
1040                       fqid, r->verb, r->rslt);
1041                return -EIO;
1042        }
1043
1044        return 0;
1045}
1046
1047struct qbman_cdan_ctrl_desc {
1048        u8 verb;
1049        u8 reserved;
1050        __le16 ch;
1051        u8 we;
1052        u8 ctrl;
1053        __le16 reserved2;
1054        __le64 cdan_ctx;
1055        u8 reserved3[48];
1056
1057};
1058
1059struct qbman_cdan_ctrl_rslt {
1060        u8 verb;
1061        u8 rslt;
1062        __le16 ch;
1063        u8 reserved[60];
1064};
1065
1066int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
1067                       u8 we_mask, u8 cdan_en,
1068                       u64 ctx)
1069{
1070        struct qbman_cdan_ctrl_desc *p = NULL;
1071        struct qbman_cdan_ctrl_rslt *r = NULL;
1072
1073        /* Start the management command */
1074        p = qbman_swp_mc_start(s);
1075        if (!p)
1076                return -EBUSY;
1077
1078        /* Encode the caller-provided attributes */
1079        p->ch = cpu_to_le16(channelid);
1080        p->we = we_mask;
1081        if (cdan_en)
1082                p->ctrl = 1;
1083        else
1084                p->ctrl = 0;
1085        p->cdan_ctx = cpu_to_le64(ctx);
1086
1087        /* Complete the management command */
1088        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
1089        if (unlikely(!r)) {
1090                pr_err("qbman: wqchan config failed, no response\n");
1091                return -EIO;
1092        }
1093
1094        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);
1095
1096        /* Determine success or failure */
1097        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
1098                pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
1099                       channelid, r->rslt);
1100                return -EIO;
1101        }
1102
1103        return 0;
1104}
1105
1106#define QBMAN_RESPONSE_VERB_MASK        0x7f
1107#define QBMAN_FQ_QUERY_NP               0x45
1108#define QBMAN_BP_QUERY                  0x32
1109
1110struct qbman_fq_query_desc {
1111        u8 verb;
1112        u8 reserved[3];
1113        __le32 fqid;
1114        u8 reserved2[56];
1115};
1116
1117int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
1118                         struct qbman_fq_query_np_rslt *r)
1119{
1120        struct qbman_fq_query_desc *p;
1121        void *resp;
1122
1123        p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
1124        if (!p)
1125                return -EBUSY;
1126
1127        /* FQID is a 24 bit value */
1128        p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
1129        resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
1130        if (!resp) {
1131                pr_err("qbman: Query FQID %d NP fields failed, no response\n",
1132                       fqid);
1133                return -EIO;
1134        }
1135        *r = *(struct qbman_fq_query_np_rslt *)resp;
1136        /* Decode the outcome */
1137        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
1138
1139        /* Determine success or failure */
1140        if (r->rslt != QBMAN_MC_RSLT_OK) {
1141                pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
 1142                       fqid, r->rslt);
1143                return -EIO;
1144        }
1145
1146        return 0;
1147}
1148
1149u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
1150{
1151        return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
1152}
1153
1154u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
1155{
1156        return le32_to_cpu(r->byte_cnt);
1157}
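
/*
 * Illustrative sketch (not part of the original driver): reading the current
 * backlog of a frame queue via the query above and the two accessors.
 */
static __maybe_unused int example_fq_backlog(struct qbman_swp *s, u32 fqid,
                                             u32 *frames, u32 *bytes)
{
        struct qbman_fq_query_np_rslt state;
        int ret;

        ret = qbman_fq_query_state(s, fqid, &state);
        if (ret)
                return ret;

        *frames = qbman_fq_state_frame_count(&state);
        *bytes = qbman_fq_state_byte_count(&state);

        return 0;
}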
1158
1159struct qbman_bp_query_desc {
1160        u8 verb;
1161        u8 reserved;
1162        __le16 bpid;
1163        u8 reserved2[60];
1164};
1165
1166int qbman_bp_query(struct qbman_swp *s, u16 bpid,
1167                   struct qbman_bp_query_rslt *r)
1168{
1169        struct qbman_bp_query_desc *p;
1170        void *resp;
1171
1172        p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
1173        if (!p)
1174                return -EBUSY;
1175
1176        p->bpid = cpu_to_le16(bpid);
1177        resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
1178        if (!resp) {
1179                pr_err("qbman: Query BPID %d fields failed, no response\n",
1180                       bpid);
1181                return -EIO;
1182        }
1183        *r = *(struct qbman_bp_query_rslt *)resp;
1184        /* Decode the outcome */
1185        WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
1186
1187        /* Determine success or failure */
1188        if (r->rslt != QBMAN_MC_RSLT_OK) {
1189                pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
1190                       bpid, r->rslt);
1191                return -EIO;
1192        }
1193
1194        return 0;
1195}
1196
1197u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
1198{
1199        return le32_to_cpu(a->fill);
1200}
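
/*
 * Illustrative sketch (not part of the original driver): checking how many
 * free buffers a pool currently holds, for example to decide whether it
 * needs to be reseeded.
 */
static __maybe_unused int example_bp_free_count(struct qbman_swp *s, u16 bpid,
                                                u32 *num_free)
{
        struct qbman_bp_query_rslt state;
        int ret;

        ret = qbman_bp_query(s, bpid, &state);
        if (ret)
                return ret;

        *num_free = qbman_bp_info_num_free_bufs(&state);

        return 0;
}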
1201