linux/drivers/staging/fsl-mc/bus/dpio/qbman-portal.c
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "../../include/dpaa2-global.h"

#include "qbman-portal.h"

#define QMAN_REV_4000   0x04000000
#define QMAN_REV_4100   0x04010000
#define QMAN_REV_4101   0x04010001
#define QMAN_REV_MASK   0xffff0000

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE       0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_DQPI    0xa00
#define QBMAN_CINH_SWP_DCAP    0xac0
#define QBMAN_CINH_SWP_SDQCR   0xb00
#define QBMAN_CINH_SWP_RAR     0xcc0
#define QBMAN_CINH_SWP_ISR     0xe00
#define QBMAN_CINH_SWP_IER     0xe40
#define QBMAN_CINH_SWP_ISDR    0xe80
#define QBMAN_CINH_SWP_IIR     0xec0

/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_RCR(n)  (0x400 + ((u32)(n) << 6))
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780

/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)

/* Define token used to determine if response written to memory is valid */
#define QMAN_DQ_TOKEN_VALID 1

/* SDQCR attribute codes */
#define QB_SDQCR_FC_SHIFT   29
#define QB_SDQCR_FC_MASK    0x1
#define QB_SDQCR_DCT_SHIFT  24
#define QB_SDQCR_DCT_MASK   0x3
#define QB_SDQCR_TOK_SHIFT  16
#define QB_SDQCR_TOK_MASK   0xff
#define QB_SDQCR_SRC_SHIFT  0
#define QB_SDQCR_SRC_MASK   0xffff

/* Opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
        qbman_sdqcr_dct_active_ics,
        qbman_sdqcr_dct_active
};

enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_one = 0,
        qbman_sdqcr_fc_up_to_3 = 1
};

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
{
        return readl_relaxed(p->addr_cinh + offset);
}

static inline void qbman_write_register(struct qbman_swp *p, u32 offset,
                                        u32 value)
{
        writel_relaxed(value, p->addr_cinh + offset);
}

static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
{
        return p->addr_cena + offset;
}

#define QBMAN_CINH_SWP_CFG   0xd00

#define SWP_CFG_DQRR_MF_SHIFT 20
#define SWP_CFG_EST_SHIFT     16
#define SWP_CFG_WN_SHIFT      14
#define SWP_CFG_RPM_SHIFT     12
#define SWP_CFG_DCM_SHIFT     10
#define SWP_CFG_EPM_SHIFT     8
#define SWP_CFG_SD_SHIFT      5
#define SWP_CFG_SP_SHIFT      4
#define SWP_CFG_SE_SHIFT      3
#define SWP_CFG_DP_SHIFT      2
#define SWP_CFG_DE_SHIFT      1
#define SWP_CFG_EP_SHIFT      0

static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
                                    u8 epm, int sd, int sp, int se,
                                    int dp, int de, int ep)
{
        return (max_fill << SWP_CFG_DQRR_MF_SHIFT |
                est << SWP_CFG_EST_SHIFT |
                wn << SWP_CFG_WN_SHIFT |
                rpm << SWP_CFG_RPM_SHIFT |
                dcm << SWP_CFG_DCM_SHIFT |
                epm << SWP_CFG_EPM_SHIFT |
                sd << SWP_CFG_SD_SHIFT |
                sp << SWP_CFG_SP_SHIFT |
                se << SWP_CFG_SE_SHIFT |
                dp << SWP_CFG_DP_SHIFT |
                de << SWP_CFG_DE_SHIFT |
                ep << SWP_CFG_EP_SHIFT);
}

/**
 * qbman_swp_init() - Create a functional object representing the given
 *                    QBMan portal descriptor.
 * @d: the given qbman swp descriptor
 *
 * Return the qbman_swp portal object for success, NULL if the object cannot
 * be created.
 */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
{
        struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
        u32 reg;

        if (!p)
                return NULL;
        p->desc = d;
        p->mc.valid_bit = QB_VALID_BIT;
        p->sdq = 0;
        p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
        p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
        p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;

        atomic_set(&p->vdq.available, 1);
        p->vdq.valid_bit = QB_VALID_BIT;
        p->dqrr.next_idx = 0;
        p->dqrr.valid_bit = QB_VALID_BIT;

        if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_4100) {
                p->dqrr.dqrr_size = 4;
                p->dqrr.reset_bug = 1;
        } else {
                p->dqrr.dqrr_size = 8;
                p->dqrr.reset_bug = 0;
        }

        p->addr_cena = d->cena_bar;
        p->addr_cinh = d->cinh_bar;

        reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
                                1, /* Writes Non-cacheable */
                                0, /* EQCR_CI stashing threshold */
                                3, /* RPM: Valid bit mode, RCR in array mode */
                                2, /* DCM: Discrete consumption ack mode */
                                3, /* EPM: Valid bit mode, EQCR in array mode */
                                0, /* mem stashing drop enable == FALSE */
                                1, /* mem stashing priority == TRUE */
                                0, /* mem stashing enable == FALSE */
                                1, /* dequeue stashing priority == TRUE */
                                0, /* dequeue stashing enable == FALSE */
                                0); /* EQCR_CI stashing priority == FALSE */

        qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
        reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
        if (!reg) {
                pr_err("qbman: the portal is not enabled!\n");
                /* don't leak the portal object when bailing out */
                kfree(p);
                return NULL;
        }

        /*
         * SDQCR needs to be initialized to 0 when no channels are
         * being dequeued from or else the QMan HW will indicate an
         * error.  The values that were calculated above will be
         * applied when dequeues from a specific channel are enabled.
         */
        qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
        return p;
}
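
/*
 * Usage sketch (illustrative only, not part of this driver): bringing up a
 * portal from a descriptor. The descriptor fields are assumed to be filled
 * in by the caller from its own DPIO probe data.
 *
 *        struct qbman_swp_desc pd;
 *        struct qbman_swp *swp;
 *
 *        pd.cena_bar = cena_va;          // cache-enabled portal mapping
 *        pd.cinh_bar = cinh_va;          // cache-inhibited portal mapping
 *        pd.qman_version = qman_rev;     // e.g. from the DPIO attributes
 *
 *        swp = qbman_swp_init(&pd);
 *        if (!swp)
 *                return -ENODEV;
 *        ...use the portal...
 *        qbman_swp_finish(swp);
 */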

/**
 * qbman_swp_finish() - Destroy the functional object representing the given
 *                      QBMan portal descriptor.
 * @p: the qbman_swp object to be destroyed
 */
void qbman_swp_finish(struct qbman_swp *p)
{
        kfree(p);
}

/**
 * qbman_swp_interrupt_read_status() - read the interrupt status register
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}

/**
 * qbman_swp_interrupt_clear_status() - clear interrupt status bits
 * @p: the given software portal
 * @mask: the mask of bits to clear in the SWP_ISR register
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}

/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}

/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: the mask of bits to enable in the SWP_IER register
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}

/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
        return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}

/**
 * qbman_swp_interrupt_set_inhibit() - write interrupt mask register
 * @p: the given software portal object
 * @inhibit: non-zero to mask (inhibit) all portal interrupts, zero to unmask
 */
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
        qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
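
/*
 * Usage sketch (illustrative only, not part of this driver): a typical portal
 * interrupt handler reads the status, inhibits further interrupts while
 * deferred processing runs, and acknowledges what it saw. 'swp' is assumed
 * to come from the caller's context.
 *
 *        u32 status = qbman_swp_interrupt_read_status(swp);
 *
 *        if (!status)
 *                return IRQ_NONE;
 *        qbman_swp_interrupt_set_inhibit(swp, 1);        // mask until polling is done
 *        qbman_swp_interrupt_clear_status(swp, status);  // write-1-to-clear ack
 *        ...schedule deferred dequeue processing...
 *        return IRQ_HANDLED;
 */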

/*
 * Different management commands all use this common base layer of code to issue
 * commands and poll for results.
 */

/*
 * Returns a pointer to where the caller should fill in their management command
 * (caller should ignore the verb byte)
 */
void *qbman_swp_mc_start(struct qbman_swp *p)
{
        return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
}

/*
 * Commits the command to hardware: merges in the caller-supplied command verb
 * (which should not include the valid-bit) and submits.
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
        u8 *v = cmd;

        dma_wmb();
        *v = cmd_verb | p->mc.valid_bit;
}

/*
 * Checks for a completed response (returns non-NULL if and only if the
 * response is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
        u32 *ret, verb;

        ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));

        /* Remove the valid-bit - command completed if the rest is non-zero */
        verb = ret[0] & ~QB_VALID_BIT;
        if (!verb)
                return NULL;
        p->mc.valid_bit ^= QB_VALID_BIT;
        return ret;
}
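
/*
 * Usage sketch (illustrative, assuming the qbman_swp_mc_complete() helper used
 * later in this file is built on the three primitives above): issue a command
 * and spin until the response verb becomes non-zero.
 *
 *        void *cmd = qbman_swp_mc_start(p);
 *        void *rsp;
 *
 *        if (!cmd)
 *                return -EBUSY;
 *        ...fill in command fields, leaving the verb byte alone...
 *        qbman_swp_mc_submit(p, cmd, verb);
 *        do {
 *                rsp = qbman_swp_mc_result(p);
 *        } while (!rsp);         // a real caller would bound this loop
 */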

#define QB_ENQUEUE_CMD_OPTIONS_SHIFT    0
enum qb_enqueue_commands {
        enqueue_empty = 0,
        enqueue_response_always = 1,
        enqueue_rejects_to_fq = 2
};

#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT      2
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT     4

/**
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 *                         default/starting state.
 * @d: the enqueue descriptor to be cleared
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d:               the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 *                   rejections returned on a FQ.
 */
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
        if (respond_success)
                d->verb |= enqueue_response_always;
        else
                d->verb |= enqueue_rejects_to_fq;
}

/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 *   - enqueue to a frame queue
 *   - enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d:    the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
        d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
        d->tgtid = cpu_to_le32(fqid);
}

/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d:       the enqueue descriptor
 * @qdid:    the id of the queuing destination to be enqueued
 * @qd_bin:  the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
                          u32 qd_bin, u32 qd_prio)
{
        d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
        d->tgtid = cpu_to_le32(qdid);
        d->qdbin = cpu_to_le16(qd_bin);
        d->qpri = qd_prio;
}

#define EQAR_IDX(eqar)     ((eqar) & 0x7)
#define EQAR_VB(eqar)      ((eqar) & 0x80)
#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
                      const struct dpaa2_fd *fd)
{
        struct qbman_eq_desc *p;
        u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);

        if (!EQAR_SUCCESS(eqar))
                return -EBUSY;

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
        memcpy(&p->dca, &d->dca, 31);
        memcpy(&p->fd, fd, sizeof(*fd));

        /* Set the verb byte, have to substitute in the valid-bit */
        dma_wmb();
        p->verb = d->verb | EQAR_VB(eqar);

        return 0;
}
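
/*
 * Usage sketch (illustrative only, not part of this driver): enqueue one
 * frame to a frame queue, retrying while the EQCR is full. 'swp', 'fqid' and
 * 'fd' are assumed to come from the caller's context.
 *
 *        struct qbman_eq_desc ed;
 *        int ret;
 *
 *        qbman_eq_desc_clear(&ed);
 *        qbman_eq_desc_set_no_orp(&ed, 0);       // rejections go back to a FQ
 *        qbman_eq_desc_set_fq(&ed, fqid);
 *        do {
 *                ret = qbman_swp_enqueue(swp, &ed, fd);
 *        } while (ret == -EBUSY);        // a real caller would bound the retries
 */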

/* Static (push) dequeue */

/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s:           the software portal object
 * @channel_idx: the channel index to query
 * @enabled:     returned boolean to show whether the push dequeue is enabled
 *               for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
        u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

        WARN_ON(channel_idx > 15);
        /* test, rather than set, the channel bit in the source map */
        *enabled = src & (1 << channel_idx);
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s:           the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable:      enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
        u16 dqsrc;

        WARN_ON(channel_idx > 15);
        if (enable)
                s->sdq |= 1 << channel_idx;
        else
                s->sdq &= ~(1 << channel_idx);

        /* Read back the complete src map. If no channels are enabled
         * the SDQCR must be 0 or else QMan will assert errors
         */
        dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
        if (dqsrc != 0)
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
        else
                qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
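
/*
 * Usage sketch (illustrative only, not part of this driver): enable push
 * dequeue on one channel of the portal, then disable it again. 'swp' and the
 * channel index are assumed to come from the caller's context.
 *
 *        int enabled;
 *
 *        qbman_swp_push_set(swp, 0, 1);          // writes SDQCR with channel 0 set
 *        qbman_swp_push_get(swp, 0, &enabled);   // 'enabled' is now non-zero
 *        ...
 *        qbman_swp_push_set(swp, 0, 0);          // last channel off clears SDQCR
 */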

#define QB_VDQCR_VERB_DCT_SHIFT    0
#define QB_VDQCR_VERB_DT_SHIFT     2
#define QB_VDQCR_VERB_RLS_SHIFT    4
#define QB_VDQCR_VERB_WAE_SHIFT    5

enum qb_pull_dt_e {
        qb_pull_dt_channel,
        qb_pull_dt_workqueue,
        qb_pull_dt_framequeue
};

/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *                           default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
        memset(d, 0, sizeof(*d));
}

/**
 * qbman_pull_desc_set_storage() - Set the pull dequeue storage
 * @d:            the pull dequeue descriptor to be set
 * @storage:      the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash:        to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the resulting pull
 * dequeues will produce results to DQRR. If 'storage' is non-NULL, then results
 * are produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
                                 struct dpaa2_dq *storage,
                                 dma_addr_t storage_phys,
                                 int stash)
{
        /* save the virtual address */
        d->rsp_addr_virt = (u64)(uintptr_t)storage;

        if (!storage) {
                d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
                return;
        }
        d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
        if (stash)
                d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
        else
                d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

        d->rsp_addr = cpu_to_le64(storage_phys);
}

/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d:         the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
        d->numf = numframes - 1;
}

/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
        d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(fqid);
}

/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d:    the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
                            enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(wqid);
}

/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *                                 dequeues
 * @d:    the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct:  the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
                                 enum qbman_pull_type_e dct)
{
        d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
        d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
        d->dq_src = cpu_to_le32(chid);
}

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *     the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
{
        struct qbman_pull_desc *p;

        if (!atomic_dec_and_test(&s->vdq.available)) {
                atomic_inc(&s->vdq.available);
                return -EBUSY;
        }
        s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
        p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
        p->numf = d->numf;
        p->tok = QMAN_DQ_TOKEN_VALID;
        p->dq_src = d->dq_src;
        p->rsp_addr = d->rsp_addr;
        p->rsp_addr_virt = d->rsp_addr_virt;
        dma_wmb();

        /* Set the verb byte, have to substitute in the valid-bit */
        p->verb = d->verb | s->vdq.valid_bit;
        s->vdq.valid_bit ^= QB_VALID_BIT;

        return 0;
}
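
/*
 * Usage sketch (illustrative only, not part of this driver): issue a volatile
 * dequeue of up to 16 frames from one FQ into caller-provided storage. 'swp',
 * 'fqid', and the DMA-mapped 'store'/'store_phys' pair are assumed to come
 * from the caller's context.
 *
 *        struct qbman_pull_desc pd;
 *
 *        qbman_pull_desc_clear(&pd);
 *        qbman_pull_desc_set_storage(&pd, store, store_phys, 1);
 *        qbman_pull_desc_set_numframes(&pd, 16);
 *        qbman_pull_desc_set_fq(&pd, fqid);
 *        if (qbman_swp_pull(swp, &pd))
 *                return -EBUSY;  // a previous volatile dequeue is still running
 *        // then poll the storage with qbman_result_has_new_result() (below)
 */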

#define QMAN_DQRR_PI_MASK   0xf

/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring that they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
        u32 verb;
        u32 response_verb;
        u32 flags;
        struct dpaa2_dq *p;

        /* Before using valid-bit to detect if something is there, we have to
         * handle the case of the DQRR reset bug...
         */
        if (unlikely(s->dqrr.reset_bug)) {
                /*
                 * We pick up new entries by cache-inhibited producer index,
                 * which means that a non-coherent mapping would require us to
                 * invalidate and read *only* once that PI has indicated that
                 * there's an entry here. The first trip around the DQRR ring
                 * will be much less efficient than all subsequent trips around
                 * it...
                 */
                u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
                        QMAN_DQRR_PI_MASK;

                /* there are new entries if pi != next_idx */
                if (pi == s->dqrr.next_idx)
                        return NULL;

                /*
                 * if next_idx is/was the last ring index, and 'pi' is
                 * different, we can disable the workaround as all the ring
                 * entries have now been DMA'd to so valid-bit checking is
                 * repaired. Note: this logic needs to be based on next_idx
                 * (which increments one at a time), rather than on pi (which
                 * can burst and wrap-around between our snapshots of it).
                 */
                if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
                        pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
                                 s->dqrr.next_idx, pi);
                        s->dqrr.reset_bug = 0;
                }
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
        }

        p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
        verb = p->dq.verb;

        /*
         * If the valid-bit isn't of the expected polarity, nothing there. Note,
         * in the DQRR reset bug workaround, we shouldn't need to skip this
         * check, because we've already determined that a new entry is available
         * and we've invalidated the cacheline before reading it, so the
         * valid-bit behaviour is repaired and should tell us what we already
         * knew from reading PI.
         */
        if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
                prefetch(qbman_get_cmd(s,
                                       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
                return NULL;
        }
        /*
         * There's something there. Move "next_idx" attention to the next ring
         * entry (and prefetch it) before returning what we found.
         */
        s->dqrr.next_idx++;
        s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
        if (!s->dqrr.next_idx)
                s->dqrr.valid_bit ^= QB_VALID_BIT;

        /*
         * If this is the final response to a volatile dequeue command
         * indicate that the vdq is available
         */
        flags = p->dq.stat;
        response_verb = verb & QBMAN_RESULT_MASK;
        if ((response_verb == QBMAN_RESULT_DQ) &&
            (flags & DPAA2_DQ_STAT_VOLATILE) &&
            (flags & DPAA2_DQ_STAT_EXPIRED))
                atomic_inc(&s->vdq.available);

        prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

        return p;
}

/**
 * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from
 *                            qbman_swp_dqrr_next().
 * @s:  the software portal object
 * @dq: the DQRR entry to be consumed
 */
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
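
/*
 * Usage sketch (illustrative only, not part of this driver): drain the DQRR
 * ring, handing each frame to a caller-supplied handler and consuming the
 * entry. 'swp' and consume_frame() are assumed to come from the caller's
 * context; dpaa2_dq_fd() is assumed to be the accessor from dpaa2-global.h.
 *
 *        const struct dpaa2_dq *dq;
 *
 *        while ((dq = qbman_swp_dqrr_next(swp))) {
 *                consume_frame(dpaa2_dq_fd(dq));         // hypothetical handler
 *                qbman_swp_dqrr_consume(swp, dq);        // DCAP write releases the entry
 *        }
 */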

/**
 * qbman_result_has_new_result() - Check and get the dequeue response from the
 *                                 dq storage memory set in the pull dequeue
 *                                 command
 * @s:  the software portal object
 * @dq: the dequeue result read from the memory
 *
 * Return 1 if a valid dequeue result has been produced, 0 if not (yet).
 *
 * Only used for user-provided storage of dequeue results, not DQRR. For
 * efficiency purposes, the driver will perform any required endianness
 * conversion to ensure that the user's dequeue result storage is in host-endian
 * format. As such, once the user has called qbman_result_has_new_result() and
 * been returned a valid dequeue result, they should not call it again on
 * the same memory location (except of course if another dequeue command has
 * been executed to produce a new result to that location).
 */
int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
{
        if (dq->dq.tok != QMAN_DQ_TOKEN_VALID)
                return 0;

        /*
         * Set token to be 0 so we will detect change back to 1
         * next time the looping is traversed. Const is cast away here
         * as we want users to treat the dequeue responses as read only.
         */
        ((struct dpaa2_dq *)dq)->dq.tok = 0;

        /*
         * Determine whether VDQCR is available based on whether the
         * current result is sitting in the first storage location of
         * the busy command.
         */
        if (s->vdq.storage == dq) {
                s->vdq.storage = NULL;
                atomic_inc(&s->vdq.available);
        }

        return 1;
}
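
/*
 * Usage sketch (illustrative only, not part of this driver): poll the storage
 * that was passed to qbman_pull_desc_set_storage() in the pull example above.
 * 'swp' and 'store' come from the caller's context; dpaa2_dq_is_pull_complete()
 * and dpaa2_dq_fd() are assumed to be the accessors from dpaa2-global.h.
 *
 *        int i = 0;
 *
 *        while (i < 16) {
 *                const struct dpaa2_dq *dq = &store[i];
 *
 *                if (!qbman_result_has_new_result(swp, dq))
 *                        continue;       // hardware hasn't written this slot yet
 *                i++;
 *                if (dpaa2_dq_is_pull_complete(dq))
 *                        break;          // last response of this volatile dequeue
 *                ...process dpaa2_dq_fd(dq)...
 *        }
 */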

/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *                              default/starting state.
 * @d: the release descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
        memset(d, 0, sizeof(*d));
        d->verb = 1 << 5; /* Release Command Valid */
}

/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d:    the release descriptor
 * @bpid: the buffer pool ID
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
        d->bpid = cpu_to_le16(bpid);
}

/**
 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
 * interrupt source should be asserted after the release command is completed.
 * @d:      the release descriptor
 * @enable: non-zero to assert RCDI on completion, zero to leave it deasserted
 */
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
        if (enable)
                d->verb |= 1 << 6;
        else
                d->verb &= ~(1 << 6);
}

#define RAR_IDX(rar)     ((rar) & 0x7)
#define RAR_VB(rar)      ((rar) & 0x80)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s:           the software portal object
 * @d:           the release descriptor
 * @buffers:     a pointer to the buffer addresses to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
                      const u64 *buffers, unsigned int num_buffers)
{
        int i;
        struct qbman_release_desc *p;
        u32 rar;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
        if (!RAR_SUCCESS(rar))
                return -EBUSY;

        /* Start the release command */
        p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
        /* Copy the caller's buffer pointers to the command */
        for (i = 0; i < num_buffers; i++)
                p->buf[i] = cpu_to_le64(buffers[i]);
        p->bpid = d->bpid;

        /*
         * Set the verb byte, have to substitute in the valid-bit and the number
         * of buffers.
         */
        dma_wmb();
        p->verb = d->verb | RAR_VB(rar) | num_buffers;

        return 0;
}
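
/*
 * Usage sketch (illustrative only, not part of this driver): return a batch
 * of buffers to a pool. 'swp', 'bpid' and the 'bufs' array of DMA addresses
 * are assumed to come from the caller's context.
 *
 *        struct qbman_release_desc rd;
 *        int ret;
 *
 *        qbman_release_desc_clear(&rd);
 *        qbman_release_desc_set_bpid(&rd, bpid);
 *        do {
 *                ret = qbman_swp_release(swp, &rd, bufs, 7);
 *        } while (ret == -EBUSY);        // RCR slot not yet available
 */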

struct qbman_acquire_desc {
        u8 verb;
        u8 reserved;
        __le16 bpid;
        u8 num;
        u8 reserved2[59];
};

struct qbman_acquire_rslt {
        u8 verb;
        u8 rslt;
        __le16 reserved;
        u8 num;
        u8 reserved2[3];
        __le64 buf[7];
};

/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s:           the software portal object
 * @bpid:        the buffer pool index
 * @buffers:     a pointer to storage for the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return the number of buffers acquired, which may be less than the number
 * requested, or a negative error code if the acquire command fails.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
                      unsigned int num_buffers)
{
        struct qbman_acquire_desc *p;
        struct qbman_acquire_rslt *r;
        int i;

        if (!num_buffers || (num_buffers > 7))
                return -EINVAL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);

        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->bpid = cpu_to_le16(bpid);
        p->num = num_buffers;

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_MC_ACQUIRE);
        if (unlikely(!r)) {
                pr_err("qbman: acquire from BPID %d failed, no response\n",
                       bpid);
                return -EIO;
        }

        /* Decode the outcome */
        WARN_ON((r->verb & 0x7f) != QBMAN_MC_ACQUIRE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: acquire from BPID 0x%x failed, code=0x%02x\n",
                       bpid, r->rslt);
                return -EIO;
        }

        WARN_ON(r->num > num_buffers);

        /* Copy the acquired buffers to the caller's array */
        for (i = 0; i < r->num; i++)
                buffers[i] = le64_to_cpu(r->buf[i]);

        return (int)r->num;
}
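
/*
 * Usage sketch (illustrative only, not part of this driver): pull up to seven
 * buffers from a pool; the return value is the count actually acquired.
 * 'swp' and 'bpid' are assumed to come from the caller's context.
 *
 *        u64 bufs[7];
 *        int n;
 *
 *        n = qbman_swp_acquire(swp, bpid, bufs, 7);
 *        if (n < 0)
 *                return n;       // -EINVAL, -EBUSY or -EIO
 *        // bufs[0..n-1] now hold DMA addresses owned by the caller
 */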

struct qbman_alt_fq_state_desc {
        u8 verb;
        u8 reserved[3];
        __le32 fqid;
        u8 reserved2[56];
};

struct qbman_alt_fq_state_rslt {
        u8 verb;
        u8 rslt;
        u8 reserved[62];
};

#define ALT_FQ_FQID_MASK 0x00FFFFFF

int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
                           u8 alt_fq_verb)
{
        struct qbman_alt_fq_state_desc *p;
        struct qbman_alt_fq_state_rslt *r;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        p->fqid = cpu_to_le32(fqid & ALT_FQ_FQID_MASK);

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, alt_fq_verb);
        if (unlikely(!r)) {
                pr_err("qbman: mgmt cmd failed, no response (verb=0x%x)\n",
                       alt_fq_verb);
                return -EIO;
        }

        /* Decode the outcome */
        WARN_ON((r->verb & QBMAN_RESULT_MASK) != alt_fq_verb);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: ALT FQID %d failed: verb = 0x%08x code = 0x%02x\n",
                       fqid, r->verb, r->rslt);
                return -EIO;
        }

        return 0;
}
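
/*
 * Usage sketch (illustrative, assuming alt_fq verb codes such as
 * QBMAN_FQ_SCHEDULE are defined in qbman-portal.h): force-schedule a parked
 * frame queue. 'swp' and 'fqid' come from the caller's context.
 *
 *        if (qbman_swp_alt_fq_state(swp, fqid, QBMAN_FQ_SCHEDULE))
 *                pr_err("failed to schedule FQ %u\n", fqid);
 */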

struct qbman_cdan_ctrl_desc {
        u8 verb;
        u8 reserved;
        __le16 ch;
        u8 we;
        u8 ctrl;
        __le16 reserved2;
        __le64 cdan_ctx;
        u8 reserved3[48];
};

struct qbman_cdan_ctrl_rslt {
        u8 verb;
        u8 rslt;
        __le16 ch;
        u8 reserved[60];
};

int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
                       u8 we_mask, u8 cdan_en,
                       u64 ctx)
{
        struct qbman_cdan_ctrl_desc *p = NULL;
        struct qbman_cdan_ctrl_rslt *r = NULL;

        /* Start the management command */
        p = qbman_swp_mc_start(s);
        if (!p)
                return -EBUSY;

        /* Encode the caller-provided attributes */
        p->ch = cpu_to_le16(channelid);
        p->we = we_mask;
        if (cdan_en)
                p->ctrl = 1;
        else
                p->ctrl = 0;
        p->cdan_ctx = cpu_to_le64(ctx);

        /* Complete the management command */
        r = qbman_swp_mc_complete(s, p, QBMAN_WQCHAN_CONFIGURE);
        if (unlikely(!r)) {
                pr_err("qbman: wqchan config failed, no response\n");
                return -EIO;
        }

        WARN_ON((r->verb & 0x7f) != QBMAN_WQCHAN_CONFIGURE);

        /* Determine success or failure */
        if (unlikely(r->rslt != QBMAN_MC_RSLT_OK)) {
                pr_err("qbman: CDAN cQID %d failed: code = 0x%02x\n",
                       channelid, r->rslt);
                return -EIO;
        }

        return 0;
}
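
/*
 * Usage sketch (illustrative, assuming the CODE_CDAN_WE_EN / CODE_CDAN_WE_CTX
 * write-enable masks are defined in qbman-portal.h): enable Channel Data
 * Availability Notifications on a channel with a caller-chosen context value
 * that will be returned in each notification.
 *
 *        if (qbman_swp_CDAN_set(swp, chid,
 *                               CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
 *                               1, ctx))
 *                pr_err("CDAN setup for channel %u failed\n", chid);
 */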