/* dpdk/drivers/bus/fslmc/qbman/include/fsl_qbman_portal.h */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 *
   3 * Copyright (C) 2014 Freescale Semiconductor, Inc.
   4 * Copyright 2015-2020 NXP
   5 *
   6 */
   7#ifndef _FSL_QBMAN_PORTAL_H
   8#define _FSL_QBMAN_PORTAL_H
   9
  10#include <rte_compat.h>
  11#include <fsl_qbman_base.h>
  12
  13#define SVR_LS1080A     0x87030000
  14#define SVR_LS2080A     0x87010000
  15#define SVR_LS2088A     0x87090000
  16#define SVR_LX2160A     0x87360000
  17
  18/* Variable to store DPAA2 platform type */
  19extern uint32_t dpaa2_svr_family;
  20
  21/**
  22 * DOC - QBMan portal APIs to implement the following functions:
  23 * - Initialize and destroy Software portal object.
  24 * - Read and write Software portal interrupt registers.
  25 * - Enqueue, including setting the enqueue descriptor, and issuing enqueue
  26 *   command etc.
  27 * - Dequeue, including setting the dequeue descriptor, issuing dequeue command,
  28 *   parsing the dequeue response in DQRR and memory, parsing the state change
  29 *   notifications etc.
  30 * - Release, including setting the release descriptor, and issuing the buffer
  31 *   release command.
  32 * - Acquire, acquire the buffer from the given buffer pool.
  33 * - FQ management.
  34 * - Channel management, enable/disable CDAN with or without context.
  35 */
  36
  37/**
  38 * qbman_swp_init() - Create a functional object representing the given
  39 * QBMan portal descriptor.
  40 * @d: the given qbman swp descriptor
  41 *
  42 * Return qbman_swp portal object for success, NULL if the object cannot
  43 * be created.
  44 */
  45struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
  46
  47/**
  48 * qbman_swp_update() - Update portal cacheability attributes.
  49 * @p: the given qbman swp portal
  50 */
  51int qbman_swp_update(struct qbman_swp *p, int stash_off);
  52
  53/**
  54 * qbman_swp_finish() - Create and destroy a functional object representing
  55 * the given QBMan portal descriptor.
  56 * @p: the qbman_swp object to be destroyed.
  57 *
  58 */
  59void qbman_swp_finish(struct qbman_swp *p);
  60
  61/**
  62 * qbman_swp_invalidate() - Invalidate the cache enabled area of the QBMan
  63 * portal. This is required to be called if a portal moved to another core
  64 * because the QBMan portal area is non coherent
  65 * @p: the qbman_swp object to be invalidated
  66 *
  67 */
  68void qbman_swp_invalidate(struct qbman_swp *p);
  69
  70/**
  71 * qbman_swp_get_desc() - Get the descriptor of the given portal object.
  72 * @p: the given portal object.
  73 *
  74 * Return the descriptor for this portal.
  75 */
  76const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p);
  77
  78        /**************/
  79        /* Interrupts */
  80        /**************/
  81
  82/* EQCR ring interrupt */
  83#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001)
  84/* Enqueue command dispatched interrupt */
  85#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002)
  86/* DQRR non-empty interrupt */
  87#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004)
  88/* RCR ring interrupt */
  89#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008)
  90/* Release command dispatched interrupt */
  91#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010)
  92/* Volatile dequeue command interrupt */
  93#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020)
  94
  95/**
  96 * qbman_swp_interrupt_get_vanish() - Get the data in software portal
  97 * interrupt status disable register.
  98 * @p: the given software portal object.
  99 *
 100 * Return the settings in SWP_ISDR register.
 101 */
 102uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p);
 103
 104/**
 105 * qbman_swp_interrupt_set_vanish() - Set the data in software portal
 106 * interrupt status disable register.
 107 * @p: the given software portal object.
 108 * @mask: The value to set in SWP_IDSR register.
 109 */
 110void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask);
 111
 112/**
 113 * qbman_swp_interrupt_read_status() - Get the data in software portal
 114 * interrupt status register.
 115 * @p: the given software portal object.
 116 *
 117 * Return the settings in SWP_ISR register.
 118 */
 119uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p);
 120
 121/**
 122 * qbman_swp_interrupt_clear_status() - Set the data in software portal
 123 * interrupt status register.
 124 * @p: the given software portal object.
 125 * @mask: The value to set in SWP_ISR register.
 126 */
 127__rte_internal
 128void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask);
 129
 130/**
 131 * qbman_swp_dqrr_thrshld_read_status() - Get the data in software portal
 132 * DQRR interrupt threshold register.
 133 * @p: the given software portal object.
 134 */
 135uint32_t qbman_swp_dqrr_thrshld_read_status(struct qbman_swp *p);
 136
 137/**
 138 * qbman_swp_dqrr_thrshld_write() - Set the data in software portal
 139 * DQRR interrupt threshold register.
 140 * @p: the given software portal object.
 141 * @mask: The value to set in SWP_DQRR_ITR register.
 142 */
 143void qbman_swp_dqrr_thrshld_write(struct qbman_swp *p, uint32_t mask);
 144
 145/**
 146 * qbman_swp_intr_timeout_read_status() - Get the data in software portal
 147 * Interrupt Time-Out period register.
 148 * @p: the given software portal object.
 149 */
 150uint32_t qbman_swp_intr_timeout_read_status(struct qbman_swp *p);
 151
 152/**
 153 * qbman_swp_intr_timeout_write() - Set the data in software portal
 154 * Interrupt Time-Out period register.
 155 * @p: the given software portal object.
 156 * @mask: The value to set in SWP_ITPR register.
 157 */
 158void qbman_swp_intr_timeout_write(struct qbman_swp *p, uint32_t mask);
 159
 160/**
 161 * qbman_swp_interrupt_get_trigger() - Get the data in software portal
 162 * interrupt enable register.
 163 * @p: the given software portal object.
 164 *
 165 * Return the settings in SWP_IER register.
 166 */
 167uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
 168
 169/**
 170 * qbman_swp_interrupt_set_trigger() - Set the data in software portal
 171 * interrupt enable register.
 172 * @p: the given software portal object.
 173 * @mask: The value to set in SWP_IER register.
 174 */
 175void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask);
 176
 177/**
 178 * qbman_swp_interrupt_get_inhibit() - Get the data in software portal
 179 * interrupt inhibit register.
 180 * @p: the given software portal object.
 181 *
 182 * Return the settings in SWP_IIR register.
 183 */
 184int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
 185
 186/**
 187 * qbman_swp_interrupt_set_inhibit() - Set the data in software portal
 188 * interrupt inhibit register.
 189 * @p: the given software portal object.
 190 * @mask: The value to set in SWP_IIR register.
 191 */
 192void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
 193
 194        /************/
 195        /* Dequeues */
 196        /************/
 197
/**
 * struct qbman_result - structure for qbman dequeue response and/or
 * notification.
 *
 * A 64-byte hardware-defined layout which can be viewed as a generic entry
 * (@common), a frame dequeue response (@dq), a state change notification
 * (@scn) or an enqueue response (@eq_resp). The __le* fields are
 * little-endian as DMA'd by hardware; prefer the qbman_result_*() accessor
 * functions over reading fields directly.
 */
struct qbman_result {
	union {
		/* Generic view: only the verb byte identifies the entry type. */
		struct common {
			uint8_t verb;
			uint8_t reserved[63];
		} common;
		/* Frame dequeue response view. */
		struct dq {
			uint8_t verb;
			uint8_t stat;	/* QBMAN_DQ_STAT_* flags */
			__le16 seqnum;	/* valid only with QBMAN_DQ_STAT_VALIDFRAME */
			__le16 oprid;
			uint8_t reserved;
			uint8_t tok;	/* token from qbman_pull_desc_set_token() */
			__le32 fqid;
			uint32_t reserved2;
			__le32 fq_byte_cnt;	/* bytes remaining in the FQ */
			__le32 fq_frm_cnt;	/* frames remaining in the FQ */
			__le64 fqd_ctx;		/* frame queue context */
			uint8_t fd[32];		/* frame descriptor */
		} dq;
		/* State change notification view (FQDAN/CDAN/CSCN/...). */
		struct scn {
			uint8_t verb;
			uint8_t stat;
			uint8_t state;
			uint8_t reserved;
			__le32 rid_tok;	/* resource id and token */
			__le64 ctx;
		} scn;
		/* Enqueue response view. */
		struct eq_resp {
			uint8_t verb;
			uint8_t dca;
			__le16 seqnum;
			__le16 oprid;
			uint8_t reserved;
			uint8_t rc;	/* NOTE(review): presumably the enqueue rejection code -- confirm */
			__le32 tgtid;
			__le32 tag;
			uint16_t qdbin;
			uint8_t qpri;
			uint8_t reserved1;
			__le32 fqid:24;
			__le32 rspid:8;
			__le64 rsp_addr;
			uint8_t fd[32];	/* frame descriptor */
		} eq_resp;
	};
};
 251
/* TODO:
 * A DQRI interrupt can be generated when there are dequeue results on the
 * portal's DQRR (this mechanism does not deal with "pull" dequeues to
 * user-supplied 'storage' addresses). There are two parameters to this
 * interrupt source, one is a threshold and the other is a timeout. The
 * interrupt will fire if either the fill-level of the ring exceeds 'thresh', or
 * if the ring has been non-empty for longer than 'timeout' nanoseconds.
 259 * For timeout, an approximation to the desired nanosecond-granularity value is
 260 * made, so there are get and set APIs to allow the user to see what actual
 261 * timeout is set (compared to the timeout that was requested).
 262 */
 263int qbman_swp_dequeue_thresh(struct qbman_swp *s, unsigned int thresh);
 264int qbman_swp_dequeue_set_timeout(struct qbman_swp *s, unsigned int timeout);
 265int qbman_swp_dequeue_get_timeout(struct qbman_swp *s, unsigned int *timeout);
 266
 267/* ------------------- */
 268/* Push-mode dequeuing */
 269/* ------------------- */
 270
 271/* The user of a portal can enable and disable push-mode dequeuing of up to 16
 272 * channels independently. It does not specify this toggling by channel IDs, but
 273 * rather by specifying the index (from 0 to 15) that has been mapped to the
 274 * desired channel.
 275 */
 276
 277/**
 278 * qbman_swp_push_get() - Get the push dequeue setup.
 279 * @s: the software portal object.
 280 * @channel_idx: the channel index to query.
 281 * @enabled: returned boolean to show whether the push dequeue is enabled for
 282 * the given channel.
 283 */
 284void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled);
 285
 286/**
 287 * qbman_swp_push_set() - Enable or disable push dequeue.
 288 * @s: the software portal object.
 * @channel_idx: the channel index.
 290 * @enable: enable or disable push dequeue.
 291 *
 292 * The user of a portal can enable and disable push-mode dequeuing of up to 16
 293 * channels independently. It does not specify this toggling by channel IDs, but
 294 * rather by specifying the index (from 0 to 15) that has been mapped to the
 295 * desired channel.
 296 */
 297__rte_internal
 298void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable);
 299
 300/* ------------------- */
 301/* Pull-mode dequeuing */
 302/* ------------------- */
 303
/**
 * struct qbman_pull_desc - the structure for pull dequeue descriptor
 *
 * Build this with qbman_pull_desc_clear() followed by the
 * qbman_pull_desc_set_*() APIs; do not manipulate the fields directly.
 */
struct qbman_pull_desc {
	union {
		/* Opaque word-wise view of the 64-byte descriptor. */
		uint32_t dont_manipulate_directly[16];
		struct pull {
			uint8_t verb;
			uint8_t numf;	/* set via qbman_pull_desc_set_numframes() */
			uint8_t tok;	/* set via qbman_pull_desc_set_token() */
			uint8_t reserved;
			/* FQID/WQID/channel, per the set_fq/set_wq/set_channel action */
			uint32_t dq_src;
			uint64_t rsp_addr;	/* physical address of result storage */
			uint64_t rsp_addr_virt;	/* virtual address of result storage */
			uint8_t padding[40];
		} pull;
	};
};
 322
/**
 * enum qbman_pull_type_e - dequeue command precedence, used by
 * qbman_pull_desc_set_wq() and qbman_pull_desc_set_channel().
 */
enum qbman_pull_type_e {
	/* dequeue with priority precedence, respect intra-class scheduling */
	qbman_pull_type_prio = 1,
	/* dequeue with active FQ precedence, respect intra-class scheduling */
	qbman_pull_type_active,
	/* dequeue with active FQ precedence, no intra-class scheduling */
	qbman_pull_type_active_noics
};
 331
 332/**
 333 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 334 * default/starting state.
 335 * @d: the pull dequeue descriptor to be cleared.
 336 */
 337__rte_internal
 338void qbman_pull_desc_clear(struct qbman_pull_desc *d);
 339
 340/**
 341 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 342 * @d: the pull dequeue descriptor to be set.
 343 * @storage: the pointer of the memory to store the dequeue result.
 344 * @storage_phys: the physical address of the storage memory.
 345 * @stash: to indicate whether write allocate is enabled.
 346 *
 347 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 348 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 349 * produced to the given memory location (using the physical/DMA address which
 350 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 351 * those writes to main-memory express a cache-warming attribute.
 352 */
 353__rte_internal
 354void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
 355                                 struct qbman_result *storage,
 356                                 uint64_t storage_phys,
 357                                 int stash);
 358/**
 359 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued.
 360 * @d: the pull dequeue descriptor to be set.
 361 * @numframes: number of frames to be set, must be between 1 and 16, inclusive.
 362 */
 363__rte_internal
 364void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d,
 365                                   uint8_t numframes);
 366/**
 367 * qbman_pull_desc_set_token() - Set dequeue token for pull command
 368 * @d: the dequeue descriptor
 369 * @token: the token to be set
 370 *
 371 * token is the value that shows up in the dequeue response that can be used to
 372 * detect when the results have been published. The easiest technique is to zero
 373 * result "storage" before issuing a dequeue, and use any non-zero 'token' value
 374 */
 375void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token);
 376
 377/* Exactly one of the following descriptor "actions" should be set. (Calling any
 378 * one of these will replace the effect of any prior call to one of these.)
 379 * - pull dequeue from the given frame queue (FQ)
 380 * - pull dequeue from any FQ in the given work queue (WQ)
 381 * - pull dequeue from any FQ in any WQ in the given channel
 382 */
 383/**
 384 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues.
 385 * @fqid: the frame queue index of the given FQ.
 386 */
 387__rte_internal
 388void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid);
 389
 390/**
 391 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues.
 392 * @wqid: composed of channel id and wqid within the channel.
 393 * @dct: the dequeue command type.
 394 */
 395void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid,
 396                            enum qbman_pull_type_e dct);
 397
 398/* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 399 * dequeues.
 400 * @chid: the channel id to be dequeued.
 401 * @dct: the dequeue command type.
 402 */
 403void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid,
 404                                 enum qbman_pull_type_e dct);
 405
 406/**
 407 * qbman_pull_desc_set_rad() - Decide whether reschedule the fq after dequeue
 408 *
 409 * @rad: 1 = Reschedule the FQ after dequeue.
 410 *       0 = Allow the FQ to remain active after dequeue.
 411 */
 412void qbman_pull_desc_set_rad(struct qbman_pull_desc *d, int rad);
 413
 414/**
 415 * qbman_swp_pull() - Issue the pull dequeue command
 416 * @s: the software portal object.
 417 * @d: the software portal descriptor which has been configured with
 418 * the set of qbman_pull_desc_set_*() calls.
 419 *
 420 * Return 0 for success, and -EBUSY if the software portal is not ready
 421 * to do pull dequeue.
 422 */
 423__rte_internal
 424int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d);
 425
 426/* -------------------------------- */
 427/* Polling DQRR for dequeue results */
 428/* -------------------------------- */
 429
 430/**
 * qbman_swp_dqrr_next() - Get a valid DQRR entry.
 432 * @s: the software portal object.
 433 *
 434 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 435 * only once, so repeated calls can return a sequence of DQRR entries, without
 436 * requiring they be consumed immediately or in any particular order.
 437 */
 438__rte_internal
 439const struct qbman_result *qbman_swp_dqrr_next(struct qbman_swp *p);
 440
 441/**
 442 * qbman_swp_prefetch_dqrr_next() - prefetch the next DQRR entry.
 443 * @s: the software portal object.
 444 */
 445__rte_internal
 446void qbman_swp_prefetch_dqrr_next(struct qbman_swp *s);
 447
 448/**
 449 * qbman_swp_dqrr_consume() -  Consume DQRR entries previously returned from
 450 * qbman_swp_dqrr_next().
 451 * @s: the software portal object.
 452 * @dq: the DQRR entry to be consumed.
 453 */
 454__rte_internal
 455void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct qbman_result *dq);
 456
 457/**
 458 * qbman_swp_dqrr_idx_consume() -  Given the DQRR index consume the DQRR entry
 459 * @s: the software portal object.
 460 * @dqrr_index: the DQRR index entry to be consumed.
 461 */
 462__rte_internal
 463void qbman_swp_dqrr_idx_consume(struct qbman_swp *s, uint8_t dqrr_index);
 464
 465/**
 466 * qbman_get_dqrr_idx() - Get dqrr index from the given dqrr
 467 * @dqrr: the given dqrr object.
 468 *
 469 * Return dqrr index.
 470 */
 471__rte_internal
 472uint8_t qbman_get_dqrr_idx(const struct qbman_result *dqrr);
 473
 474/**
 475 * qbman_get_dqrr_from_idx() - Use index to get the dqrr entry from the
 476 * given portal
 477 * @s: the given portal.
 478 * @idx: the dqrr index.
 479 *
 480 * Return dqrr entry object.
 481 */
 482__rte_internal
 483struct qbman_result *qbman_get_dqrr_from_idx(struct qbman_swp *s, uint8_t idx);
 484
 485/* ------------------------------------------------- */
 486/* Polling user-provided storage for dequeue results */
 487/* ------------------------------------------------- */
 488
 489/**
 490 * qbman_result_has_new_result() - Check and get the dequeue response from the
 491 * dq storage memory set in pull dequeue command
 492 * @s: the software portal object.
 493 * @dq: the dequeue result read from the memory.
 494 *
 495 * Only used for user-provided storage of dequeue results, not DQRR. For
 496 * efficiency purposes, the driver will perform any required endianness
 497 * conversion to ensure that the user's dequeue result storage is in host-endian
 498 * format (whether or not that is the same as the little-endian format that
 499 * hardware DMA'd to the user's storage). As such, once the user has called
 500 * qbman_result_has_new_result() and been returned a valid dequeue result,
 501 * they should not call it again on the same memory location (except of course
 502 * if another dequeue command has been executed to produce a new result to that
 503 * location).
 504 *
 505 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 506 * dequeue result.
 507 */
 508__rte_internal
 509int qbman_result_has_new_result(struct qbman_swp *s,
 510                                struct qbman_result *dq);
 511
 512/**
 * qbman_check_command_complete() - Check if the previously issued dq command
 514 * is completed and results are available in memory.
 515 * @s: the software portal object.
 516 * @dq: the dequeue result read from the memory.
 517 *
 518 * Return 1 for getting a valid dequeue result, or 0 for not getting a valid
 519 * dequeue result.
 520 */
 521__rte_internal
 522int qbman_check_command_complete(struct qbman_result *dq);
 523
 524__rte_internal
 525int qbman_check_new_result(struct qbman_result *dq);
 526
 527/* -------------------------------------------------------- */
 528/* Parsing dequeue entries (DQRR and user-provided storage) */
 529/* -------------------------------------------------------- */
 530
 531/**
 532 * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not
 533 * @dq: the dequeue result to be checked.
 534 *
 535 * DQRR entries may contain non-dequeue results, ie. notifications
 536 */
 537int qbman_result_is_DQ(const struct qbman_result *dq);
 538
 539/**
 540 * qbman_result_is_SCN() - Check the dequeue result is notification or not
 541 * @dq: the dequeue result to be checked.
 542 *
 543 * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change
 544 * notifications" of one type or another. Some APIs apply to all of them, of the
 545 * form qbman_result_SCN_***().
 546 */
 547static inline int qbman_result_is_SCN(const struct qbman_result *dq)
 548{
 549        return !qbman_result_is_DQ(dq);
 550}
 551
 552/* Recognise different notification types, only required if the user allows for
 553 * these to occur, and cares about them when they do.
 554 */
 555
 556/**
 557 * qbman_result_is_FQDAN() - Check for FQ Data Availability
 558 * @dq: the qbman_result object.
 559 *
 560 * Return 1 if this is FQDAN.
 561 */
 562int qbman_result_is_FQDAN(const struct qbman_result *dq);
 563
 564/**
 565 * qbman_result_is_CDAN() - Check for Channel Data Availability
 566 * @dq: the qbman_result object to check.
 567 *
 568 * Return 1 if this is CDAN.
 569 */
 570int qbman_result_is_CDAN(const struct qbman_result *dq);
 571
 572/**
 573 * qbman_result_is_CSCN() - Check for Congestion State Change
 574 * @dq: the qbman_result object to check.
 575 *
 576 * Return 1 if this is CSCN.
 577 */
 578int qbman_result_is_CSCN(const struct qbman_result *dq);
 579
 580/**
 581 * qbman_result_is_BPSCN() - Check for Buffer Pool State Change.
 582 * @dq: the qbman_result object to check.
 583 *
 584 * Return 1 if this is BPSCN.
 585 */
 586int qbman_result_is_BPSCN(const struct qbman_result *dq);
 587
 588/**
 589 * qbman_result_is_CGCU() - Check for Congestion Group Count Update.
 590 * @dq: the qbman_result object to check.
 591 *
 592 * Return 1 if this is CGCU.
 593 */
 594int qbman_result_is_CGCU(const struct qbman_result *dq);
 595
 596/* Frame queue state change notifications; (FQDAN in theory counts too as it
 597 * leaves a FQ parked, but it is primarily a data availability notification)
 598 */
 599
 600/**
 601 * qbman_result_is_FQRN() - Check for FQ Retirement Notification.
 602 * @dq: the qbman_result object to check.
 603 *
 604 * Return 1 if this is FQRN.
 605 */
 606int qbman_result_is_FQRN(const struct qbman_result *dq);
 607
 608/**
 609 * qbman_result_is_FQRNI() - Check for FQ Retirement Immediate
 610 * @dq: the qbman_result object to check.
 611 *
 612 * Return 1 if this is FQRNI.
 613 */
 614int qbman_result_is_FQRNI(const struct qbman_result *dq);
 615
 616/**
 617 * qbman_result_is_FQPN() - Check for FQ Park Notification
 618 * @dq: the qbman_result object to check.
 619 *
 620 * Return 1 if this is FQPN.
 621 */
 622int qbman_result_is_FQPN(const struct qbman_result *dq);
 623
 624/* Parsing frame dequeue results (qbman_result_is_DQ() must be TRUE)
 625 */
 626/* FQ empty */
 627#define QBMAN_DQ_STAT_FQEMPTY       0x80
 628/* FQ held active */
 629#define QBMAN_DQ_STAT_HELDACTIVE    0x40
 630/* FQ force eligible */
 631#define QBMAN_DQ_STAT_FORCEELIGIBLE 0x20
 632/* Valid frame */
 633#define QBMAN_DQ_STAT_VALIDFRAME    0x10
 634/* FQ ODP enable */
 635#define QBMAN_DQ_STAT_ODPVALID      0x04
 636/* Volatile dequeue */
 637#define QBMAN_DQ_STAT_VOLATILE      0x02
 638/* volatile dequeue command is expired */
 639#define QBMAN_DQ_STAT_EXPIRED       0x01
 640
 641#define QBMAN_EQCR_DCA_IDXMASK          0x0f
 642#define QBMAN_ENQUEUE_FLAG_DCA          (1ULL << 31)
 643
 644/**
 645 * qbman_result_DQ_flags() - Get the STAT field of dequeue response
 646 * @dq: the dequeue result.
 647 *
 648 * Return the state field.
 649 */
 650__rte_internal
 651uint8_t qbman_result_DQ_flags(const struct qbman_result *dq);
 652
 653/**
 654 * qbman_result_DQ_is_pull() - Check whether the dq response is from a pull
 655 * command.
 656 * @dq: the dequeue result.
 657 *
 658 * Return 1 for volatile(pull) dequeue, 0 for static dequeue.
 659 */
 660static inline int qbman_result_DQ_is_pull(const struct qbman_result *dq)
 661{
 662        return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_VOLATILE);
 663}
 664
 665/**
 666 * qbman_result_DQ_is_pull_complete() - Check whether the pull command is
 667 * completed.
 668 * @dq: the dequeue result.
 669 *
 670 * Return boolean.
 671 */
 672static inline int qbman_result_DQ_is_pull_complete(
 673                                        const struct qbman_result *dq)
 674{
 675        return (int)(qbman_result_DQ_flags(dq) & QBMAN_DQ_STAT_EXPIRED);
 676}
 677
 678/**
 679 * qbman_result_DQ_seqnum()  - Get the seqnum field in dequeue response
 680 * seqnum is valid only if VALIDFRAME flag is TRUE
 681 * @dq: the dequeue result.
 682 *
 683 * Return seqnum.
 684 */
 685__rte_internal
 686uint16_t qbman_result_DQ_seqnum(const struct qbman_result *dq);
 687
 688/**
 * qbman_result_DQ_odpid() - Get the odpid field in dequeue response
 * odpid is valid only if ODPVALID flag is TRUE.
 691 * @dq: the dequeue result.
 692 *
 693 * Return odpid.
 694 */
 695__rte_internal
 696uint16_t qbman_result_DQ_odpid(const struct qbman_result *dq);
 697
 698/**
 699 * qbman_result_DQ_fqid() - Get the fqid in dequeue response
 700 * @dq: the dequeue result.
 701 *
 702 * Return fqid.
 703 */
 704uint32_t qbman_result_DQ_fqid(const struct qbman_result *dq);
 705
 706/**
 707 * qbman_result_DQ_byte_count() - Get the byte count in dequeue response
 708 * @dq: the dequeue result.
 709 *
 710 * Return the byte count remaining in the FQ.
 711 */
 712uint32_t qbman_result_DQ_byte_count(const struct qbman_result *dq);
 713
 714/**
 715 * qbman_result_DQ_frame_count - Get the frame count in dequeue response
 716 * @dq: the dequeue result.
 717 *
 718 * Return the frame count remaining in the FQ.
 719 */
 720uint32_t qbman_result_DQ_frame_count(const struct qbman_result *dq);
 721
 722/**
 723 * qbman_result_DQ_fqd_ctx() - Get the frame queue context in dequeue response
 724 * @dq: the dequeue result.
 725 *
 726 * Return the frame queue context.
 727 */
 728__rte_internal
 729uint64_t qbman_result_DQ_fqd_ctx(const struct qbman_result *dq);
 730
 731/**
 732 * qbman_result_DQ_fd() - Get the frame descriptor in dequeue response
 733 * @dq: the dequeue result.
 734 *
 735 * Return the frame descriptor.
 736 */
 737__rte_internal
 738const struct qbman_fd *qbman_result_DQ_fd(const struct qbman_result *dq);
 739
 740/* State-change notifications (FQDAN/CDAN/CSCN/...). */
 741
 742/**
 743 * qbman_result_SCN_state() - Get the state field in State-change notification
 744 * @scn: the state change notification.
 745 *
 * Return the state in the notification.
 747 */
 748__rte_internal
 749uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
 750
 751/**
 752 * qbman_result_SCN_rid() - Get the resource id from the notification
 753 * @scn: the state change notification.
 754 *
 755 * Return the resource id.
 756 */
 757uint32_t qbman_result_SCN_rid(const struct qbman_result *scn);
 758
 759/**
 760 * qbman_result_SCN_ctx() - get the context from the notification
 761 * @scn: the state change notification.
 762 *
 763 * Return the context.
 764 */
 765uint64_t qbman_result_SCN_ctx(const struct qbman_result *scn);
 766
 767/* Type-specific "resource IDs". Mainly for illustration purposes, though it
 768 * also gives the appropriate type widths.
 769 */
 770/* Get the FQID from the FQDAN */
 771#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq)
 772/* Get the FQID from the FQRN */
 773#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq)
 774/* Get the FQID from the FQRNI */
 775#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq)
 776/* Get the FQID from the FQPN */
 777#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq)
 778/* Get the channel ID from the CDAN */
 779#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
 780/* Get the CGID from the CSCN */
 781#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq))
 782
 783/**
 784 * qbman_result_bpscn_bpid() - Get the bpid from BPSCN
 785 * @scn: the state change notification.
 786 *
 787 * Return the buffer pool id.
 788 */
 789uint16_t qbman_result_bpscn_bpid(const struct qbman_result *scn);
 790
 791/**
 792 * qbman_result_bpscn_has_free_bufs() - Check whether there are free
 793 * buffers in the pool from BPSCN.
 794 * @scn: the state change notification.
 795 *
 796 * Return the number of free buffers.
 797 */
 798int qbman_result_bpscn_has_free_bufs(const struct qbman_result *scn);
 799
 800/**
 801 * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the
 802 * buffer pool is depleted.
 803 * @scn: the state change notification.
 804 *
 805 * Return the status of buffer pool depletion.
 806 */
 807int qbman_result_bpscn_is_depleted(const struct qbman_result *scn);
 808
 809/**
 810 * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer
 811 * pool is surplus or not.
 812 * @scn: the state change notification.
 813 *
 814 * Return the status of buffer pool surplus.
 815 */
 816int qbman_result_bpscn_is_surplus(const struct qbman_result *scn);
 817
 818/**
 819 * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message
 820 * @scn: the state change notification.
 821 *
 822 * Return the BPSCN context.
 823 */
 824uint64_t qbman_result_bpscn_ctx(const struct qbman_result *scn);
 825
 826/* Parsing CGCU */
 827/**
 * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
 829 * @scn: the state change notification.
 830 *
 831 * Return the CGCU resource id.
 832 */
 833uint16_t qbman_result_cgcu_cgid(const struct qbman_result *scn);
 834
 835/**
 836 * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU
 837 * @scn: the state change notification.
 838 *
 839 * Return instantaneous count in the CGCU notification.
 840 */
 841uint64_t qbman_result_cgcu_icnt(const struct qbman_result *scn);
 842
 843        /************/
 844        /* Enqueues */
 845        /************/
/**
 * struct qbman_eq_desc - structure of enqueue descriptor
 *
 * A 32-byte descriptor built with qbman_eq_desc_clear() and the
 * qbman_eq_desc_set_*() APIs; do not manipulate the fields directly.
 */
struct qbman_eq_desc {
	union {
		/* Opaque word-wise view of the descriptor. */
		uint32_t dont_manipulate_directly[8];
		struct eq {
			uint8_t verb;
			uint8_t dca;		/* DQRR consumption (DCA) control */
			uint16_t seqnum;	/* order restoration sequence number */
			uint16_t orpid;		/* order point record id */
			uint16_t reserved1;
			uint32_t tgtid;		/* enqueue target id -- see qbman_eq_desc_set_* setters */
			uint32_t tag;
			uint16_t qdbin;		/* queuing destination bin */
			uint8_t qpri;		/* queuing priority */
			uint8_t reserved[3];
			uint8_t wae;		/* NOTE(review): presumably write-allocate enable -- confirm */
			uint8_t rspid;		/* response id */
			uint64_t rsp_addr;	/* address the enqueue response is DMA'd to */
		} eq;
	};
};
 867
/**
 * struct qbman_eq_response - structure of enqueue response
 * @dont_manipulate_directly: the 16 32bit words (64 bytes) representing the
 * whole enqueue response; do not access the contents directly.
 */
struct qbman_eq_response {
	uint32_t dont_manipulate_directly[16];
};
 876
 877/**
 878 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 879 * default/starting state.
 880 * @d: the given enqueue descriptor.
 881 */
 882__rte_internal
 883void qbman_eq_desc_clear(struct qbman_eq_desc *d);
 884
 885/* Exactly one of the following descriptor "actions" should be set. (Calling
 886 * any one of these will replace the effect of any prior call to one of these.)
 887 * - enqueue without order-restoration
 888 * - enqueue with order-restoration
 889 * - fill a hole in the order-restoration sequence, without any enqueue
 890 * - advance NESN (Next Expected Sequence Number), without any enqueue
 891 * 'respond_success' indicates whether an enqueue response should be DMA'd
 892 * after success (otherwise a response is DMA'd only after failure).
 893 * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to
 894 * be enqueued.
 895 */
 896
/**
 * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 */
__rte_internal
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
/**
 * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
 * @d: the enqueue descriptor.
 * @respond_success: 1 = enqueue with response always; 0 = enqueue with
 * rejections returned on a FQ.
 * @opr_id: the order point record id.
 * @seqnum: the order restoration sequence number.
 * @incomplete: indicates that other fragments using the same sequence number
 * are yet to be enqueued (i.e. this is not the last fragment).
 */
__rte_internal
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
			   uint16_t opr_id, uint16_t seqnum, int incomplete);
 918
 919/**
 920 * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
 921 * without any enqueue
 922 * @d: the enqueue descriptor.
 923 * @opr_id: the order point record id.
 924 * @seqnum: the order restoration sequence number.
 925 */
 926void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint16_t opr_id,
 927                                uint16_t seqnum);
 928
 929/**
 930 * qbman_eq_desc_set_orp_nesn() -  advance NESN (Next Expected Sequence Number)
 931 * without any enqueue
 932 * @d: the enqueue descriptor.
 933 * @opr_id: the order point record id.
 934 * @seqnum: the order restoration sequence number.
 935 */
 936void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint16_t opr_id,
 937                                uint16_t seqnum);
 938/**
 939 * qbman_eq_desc_set_response() - Set the enqueue response info.
 940 * @d: the enqueue descriptor
 941 * @storage_phys: the physical address of the enqueue response in memory.
 942 * @stash: indicate that the write allocation enabled or not.
 943 *
 944 * In the case where an enqueue response is DMA'd, this determines where that
 945 * response should go. (The physical/DMA address is given for hardware's
 946 * benefit, but software should interpret it as a "struct qbman_eq_response"
 947 * data structure.) 'stash' controls whether or not the write to main-memory
 948 * expresses a cache-warming attribute.
 949 */
 950__rte_internal
 951void qbman_eq_desc_set_response(struct qbman_eq_desc *d,
 952                                uint64_t storage_phys,
 953                                int stash);
 954
 955/**
 956 * qbman_eq_desc_set_token() - Set token for the enqueue command
 957 * @d: the enqueue descriptor
 958 * @token: the token to be set.
 959 *
 960 * token is the value that shows up in an enqueue response that can be used to
 961 * detect when the results have been published. The easiest technique is to zero
 962 * result "storage" before issuing an enqueue, and use any non-zero 'token'
 963 * value.
 964 */
 965__rte_internal
 966void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token);
 967
 968/**
 969 * Exactly one of the following descriptor "targets" should be set. (Calling any
 970 * one of these will replace the effect of any prior call to one of these.)
 971 * - enqueue to a frame queue
 972 * - enqueue to a queuing destination
 * Note, that none of these will have any effect if the "action" type has been
 974 * set to "orp_hole" or "orp_nesn".
 975 */
 976/**
 977 * qbman_eq_desc_set_fq() - Set Frame Queue id for the enqueue command
 978 * @d: the enqueue descriptor
 979 * @fqid: the id of the frame queue to be enqueued.
 980 */
 981__rte_internal
 982void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid);
 983
 984/**
 985 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command.
 986 * @d: the enqueue descriptor
 987 * @qdid: the id of the queuing destination to be enqueued.
 988 * @qd_bin: the queuing destination bin
 989 * @qd_prio: the queuing destination priority.
 990 */
 991__rte_internal
 992void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid,
 993                          uint16_t qd_bin, uint8_t qd_prio);
 994
 995/**
 996 * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt
 997 * @d: the enqueue descriptor
 998 * @enable: boolean to enable/disable EQDI
 999 *
1000 * Determines whether or not the portal's EQDI interrupt source should be
1001 * asserted after the enqueue command is completed.
1002 */
1003void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable);
1004
1005/**
1006 * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command.
1007 * @d: the enqueue descriptor.
1008 * @enable: enabled/disable DCA mode.
1009 * @dqrr_idx: DCAP_CI, the DCAP consumer index.
 * @park: determines whether to park the FQ or not
1011 *
1012 * Determines whether or not a portal DQRR entry should be consumed once the
1013 * enqueue command is completed. (And if so, and the DQRR entry corresponds to a
1014 * held-active (order-preserving) FQ, whether the FQ should be parked instead of
1015 * being rescheduled.)
1016 */
1017__rte_internal
1018void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable,
1019                           uint8_t dqrr_idx, int park);
1020
1021/**
1022 * qbman_result_eqresp_fd() - Get fd from enqueue response.
1023 * @eqresp: enqueue response.
1024 *
1025 * Return the fd pointer.
1026 */
1027__rte_internal
1028struct qbman_fd *qbman_result_eqresp_fd(struct qbman_result *eqresp);
1029
1030/**
1031 * qbman_result_eqresp_set_rspid() - Set the response id in enqueue response.
1032 * @eqresp: enqueue response.
1033 * @val: values to set into the response id.
1034 *
 * This value is set into the response id before the enqueue command, which
 * gets overwritten by qbman once the enqueue command is complete.
1037 */
1038__rte_internal
1039void qbman_result_eqresp_set_rspid(struct qbman_result *eqresp, uint8_t val);
1040
1041/**
1042 * qbman_result_eqresp_rspid() - Get the response id.
1043 * @eqresp: enqueue response.
1044 *
1045 * Return the response id.
1046 *
1047 * At the time of enqueue user provides the response id. Response id gets
1048 * copied into the enqueue response to determine if the command has been
1049 * completed, and response has been updated.
1050 */
1051__rte_internal
1052uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
1053
/**
 * qbman_result_eqresp_rc() - determines if enqueue command is successful.
 * @eqresp: enqueue response.
 *
 * Return 0 when command is successful.
 */
__rte_internal
uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
1062
1063/**
1064 * qbman_swp_enqueue() - Issue an enqueue command.
1065 * @s: the software portal used for enqueue.
1066 * @d: the enqueue descriptor.
1067 * @fd: the frame descriptor to be enqueued.
1068 *
1069 * Please note that 'fd' should only be NULL if the "action" of the
1070 * descriptor is "orp_hole" or "orp_nesn".
1071 *
1072 * Return 0 for a successful enqueue, -EBUSY if the EQCR is not ready.
1073 */
1074int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
1075                      const struct qbman_fd *fd);
1076/**
 * qbman_swp_enqueue_multiple() - Enqueue multiple frames with same
 *                                eq descriptor
1079 * @s: the software portal used for enqueue.
1080 * @d: the enqueue descriptor.
1081 * @fd: the frame descriptor to be enqueued.
1082 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
1083 * @num_frames: the number of the frames to be enqueued.
1084 *
1085 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1086 */
1087__rte_internal
1088int qbman_swp_enqueue_multiple(struct qbman_swp *s,
1089                               const struct qbman_eq_desc *d,
1090                               const struct qbman_fd *fd,
1091                               uint32_t *flags,
1092                               int num_frames);
1093
1094/**
 * qbman_swp_enqueue_multiple_fd() - Enqueue multiple frames with same
 *                                   eq descriptor
1097 * @s: the software portal used for enqueue.
1098 * @d: the enqueue descriptor.
1099 * @fd: the frame descriptor to be enqueued.
1100 * @flags: bit-mask of QBMAN_ENQUEUE_FLAG_*** options
1101 * @num_frames: the number of the frames to be enqueued.
1102 *
1103 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1104 */
1105__rte_internal
1106int qbman_swp_enqueue_multiple_fd(struct qbman_swp *s,
1107                                  const struct qbman_eq_desc *d,
1108                                  struct qbman_fd **fd,
1109                                  uint32_t *flags,
1110                                  int num_frames);
1111
1112/**
1113 * qbman_swp_enqueue_multiple_desc() - Enqueue multiple frames with
1114 *                                     individual eq descriptor.
1115 * @s: the software portal used for enqueue.
1116 * @d: the enqueue descriptor.
1117 * @fd: the frame descriptor to be enqueued.
1118 * @num_frames: the number of the frames to be enqueued.
1119 *
1120 * Return the number of enqueued frames, -EBUSY if the EQCR is not ready.
1121 */
1122__rte_internal
1123int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
1124                                    const struct qbman_eq_desc *d,
1125                                    const struct qbman_fd *fd,
1126                                    int num_frames);
1127
1128/* TODO:
1129 * qbman_swp_enqueue_thresh() - Set threshold for EQRI interrupt.
1130 * @s: the software portal.
1131 * @thresh: the threshold to trigger the EQRI interrupt.
1132 *
1133 * An EQRI interrupt can be generated when the fill-level of EQCR falls below
1134 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
1135 */
1136int qbman_swp_enqueue_thresh(struct qbman_swp *s, unsigned int thresh);
1137
1138        /*******************/
1139        /* Buffer releases */
1140        /*******************/
/**
 * struct qbman_release_desc - The structure for buffer release descriptor
 * @dont_manipulate_directly: the 16 32-bit words representing the whole
 * possible settings of the qbman release descriptor. Use the
 * qbman_release_desc_*() setter APIs rather than writing fields directly.
 */
struct qbman_release_desc {
	union {
		uint32_t dont_manipulate_directly[16];
		struct br {
			uint8_t verb;
			uint8_t reserved;
			uint16_t bpid;    /* buffer pool to release to */
			uint32_t reserved2;
			uint64_t buf[7];  /* buffer addresses to release; at
					   * most 7, matching the num_buffers
					   * limit of qbman_swp_release()
					   */
		} br;
	};
};
1158
1159/**
1160 * qbman_release_desc_clear() - Clear the contents of a descriptor to
1161 * default/starting state.
1162 * @d: the qbman release descriptor.
1163 */
1164__rte_internal
1165void qbman_release_desc_clear(struct qbman_release_desc *d);
1166
1167/**
1168 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
1169 * @d: the qbman release descriptor.
1170 */
1171__rte_internal
1172void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint16_t bpid);
1173
1174/**
1175 * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
1176 * interrupt source should be asserted after the release command is completed.
1177 * @d: the qbman release descriptor.
1178 */
1179void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
1180
1181/**
1182 * qbman_swp_release() - Issue a buffer release command.
1183 * @s: the software portal object.
1184 * @d: the release descriptor.
1185 * @buffers: a pointer pointing to the buffer address to be released.
1186 * @num_buffers: number of buffers to be released,  must be less than 8.
1187 *
1188 * Return 0 for success, -EBUSY if the release command ring is not ready.
1189 */
1190__rte_internal
1191int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
1192                      const uint64_t *buffers, unsigned int num_buffers);
1193
1194/* TODO:
1195 * qbman_swp_release_thresh() - Set threshold for RCRI interrupt
1196 * @s: the software portal.
1197 * @thresh: the threshold.
1198 * An RCRI interrupt can be generated when the fill-level of RCR falls below
1199 * the 'thresh' value set here. Setting thresh==0 (the default) disables.
1200 */
1201int qbman_swp_release_thresh(struct qbman_swp *s, unsigned int thresh);
1202
1203        /*******************/
1204        /* Buffer acquires */
1205        /*******************/
1206/**
1207 * qbman_swp_acquire() - Issue a buffer acquire command.
1208 * @s: the software portal object.
1209 * @bpid: the buffer pool index.
1210 * @buffers: a pointer pointing to the acquired buffer address|es.
1211 * @num_buffers: number of buffers to be acquired, must be less than 8.
1212 *
1213 * Return 0 for success, or negative error code if the acquire command
1214 * fails.
1215 */
1216__rte_internal
1217int qbman_swp_acquire(struct qbman_swp *s, uint16_t bpid, uint64_t *buffers,
1218                      unsigned int num_buffers);
1219
1220        /*****************/
1221        /* FQ management */
1222        /*****************/
1223/**
1224 * qbman_swp_fq_schedule() - Move the fq to the scheduled state.
1225 * @s: the software portal object.
1226 * @fqid: the index of frame queue to be scheduled.
1227 *
 * There are a couple of different ways that a FQ can end up in the parked
 * state. This schedules it.
1229 * This schedules it.
1230 *
1231 * Return 0 for success, or negative error code for failure.
1232 */
1233int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid);
1234
1235/**
1236 * qbman_swp_fq_force() - Force the FQ to fully scheduled state.
1237 * @s: the software portal object.
1238 * @fqid: the index of frame queue to be forced.
1239 *
1240 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
1241 * and thus be available for selection by any channel-dequeuing behaviour (push
1242 * or pull). If the FQ is subsequently "dequeued" from the channel and is still
1243 * empty at the time this happens, the resulting dq_entry will have no FD.
1244 * (qbman_result_DQ_fd() will return NULL.)
1245 *
1246 * Return 0 for success, or negative error code for failure.
1247 */
1248int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid);
1249
1250/**
1251 * These functions change the FQ flow-control stuff between XON/XOFF. (The
1252 * default is XON.) This setting doesn't affect enqueues to the FQ, just
 * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
1254 * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
1255 * changed to XOFF after it had already become truly-scheduled to a channel, and
1256 * a pull dequeue of that channel occurs that selects that FQ for dequeuing,
1257 * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will
1258 * return NULL.)
1259 */
1260/**
1261 * qbman_swp_fq_xon() - XON the frame queue.
1262 * @s: the software portal object.
1263 * @fqid: the index of frame queue.
1264 *
1265 * Return 0 for success, or negative error code for failure.
1266 */
1267int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid);
1268/**
1269 * qbman_swp_fq_xoff() - XOFF the frame queue.
1270 * @s: the software portal object.
1271 * @fqid: the index of frame queue.
1272 *
1273 * Return 0 for success, or negative error code for failure.
1274 */
1275int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid);
1276
1277        /**********************/
1278        /* Channel management */
1279        /**********************/
1280
1281/**
1282 * If the user has been allocated a channel object that is going to generate
1283 * CDANs to another channel, then these functions will be necessary.
1284 * CDAN-enabled channels only generate a single CDAN notification, after which
 * they need to be reenabled before they'll generate another. (The idea is
1286 * that pull dequeuing will occur in reaction to the CDAN, followed by a
1287 * reenable step.) Each function generates a distinct command to hardware, so a
1288 * combination function is provided if the user wishes to modify the "context"
1289 * (which shows up in each CDAN message) each time they reenable, as a single
1290 * command to hardware.
1291 */
1292
1293/**
1294 * qbman_swp_CDAN_set_context() - Set CDAN context
1295 * @s: the software portal object.
1296 * @channelid: the channel index.
1297 * @ctx: the context to be set in CDAN.
1298 *
1299 * Return 0 for success, or negative error code for failure.
1300 */
1301int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid,
1302                               uint64_t ctx);
1303
1304/**
1305 * qbman_swp_CDAN_enable() - Enable CDAN for the channel.
1306 * @s: the software portal object.
1307 * @channelid: the index of the channel to generate CDAN.
1308 *
1309 * Return 0 for success, or negative error code for failure.
1310 */
1311int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid);
1312
1313/**
1314 * qbman_swp_CDAN_disable() - disable CDAN for the channel.
1315 * @s: the software portal object.
1316 * @channelid: the index of the channel to generate CDAN.
1317 *
1318 * Return 0 for success, or negative error code for failure.
1319 */
1320int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid);
1321
/**
 * qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
 * @s: the software portal object.
 * @channelid: the index of the channel to generate CDAN.
 * @ctx: the context set in CDAN.
 *
 * Return 0 for success, or negative error code for failure.
 */
int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid,
				      uint64_t ctx);
1332#endif /* !_FSL_QBMAN_PORTAL_H */
1333