/* Source file: dpdk/drivers/event/dlb2/dlb2_priv.h */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2016-2020 Intel Corporation
   3 */
   4
   5#ifndef _DLB2_PRIV_H_
   6#define _DLB2_PRIV_H_
   7
   8#include <emmintrin.h>
   9#include <stdbool.h>
  10
  11#include <rte_eventdev.h>
  12#include <rte_config.h>
  13#include "dlb2_user.h"
  14#include "dlb2_log.h"
  15#include "rte_pmd_dlb2.h"
  16
  17#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
  18#define DLB2_INC_STAT(_stat, _incr_val) ((_stat) += _incr_val)
  19#else
  20#define DLB2_INC_STAT(_stat, _incr_val)
  21#endif
  22
  23#define EVDEV_DLB2_NAME_PMD dlb2_event
  24
  25/* Default values for command line devargs */
  26#define DLB2_POLL_INTERVAL_DEFAULT 1000
  27#define DLB2_SW_CREDIT_QUANTA_DEFAULT 32
  28#define DLB2_DEPTH_THRESH_DEFAULT 256
  29
  30/*  command line arg strings */
  31#define NUMA_NODE_ARG "numa_node"
  32#define DLB2_MAX_NUM_EVENTS "max_num_events"
  33#define DLB2_NUM_DIR_CREDITS "num_dir_credits"
  34#define DEV_ID_ARG "dev_id"
  35#define DLB2_QID_DEPTH_THRESH_ARG "qid_depth_thresh"
  36#define DLB2_COS_ARG "cos"
  37#define DLB2_POLL_INTERVAL_ARG "poll_interval"
  38#define DLB2_SW_CREDIT_QUANTA_ARG "sw_credit_quanta"
  39#define DLB2_DEPTH_THRESH_ARG "default_depth_thresh"
  40#define DLB2_VECTOR_OPTS_ENAB_ARG "vector_opts_enable"
  41
  42/* Begin HW related defines and structs */
  43
  44#define DLB2_HW_V2 0
  45#define DLB2_HW_V2_5 1
  46#define DLB2_MAX_NUM_DOMAINS 32
  47#define DLB2_MAX_NUM_VFS 16
  48#define DLB2_MAX_NUM_LDB_QUEUES 32
  49#define DLB2_MAX_NUM_LDB_PORTS 64
  50#define DLB2_MAX_NUM_DIR_PORTS_V2               DLB2_MAX_NUM_DIR_QUEUES_V2
  51#define DLB2_MAX_NUM_DIR_PORTS_V2_5             DLB2_MAX_NUM_DIR_QUEUES_V2_5
  52#define DLB2_MAX_NUM_DIR_PORTS(ver)             (ver == DLB2_HW_V2 ? \
  53                                                 DLB2_MAX_NUM_DIR_PORTS_V2 : \
  54                                                 DLB2_MAX_NUM_DIR_PORTS_V2_5)
  55#define DLB2_MAX_NUM_DIR_QUEUES_V2              64 /* DIR == directed */
  56#define DLB2_MAX_NUM_DIR_QUEUES_V2_5            96
  57/* When needed for array sizing, the DLB 2.5 macro is used */
  58#define DLB2_MAX_NUM_DIR_QUEUES(ver)            (ver == DLB2_HW_V2 ? \
  59                                                 DLB2_MAX_NUM_DIR_QUEUES_V2 : \
  60                                                 DLB2_MAX_NUM_DIR_QUEUES_V2_5)
  61#define DLB2_MAX_NUM_FLOWS (64 * 1024)
  62#define DLB2_MAX_NUM_LDB_CREDITS (8 * 1024)
  63#define DLB2_MAX_NUM_DIR_CREDITS(ver)           (ver == DLB2_HW_V2 ? 4096 : 0)
  64#define DLB2_MAX_NUM_CREDITS(ver)               (ver == DLB2_HW_V2 ? \
  65                                                 0 : DLB2_MAX_NUM_LDB_CREDITS)
  66#define DLB2_MAX_NUM_LDB_CREDIT_POOLS 64
  67#define DLB2_MAX_NUM_DIR_CREDIT_POOLS 64
  68#define DLB2_MAX_NUM_HIST_LIST_ENTRIES 2048
  69#define DLB2_MAX_NUM_QIDS_PER_LDB_CQ 8
  70#define DLB2_QID_PRIORITIES 8
  71#define DLB2_MAX_DEVICE_PATH 32
  72#define DLB2_MIN_DEQUEUE_TIMEOUT_NS 1
  73/* Note: "- 1" here to support the timeout range check in eventdev_autotest */
  74#define DLB2_MAX_DEQUEUE_TIMEOUT_NS (UINT32_MAX - 1)
  75#define DLB2_SW_CREDIT_BATCH_SZ 32
  76#define DLB2_NUM_SN_GROUPS 2
  77#define DLB2_MAX_LDB_SN_ALLOC 1024
  78#define DLB2_MAX_QUEUE_DEPTH_THRESHOLD 8191
  79
  80/* 2048 total hist list entries and 64 total ldb ports, which
  81 * makes for 2048/64 == 32 hist list entries per port. However, CQ
  82 * depth must be a power of 2 and must also be >= HIST LIST entries.
  83 * As a result we just limit the maximum dequeue depth to 32.
  84 */
  85#define DLB2_MIN_CQ_DEPTH 1
  86#define DLB2_MAX_CQ_DEPTH 32
  87#define DLB2_MIN_HARDWARE_CQ_DEPTH 8
  88#define DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT \
  89        DLB2_MAX_CQ_DEPTH
  90
  91#define DLB2_HW_DEVICE_FROM_PCI_ID(_pdev) \
  92        (((_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_PF) ||        \
  93          (_pdev->id.device_id == PCI_DEVICE_ID_INTEL_DLB2_5_VF))   ?   \
  94                DLB2_HW_V2_5 : DLB2_HW_V2)
  95
  96/*
  97 * Static per queue/port provisioning values
  98 */
  99#define DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE 64
 100
 101#define CQ_BASE(is_dir) ((is_dir) ? DLB2_DIR_CQ_BASE : DLB2_LDB_CQ_BASE)
 102#define CQ_SIZE(is_dir) ((is_dir) ? DLB2_DIR_CQ_MAX_SIZE : \
 103                                    DLB2_LDB_CQ_MAX_SIZE)
 104#define PP_BASE(is_dir) ((is_dir) ? DLB2_DIR_PP_BASE : DLB2_LDB_PP_BASE)
 105
 106#define DLB2_NUM_QES_PER_CACHE_LINE 4
 107
 108#define DLB2_MAX_ENQUEUE_DEPTH 64
 109#define DLB2_MIN_ENQUEUE_DEPTH 4
 110
 111#define DLB2_NAME_SIZE 64
 112
 113#define DLB2_1K 1024
 114#define DLB2_2K (2 * DLB2_1K)
 115#define DLB2_4K (4 * DLB2_1K)
 116#define DLB2_16K (16 * DLB2_1K)
 117#define DLB2_32K (32 * DLB2_1K)
 118#define DLB2_1MB (DLB2_1K * DLB2_1K)
 119#define DLB2_16MB (16 * DLB2_1MB)
 120
 121/* Use the upper 3 bits of the event priority to select the DLB2 priority */
 122#define EV_TO_DLB2_PRIO(x) ((x) >> 5)
 123#define DLB2_TO_EV_PRIO(x) ((x) << 5)
 124
 125enum dlb2_hw_ver {
 126        DLB2_HW_VER_2,
 127        DLB2_HW_VER_2_5,
 128};
 129
 130enum dlb2_hw_port_types {
 131        DLB2_LDB_PORT,
 132        DLB2_DIR_PORT,
 133        DLB2_NUM_PORT_TYPES /* Must be last */
 134};
 135
 136enum dlb2_hw_queue_types {
 137        DLB2_LDB_QUEUE,
 138        DLB2_DIR_QUEUE,
 139        DLB2_NUM_QUEUE_TYPES /* Must be last */
 140};
 141
 142#define DLB2_COMBINED_POOL DLB2_LDB_QUEUE
 143
 144#define PORT_TYPE(p) ((p)->is_directed ? DLB2_DIR_PORT : DLB2_LDB_PORT)
 145
 146/* Do not change - must match hardware! */
 147enum dlb2_hw_sched_type {
 148        DLB2_SCHED_ATOMIC = 0,
 149        DLB2_SCHED_UNORDERED,
 150        DLB2_SCHED_ORDERED,
 151        DLB2_SCHED_DIRECTED,
 152        /* DLB2_NUM_HW_SCHED_TYPES must be last */
 153        DLB2_NUM_HW_SCHED_TYPES
 154};
 155
 156struct dlb2_hw_rsrcs {
 157        int32_t nb_events_limit;
 158        uint32_t num_queues;            /* Total queues (lb + dir) */
 159        uint32_t num_ldb_queues;        /* Number of available ldb queues */
 160        uint32_t num_ldb_ports;         /* Number of load balanced ports */
 161        uint32_t num_dir_ports;         /* Number of directed ports */
 162        union {
 163                struct {
 164                        uint32_t num_ldb_credits; /* Number of ldb credits */
 165                        uint32_t num_dir_credits; /* Number of dir credits */
 166                };
 167                struct {
 168                        uint32_t num_credits; /* Number of combined credits */
 169                };
 170        };
 171        uint32_t reorder_window_size;   /* Size of reorder window */
 172};
 173
 174struct dlb2_hw_resource_info {
 175        /**> Max resources that can be provided */
 176        struct dlb2_hw_rsrcs hw_rsrc_max;
 177        int num_sched_domains;
 178        uint32_t socket_id;
 179};
 180
 181enum dlb2_enqueue_type {
 182        /**>
 183         * New : Used to inject a new packet into the QM.
 184         */
 185        DLB2_ENQ_NEW,
 186        /**>
 187         * Forward : Enqueues a packet, and
 188         *  - if atomic: release any lock it holds in the QM
 189         *  - if ordered: release the packet for egress re-ordering
 190         */
 191        DLB2_ENQ_FWD,
 192        /**>
 193         * Enqueue Drop : Release an inflight packet. Must be called with
 194         * event == NULL. Used to drop a packet.
 195         *
 196         * Note that all packets dequeued from a load-balanced port must be
 197         * released, either with DLB2_ENQ_DROP or DLB2_ENQ_FWD.
 198         */
 199        DLB2_ENQ_DROP,
 200
 201        /* marker for array sizing etc. */
 202        _DLB2_NB_ENQ_TYPES
 203};
 204
 205/* hw-specific format - do not change */
 206
 207struct dlb2_event_type {
 208        uint16_t major:4;
 209        uint16_t unused:4;
 210        uint16_t sub:8;
 211};
 212
 213union dlb2_opaque_data {
 214        uint16_t opaque_data;
 215        struct dlb2_event_type event_type;
 216};
 217
 218struct dlb2_msg_info {
 219        uint8_t qid;
 220        uint8_t sched_type:2;
 221        uint8_t priority:3;
 222        uint8_t msg_type:3;
 223};
 224
 225#define DLB2_NEW_CMD_BYTE 0x08
 226#define DLB2_FWD_CMD_BYTE 0x0A
 227#define DLB2_COMP_CMD_BYTE 0x02
 228#define DLB2_POP_CMD_BYTE 0x01
 229#define DLB2_NOOP_CMD_BYTE 0x00
 230
/* hw-specific format - do not change */
/* 16B enqueue queue entry; written to the device four at a time
 * (one cache line, see DLB2_NUM_QES_PER_CACHE_LINE).
 */
struct dlb2_enqueue_qe {
        uint64_t data;
        /* Word 3 */
        union dlb2_opaque_data u;
        uint8_t qid;
        uint8_t sched_type:2;
        uint8_t priority:3;
        uint8_t msg_type:3;
        /* Word 4 */
        uint16_t lock_id;
        uint8_t meas_lat:1;
        uint8_t rsvd1:2;
        uint8_t no_dec:1;
        uint8_t cmp_id:4;
        union {
                /* whole command byte (see DLB2_*_CMD_BYTE values) */
                uint8_t cmd_byte;
                struct {
                        uint8_t cq_token:1;
                        uint8_t qe_comp:1;
                        uint8_t qe_frag:1;
                        uint8_t qe_valid:1;
                        uint8_t rsvd3:1;
                        uint8_t error:1;
                        uint8_t rsvd:2;
                };
        };
};
 259
/* hw-specific format - do not change */
/* QE used to return CQ tokens to the device (token pop). Same 16B layout
 * as the enqueue QE except lock_id is replaced by a 10-bit token count.
 */
struct dlb2_cq_pop_qe {
        uint64_t data;
        union dlb2_opaque_data u;
        uint8_t qid;
        uint8_t sched_type:2;
        uint8_t priority:3;
        uint8_t msg_type:3;
        uint16_t tokens:10;  /* number of CQ tokens being returned */
        uint16_t rsvd2:6;
        uint8_t meas_lat:1;
        uint8_t rsvd1:2;
        uint8_t no_dec:1;
        uint8_t cmp_id:4;
        union {
                /* whole command byte (see DLB2_*_CMD_BYTE values) */
                uint8_t cmd_byte;
                struct {
                        uint8_t cq_token:1;
                        uint8_t qe_comp:1;
                        uint8_t qe_frag:1;
                        uint8_t qe_valid:1;
                        uint8_t rsvd3:1;
                        uint8_t error:1;
                        uint8_t rsvd:2;
                };
        };
};
 287
/* hw-specific format - do not change */
/* 16B QE read from the consumer queue on dequeue. cq_gen is the generation
 * bit the dequeue path compares against its expected gen to detect valid
 * entries; qid_depth encodes the queue-depth band (see DLB2_QID_DEPTH_*).
 */
struct dlb2_dequeue_qe {
        uint64_t data;
        union dlb2_opaque_data u;
        uint8_t qid;
        uint8_t sched_type:2;
        uint8_t priority:3;
        uint8_t msg_type:3;
        uint16_t flow_id:16; /* was pp_id in v1 */
        uint8_t debug;
        uint8_t cq_gen:1;
        uint8_t qid_depth:2; /* 2 bits in v2 */
        uint8_t rsvd1:2;
        uint8_t error:1;
        uint8_t rsvd2:2;
};
 304
/* Port creation arguments; the active member is selected by port type */
union dlb2_port_config {
        struct dlb2_create_ldb_port_args ldb;
        struct dlb2_create_dir_port_args dir;
};
 309
 310enum dlb2_port_state {
 311        PORT_CLOSED,
 312        PORT_STARTED,
 313        PORT_STOPPED
 314};
 315
 316enum dlb2_configuration_state {
 317        /* The resource has not been configured */
 318        DLB2_NOT_CONFIGURED,
 319        /* The resource was configured, but the device was stopped */
 320        DLB2_PREV_CONFIGURED,
 321        /* The resource is currently configured */
 322        DLB2_CONFIGURED
 323};
 324
/* HW-facing state for one producer/consumer port (enqueue + CQ side). */
struct dlb2_port {
        uint32_t id;
        bool is_directed;
        bool gen_bit;
        uint16_t dir_credits;
        uint32_t dequeue_depth;
        enum dlb2_token_pop_mode token_pop_mode;
        union dlb2_port_config cfg;
        uint32_t *credit_pool[DLB2_NUM_QUEUE_TYPES]; /* use __atomic builtins */
        union {
                /* split ldb/dir credit caches */
                struct {
                        uint16_t cached_ldb_credits;
                        uint16_t ldb_credits;
                        uint16_t cached_dir_credits;
                };
                /* combined credit cache */
                struct {
                        uint16_t cached_credits;
                        uint16_t credits;
                };
        };
        bool int_armed;
        uint16_t owed_tokens;       /* CQ tokens not yet returned to HW */
        int16_t issued_releases;
        int16_t token_pop_thresh;
        int cq_depth;
        uint16_t cq_idx;            /* masked CQ read index */
        uint16_t cq_idx_unmasked;
        uint16_t cq_depth_mask;
        uint16_t gen_bit_shift;
        uint64_t cq_rolling_mask; /*
                                   * rotate to always have right expected
                                   * gen bits
                                   */
        uint64_t cq_rolling_mask_2;
        void *cq_addr_cached; /* avoid multiple refs */
        enum dlb2_port_state state;
        enum dlb2_configuration_state config_state;
        int num_mapped_qids;
        uint8_t *qid_mappings;
        struct dlb2_enqueue_qe *qe4; /* Cache line's worth of QEs (4) */
        struct dlb2_enqueue_qe *int_arm_qe;
        struct dlb2_cq_pop_qe *consume_qe;
        struct dlb2_eventdev *dlb2; /* back ptr */
        struct dlb2_eventdev_port *ev_port; /* back ptr */
        bool use_scalar; /* force usage of scalar code */
};
 371
/* Per-process per-port mmio and memory pointers */
struct process_local_port_data {
        uint64_t *pp_addr;               /* producer port MMIO address */
        struct dlb2_dequeue_qe *cq_base; /* consumer queue base address */
        const struct rte_memzone *mz;
        bool mmaped;                     /* true once mapped in this process */
};

struct dlb2_eventdev;

/* Indirection for the low-level producer-port write */
struct dlb2_port_low_level_io_functions {
        /* write four QEs (one cache line's worth) to the producer port */
        void (*pp_enqueue_four)(void *qe4, void *pp_addr);
};
 385
/* Scheduling-domain configuration captured at device configure time */
struct dlb2_config {
        int configured;   /* nonzero once the domain has been created */
        int reserved;
        union {
                /* split ldb/dir credit counts */
                struct {
                        uint32_t num_ldb_credits;
                        uint32_t num_dir_credits;
                };
                /* combined credit count */
                struct {
                        uint32_t num_credits;
                };
        };
        struct dlb2_create_sched_domain_args resources;
};
 400
 401enum dlb2_cos {
 402        DLB2_COS_DEFAULT = -1,
 403        DLB2_COS_0 = 0,
 404        DLB2_COS_1,
 405        DLB2_COS_2,
 406        DLB2_COS_3
 407};
 408
/* Hardware-device-level state shared by all ports/queues of the instance */
struct dlb2_hw_dev {
        struct dlb2_config cfg;
        struct dlb2_hw_resource_info info;
        void *pf_dev; /* opaque pointer to PF PMD dev (struct dlb2_dev) */
        uint32_t domain_id;
        enum dlb2_cos cos_id;
        rte_spinlock_t resource_lock; /* for MP support */
} __rte_cache_aligned;
 417
 418/* End HW related defines and structs */
 419
 420/* Begin DLB2 PMD Eventdev related defines and structs */
 421
 422#define DLB2_MAX_NUM_QUEUES(ver)                                \
 423        (DLB2_MAX_NUM_DIR_QUEUES(ver) + DLB2_MAX_NUM_LDB_QUEUES)
 424
 425#define DLB2_MAX_NUM_PORTS(ver) \
 426        (DLB2_MAX_NUM_DIR_PORTS(ver) + DLB2_MAX_NUM_LDB_PORTS)
 427
 428#define DLB2_MAX_NUM_DIR_QUEUES_V2_5 96
 429#define DLB2_MAX_NUM_DIR_PORTS_V2_5 DLB2_MAX_NUM_DIR_QUEUES_V2_5
 430#define DLB2_MAX_NUM_QUEUES_ALL \
 431        (DLB2_MAX_NUM_DIR_QUEUES_V2_5 + DLB2_MAX_NUM_LDB_QUEUES)
 432#define DLB2_MAX_NUM_PORTS_ALL \
 433        (DLB2_MAX_NUM_DIR_PORTS_V2_5 + DLB2_MAX_NUM_LDB_PORTS)
 434#define DLB2_MAX_INPUT_QUEUE_DEPTH 256
 435
 436/** Structure to hold the queue to port link establishment attributes */
 437
 438struct dlb2_event_queue_link {
 439        uint8_t queue_id;
 440        uint8_t priority;
 441        bool mapped;
 442        bool valid;
 443};
 444
 445struct dlb2_traffic_stats {
 446        uint64_t rx_ok;
 447        uint64_t rx_drop;
 448        uint64_t rx_interrupt_wait;
 449        uint64_t rx_umonitor_umwait;
 450        uint64_t tx_ok;
 451        uint64_t total_polls;
 452        uint64_t zero_polls;
 453        union {
 454                struct {
 455                        uint64_t tx_nospc_ldb_hw_credits;
 456                        uint64_t tx_nospc_dir_hw_credits;
 457                };
 458                struct {
 459                        uint64_t tx_nospc_hw_credits;
 460                };
 461        };
 462        uint64_t tx_nospc_inflight_max;
 463        uint64_t tx_nospc_new_event_limit;
 464        uint64_t tx_nospc_inflight_credits;
 465};
 466
 467/* DLB2 HW sets the 2bit qid_depth in rx QEs based on the programmable depth
 468 * threshold. The global default value in config/common_base (or rte_config.h)
 469 * can be overridden on a per-qid basis using a vdev command line parameter.
 470 * 3: depth > threshold
 471 * 2: threshold >= depth > 3/4 threshold
 472 * 1: 3/4 threshold >= depth > 1/2 threshold
 473 * 0: depth <= 1/2 threshold.
 474 */
 475#define DLB2_QID_DEPTH_LE50 0
 476#define DLB2_QID_DEPTH_GT50_LE75 1
 477#define DLB2_QID_DEPTH_GT75_LE100 2
 478#define DLB2_QID_DEPTH_GT100 3
 479#define DLB2_NUM_QID_DEPTH_STAT_VALS 4 /* 2 bits */
 480
/* Per-queue counters */
struct dlb2_queue_stats {
        uint64_t enq_ok;
        /* histogram of rx QE qid_depth values (see DLB2_QID_DEPTH_*) */
        uint64_t qid_depth[DLB2_NUM_QID_DEPTH_STAT_VALS];
};

/* Per-port counters, including per-queue sub-counters */
struct dlb2_port_stats {
        struct dlb2_traffic_stats traffic;
        uint64_t tx_op_cnt[4]; /* indexed by rte_event.op */
        uint64_t tx_implicit_rel;
        uint64_t tx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
        uint64_t tx_invalid;
        uint64_t rx_sched_cnt[DLB2_NUM_HW_SCHED_TYPES];
        uint64_t rx_sched_invalid;
        struct dlb2_queue_stats queue[DLB2_MAX_NUM_QUEUES_ALL];
};
 496
/* Eventdev-level port state wrapping the HW-facing struct dlb2_port */
struct dlb2_eventdev_port {
        struct dlb2_port qm_port; /* hw specific data structure */
        struct rte_event_port_conf conf; /* user-supplied configuration */
        uint16_t inflight_credits; /* num credits this port has right now */
        uint16_t credit_update_quanta;
        struct dlb2_eventdev *dlb2; /* backlink optimization */
        struct dlb2_port_stats stats __rte_cache_aligned;
        struct dlb2_event_queue_link link[DLB2_MAX_NUM_QIDS_PER_LDB_CQ];
        int num_links;
        uint32_t id; /* port id */
        /* num releases yet to be completed on this port.
         * Only applies to load-balanced ports.
         */
        uint16_t outstanding_releases;
        uint16_t inflight_max; /* app requested max inflights for this port */
        /* setup_done is set when the event port is setup */
        bool setup_done;
        /* enq_configured is set when the qm port is created */
        bool enq_configured;
        uint8_t implicit_release; /* release events before dequeueing */
}  __rte_cache_aligned;
 518
/* HW-facing queue state */
struct dlb2_queue {
        uint32_t num_qid_inflights; /* User config */
        uint32_t num_atm_inflights; /* User config */
        enum dlb2_configuration_state config_state;
        int  sched_type; /* LB queue only */
        uint8_t id;
        bool     is_directed;
};

/* Eventdev-level queue state wrapping the HW-facing struct dlb2_queue */
struct dlb2_eventdev_queue {
        struct dlb2_queue qm_queue;
        struct rte_event_queue_conf conf; /* User config */
        int depth_threshold; /* use default if 0 */
        uint32_t id;
        bool setup_done;
        uint8_t num_links;
};
 536
 537enum dlb2_run_state {
 538        DLB2_RUN_STATE_STOPPED = 0,
 539        DLB2_RUN_STATE_STOPPING,
 540        DLB2_RUN_STATE_STARTING,
 541        DLB2_RUN_STATE_STARTED
 542};
 543
/* Top-level PMD instance state: all ports, queues, stats and credits */
struct dlb2_eventdev {
        struct dlb2_eventdev_port ev_ports[DLB2_MAX_NUM_PORTS_ALL];
        struct dlb2_eventdev_queue ev_queues[DLB2_MAX_NUM_QUEUES_ALL];
        /* map HW qid -> eventdev queue id, per queue type */
        uint8_t qm_ldb_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
        uint8_t qm_dir_to_ev_queue_id[DLB2_MAX_NUM_QUEUES_ALL];
        /* store num stats and offset of the stats for each queue */
        uint16_t xstats_count_per_qid[DLB2_MAX_NUM_QUEUES_ALL];
        uint16_t xstats_offset_for_qid[DLB2_MAX_NUM_QUEUES_ALL];
        /* store num stats and offset of the stats for each port */
        uint16_t xstats_count_per_port[DLB2_MAX_NUM_PORTS_ALL];
        uint16_t xstats_offset_for_port[DLB2_MAX_NUM_PORTS_ALL];
        struct dlb2_get_num_resources_args hw_rsrc_query_results;
        uint32_t xstats_count_mode_queue;
        struct dlb2_hw_dev qm_instance; /* strictly hw related */
        uint64_t global_dequeue_wait_ticks;
        struct dlb2_xstats_entry *xstats;
        struct rte_eventdev *event_dev; /* backlink to dev */
        uint32_t xstats_count_mode_dev;
        uint32_t xstats_count_mode_port;
        uint32_t xstats_count;
        uint32_t inflights; /* use __atomic builtins */
        uint32_t new_event_limit;
        int max_num_events_override;
        int num_dir_credits_override;
        bool vector_opts_enabled;
        volatile enum dlb2_run_state run_state;
        uint16_t num_dir_queues; /* total num of evdev dir queues requested */
        union {
                /* split ldb/dir credit counts */
                struct {
                        uint16_t num_dir_credits;
                        uint16_t num_ldb_credits;
                };
                /* combined credit count */
                struct {
                        uint16_t num_credits;
                };
        };
        uint16_t num_queues; /* total queues */
        uint16_t num_ldb_queues; /* total num of evdev ldb queues requested */
        uint16_t num_ports; /* total num of evdev ports requested */
        uint16_t num_ldb_ports; /* total num of ldb ports requested */
        uint16_t num_dir_ports; /* total num of dir ports requested */
        bool umwait_allowed;
        bool global_dequeue_wait; /* Not using per dequeue wait if true */
        enum dlb2_cq_poll_modes poll_mode;
        int poll_interval;
        int sw_credit_quanta;
        int default_depth_thresh;
        uint8_t revision;
        uint8_t version;
        bool configured;
        union {
                /* split hw credit pools */
                struct {
                        uint16_t max_ldb_credits;
                        uint16_t max_dir_credits;
                        /* use __atomic builtins */ /* shared hw cred */
                        uint32_t ldb_credit_pool __rte_cache_aligned;
                        /* use __atomic builtins */ /* shared hw cred */
                        uint32_t dir_credit_pool __rte_cache_aligned;
                };
                /* combined hw credit pool */
                struct {
                        uint16_t max_credits;
                        /* use __atomic builtins */ /* shared hw cred */
                        uint32_t credit_pool __rte_cache_aligned;
                };
        };
};
 610
/* used for collecting and passing around the dev args */
struct dlb2_qid_depth_thresholds {
        int val[DLB2_MAX_NUM_QUEUES_ALL];
};

/* Parsed command-line devargs (see the *_ARG string keys above) */
struct dlb2_devargs {
        int socket_id;
        int max_num_events;
        int num_dir_credits_override;
        int dev_id;
        struct dlb2_qid_depth_thresholds qid_depth_thresholds;
        enum dlb2_cos cos_id;
        int poll_interval;
        int sw_credit_quanta;
        int default_depth_thresh;
        bool vector_opts_enabled;
};
 628
 629/* End Eventdev related defines and structs */
 630
 631/* Forwards for non-inlined functions */
 632
 633void dlb2_eventdev_dump(struct rte_eventdev *dev, FILE *f);
 634
 635int dlb2_xstats_init(struct dlb2_eventdev *dlb2);
 636
 637void dlb2_xstats_uninit(struct dlb2_eventdev *dlb2);
 638
 639int dlb2_eventdev_xstats_get(const struct rte_eventdev *dev,
 640                enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
 641                const unsigned int ids[], uint64_t values[], unsigned int n);
 642
 643int dlb2_eventdev_xstats_get_names(const struct rte_eventdev *dev,
 644                enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
 645                struct rte_event_dev_xstats_name *xstat_names,
 646                unsigned int *ids, unsigned int size);
 647
 648uint64_t dlb2_eventdev_xstats_get_by_name(const struct rte_eventdev *dev,
 649                                          const char *name, unsigned int *id);
 650
 651int dlb2_eventdev_xstats_reset(struct rte_eventdev *dev,
 652                enum rte_event_dev_xstats_mode mode,
 653                int16_t queue_port_id,
 654                const uint32_t ids[],
 655                uint32_t nb_ids);
 656
 657int test_dlb2_eventdev(void);
 658
 659int dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
 660                                const char *name,
 661                                struct dlb2_devargs *dlb2_args);
 662
 663int dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
 664                                  const char *name);
 665
 666uint32_t dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
 667                              struct dlb2_eventdev_queue *queue);
 668
 669int dlb2_parse_params(const char *params,
 670                      const char *name,
 671                      struct dlb2_devargs *dlb2_args,
 672                      uint8_t version);
 673
 674/* Extern globals */
 675extern struct process_local_port_data dlb2_port[][DLB2_NUM_PORT_TYPES];
 676
 677#endif  /* _DLB2_PRIV_H_ */
 678