dpdk/drivers/event/dlb2/dlb2.c
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2016-2020 Intel Corporation
   3 */
   4
   5#include <assert.h>
   6#include <errno.h>
   7#include <nmmintrin.h>
   8#include <pthread.h>
   9#include <stdint.h>
  10#include <stdbool.h>
  11#include <stdio.h>
  12#include <string.h>
  13#include <sys/mman.h>
  14#include <sys/fcntl.h>
  15
  16#include <rte_common.h>
  17#include <rte_config.h>
  18#include <rte_cycles.h>
  19#include <rte_debug.h>
  20#include <rte_dev.h>
  21#include <rte_errno.h>
  22#include <rte_eventdev.h>
  23#include <eventdev_pmd.h>
  24#include <rte_io.h>
  25#include <rte_kvargs.h>
  26#include <rte_log.h>
  27#include <rte_malloc.h>
  28#include <rte_mbuf.h>
  29#include <rte_power_intrinsics.h>
  30#include <rte_prefetch.h>
  31#include <rte_ring.h>
  32#include <rte_string_fns.h>
  33
  34#include "dlb2_priv.h"
  35#include "dlb2_iface.h"
  36#include "dlb2_inline_fns.h"
  37
  38/*
  39 * Resources exposed to eventdev. Some values overridden at runtime using
  40 * values returned by the DLB kernel driver.
  41 */
  42#if (RTE_EVENT_MAX_QUEUES_PER_DEV > UINT8_MAX)
  43#error "RTE_EVENT_MAX_QUEUES_PER_DEV cannot fit in member max_event_queues"
  44#endif
  45static struct rte_event_dev_info evdev_dlb2_default_info = {
  46        .driver_name = "", /* probe will set */
  47        .min_dequeue_timeout_ns = DLB2_MIN_DEQUEUE_TIMEOUT_NS,
  48        .max_dequeue_timeout_ns = DLB2_MAX_DEQUEUE_TIMEOUT_NS,
  49#if (RTE_EVENT_MAX_QUEUES_PER_DEV < DLB2_MAX_NUM_LDB_QUEUES)
  50        .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
  51#else
  52        .max_event_queues = DLB2_MAX_NUM_LDB_QUEUES,
  53#endif
  54        .max_event_queue_flows = DLB2_MAX_NUM_FLOWS,
  55        .max_event_queue_priority_levels = DLB2_QID_PRIORITIES,
  56        .max_event_priority_levels = DLB2_QID_PRIORITIES,
  57        .max_event_ports = DLB2_MAX_NUM_LDB_PORTS,
  58        .max_event_port_dequeue_depth = DLB2_MAX_CQ_DEPTH,
  59        .max_event_port_enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH,
  60        .max_event_port_links = DLB2_MAX_NUM_QIDS_PER_LDB_CQ,
  61        .max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
  62        .max_single_link_event_port_queue_pairs = DLB2_MAX_NUM_DIR_PORTS,
  63        .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
  64                          RTE_EVENT_DEV_CAP_EVENT_QOS |
  65                          RTE_EVENT_DEV_CAP_BURST_MODE |
  66                          RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
  67                          RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
  68                          RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES),
  69};
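/*
 * Illustrative sketch (not part of the driver): an application reads the
 * values advertised above through the standard eventdev API rather than this
 * structure directly. Assuming an already-probed eventdev with id 0:
 *
 *     struct rte_event_dev_info info;
 *
 *     if (rte_event_dev_info_get(0, &info) == 0)
 *             printf("queues=%d ports=%d events=%d\n",
 *                    info.max_event_queues, info.max_event_ports,
 *                    info.max_num_events);
 *
 * The queue, port, and credit counts reported here are later overridden by
 * dlb2_hw_query_resources() with what the kernel driver actually provisioned.
 */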
  70
  71struct process_local_port_data
  72dlb2_port[DLB2_MAX_NUM_PORTS][DLB2_NUM_PORT_TYPES];
  73
  74static void
  75dlb2_free_qe_mem(struct dlb2_port *qm_port)
  76{
  77        if (qm_port == NULL)
  78                return;
  79
  80        rte_free(qm_port->qe4);
  81        qm_port->qe4 = NULL;
  82
  83        rte_free(qm_port->int_arm_qe);
  84        qm_port->int_arm_qe = NULL;
  85
  86        rte_free(qm_port->consume_qe);
  87        qm_port->consume_qe = NULL;
  88
  89        rte_memzone_free(dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz);
  90        dlb2_port[qm_port->id][PORT_TYPE(qm_port)].mz = NULL;
  91}
  92
  93/* override defaults with value(s) provided on command line */
  94static void
  95dlb2_init_queue_depth_thresholds(struct dlb2_eventdev *dlb2,
  96                                 int *qid_depth_thresholds)
  97{
  98        int q;
  99
 100        for (q = 0; q < DLB2_MAX_NUM_QUEUES; q++) {
 101                if (qid_depth_thresholds[q] != 0)
 102                        dlb2->ev_queues[q].depth_threshold =
 103                                qid_depth_thresholds[q];
 104        }
 105}
 106
 107static int
 108dlb2_hw_query_resources(struct dlb2_eventdev *dlb2)
 109{
 110        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 111        struct dlb2_hw_resource_info *dlb2_info = &handle->info;
 112        int ret;
 113
 114        /* Query driver resources provisioned for this device */
 115
 116        ret = dlb2_iface_get_num_resources(handle,
 117                                           &dlb2->hw_rsrc_query_results);
 118        if (ret) {
 119                DLB2_LOG_ERR("ioctl get dlb2 num resources, err=%d\n", ret);
 120                return ret;
 121        }
 122
 123        /* Complete filling in device resource info returned to evdev app,
 124         * overriding any default values.
 125         * The capabilities (CAPs) were set at compile time.
 126         */
 127
 128        evdev_dlb2_default_info.max_event_queues =
 129                dlb2->hw_rsrc_query_results.num_ldb_queues;
 130
 131        evdev_dlb2_default_info.max_event_ports =
 132                dlb2->hw_rsrc_query_results.num_ldb_ports;
 133
 134        evdev_dlb2_default_info.max_num_events =
 135                dlb2->hw_rsrc_query_results.num_ldb_credits;
 136
 137        /* Save off values used when creating the scheduling domain. */
 138
 139        handle->info.num_sched_domains =
 140                dlb2->hw_rsrc_query_results.num_sched_domains;
 141
 142        handle->info.hw_rsrc_max.nb_events_limit =
 143                dlb2->hw_rsrc_query_results.num_ldb_credits;
 144
 145        handle->info.hw_rsrc_max.num_queues =
 146                dlb2->hw_rsrc_query_results.num_ldb_queues +
 147                dlb2->hw_rsrc_query_results.num_dir_ports;
 148
 149        handle->info.hw_rsrc_max.num_ldb_queues =
 150                dlb2->hw_rsrc_query_results.num_ldb_queues;
 151
 152        handle->info.hw_rsrc_max.num_ldb_ports =
 153                dlb2->hw_rsrc_query_results.num_ldb_ports;
 154
 155        handle->info.hw_rsrc_max.num_dir_ports =
 156                dlb2->hw_rsrc_query_results.num_dir_ports;
 157
 158        handle->info.hw_rsrc_max.reorder_window_size =
 159                dlb2->hw_rsrc_query_results.num_hist_list_entries;
 160
 161        rte_memcpy(dlb2_info, &handle->info.hw_rsrc_max, sizeof(*dlb2_info));
 162
 163        return 0;
 164}
 165
 166#define DLB2_BASE_10 10
 167
 168static int
 169dlb2_string_to_int(int *result, const char *str)
 170{
 171        long ret;
 172        char *endptr;
 173
 174        if (str == NULL || result == NULL)
 175                return -EINVAL;
 176
 177        errno = 0;
 178        ret = strtol(str, &endptr, DLB2_BASE_10);
 179        if (errno)
 180                return -errno;
 181
  182        /* long and int may have different widths on some architectures */
 183        if (ret < INT_MIN || ret > INT_MAX || endptr == str)
 184                return -EINVAL;
 185
 186        *result = ret;
 187        return 0;
 188}
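/*
 * A few illustrative inputs showing why both the errno and the endptr checks
 * above are needed (values are examples only):
 *
 *     int v;
 *
 *     dlb2_string_to_int(&v, "2048");         -> 0, v == 2048
 *     dlb2_string_to_int(&v, "");             -> -EINVAL (endptr == str)
 *     dlb2_string_to_int(&v, "99999999999");  -> -EINVAL or -ERANGE (> INT_MAX)
 *
 * Trailing garbage such as "16abc" still parses as 16; callers that need
 * strict parsing must reject that themselves.
 */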
 189
 190static int
 191set_numa_node(const char *key __rte_unused, const char *value, void *opaque)
 192{
 193        int *socket_id = opaque;
 194        int ret;
 195
 196        ret = dlb2_string_to_int(socket_id, value);
 197        if (ret < 0)
 198                return ret;
 199
 200        if (*socket_id > RTE_MAX_NUMA_NODES)
 201                return -EINVAL;
 202        return 0;
 203}
 204
 205static int
 206set_max_num_events(const char *key __rte_unused,
 207                   const char *value,
 208                   void *opaque)
 209{
 210        int *max_num_events = opaque;
 211        int ret;
 212
 213        if (value == NULL || opaque == NULL) {
 214                DLB2_LOG_ERR("NULL pointer\n");
 215                return -EINVAL;
 216        }
 217
 218        ret = dlb2_string_to_int(max_num_events, value);
 219        if (ret < 0)
 220                return ret;
 221
 222        if (*max_num_events < 0 || *max_num_events >
 223                        DLB2_MAX_NUM_LDB_CREDITS) {
 224                DLB2_LOG_ERR("dlb2: max_num_events must be between 0 and %d\n",
 225                             DLB2_MAX_NUM_LDB_CREDITS);
 226                return -EINVAL;
 227        }
 228
 229        return 0;
 230}
 231
 232static int
 233set_num_dir_credits(const char *key __rte_unused,
 234                    const char *value,
 235                    void *opaque)
 236{
 237        int *num_dir_credits = opaque;
 238        int ret;
 239
 240        if (value == NULL || opaque == NULL) {
 241                DLB2_LOG_ERR("NULL pointer\n");
 242                return -EINVAL;
 243        }
 244
 245        ret = dlb2_string_to_int(num_dir_credits, value);
 246        if (ret < 0)
 247                return ret;
 248
 249        if (*num_dir_credits < 0 ||
 250            *num_dir_credits > DLB2_MAX_NUM_DIR_CREDITS) {
 251                DLB2_LOG_ERR("dlb2: num_dir_credits must be between 0 and %d\n",
 252                             DLB2_MAX_NUM_DIR_CREDITS);
 253                return -EINVAL;
 254        }
 255
 256        return 0;
 257}
 258
 259static int
 260set_dev_id(const char *key __rte_unused,
 261           const char *value,
 262           void *opaque)
 263{
 264        int *dev_id = opaque;
 265        int ret;
 266
 267        if (value == NULL || opaque == NULL) {
 268                DLB2_LOG_ERR("NULL pointer\n");
 269                return -EINVAL;
 270        }
 271
 272        ret = dlb2_string_to_int(dev_id, value);
 273        if (ret < 0)
 274                return ret;
 275
 276        return 0;
 277}
 278
 279static int
 280set_cos(const char *key __rte_unused,
 281        const char *value,
 282        void *opaque)
 283{
 284        enum dlb2_cos *cos_id = opaque;
 285        int x = 0;
 286        int ret;
 287
 288        if (value == NULL || opaque == NULL) {
 289                DLB2_LOG_ERR("NULL pointer\n");
 290                return -EINVAL;
 291        }
 292
 293        ret = dlb2_string_to_int(&x, value);
 294        if (ret < 0)
 295                return ret;
 296
 297        if (x != DLB2_COS_DEFAULT && (x < DLB2_COS_0 || x > DLB2_COS_3)) {
 298                DLB2_LOG_ERR(
 299                        "COS %d out of range, must be DLB2_COS_DEFAULT or 0-3\n",
 300                        x);
 301                return -EINVAL;
 302        }
 303
 304        *cos_id = x;
 305
 306        return 0;
 307}
 308
 309
 310static int
 311set_qid_depth_thresh(const char *key __rte_unused,
 312                     const char *value,
 313                     void *opaque)
 314{
 315        struct dlb2_qid_depth_thresholds *qid_thresh = opaque;
 316        int first, last, thresh, i;
 317
 318        if (value == NULL || opaque == NULL) {
 319                DLB2_LOG_ERR("NULL pointer\n");
 320                return -EINVAL;
 321        }
 322
 323        /* command line override may take one of the following 3 forms:
 324         * qid_depth_thresh=all:<threshold_value> ... all queues
 325         * qid_depth_thresh=qidA-qidB:<threshold_value> ... a range of queues
 326         * qid_depth_thresh=qid:<threshold_value> ... just one queue
 327         */
 328        if (sscanf(value, "all:%d", &thresh) == 1) {
 329                first = 0;
 330                last = DLB2_MAX_NUM_QUEUES - 1;
 331        } else if (sscanf(value, "%d-%d:%d", &first, &last, &thresh) == 3) {
 332                /* we have everything we need */
 333        } else if (sscanf(value, "%d:%d", &first, &thresh) == 2) {
 334                last = first;
 335        } else {
 336                DLB2_LOG_ERR("Error parsing qid depth devarg. Should be all:val, qid-qid:val, or qid:val\n");
 337                return -EINVAL;
 338        }
 339
 340        if (first > last || first < 0 || last >= DLB2_MAX_NUM_QUEUES) {
 341                DLB2_LOG_ERR("Error parsing qid depth devarg, invalid qid value\n");
 342                return -EINVAL;
 343        }
 344
 345        if (thresh < 0 || thresh > DLB2_MAX_QUEUE_DEPTH_THRESHOLD) {
  346                DLB2_LOG_ERR("Error parsing qid depth devarg, threshold must be 0-%d\n",
 347                             DLB2_MAX_QUEUE_DEPTH_THRESHOLD);
 348                return -EINVAL;
 349        }
 350
 351        for (i = first; i <= last; i++)
 352                qid_thresh->val[i] = thresh; /* indexed by qid */
 353
 354        return 0;
 355}
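/*
 * Illustrative qid_depth_thresh devarg values accepted by the parser above
 * (queue ids and thresholds are examples only):
 *
 *     qid_depth_thresh=all:256      every queue gets threshold 256
 *     qid_depth_thresh=2-5:1024     queues 2 through 5 get threshold 1024
 *     qid_depth_thresh=7:32         only queue 7 gets threshold 32
 *
 * These are passed with the device's other devargs on the EAL command line,
 * e.g. (PCI address is a placeholder): -a <bdf>,qid_depth_thresh=all:256
 */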
 356
 357static void
 358dlb2_eventdev_info_get(struct rte_eventdev *dev,
 359                       struct rte_event_dev_info *dev_info)
 360{
 361        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
 362        int ret;
 363
 364        ret = dlb2_hw_query_resources(dlb2);
 365        if (ret) {
 366                const struct rte_eventdev_data *data = dev->data;
 367
 368                DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
 369                             ret, data->dev_id);
 370                /* fn is void, so fall through and return values set up in
 371                 * probe
 372                 */
 373        }
 374
 375        /* Add num resources currently owned by this domain.
 376         * These would become available if the scheduling domain were reset due
 377         * to the application recalling eventdev_configure to *reconfigure* the
 378         * domain.
 379         */
 380        evdev_dlb2_default_info.max_event_ports += dlb2->num_ldb_ports;
 381        evdev_dlb2_default_info.max_event_queues += dlb2->num_ldb_queues;
 382        evdev_dlb2_default_info.max_num_events += dlb2->max_ldb_credits;
 383
 384        evdev_dlb2_default_info.max_event_queues =
 385                RTE_MIN(evdev_dlb2_default_info.max_event_queues,
 386                        RTE_EVENT_MAX_QUEUES_PER_DEV);
 387
 388        evdev_dlb2_default_info.max_num_events =
 389                RTE_MIN(evdev_dlb2_default_info.max_num_events,
 390                        dlb2->max_num_events_override);
 391
 392        *dev_info = evdev_dlb2_default_info;
 393}
 394
 395static int
 396dlb2_hw_create_sched_domain(struct dlb2_hw_dev *handle,
 397                            const struct dlb2_hw_rsrcs *resources_asked)
 398{
 399        int ret = 0;
 400        struct dlb2_create_sched_domain_args *cfg;
 401
 402        if (resources_asked == NULL) {
 403                DLB2_LOG_ERR("dlb2: dlb2_create NULL parameter\n");
  404                ret = -EINVAL;
 405                goto error_exit;
 406        }
 407
 408        /* Map generic qm resources to dlb2 resources */
 409        cfg = &handle->cfg.resources;
 410
 411        /* DIR ports and queues */
 412
 413        cfg->num_dir_ports = resources_asked->num_dir_ports;
 414
 415        cfg->num_dir_credits = resources_asked->num_dir_credits;
 416
 417        /* LDB queues */
 418
 419        cfg->num_ldb_queues = resources_asked->num_ldb_queues;
 420
 421        /* LDB ports */
 422
 423        cfg->cos_strict = 0; /* Best effort */
 424        cfg->num_cos_ldb_ports[0] = 0;
 425        cfg->num_cos_ldb_ports[1] = 0;
 426        cfg->num_cos_ldb_ports[2] = 0;
 427        cfg->num_cos_ldb_ports[3] = 0;
 428
 429        switch (handle->cos_id) {
 430        case DLB2_COS_0:
 431                cfg->num_ldb_ports = 0; /* no don't care ports */
 432                cfg->num_cos_ldb_ports[0] =
 433                        resources_asked->num_ldb_ports;
 434                break;
 435        case DLB2_COS_1:
 436                cfg->num_ldb_ports = 0; /* no don't care ports */
 437                cfg->num_cos_ldb_ports[1] = resources_asked->num_ldb_ports;
 438                break;
 439        case DLB2_COS_2:
 440                cfg->num_ldb_ports = 0; /* no don't care ports */
 441                cfg->num_cos_ldb_ports[2] = resources_asked->num_ldb_ports;
 442                break;
 443        case DLB2_COS_3:
 444                cfg->num_ldb_ports = 0; /* no don't care ports */
 445                cfg->num_cos_ldb_ports[3] =
 446                        resources_asked->num_ldb_ports;
 447                break;
 448        case DLB2_COS_DEFAULT:
 449                /* all ldb ports are don't care ports from a cos perspective */
 450                cfg->num_ldb_ports =
 451                        resources_asked->num_ldb_ports;
 452                break;
 453        }
 454
 455        cfg->num_ldb_credits =
 456                resources_asked->num_ldb_credits;
 457
 458        cfg->num_atomic_inflights =
 459                DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE *
 460                cfg->num_ldb_queues;
 461
 462        cfg->num_hist_list_entries = resources_asked->num_ldb_ports *
 463                DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
 464
 465        DLB2_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d\n",
 466                     cfg->num_ldb_queues,
 467                     resources_asked->num_ldb_ports,
 468                     cfg->num_dir_ports,
 469                     cfg->num_atomic_inflights,
 470                     cfg->num_hist_list_entries,
 471                     cfg->num_ldb_credits,
 472                     cfg->num_dir_credits);
 473
 474        /* Configure the QM */
 475
 476        ret = dlb2_iface_sched_domain_create(handle, cfg);
 477        if (ret < 0) {
 478                DLB2_LOG_ERR("dlb2: domain create failed, ret = %d, extra status: %s\n",
 479                             ret,
 480                             dlb2_error_strings[cfg->response.status]);
 481
 482                goto error_exit;
 483        }
 484
 485        handle->domain_id = cfg->response.id;
 486        handle->cfg.configured = true;
 487
 488error_exit:
 489
 490        return ret;
 491}
 492
 493static void
 494dlb2_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
 495{
 496        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
 497        enum dlb2_configuration_state config_state;
 498        int i, j;
 499
 500        dlb2_iface_domain_reset(dlb2);
 501
 502        /* Free all dynamically allocated port memory */
 503        for (i = 0; i < dlb2->num_ports; i++)
 504                dlb2_free_qe_mem(&dlb2->ev_ports[i].qm_port);
 505
 506        /* If reconfiguring, mark the device's queues and ports as "previously
 507         * configured." If the user doesn't reconfigure them, the PMD will
 508         * reapply their previous configuration when the device is started.
 509         */
 510        config_state = (reconfig) ? DLB2_PREV_CONFIGURED :
 511                DLB2_NOT_CONFIGURED;
 512
 513        for (i = 0; i < dlb2->num_ports; i++) {
 514                dlb2->ev_ports[i].qm_port.config_state = config_state;
 515                /* Reset setup_done so ports can be reconfigured */
 516                dlb2->ev_ports[i].setup_done = false;
 517                for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
 518                        dlb2->ev_ports[i].link[j].mapped = false;
 519        }
 520
 521        for (i = 0; i < dlb2->num_queues; i++)
 522                dlb2->ev_queues[i].qm_queue.config_state = config_state;
 523
 524        for (i = 0; i < DLB2_MAX_NUM_QUEUES; i++)
 525                dlb2->ev_queues[i].setup_done = false;
 526
 527        dlb2->num_ports = 0;
 528        dlb2->num_ldb_ports = 0;
 529        dlb2->num_dir_ports = 0;
 530        dlb2->num_queues = 0;
 531        dlb2->num_ldb_queues = 0;
 532        dlb2->num_dir_queues = 0;
 533        dlb2->configured = false;
 534}
 535
 536/* Note: 1 QM instance per QM device, QM instance/device == event device */
 537static int
 538dlb2_eventdev_configure(const struct rte_eventdev *dev)
 539{
 540        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
 541        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 542        struct dlb2_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
 543        const struct rte_eventdev_data *data = dev->data;
 544        const struct rte_event_dev_config *config = &data->dev_conf;
 545        int ret;
 546
 547        /* If this eventdev is already configured, we must release the current
 548         * scheduling domain before attempting to configure a new one.
 549         */
 550        if (dlb2->configured) {
 551                dlb2_hw_reset_sched_domain(dev, true);
 552
 553                ret = dlb2_hw_query_resources(dlb2);
 554                if (ret) {
 555                        DLB2_LOG_ERR("get resources err=%d, devid=%d\n",
 556                                     ret, data->dev_id);
 557                        return ret;
 558                }
 559        }
 560
 561        if (config->nb_event_queues > rsrcs->num_queues) {
 562                DLB2_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
 563                             config->nb_event_queues,
 564                             rsrcs->num_queues);
 565                return -EINVAL;
 566        }
 567        if (config->nb_event_ports > (rsrcs->num_ldb_ports
 568                        + rsrcs->num_dir_ports)) {
 569                DLB2_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
 570                             config->nb_event_ports,
 571                             (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
 572                return -EINVAL;
 573        }
 574        if (config->nb_events_limit > rsrcs->nb_events_limit) {
 575                DLB2_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
 576                             config->nb_events_limit,
 577                             rsrcs->nb_events_limit);
 578                return -EINVAL;
 579        }
 580
 581        if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
 582                dlb2->global_dequeue_wait = false;
 583        else {
 584                uint32_t timeout32;
 585
 586                dlb2->global_dequeue_wait = true;
 587
 588                /* note size mismatch of timeout vals in eventdev lib. */
 589                timeout32 = config->dequeue_timeout_ns;
 590
 591                dlb2->global_dequeue_wait_ticks =
 592                        timeout32 * (rte_get_timer_hz() / 1E9);
 593        }
 594
 595        /* Does this platform support umonitor/umwait? */
 596        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_WAITPKG)) {
 597                if (RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 0 &&
 598                    RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE != 1) {
 599                        DLB2_LOG_ERR("invalid value (%d) for RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE, must be 0 or 1.\n",
 600                                     RTE_LIBRTE_PMD_DLB2_UMWAIT_CTL_STATE);
 601                        return -EINVAL;
 602                }
 603                dlb2->umwait_allowed = true;
 604        }
 605
 606        rsrcs->num_dir_ports = config->nb_single_link_event_port_queues;
 607        rsrcs->num_ldb_ports  = config->nb_event_ports - rsrcs->num_dir_ports;
 608        /* 1 dir queue per dir port */
 609        rsrcs->num_ldb_queues = config->nb_event_queues - rsrcs->num_dir_ports;
 610
 611        /* Scale down nb_events_limit by 4 for directed credits, since there
 612         * are 4x as many load-balanced credits.
 613         */
 614        rsrcs->num_ldb_credits = 0;
 615        rsrcs->num_dir_credits = 0;
 616
 617        if (rsrcs->num_ldb_queues)
 618                rsrcs->num_ldb_credits = config->nb_events_limit;
 619        if (rsrcs->num_dir_ports)
 620                rsrcs->num_dir_credits = config->nb_events_limit / 4;
 621        if (dlb2->num_dir_credits_override != -1)
 622                rsrcs->num_dir_credits = dlb2->num_dir_credits_override;
 623
 624        if (dlb2_hw_create_sched_domain(handle, rsrcs) < 0) {
 625                DLB2_LOG_ERR("dlb2_hw_create_sched_domain failed\n");
 626                return -ENODEV;
 627        }
 628
 629        dlb2->new_event_limit = config->nb_events_limit;
 630        __atomic_store_n(&dlb2->inflights, 0, __ATOMIC_SEQ_CST);
 631
 632        /* Save number of ports/queues for this event dev */
 633        dlb2->num_ports = config->nb_event_ports;
 634        dlb2->num_queues = config->nb_event_queues;
 635        dlb2->num_dir_ports = rsrcs->num_dir_ports;
 636        dlb2->num_ldb_ports = dlb2->num_ports - dlb2->num_dir_ports;
 637        dlb2->num_ldb_queues = dlb2->num_queues - dlb2->num_dir_ports;
 638        dlb2->num_dir_queues = dlb2->num_dir_ports;
 639        dlb2->ldb_credit_pool = rsrcs->num_ldb_credits;
 640        dlb2->max_ldb_credits = rsrcs->num_ldb_credits;
 641        dlb2->dir_credit_pool = rsrcs->num_dir_credits;
 642        dlb2->max_dir_credits = rsrcs->num_dir_credits;
 643
 644        dlb2->configured = true;
 645
 646        return 0;
 647}
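/*
 * Minimal application-side configuration sketch that satisfies the checks
 * above (all sizes are illustrative; assumes eventdev id 0 is probed):
 *
 *     struct rte_event_dev_info info;
 *     struct rte_event_dev_config cfg = {0};
 *
 *     rte_event_dev_info_get(0, &info);
 *
 *     cfg.nb_event_queues = 4;
 *     cfg.nb_event_ports = 4;
 *     cfg.nb_single_link_event_port_queues = 1;  (one directed port/queue)
 *     cfg.nb_events_limit = info.max_num_events;
 *     cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *     cfg.nb_event_port_dequeue_depth = 32;
 *     cfg.nb_event_port_enqueue_depth = 32;
 *     cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *     rte_event_dev_configure(0, &cfg);
 *
 * With these values the PMD reserves nb_events_limit load-balanced credits
 * and nb_events_limit / 4 directed credits, unless the directed credit count
 * is overridden via the devarg handled by set_num_dir_credits().
 */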
 648
 649static void
 650dlb2_eventdev_port_default_conf_get(struct rte_eventdev *dev,
 651                                    uint8_t port_id,
 652                                    struct rte_event_port_conf *port_conf)
 653{
 654        RTE_SET_USED(port_id);
 655        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
 656
 657        port_conf->new_event_threshold = dlb2->new_event_limit;
 658        port_conf->dequeue_depth = 32;
 659        port_conf->enqueue_depth = DLB2_MAX_ENQUEUE_DEPTH;
 660        port_conf->event_port_cfg = 0;
 661}
 662
 663static void
 664dlb2_eventdev_queue_default_conf_get(struct rte_eventdev *dev,
 665                                     uint8_t queue_id,
 666                                     struct rte_event_queue_conf *queue_conf)
 667{
 668        RTE_SET_USED(dev);
 669        RTE_SET_USED(queue_id);
 670
 671        queue_conf->nb_atomic_flows = 1024;
 672        queue_conf->nb_atomic_order_sequences = 64;
 673        queue_conf->event_queue_cfg = 0;
 674        queue_conf->priority = 0;
 675}
 676
 677static int32_t
 678dlb2_get_sn_allocation(struct dlb2_eventdev *dlb2, int group)
 679{
 680        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 681        struct dlb2_get_sn_allocation_args cfg;
 682        int ret;
 683
 684        cfg.group = group;
 685
 686        ret = dlb2_iface_get_sn_allocation(handle, &cfg);
 687        if (ret < 0) {
 688                DLB2_LOG_ERR("dlb2: get_sn_allocation ret=%d (driver status: %s)\n",
 689                             ret, dlb2_error_strings[cfg.response.status]);
 690                return ret;
 691        }
 692
 693        return cfg.response.id;
 694}
 695
 696static int
 697dlb2_set_sn_allocation(struct dlb2_eventdev *dlb2, int group, int num)
 698{
 699        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 700        struct dlb2_set_sn_allocation_args cfg;
 701        int ret;
 702
 703        cfg.num = num;
 704        cfg.group = group;
 705
 706        ret = dlb2_iface_set_sn_allocation(handle, &cfg);
 707        if (ret < 0) {
 708                DLB2_LOG_ERR("dlb2: set_sn_allocation ret=%d (driver status: %s)\n",
 709                             ret, dlb2_error_strings[cfg.response.status]);
 710                return ret;
 711        }
 712
 713        return ret;
 714}
 715
 716static int32_t
 717dlb2_get_sn_occupancy(struct dlb2_eventdev *dlb2, int group)
 718{
 719        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 720        struct dlb2_get_sn_occupancy_args cfg;
 721        int ret;
 722
 723        cfg.group = group;
 724
 725        ret = dlb2_iface_get_sn_occupancy(handle, &cfg);
 726        if (ret < 0) {
 727                DLB2_LOG_ERR("dlb2: get_sn_occupancy ret=%d (driver status: %s)\n",
 728                             ret, dlb2_error_strings[cfg.response.status]);
 729                return ret;
 730        }
 731
 732        return cfg.response.id;
 733}
 734
 735/* Query the current sequence number allocations and, if they conflict with the
 736 * requested LDB queue configuration, attempt to re-allocate sequence numbers.
 737 * This is best-effort; if it fails, the PMD will attempt to configure the
 738 * load-balanced queue and return an error.
 739 */
 740static void
 741dlb2_program_sn_allocation(struct dlb2_eventdev *dlb2,
 742                           const struct rte_event_queue_conf *queue_conf)
 743{
 744        int grp_occupancy[DLB2_NUM_SN_GROUPS];
 745        int grp_alloc[DLB2_NUM_SN_GROUPS];
 746        int i, sequence_numbers;
 747
 748        sequence_numbers = (int)queue_conf->nb_atomic_order_sequences;
 749
 750        for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
 751                int total_slots;
 752
 753                grp_alloc[i] = dlb2_get_sn_allocation(dlb2, i);
 754                if (grp_alloc[i] < 0)
 755                        return;
 756
 757                total_slots = DLB2_MAX_LDB_SN_ALLOC / grp_alloc[i];
 758
 759                grp_occupancy[i] = dlb2_get_sn_occupancy(dlb2, i);
 760                if (grp_occupancy[i] < 0)
 761                        return;
 762
 763                /* DLB has at least one available slot for the requested
 764                 * sequence numbers, so no further configuration required.
 765                 */
 766                if (grp_alloc[i] == sequence_numbers &&
 767                    grp_occupancy[i] < total_slots)
 768                        return;
 769        }
 770
 771        /* None of the sequence number groups are configured for the requested
 772         * sequence numbers, so we have to reconfigure one of them. This is
 773         * only possible if a group is not in use.
 774         */
 775        for (i = 0; i < DLB2_NUM_SN_GROUPS; i++) {
 776                if (grp_occupancy[i] == 0)
 777                        break;
 778        }
 779
 780        if (i == DLB2_NUM_SN_GROUPS) {
 781                DLB2_LOG_ERR("[%s()] No groups with %d sequence_numbers are available or have free slots\n",
 782                       __func__, sequence_numbers);
 783                return;
 784        }
 785
 786        /* Attempt to configure slot i with the requested number of sequence
 787         * numbers. Ignore the return value -- if this fails, the error will be
 788         * caught during subsequent queue configuration.
 789         */
 790        dlb2_set_sn_allocation(dlb2, i, sequence_numbers);
 791}
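/*
 * Worked example of the slot arithmetic above. Assuming, for illustration
 * only, DLB2_MAX_LDB_SN_ALLOC = 1024 and two sequence number groups:
 *
 *     group   allocation   total_slots = 1024/alloc   occupancy
 *       0        1024                 1                   1      (in use)
 *       1          64                16                  16      (full)
 *
 * A queue requesting nb_atomic_order_sequences = 64 finds group 1 full and
 * group 0 neither matching nor empty, so the "No groups ..." error above is
 * logged and the subsequent load-balanced queue creation is expected to fail.
 */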
 792
 793static int32_t
 794dlb2_hw_create_ldb_queue(struct dlb2_eventdev *dlb2,
 795                         struct dlb2_eventdev_queue *ev_queue,
 796                         const struct rte_event_queue_conf *evq_conf)
 797{
 798        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
 799        struct dlb2_queue *queue = &ev_queue->qm_queue;
 800        struct dlb2_create_ldb_queue_args cfg;
 801        int32_t ret;
 802        uint32_t qm_qid;
 803        int sched_type = -1;
 804
 805        if (evq_conf == NULL)
 806                return -EINVAL;
 807
 808        if (evq_conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES) {
 809                if (evq_conf->nb_atomic_order_sequences != 0)
 810                        sched_type = RTE_SCHED_TYPE_ORDERED;
 811                else
 812                        sched_type = RTE_SCHED_TYPE_PARALLEL;
 813        } else
 814                sched_type = evq_conf->schedule_type;
 815
 816        cfg.num_atomic_inflights = DLB2_NUM_ATOMIC_INFLIGHTS_PER_QUEUE;
 817        cfg.num_sequence_numbers = evq_conf->nb_atomic_order_sequences;
 818        cfg.num_qid_inflights = evq_conf->nb_atomic_order_sequences;
 819
 820        if (sched_type != RTE_SCHED_TYPE_ORDERED) {
 821                cfg.num_sequence_numbers = 0;
 822                cfg.num_qid_inflights = 2048;
 823        }
 824
  825        /* The app should set this to the number of hardware flow IDs it
  826         * wants, not the total number of flows it will use. E.g. if the app
  827         * uses 64 flows and sets the compression level to 64, it will get at
  828         * best 64 unique hashed flows in hardware.
  829         */
 830        switch (evq_conf->nb_atomic_flows) {
 831        /* Valid DLB2 compression levels */
 832        case 64:
 833        case 128:
 834        case 256:
 835        case 512:
 836        case (1 * 1024): /* 1K */
 837        case (2 * 1024): /* 2K */
 838        case (4 * 1024): /* 4K */
 839        case (64 * 1024): /* 64K */
 840                cfg.lock_id_comp_level = evq_conf->nb_atomic_flows;
 841                break;
 842        default:
 843                /* Invalid compression level */
 844                cfg.lock_id_comp_level = 0; /* no compression */
 845        }
 846
 847        if (ev_queue->depth_threshold == 0) {
 848                cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
 849                ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
 850        } else
 851                cfg.depth_threshold = ev_queue->depth_threshold;
 852
 853        ret = dlb2_iface_ldb_queue_create(handle, &cfg);
 854        if (ret < 0) {
 855                DLB2_LOG_ERR("dlb2: create LB event queue error, ret=%d (driver status: %s)\n",
 856                             ret, dlb2_error_strings[cfg.response.status]);
 857                return -EINVAL;
 858        }
 859
 860        qm_qid = cfg.response.id;
 861
 862        /* Save off queue config for debug, resource lookups, and reconfig */
 863        queue->num_qid_inflights = cfg.num_qid_inflights;
 864        queue->num_atm_inflights = cfg.num_atomic_inflights;
 865
 866        queue->sched_type = sched_type;
 867        queue->config_state = DLB2_CONFIGURED;
 868
 869        DLB2_LOG_DBG("Created LB event queue %d, nb_inflights=%d, nb_seq=%d, qid inflights=%d\n",
 870                     qm_qid,
 871                     cfg.num_atomic_inflights,
 872                     cfg.num_sequence_numbers,
 873                     cfg.num_qid_inflights);
 874
 875        return qm_qid;
 876}
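/*
 * Illustrative application-side queue config exercising the fields consumed
 * above (values are examples only):
 *
 *     struct rte_event_queue_conf qconf = {
 *             .schedule_type = RTE_SCHED_TYPE_ORDERED,
 *             .nb_atomic_order_sequences = 64,
 *             .nb_atomic_flows = 4 * 1024,
 *             .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *     };
 *
 *     rte_event_queue_setup(dev_id, qid, &qconf);
 *
 * nb_atomic_flows values other than the levels listed in the switch above
 * silently fall back to no lock ID compression.
 */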
 877
 878static int
 879dlb2_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
 880                              struct dlb2_eventdev_queue *ev_queue,
 881                              const struct rte_event_queue_conf *queue_conf)
 882{
 883        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
 884        int32_t qm_qid;
 885
 886        if (queue_conf->nb_atomic_order_sequences)
 887                dlb2_program_sn_allocation(dlb2, queue_conf);
 888
 889        qm_qid = dlb2_hw_create_ldb_queue(dlb2, ev_queue, queue_conf);
 890        if (qm_qid < 0) {
 891                DLB2_LOG_ERR("Failed to create the load-balanced queue\n");
 892
 893                return qm_qid;
 894        }
 895
 896        dlb2->qm_ldb_to_ev_queue_id[qm_qid] = ev_queue->id;
 897
 898        ev_queue->qm_queue.id = qm_qid;
 899
 900        return 0;
 901}
 902
 903static int dlb2_num_dir_queues_setup(struct dlb2_eventdev *dlb2)
 904{
 905        int i, num = 0;
 906
 907        for (i = 0; i < dlb2->num_queues; i++) {
 908                if (dlb2->ev_queues[i].setup_done &&
 909                    dlb2->ev_queues[i].qm_queue.is_directed)
 910                        num++;
 911        }
 912
 913        return num;
 914}
 915
 916static void
 917dlb2_queue_link_teardown(struct dlb2_eventdev *dlb2,
 918                         struct dlb2_eventdev_queue *ev_queue)
 919{
 920        struct dlb2_eventdev_port *ev_port;
 921        int i, j;
 922
 923        for (i = 0; i < dlb2->num_ports; i++) {
 924                ev_port = &dlb2->ev_ports[i];
 925
 926                for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
 927                        if (!ev_port->link[j].valid ||
 928                            ev_port->link[j].queue_id != ev_queue->id)
 929                                continue;
 930
 931                        ev_port->link[j].valid = false;
 932                        ev_port->num_links--;
 933                }
 934        }
 935
 936        ev_queue->num_links = 0;
 937}
 938
 939static int
 940dlb2_eventdev_queue_setup(struct rte_eventdev *dev,
 941                          uint8_t ev_qid,
 942                          const struct rte_event_queue_conf *queue_conf)
 943{
 944        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
 945        struct dlb2_eventdev_queue *ev_queue;
 946        int ret;
 947
 948        if (queue_conf == NULL)
 949                return -EINVAL;
 950
 951        if (ev_qid >= dlb2->num_queues)
 952                return -EINVAL;
 953
 954        ev_queue = &dlb2->ev_queues[ev_qid];
 955
 956        ev_queue->qm_queue.is_directed = queue_conf->event_queue_cfg &
 957                RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
 958        ev_queue->id = ev_qid;
 959        ev_queue->conf = *queue_conf;
 960
 961        if (!ev_queue->qm_queue.is_directed) {
 962                ret = dlb2_eventdev_ldb_queue_setup(dev, ev_queue, queue_conf);
 963        } else {
 964                /* The directed queue isn't setup until link time, at which
 965                 * point we know its directed port ID. Directed queue setup
 966                 * will only fail if this queue is already setup or there are
 967                 * no directed queues left to configure.
 968                 */
 969                ret = 0;
 970
 971                ev_queue->qm_queue.config_state = DLB2_NOT_CONFIGURED;
 972
 973                if (ev_queue->setup_done ||
 974                    dlb2_num_dir_queues_setup(dlb2) == dlb2->num_dir_queues)
 975                        ret = -EINVAL;
 976        }
 977
 978        /* Tear down pre-existing port->queue links */
 979        if (!ret && dlb2->run_state == DLB2_RUN_STATE_STOPPED)
 980                dlb2_queue_link_teardown(dlb2, ev_queue);
 981
 982        if (!ret)
 983                ev_queue->setup_done = true;
 984
 985        return ret;
 986}
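/*
 * For a directed queue the application sets the single-link flag instead; as
 * noted above, the hardware queue itself is not created until link time.
 * Sketch (illustrative):
 *
 *     struct rte_event_queue_conf qconf = {
 *             .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
 *     };
 *
 *     rte_event_queue_setup(dev_id, qid, &qconf);
 */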
 987
 988static int
 989dlb2_init_consume_qe(struct dlb2_port *qm_port, char *mz_name)
 990{
 991        struct dlb2_cq_pop_qe *qe;
 992
 993        qe = rte_zmalloc(mz_name,
 994                        DLB2_NUM_QES_PER_CACHE_LINE *
 995                                sizeof(struct dlb2_cq_pop_qe),
 996                        RTE_CACHE_LINE_SIZE);
 997
 998        if (qe == NULL) {
 999                DLB2_LOG_ERR("dlb2: no memory for consume_qe\n");
1000                return -ENOMEM;
1001        }
1002        qm_port->consume_qe = qe;
1003
1004        qe->qe_valid = 0;
1005        qe->qe_frag = 0;
1006        qe->qe_comp = 0;
1007        qe->cq_token = 1;
1008        /* Tokens value is 0-based; i.e. '0' returns 1 token, '1' returns 2,
1009         * and so on.
1010         */
1011        qe->tokens = 0; /* set at run time */
1012        qe->meas_lat = 0;
1013        qe->no_dec = 0;
1014        /* Completion IDs are disabled */
1015        qe->cmp_id = 0;
1016
1017        return 0;
1018}
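/*
 * Example of the 0-based token encoding noted above: to return 4 CQ tokens at
 * run time, the datapath sets qe->tokens = 3 before issuing the pop.
 */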
1019
1020static int
1021dlb2_init_int_arm_qe(struct dlb2_port *qm_port, char *mz_name)
1022{
1023        struct dlb2_enqueue_qe *qe;
1024
1025        qe = rte_zmalloc(mz_name,
1026                        DLB2_NUM_QES_PER_CACHE_LINE *
1027                                sizeof(struct dlb2_enqueue_qe),
1028                        RTE_CACHE_LINE_SIZE);
1029
1030        if (qe == NULL) {
1031                DLB2_LOG_ERR("dlb2: no memory for complete_qe\n");
1032                return -ENOMEM;
1033        }
1034        qm_port->int_arm_qe = qe;
1035
1036        /* V2 - INT ARM is CQ_TOKEN + FRAG */
1037        qe->qe_valid = 0;
1038        qe->qe_frag = 1;
1039        qe->qe_comp = 0;
1040        qe->cq_token = 1;
1041        qe->meas_lat = 0;
1042        qe->no_dec = 0;
1043        /* Completion IDs are disabled */
1044        qe->cmp_id = 0;
1045
1046        return 0;
1047}
1048
1049static int
1050dlb2_init_qe_mem(struct dlb2_port *qm_port, char *mz_name)
1051{
1052        int ret, sz;
1053
1054        sz = DLB2_NUM_QES_PER_CACHE_LINE * sizeof(struct dlb2_enqueue_qe);
1055
1056        qm_port->qe4 = rte_zmalloc(mz_name, sz, RTE_CACHE_LINE_SIZE);
1057
1058        if (qm_port->qe4 == NULL) {
1059                DLB2_LOG_ERR("dlb2: no qe4 memory\n");
1060                ret = -ENOMEM;
1061                goto error_exit;
1062        }
1063
1064        ret = dlb2_init_int_arm_qe(qm_port, mz_name);
1065        if (ret < 0) {
1066                DLB2_LOG_ERR("dlb2: dlb2_init_int_arm_qe ret=%d\n", ret);
1067                goto error_exit;
1068        }
1069
1070        ret = dlb2_init_consume_qe(qm_port, mz_name);
1071        if (ret < 0) {
1072                DLB2_LOG_ERR("dlb2: dlb2_init_consume_qe ret=%d\n", ret);
1073                goto error_exit;
1074        }
1075
1076        return 0;
1077
1078error_exit:
1079
1080        dlb2_free_qe_mem(qm_port);
1081
1082        return ret;
1083}
1084
1085static inline uint16_t
1086dlb2_event_enqueue_delayed(void *event_port,
1087                           const struct rte_event events[]);
1088
1089static inline uint16_t
1090dlb2_event_enqueue_burst_delayed(void *event_port,
1091                                 const struct rte_event events[],
1092                                 uint16_t num);
1093
1094static inline uint16_t
1095dlb2_event_enqueue_new_burst_delayed(void *event_port,
1096                                     const struct rte_event events[],
1097                                     uint16_t num);
1098
1099static inline uint16_t
1100dlb2_event_enqueue_forward_burst_delayed(void *event_port,
1101                                         const struct rte_event events[],
1102                                         uint16_t num);
1103
1104static int
1105dlb2_hw_create_ldb_port(struct dlb2_eventdev *dlb2,
1106                        struct dlb2_eventdev_port *ev_port,
1107                        uint32_t dequeue_depth,
1108                        uint32_t enqueue_depth)
1109{
1110        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1111        struct dlb2_create_ldb_port_args cfg = { {0} };
1112        int ret;
1113        struct dlb2_port *qm_port = NULL;
1114        char mz_name[RTE_MEMZONE_NAMESIZE];
1115        uint32_t qm_port_id;
1116        uint16_t ldb_credit_high_watermark;
1117        uint16_t dir_credit_high_watermark;
1118
1119        if (handle == NULL)
1120                return -EINVAL;
1121
1122        if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
 1123                DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be at least %d\n",
1124                             DLB2_MIN_CQ_DEPTH);
1125                return -EINVAL;
1126        }
1127
1128        if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1129                DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1130                             DLB2_MIN_ENQUEUE_DEPTH);
1131                return -EINVAL;
1132        }
1133
1134        rte_spinlock_lock(&handle->resource_lock);
1135
1136        /* We round up to the next power of 2 if necessary */
1137        cfg.cq_depth = rte_align32pow2(dequeue_depth);
1138        cfg.cq_depth_threshold = 1;
1139
1140        cfg.cq_history_list_size = DLB2_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
1141
1142        if (handle->cos_id == DLB2_COS_DEFAULT)
1143                cfg.cos_id = 0;
1144        else
1145                cfg.cos_id = handle->cos_id;
1146
1147        cfg.cos_strict = 0;
1148
1149        /* User controls the LDB high watermark via enqueue depth. The DIR high
1150         * watermark is equal, unless the directed credit pool is too small.
1151         */
1152        ldb_credit_high_watermark = enqueue_depth;
1153
1154        /* If there are no directed ports, the kernel driver will ignore this
1155         * port's directed credit settings. Don't use enqueue_depth if it would
1156         * require more directed credits than are available.
1157         */
1158        dir_credit_high_watermark =
1159                RTE_MIN(enqueue_depth,
1160                        handle->cfg.num_dir_credits / dlb2->num_ports);
1161
1162        /* Per QM values */
1163
1164        ret = dlb2_iface_ldb_port_create(handle, &cfg,  dlb2->poll_mode);
1165        if (ret < 0) {
1166                DLB2_LOG_ERR("dlb2: dlb2_ldb_port_create error, ret=%d (driver status: %s)\n",
1167                             ret, dlb2_error_strings[cfg.response.status]);
1168                goto error_exit;
1169        }
1170
1171        qm_port_id = cfg.response.id;
1172
1173        DLB2_LOG_DBG("dlb2: ev_port %d uses qm LB port %d <<<<<\n",
1174                     ev_port->id, qm_port_id);
1175
1176        qm_port = &ev_port->qm_port;
1177        qm_port->ev_port = ev_port; /* back ptr */
1178        qm_port->dlb2 = dlb2; /* back ptr */
1179        /*
1180         * Allocate and init local qe struct(s).
1181         * Note: MOVDIR64 requires the enqueue QE (qe4) to be aligned.
1182         */
1183
1184        snprintf(mz_name, sizeof(mz_name), "dlb2_ldb_port%d",
1185                 ev_port->id);
1186
1187        ret = dlb2_init_qe_mem(qm_port, mz_name);
1188        if (ret < 0) {
1189                DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1190                goto error_exit;
1191        }
1192
1193        qm_port->id = qm_port_id;
1194
1195        qm_port->cached_ldb_credits = 0;
1196        qm_port->cached_dir_credits = 0;
1197        /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1198         * the effective depth is smaller.
1199         */
1200        qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1201        qm_port->cq_idx = 0;
1202        qm_port->cq_idx_unmasked = 0;
1203
1204        if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1205                qm_port->cq_depth_mask = (qm_port->cq_depth * 4) - 1;
1206        else
1207                qm_port->cq_depth_mask = qm_port->cq_depth - 1;
1208
1209        qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1210        /* starting value of gen bit - it toggles at wrap time */
1211        qm_port->gen_bit = 1;
1212
1213        qm_port->int_armed = false;
1214
1215        /* Save off for later use in info and lookup APIs. */
1216        qm_port->qid_mappings = &dlb2->qm_ldb_to_ev_queue_id[0];
1217
1218        qm_port->dequeue_depth = dequeue_depth;
1219        qm_port->token_pop_thresh = dequeue_depth;
1220
1221        /* The default enqueue functions do not include delayed-pop support for
1222         * performance reasons.
1223         */
1224        if (qm_port->token_pop_mode == DELAYED_POP) {
1225                dlb2->event_dev->enqueue = dlb2_event_enqueue_delayed;
1226                dlb2->event_dev->enqueue_burst =
1227                        dlb2_event_enqueue_burst_delayed;
1228                dlb2->event_dev->enqueue_new_burst =
1229                        dlb2_event_enqueue_new_burst_delayed;
1230                dlb2->event_dev->enqueue_forward_burst =
1231                        dlb2_event_enqueue_forward_burst_delayed;
1232        }
1233
1234        qm_port->owed_tokens = 0;
1235        qm_port->issued_releases = 0;
1236
1237        /* Save config message too. */
1238        rte_memcpy(&qm_port->cfg.ldb, &cfg, sizeof(qm_port->cfg.ldb));
1239
1240        /* update state */
1241        qm_port->state = PORT_STARTED; /* enabled at create time */
1242        qm_port->config_state = DLB2_CONFIGURED;
1243
1244        qm_port->dir_credits = dir_credit_high_watermark;
1245        qm_port->ldb_credits = ldb_credit_high_watermark;
1246        qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1247        qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1248
1249        DLB2_LOG_DBG("dlb2: created ldb port %d, depth = %d, ldb credits=%d, dir credits=%d\n",
1250                     qm_port_id,
1251                     dequeue_depth,
1252                     qm_port->ldb_credits,
1253                     qm_port->dir_credits);
1254
1255        rte_spinlock_unlock(&handle->resource_lock);
1256
1257        return 0;
1258
1259error_exit:
1260
1261        if (qm_port)
1262                dlb2_free_qe_mem(qm_port);
1263
1264        rte_spinlock_unlock(&handle->resource_lock);
1265
1266        DLB2_LOG_ERR("dlb2: create ldb port failed!\n");
1267
1268        return ret;
1269}
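/*
 * Worked example of the CQ sizing above (the arithmetic follows directly from
 * the code): a load-balanced port created with dequeue_depth = 32 gets
 * cq_depth = 32 (already a power of two). In sparse poll mode, where each
 * valid QE occupies a full cache line, the index mask covers four times the
 * depth: cq_depth_mask = (32 * 4) - 1 = 127 and
 * gen_bit_shift = __builtin_popcount(127) = 7. In standard mode the mask is
 * 31 and the shift is 5.
 */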
1270
1271static void
1272dlb2_port_link_teardown(struct dlb2_eventdev *dlb2,
1273                        struct dlb2_eventdev_port *ev_port)
1274{
1275        struct dlb2_eventdev_queue *ev_queue;
1276        int i;
1277
1278        for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1279                if (!ev_port->link[i].valid)
1280                        continue;
1281
1282                ev_queue = &dlb2->ev_queues[ev_port->link[i].queue_id];
1283
1284                ev_port->link[i].valid = false;
1285                ev_port->num_links--;
1286                ev_queue->num_links--;
1287        }
1288}
1289
1290static int
1291dlb2_hw_create_dir_port(struct dlb2_eventdev *dlb2,
1292                        struct dlb2_eventdev_port *ev_port,
1293                        uint32_t dequeue_depth,
1294                        uint32_t enqueue_depth)
1295{
1296        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1297        struct dlb2_create_dir_port_args cfg = { {0} };
1298        int ret;
1299        struct dlb2_port *qm_port = NULL;
1300        char mz_name[RTE_MEMZONE_NAMESIZE];
1301        uint32_t qm_port_id;
1302        uint16_t ldb_credit_high_watermark;
1303        uint16_t dir_credit_high_watermark;
1304
1305        if (dlb2 == NULL || handle == NULL)
1306                return -EINVAL;
1307
1308        if (dequeue_depth < DLB2_MIN_CQ_DEPTH) {
1309                DLB2_LOG_ERR("dlb2: invalid dequeue_depth, must be %d-%d\n",
1310                             DLB2_MIN_CQ_DEPTH, DLB2_MAX_INPUT_QUEUE_DEPTH);
1311                return -EINVAL;
1312        }
1313
1314        if (enqueue_depth < DLB2_MIN_ENQUEUE_DEPTH) {
1315                DLB2_LOG_ERR("dlb2: invalid enqueue_depth, must be at least %d\n",
1316                             DLB2_MIN_ENQUEUE_DEPTH);
1317                return -EINVAL;
1318        }
1319
1320        rte_spinlock_lock(&handle->resource_lock);
1321
1322        /* Directed queues are configured at link time. */
1323        cfg.queue_id = -1;
1324
1325        /* We round up to the next power of 2 if necessary */
1326        cfg.cq_depth = rte_align32pow2(dequeue_depth);
1327        cfg.cq_depth_threshold = 1;
1328
1329        /* User controls the LDB high watermark via enqueue depth. The DIR high
1330         * watermark is equal, unless the directed credit pool is too small.
1331         */
1332        ldb_credit_high_watermark = enqueue_depth;
1333
1334        /* Don't use enqueue_depth if it would require more directed credits
1335         * than are available.
1336         */
1337        dir_credit_high_watermark =
1338                RTE_MIN(enqueue_depth,
1339                        handle->cfg.num_dir_credits / dlb2->num_ports);
1340
1341        /* Per QM values */
1342
1343        ret = dlb2_iface_dir_port_create(handle, &cfg,  dlb2->poll_mode);
1344        if (ret < 0) {
1345                DLB2_LOG_ERR("dlb2: dlb2_dir_port_create error, ret=%d (driver status: %s)\n",
1346                             ret, dlb2_error_strings[cfg.response.status]);
1347                goto error_exit;
1348        }
1349
1350        qm_port_id = cfg.response.id;
1351
1352        DLB2_LOG_DBG("dlb2: ev_port %d uses qm DIR port %d <<<<<\n",
1353                     ev_port->id, qm_port_id);
1354
1355        qm_port = &ev_port->qm_port;
1356        qm_port->ev_port = ev_port; /* back ptr */
1357        qm_port->dlb2 = dlb2;  /* back ptr */
1358
1359        /*
1360         * Init local qe struct(s).
1361         * Note: MOVDIR64 requires the enqueue QE to be aligned
1362         */
1363
1364        snprintf(mz_name, sizeof(mz_name), "dlb2_dir_port%d",
1365                 ev_port->id);
1366
1367        ret = dlb2_init_qe_mem(qm_port, mz_name);
1368
1369        if (ret < 0) {
1370                DLB2_LOG_ERR("dlb2: init_qe_mem failed, ret=%d\n", ret);
1371                goto error_exit;
1372        }
1373
1374        qm_port->id = qm_port_id;
1375
1376        qm_port->cached_ldb_credits = 0;
1377        qm_port->cached_dir_credits = 0;
1378        /* CQs with depth < 8 use an 8-entry queue, but withhold credits so
1379         * the effective depth is smaller.
1380         */
1381        qm_port->cq_depth = cfg.cq_depth <= 8 ? 8 : cfg.cq_depth;
1382        qm_port->cq_idx = 0;
1383        qm_port->cq_idx_unmasked = 0;
1384
1385        if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE)
1386                qm_port->cq_depth_mask = (cfg.cq_depth * 4) - 1;
1387        else
1388                qm_port->cq_depth_mask = cfg.cq_depth - 1;
1389
1390        qm_port->gen_bit_shift = __builtin_popcount(qm_port->cq_depth_mask);
1391        /* starting value of gen bit - it toggles at wrap time */
1392        qm_port->gen_bit = 1;
1393
1394        qm_port->int_armed = false;
1395
1396        /* Save off for later use in info and lookup APIs. */
1397        qm_port->qid_mappings = &dlb2->qm_dir_to_ev_queue_id[0];
1398
1399        qm_port->dequeue_depth = dequeue_depth;
1400
1401        /* Directed ports are auto-pop, by default. */
1402        qm_port->token_pop_mode = AUTO_POP;
1403        qm_port->owed_tokens = 0;
1404        qm_port->issued_releases = 0;
1405
1406        /* Save config message too. */
1407        rte_memcpy(&qm_port->cfg.dir, &cfg, sizeof(qm_port->cfg.dir));
1408
1409        /* update state */
1410        qm_port->state = PORT_STARTED; /* enabled at create time */
1411        qm_port->config_state = DLB2_CONFIGURED;
1412
1413        qm_port->dir_credits = dir_credit_high_watermark;
1414        qm_port->ldb_credits = ldb_credit_high_watermark;
1415        qm_port->credit_pool[DLB2_DIR_QUEUE] = &dlb2->dir_credit_pool;
1416        qm_port->credit_pool[DLB2_LDB_QUEUE] = &dlb2->ldb_credit_pool;
1417
1418        DLB2_LOG_DBG("dlb2: created dir port %d, depth = %d cr=%d,%d\n",
1419                     qm_port_id,
1420                     dequeue_depth,
1421                     dir_credit_high_watermark,
1422                     ldb_credit_high_watermark);
1423
1424        rte_spinlock_unlock(&handle->resource_lock);
1425
1426        return 0;
1427
1428error_exit:
1429
1430        if (qm_port)
1431                dlb2_free_qe_mem(qm_port);
1432
1433        rte_spinlock_unlock(&handle->resource_lock);
1434
1435        DLB2_LOG_ERR("dlb2: create dir port failed!\n");
1436
1437        return ret;
1438}
1439
1440static int
1441dlb2_eventdev_port_setup(struct rte_eventdev *dev,
1442                         uint8_t ev_port_id,
1443                         const struct rte_event_port_conf *port_conf)
1444{
1445        struct dlb2_eventdev *dlb2;
1446        struct dlb2_eventdev_port *ev_port;
1447        int ret;
1448
1449        if (dev == NULL || port_conf == NULL) {
1450                DLB2_LOG_ERR("Null parameter\n");
1451                return -EINVAL;
1452        }
1453
1454        dlb2 = dlb2_pmd_priv(dev);
1455
1456        if (ev_port_id >= DLB2_MAX_NUM_PORTS)
1457                return -EINVAL;
1458
1459        if (port_conf->dequeue_depth >
1460                evdev_dlb2_default_info.max_event_port_dequeue_depth ||
1461            port_conf->enqueue_depth >
1462                evdev_dlb2_default_info.max_event_port_enqueue_depth)
1463                return -EINVAL;
1464
1465        ev_port = &dlb2->ev_ports[ev_port_id];
1466        /* configured? */
1467        if (ev_port->setup_done) {
1468                DLB2_LOG_ERR("evport %d is already configured\n", ev_port_id);
1469                return -EINVAL;
1470        }
1471
1472        ev_port->qm_port.is_directed = port_conf->event_port_cfg &
1473                RTE_EVENT_PORT_CFG_SINGLE_LINK;
1474
1475        if (!ev_port->qm_port.is_directed) {
1476                ret = dlb2_hw_create_ldb_port(dlb2,
1477                                              ev_port,
1478                                              port_conf->dequeue_depth,
1479                                              port_conf->enqueue_depth);
1480                if (ret < 0) {
 1481                        DLB2_LOG_ERR("Failed to create the LDB port, ev_port_id=%d\n",
1482                                     ev_port_id);
1483
1484                        return ret;
1485                }
1486        } else {
1487                ret = dlb2_hw_create_dir_port(dlb2,
1488                                              ev_port,
1489                                              port_conf->dequeue_depth,
1490                                              port_conf->enqueue_depth);
1491                if (ret < 0) {
1492                        DLB2_LOG_ERR("Failed to create the DIR port\n");
1493                        return ret;
1494                }
1495        }
1496
1497        /* Save off port config for reconfig */
1498        ev_port->conf = *port_conf;
1499
1500        ev_port->id = ev_port_id;
1501        ev_port->enq_configured = true;
1502        ev_port->setup_done = true;
1503        ev_port->inflight_max = port_conf->new_event_threshold;
1504        ev_port->implicit_release = !(port_conf->event_port_cfg &
1505                  RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
1506        ev_port->outstanding_releases = 0;
1507        ev_port->inflight_credits = 0;
1508        ev_port->credit_update_quanta = RTE_LIBRTE_PMD_DLB2_SW_CREDIT_QUANTA;
1509        ev_port->dlb2 = dlb2; /* reverse link */
1510
1511        /* Tear down pre-existing port->queue links */
1512        if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1513                dlb2_port_link_teardown(dlb2, &dlb2->ev_ports[ev_port_id]);
1514
1515        dev->data->ports[ev_port_id] = &dlb2->ev_ports[ev_port_id];
1516
1517        return 0;
1518}
1519
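/* Ask the hardware interface layer (dlb2_iface_*) to map a load-balanced
 * queue to the given port's CQ at the requested priority. The eventdev
 * priority is converted to the DLB2 priority range by EV_TO_DLB2_PRIO().
 */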
1520static int16_t
1521dlb2_hw_map_ldb_qid_to_port(struct dlb2_hw_dev *handle,
1522                            uint32_t qm_port_id,
1523                            uint16_t qm_qid,
1524                            uint8_t priority)
1525{
1526        struct dlb2_map_qid_args cfg;
1527        int32_t ret;
1528
1529        if (handle == NULL)
1530                return -EINVAL;
1531
1532        /* Build message */
1533        cfg.port_id = qm_port_id;
1534        cfg.qid = qm_qid;
1535        cfg.priority = EV_TO_DLB2_PRIO(priority);
1536
1537        ret = dlb2_iface_map_qid(handle, &cfg);
1538        if (ret < 0) {
1539                DLB2_LOG_ERR("dlb2: map qid error, ret=%d (driver status: %s)\n",
1540                             ret, dlb2_error_strings[cfg.response.status]);
1541                DLB2_LOG_ERR("dlb2: grp=%d, qm_port=%d, qm_qid=%d prio=%d\n",
1542                             handle->domain_id, cfg.port_id,
1543                             cfg.qid,
1544                             cfg.priority);
1545        } else {
1546                DLB2_LOG_DBG("dlb2: mapped queue %d to qm_port %d\n",
1547                             qm_qid, qm_port_id);
1548        }
1549
1550        return ret;
1551}
1552
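/* Link a load-balanced queue to an event port. The port's link[] shadow
 * array is scanned first: if the queue is already mapped at this priority
 * the call is a no-op; otherwise the first suitable slot is used and the
 * hardware mapping is programmed. The slot is marked mapped on success.
 */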
1553static int
1554dlb2_event_queue_join_ldb(struct dlb2_eventdev *dlb2,
1555                          struct dlb2_eventdev_port *ev_port,
1556                          struct dlb2_eventdev_queue *ev_queue,
1557                          uint8_t priority)
1558{
1559        int first_avail = -1;
1560        int ret, i;
1561
1562        for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1563                if (ev_port->link[i].valid) {
1564                        if (ev_port->link[i].queue_id == ev_queue->id &&
1565                            ev_port->link[i].priority == priority) {
1566                                if (ev_port->link[i].mapped)
1567                                        return 0; /* already mapped */
1568                                first_avail = i;
1569                        }
1570                } else if (first_avail == -1)
1571                        first_avail = i;
1572        }
1573        if (first_avail == -1) {
1574                DLB2_LOG_ERR("dlb2: qm_port %d has no available QID slots.\n",
1575                             ev_port->qm_port.id);
1576                return -EINVAL;
1577        }
1578
1579        ret = dlb2_hw_map_ldb_qid_to_port(&dlb2->qm_instance,
1580                                          ev_port->qm_port.id,
1581                                          ev_queue->qm_queue.id,
1582                                          priority);
1583
1584        if (!ret)
1585                ev_port->link[first_avail].mapped = true;
1586
1587        return ret;
1588}
1589
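/* Create a directed queue on the given (already configured) directed port.
 * If the application did not set a depth threshold, the PMD default is
 * used. Returns the hardware queue ID on success, or -EINVAL on failure.
 */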
1590static int32_t
1591dlb2_hw_create_dir_queue(struct dlb2_eventdev *dlb2,
1592                         struct dlb2_eventdev_queue *ev_queue,
1593                         int32_t qm_port_id)
1594{
1595        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
1596        struct dlb2_create_dir_queue_args cfg;
1597        int32_t ret;
1598
1599        /* The directed port is always configured before its queue */
1600        cfg.port_id = qm_port_id;
1601
1602        if (ev_queue->depth_threshold == 0) {
1603                cfg.depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1604                ev_queue->depth_threshold = RTE_PMD_DLB2_DEFAULT_DEPTH_THRESH;
1605        } else
1606                cfg.depth_threshold = ev_queue->depth_threshold;
1607
1608        ret = dlb2_iface_dir_queue_create(handle, &cfg);
1609        if (ret < 0) {
1610                DLB2_LOG_ERR("dlb2: create DIR event queue error, ret=%d (driver status: %s)\n",
1611                             ret, dlb2_error_strings[cfg.response.status]);
1612                return -EINVAL;
1613        }
1614
1615        return cfg.response.id;
1616}
1617
1618static int
1619dlb2_eventdev_dir_queue_setup(struct dlb2_eventdev *dlb2,
1620                              struct dlb2_eventdev_queue *ev_queue,
1621                              struct dlb2_eventdev_port *ev_port)
1622{
1623        int32_t qm_qid;
1624
1625        qm_qid = dlb2_hw_create_dir_queue(dlb2, ev_queue, ev_port->qm_port.id);
1626
1627        if (qm_qid < 0) {
1628                DLB2_LOG_ERR("Failed to create the DIR queue\n");
1629                return qm_qid;
1630        }
1631
1632        dlb2->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
1633
1634        ev_queue->qm_queue.id = qm_qid;
1635
1636        return 0;
1637}
1638
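/* Perform a single port->queue link in hardware. While the device is
 * stopped this is a no-op; the link is recorded by the caller and applied
 * later by dlb2_eventdev_apply_port_links() during dev_start.
 */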
1639static int
1640dlb2_do_port_link(struct rte_eventdev *dev,
1641                  struct dlb2_eventdev_queue *ev_queue,
1642                  struct dlb2_eventdev_port *ev_port,
1643                  uint8_t prio)
1644{
1645        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1646        int err;
1647
1648        /* Don't link until start time. */
1649        if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1650                return 0;
1651
1652        if (ev_queue->qm_queue.is_directed)
1653                err = dlb2_eventdev_dir_queue_setup(dlb2, ev_queue, ev_port);
1654        else
1655                err = dlb2_event_queue_join_ldb(dlb2, ev_port, ev_queue, prio);
1656
1657        if (err) {
1658                DLB2_LOG_ERR("port link failure for %s ev_q %d, ev_port %d\n",
1659                             ev_queue->qm_queue.is_directed ? "DIR" : "LDB",
1660                             ev_queue->id, ev_port->id);
1661
1662                rte_errno = err;
1663                return -1;
1664        }
1665
1666        return 0;
1667}
1668
1669static int
1670dlb2_validate_port_link(struct dlb2_eventdev_port *ev_port,
1671                        uint8_t queue_id,
1672                        bool link_exists,
1673                        int index)
1674{
1675        struct dlb2_eventdev *dlb2 = ev_port->dlb2;
1676        struct dlb2_eventdev_queue *ev_queue;
1677        bool port_is_dir, queue_is_dir;
1678
1679        if (queue_id > dlb2->num_queues) {
1680                rte_errno = -EINVAL;
1681                return -1;
1682        }
1683
1684        ev_queue = &dlb2->ev_queues[queue_id];
1685
1686        if (!ev_queue->setup_done &&
1687            ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED) {
1688                rte_errno = -EINVAL;
1689                return -1;
1690        }
1691
1692        port_is_dir = ev_port->qm_port.is_directed;
1693        queue_is_dir = ev_queue->qm_queue.is_directed;
1694
1695        if (port_is_dir != queue_is_dir) {
1696                DLB2_LOG_ERR("%s queue %u can't link to %s port %u\n",
1697                             queue_is_dir ? "DIR" : "LDB", ev_queue->id,
1698                             port_is_dir ? "DIR" : "LDB", ev_port->id);
1699
1700                rte_errno = -EINVAL;
1701                return -1;
1702        }
1703
1704        /* Check if there is space for the requested link */
1705        if (!link_exists && index == -1) {
1706                DLB2_LOG_ERR("no space for new link\n");
1707                rte_errno = -ENOSPC;
1708                return -1;
1709        }
1710
1711        /* Check if the directed port is already linked */
1712        if (ev_port->qm_port.is_directed && ev_port->num_links > 0 &&
1713            !link_exists) {
1714                DLB2_LOG_ERR("Can't link DIR port %d to >1 queues\n",
1715                             ev_port->id);
1716                rte_errno = -EINVAL;
1717                return -1;
1718        }
1719
1720        /* Check if the directed queue is already linked */
1721        if (ev_queue->qm_queue.is_directed && ev_queue->num_links > 0 &&
1722            !link_exists) {
1723                DLB2_LOG_ERR("Can't link DIR queue %d to >1 ports\n",
1724                             ev_queue->id);
1725                rte_errno = -EINVAL;
1726                return -1;
1727        }
1728
1729        return 0;
1730}
1731
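/* PMD back end of rte_event_port_link(). Each requested link is recorded
 * in the port's link[] shadow array; the hardware mapping itself is only
 * programmed when the device is running (otherwise it is deferred to
 * dev_start). Returns the number of links processed; on error, rte_errno
 * is set by the validation/link helpers.
 *
 * Example (application side, illustrative only):
 *     uint8_t queues[] = {0, 1};
 *     uint8_t prios[] = {0, 0};
 *     rte_event_port_link(dev_id, port_id, queues, prios, 2);
 */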
1732static int
1733dlb2_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
1734                        const uint8_t queues[], const uint8_t priorities[],
1735                        uint16_t nb_links)
1736
1737{
1738        struct dlb2_eventdev_port *ev_port = event_port;
1739        struct dlb2_eventdev *dlb2;
1740        int i, j;
1741
1742        RTE_SET_USED(dev);
1743
1744        if (ev_port == NULL) {
1745                DLB2_LOG_ERR("dlb2: evport is NULL\n");
1746                rte_errno = -EINVAL;
1747                return 0;
1748        }
1749
1750        if (!ev_port->setup_done &&
1751            ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED) {
1752                DLB2_LOG_ERR("dlb2: evport not setup\n");
1753                rte_errno = -EINVAL;
1754                return 0;
1755        }
1756
1757        /* Note: rte_event_port_link() ensures the PMD won't receive a NULL
1758         * queues pointer.
1759         */
1760        if (nb_links == 0) {
1761                DLB2_LOG_DBG("dlb2: nb_links is 0\n");
1762                return 0; /* Ignore and return success */
1763        }
1764
1765        dlb2 = ev_port->dlb2;
1766
1767        DLB2_LOG_DBG("Linking %u queues to %s port %d\n",
1768                     nb_links,
1769                     ev_port->qm_port.is_directed ? "DIR" : "LDB",
1770                     ev_port->id);
1771
1772        for (i = 0; i < nb_links; i++) {
1773                struct dlb2_eventdev_queue *ev_queue;
1774                uint8_t queue_id, prio;
1775                bool found = false;
1776                int index = -1;
1777
1778                queue_id = queues[i];
1779                prio = priorities[i];
1780
1781                /* Check if the link already exists. */
1782                for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1783                        if (ev_port->link[j].valid) {
1784                                if (ev_port->link[j].queue_id == queue_id) {
1785                                        found = true;
1786                                        index = j;
1787                                        break;
1788                                }
1789                        } else if (index == -1) {
1790                                index = j;
1791                        }
1792
1793                /* could not link */
1794                if (index == -1)
1795                        break;
1796
1797                /* Check if already linked at the requested priority */
1798                if (found && ev_port->link[index].priority == prio)
1799                        continue;
1800
1801                if (dlb2_validate_port_link(ev_port, queue_id, found, index))
1802                        break; /* return index of offending queue */
1803
1804                ev_queue = &dlb2->ev_queues[queue_id];
1805
1806                if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
1807                        break; /* return index of offending queue */
1808
1809                ev_queue->num_links++;
1810
1811                ev_port->link[index].queue_id = queue_id;
1812                ev_port->link[index].priority = prio;
1813                ev_port->link[index].valid = true;
1814                /* If the entry already existed, this was only a prio change */
1815                if (!found)
1816                        ev_port->num_links++;
1817        }
1818        return i;
1819}
1820
1821static int16_t
1822dlb2_hw_unmap_ldb_qid_from_port(struct dlb2_hw_dev *handle,
1823                                uint32_t qm_port_id,
1824                                uint16_t qm_qid)
1825{
1826        struct dlb2_unmap_qid_args cfg;
1827        int32_t ret;
1828
1829        if (handle == NULL)
1830                return -EINVAL;
1831
1832        cfg.port_id = qm_port_id;
1833        cfg.qid = qm_qid;
1834
1835        ret = dlb2_iface_unmap_qid(handle, &cfg);
1836        if (ret < 0)
1837                DLB2_LOG_ERR("dlb2: unmap qid error, ret=%d (driver status: %s)\n",
1838                             ret, dlb2_error_strings[cfg.response.status]);
1839
1840        return ret;
1841}
1842
1843static int
1844dlb2_event_queue_detach_ldb(struct dlb2_eventdev *dlb2,
1845                            struct dlb2_eventdev_port *ev_port,
1846                            struct dlb2_eventdev_queue *ev_queue)
1847{
1848        int ret, i;
1849
1850        /* Don't unlink until start time. */
1851        if (dlb2->run_state == DLB2_RUN_STATE_STOPPED)
1852                return 0;
1853
1854        for (i = 0; i < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
1855                if (ev_port->link[i].valid &&
1856                    ev_port->link[i].queue_id == ev_queue->id)
1857                        break; /* found */
1858        }
1859
1860        /* This is expected with the eventdev API, which
1861         * blindly attempts to unmap all queues.
1862         */
1863        if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
1864                DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
1865                             ev_queue->qm_queue.id,
1866                             ev_port->qm_port.id);
1867                return 0;
1868        }
1869
1870        ret = dlb2_hw_unmap_ldb_qid_from_port(&dlb2->qm_instance,
1871                                              ev_port->qm_port.id,
1872                                              ev_queue->qm_queue.id);
1873        if (!ret)
1874                ev_port->link[i].mapped = false;
1875
1876        return ret;
1877}
1878
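/* PMD back end of rte_event_port_unlink(). Unlink requests on directed
 * ports are ignored but reported as successful. For load-balanced ports,
 * each linked queue is unmapped in hardware (a no-op while the device is
 * stopped) and its shadow link entry is cleared. Returns the number of
 * unlinks completed, or the index of the offending queue on error.
 */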
1879static int
1880dlb2_eventdev_port_unlink(struct rte_eventdev *dev, void *event_port,
1881                          uint8_t queues[], uint16_t nb_unlinks)
1882{
1883        struct dlb2_eventdev_port *ev_port = event_port;
1884        struct dlb2_eventdev *dlb2;
1885        int i;
1886
1887        RTE_SET_USED(dev);
1888
1889        if (!ev_port->setup_done) {
1890                DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1891                             ev_port->id);
1892                rte_errno = -EINVAL;
1893                return 0;
1894        }
1895
1896        if (queues == NULL || nb_unlinks == 0) {
1897                DLB2_LOG_DBG("dlb2: queues is NULL or nb_unlinks is 0\n");
1898                return 0; /* Ignore and return success */
1899        }
1900
1901        if (ev_port->qm_port.is_directed) {
1902                DLB2_LOG_DBG("dlb2: ignore unlink from dir port %d\n",
1903                             ev_port->id);
1904                rte_errno = 0;
1905                return nb_unlinks; /* as if success */
1906        }
1907
1908        dlb2 = ev_port->dlb2;
1909
1910        for (i = 0; i < nb_unlinks; i++) {
1911                struct dlb2_eventdev_queue *ev_queue;
1912                int ret, j;
1913
1914                if (queues[i] >= dlb2->num_queues) {
1915                        DLB2_LOG_ERR("dlb2: invalid queue id %d\n", queues[i]);
1916                        rte_errno = -EINVAL;
1917                        return i; /* return index of offending queue */
1918                }
1919
1920                ev_queue = &dlb2->ev_queues[queues[i]];
1921
1922                /* Does a link exist? */
1923                for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++)
1924                        if (ev_port->link[j].queue_id == queues[i] &&
1925                            ev_port->link[j].valid)
1926                                break;
1927
1928                if (j == DLB2_MAX_NUM_QIDS_PER_LDB_CQ)
1929                        continue;
1930
1931                ret = dlb2_event_queue_detach_ldb(dlb2, ev_port, ev_queue);
1932                if (ret) {
1933                        DLB2_LOG_ERR("unlink err=%d for port %d queue %d\n",
1934                                     ret, ev_port->id, queues[i]);
1935                        rte_errno = -ENOENT;
1936                        return i; /* return index of offending queue */
1937                }
1938
1939                ev_port->link[j].valid = false;
1940                ev_port->num_links--;
1941                ev_queue->num_links--;
1942        }
1943
1944        return nb_unlinks;
1945}
1946
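/* Report how many queue unmap operations are still in progress for this
 * port's CQ. The count is returned by the driver in cfg.response.id.
 */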
1947static int
1948dlb2_eventdev_port_unlinks_in_progress(struct rte_eventdev *dev,
1949                                       void *event_port)
1950{
1951        struct dlb2_eventdev_port *ev_port = event_port;
1952        struct dlb2_eventdev *dlb2;
1953        struct dlb2_hw_dev *handle;
1954        struct dlb2_pending_port_unmaps_args cfg;
1955        int ret;
1956
1957        RTE_SET_USED(dev);
1958
1959        if (!ev_port->setup_done) {
1960                DLB2_LOG_ERR("dlb2: evport %d is not configured\n",
1961                             ev_port->id);
1962                rte_errno = -EINVAL;
1963                return 0;
1964        }
1965
1966        cfg.port_id = ev_port->qm_port.id;
1967        dlb2 = ev_port->dlb2;
1968        handle = &dlb2->qm_instance;
1969        ret = dlb2_iface_pending_port_unmaps(handle, &cfg);
1970
1971        if (ret < 0) {
1972                DLB2_LOG_ERR("dlb2: num_unlinks_in_progress ret=%d (driver status: %s)\n",
1973                             ret, dlb2_error_strings[cfg.response.status]);
1974                return ret;
1975        }
1976
1977        return cfg.response.id;
1978}
1979
1980static int
1981dlb2_eventdev_reapply_configuration(struct rte_eventdev *dev)
1982{
1983        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
1984        int ret, i;
1985
1986        /* If an event queue or port was previously configured, but hasn't been
1987         * reconfigured, reapply its original configuration.
1988         */
1989        for (i = 0; i < dlb2->num_queues; i++) {
1990                struct dlb2_eventdev_queue *ev_queue;
1991
1992                ev_queue = &dlb2->ev_queues[i];
1993
1994                if (ev_queue->qm_queue.config_state != DLB2_PREV_CONFIGURED)
1995                        continue;
1996
1997                ret = dlb2_eventdev_queue_setup(dev, i, &ev_queue->conf);
1998                if (ret < 0) {
1999                        DLB2_LOG_ERR("dlb2: failed to reconfigure queue %d\n", i);
2000                        return ret;
2001                }
2002        }
2003
2004        for (i = 0; i < dlb2->num_ports; i++) {
2005                struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2006
2007                if (ev_port->qm_port.config_state != DLB2_PREV_CONFIGURED)
2008                        continue;
2009
2010                ret = dlb2_eventdev_port_setup(dev, i, &ev_port->conf);
2011                if (ret < 0) {
2012                        DLB2_LOG_ERR("dlb2: failed to reconfigure ev_port %d\n",
2013                                     i);
2014                        return ret;
2015                }
2016        }
2017
2018        return 0;
2019}
2020
2021static int
2022dlb2_eventdev_apply_port_links(struct rte_eventdev *dev)
2023{
2024        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2025        int i;
2026
2027        /* Perform requested port->queue links */
2028        for (i = 0; i < dlb2->num_ports; i++) {
2029                struct dlb2_eventdev_port *ev_port = &dlb2->ev_ports[i];
2030                int j;
2031
2032                for (j = 0; j < DLB2_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
2033                        struct dlb2_eventdev_queue *ev_queue;
2034                        uint8_t prio, queue_id;
2035
2036                        if (!ev_port->link[j].valid)
2037                                continue;
2038
2039                        prio = ev_port->link[j].priority;
2040                        queue_id = ev_port->link[j].queue_id;
2041
2042                        if (dlb2_validate_port_link(ev_port, queue_id, true, j))
2043                                return -EINVAL;
2044
2045                        ev_queue = &dlb2->ev_queues[queue_id];
2046
2047                        if (dlb2_do_port_link(dev, ev_queue, ev_port, prio))
2048                                return -EINVAL;
2049                }
2050        }
2051
2052        return 0;
2053}
2054
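/* Start the scheduling domain: reapply any previously configured queues
 * and ports, program the deferred port->queue links, verify that every
 * port is set up and every queue is linked, and then issue the domain
 * start command to the hardware.
 */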
2055static int
2056dlb2_eventdev_start(struct rte_eventdev *dev)
2057{
2058        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
2059        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
2060        struct dlb2_start_domain_args cfg;
2061        int ret, i;
2062
2063        rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
2064        if (dlb2->run_state != DLB2_RUN_STATE_STOPPED) {
2065                DLB2_LOG_ERR("bad state %d for dev_start\n",
2066                             (int)dlb2->run_state);
2067                rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2068                return -EINVAL;
2069        }
2070        dlb2->run_state = DLB2_RUN_STATE_STARTING;
2071        rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
2072
2073        /* If the device was configured more than once, some event ports and/or
2074         * queues may need to be reconfigured.
2075         */
2076        ret = dlb2_eventdev_reapply_configuration(dev);
2077        if (ret)
2078                return ret;
2079
2080        /* The DLB2 PMD delays port links until the device is started. */
2081        ret = dlb2_eventdev_apply_port_links(dev);
2082        if (ret)
2083                return ret;
2084
2085        for (i = 0; i < dlb2->num_ports; i++) {
2086                if (!dlb2->ev_ports[i].setup_done) {
2087                        DLB2_LOG_ERR("dlb2: port %d not setup\n", i);
2088                        return -ESTALE;
2089                }
2090        }
2091
2092        for (i = 0; i < dlb2->num_queues; i++) {
2093                if (dlb2->ev_queues[i].num_links == 0) {
2094                        DLB2_LOG_ERR("dlb2: queue %d is not linked\n", i);
2095                        return -ENOLINK;
2096                }
2097        }
2098
2099        ret = dlb2_iface_sched_domain_start(handle, &cfg);
2100        if (ret < 0) {
2101                DLB2_LOG_ERR("dlb2: sched_domain_start ret=%d (driver status: %s)\n",
2102                             ret, dlb2_error_strings[cfg.response.status]);
2103                return ret;
2104        }
2105
2106        dlb2->run_state = DLB2_RUN_STATE_STARTED;
2107        DLB2_LOG_DBG("dlb2: sched_domain_start completed OK\n");
2108
2109        return 0;
2110}
2111
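/* Map (port type, event op) to the HCW command byte written in the QE.
 * Directed ports encode FORWARD as NEW and RELEASE as a no-op command,
 * because releases are not written to hardware for directed ports (see
 * dlb2_event_release()).
 */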
2112static uint8_t cmd_byte_map[DLB2_NUM_PORT_TYPES][DLB2_NUM_HW_SCHED_TYPES] = {
2113        {
2114                /* Load-balanced cmd bytes */
2115                [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2116                [RTE_EVENT_OP_FORWARD] = DLB2_FWD_CMD_BYTE,
2117                [RTE_EVENT_OP_RELEASE] = DLB2_COMP_CMD_BYTE,
2118        },
2119        {
2120                /* Directed cmd bytes */
2121                [RTE_EVENT_OP_NEW] = DLB2_NEW_CMD_BYTE,
2122                [RTE_EVENT_OP_FORWARD] = DLB2_NEW_CMD_BYTE,
2123                [RTE_EVENT_OP_RELEASE] = DLB2_NOOP_CMD_BYTE,
2124        },
2125};
2126
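/* Acquire up to DLB2_SW_CREDIT_BATCH_SZ credits from the shared credit
 * pool with a single compare-and-swap. Returns the number of credits
 * acquired, or 0 if the pool is empty or the CAS loses a race.
 */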
2127static inline uint32_t
2128dlb2_port_credits_get(struct dlb2_port *qm_port,
2129                      enum dlb2_hw_queue_types type)
2130{
2131        uint32_t credits = *qm_port->credit_pool[type];
2132        uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2133
2134        if (unlikely(credits < batch_size))
2135                batch_size = credits;
2136
2137        if (likely(credits &&
2138                   __atomic_compare_exchange_n(
2139                        qm_port->credit_pool[type],
2140                        &credits, credits - batch_size, false,
2141                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
2142                return batch_size;
2143        else
2144                return 0;
2145}
2146
2147static inline void
2148dlb2_replenish_sw_credits(struct dlb2_eventdev *dlb2,
2149                          struct dlb2_eventdev_port *ev_port)
2150{
2151        uint16_t quanta = ev_port->credit_update_quanta;
2152
2153        if (ev_port->inflight_credits >= quanta * 2) {
2154                /* Replenish credits, saving one quanta for enqueues */
2155                uint16_t val = ev_port->inflight_credits - quanta;
2156
2157                __atomic_fetch_sub(&dlb2->inflights, val, __ATOMIC_SEQ_CST);
2158                ev_port->inflight_credits -= val;
2159        }
2160}
2161
2162static inline int
2163dlb2_check_enqueue_sw_credits(struct dlb2_eventdev *dlb2,
2164                              struct dlb2_eventdev_port *ev_port)
2165{
2166        uint32_t sw_inflights = __atomic_load_n(&dlb2->inflights,
2167                                                __ATOMIC_SEQ_CST);
2168        const int num = 1;
2169
2170        if (unlikely(ev_port->inflight_max < sw_inflights)) {
2171                DLB2_INC_STAT(ev_port->stats.traffic.tx_nospc_inflight_max, 1);
2172                rte_errno = -ENOSPC;
2173                return 1;
2174        }
2175
2176        if (ev_port->inflight_credits < num) {
2177                /* Check that taking another credit quanta won't exceed the new event limit */
2178                uint32_t credit_update_quanta = ev_port->credit_update_quanta;
2179
2180                if (sw_inflights + credit_update_quanta >
2181                                dlb2->new_event_limit) {
2182                        DLB2_INC_STAT(
2183                        ev_port->stats.traffic.tx_nospc_new_event_limit,
2184                        1);
2185                        rte_errno = -ENOSPC;
2186                        return 1;
2187                }
2188
2189                __atomic_fetch_add(&dlb2->inflights, credit_update_quanta,
2190                                   __ATOMIC_SEQ_CST);
2191                ev_port->inflight_credits += (credit_update_quanta);
2192
2193                if (ev_port->inflight_credits < num) {
2194                        DLB2_INC_STAT(
2195                        ev_port->stats.traffic.tx_nospc_inflight_credits,
2196                        1);
2197                        rte_errno = -ENOSPC;
2198                        return 1;
2199                }
2200        }
2201
2202        return 0;
2203}
2204
2205static inline int
2206dlb2_check_enqueue_hw_ldb_credits(struct dlb2_port *qm_port)
2207{
2208        if (unlikely(qm_port->cached_ldb_credits == 0)) {
2209                qm_port->cached_ldb_credits =
2210                        dlb2_port_credits_get(qm_port,
2211                                              DLB2_LDB_QUEUE);
2212                if (unlikely(qm_port->cached_ldb_credits == 0)) {
2213                        DLB2_INC_STAT(
2214                        qm_port->ev_port->stats.traffic.tx_nospc_ldb_hw_credits,
2215                        1);
2216                        DLB2_LOG_DBG("ldb credits exhausted\n");
2217                        return 1; /* credits exhausted */
2218                }
2219        }
2220
2221        return 0;
2222}
2223
2224static inline int
2225dlb2_check_enqueue_hw_dir_credits(struct dlb2_port *qm_port)
2226{
2227        if (unlikely(qm_port->cached_dir_credits == 0)) {
2228                qm_port->cached_dir_credits =
2229                        dlb2_port_credits_get(qm_port,
2230                                              DLB2_DIR_QUEUE);
2231                if (unlikely(qm_port->cached_dir_credits == 0)) {
2232                        DLB2_INC_STAT(
2233                        qm_port->ev_port->stats.traffic.tx_nospc_dir_hw_credits,
2234                        1);
2235                        DLB2_LOG_DBG("dir credits exhausted\n");
2236                        return 1; /* credits exhausted */
2237                }
2238        }
2239
2240        return 0;
2241}
2242
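/* Write one cache line of four 16B QEs to the port's producer port (PP)
 * MMIO address using MOVDIR64B.
 */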
2243static __rte_always_inline void
2244dlb2_pp_write(struct dlb2_enqueue_qe *qe4,
2245              struct process_local_port_data *port_data)
2246{
2247        dlb2_movdir64b(port_data->pp_addr, qe4);
2248}
2249
2250static inline int
2251dlb2_consume_qe_immediate(struct dlb2_port *qm_port, int num)
2252{
2253        struct process_local_port_data *port_data;
2254        struct dlb2_cq_pop_qe *qe;
2255
2256        RTE_ASSERT(qm_port->config_state == DLB2_CONFIGURED);
2257
2258        qe = qm_port->consume_qe;
2259
2260        qe->tokens = num - 1;
2261
2262        /* No store fence needed since no pointer is being sent, and CQ token
2263         * pops can be safely reordered with other HCWs.
2264         */
2265        port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2266
2267        dlb2_movntdq_single(port_data->pp_addr, qe);
2268
2269        DLB2_LOG_DBG("dlb2: consume immediate - %d QEs\n", num);
2270
2271        qm_port->owed_tokens = 0;
2272
2273        return 0;
2274}
2275
2276static inline void
2277dlb2_hw_do_enqueue(struct dlb2_port *qm_port,
2278                   bool do_sfence,
2279                   struct process_local_port_data *port_data)
2280{
2281        /* Since MOVDIR64B is weakly-ordered, use an SFENCE to ensure that
2282         * application writes complete before enqueueing the QE.
2283         */
2284        if (do_sfence)
2285                rte_wmb();
2286
2287        dlb2_pp_write(qm_port->qe4, port_data);
2288}
2289
2290static inline void
2291dlb2_construct_token_pop_qe(struct dlb2_port *qm_port, int idx)
2292{
2293        struct dlb2_cq_pop_qe *qe = (void *)qm_port->qe4;
2294        int num = qm_port->owed_tokens;
2295
2296        qe[idx].cmd_byte = DLB2_POP_CMD_BYTE;
2297        qe[idx].tokens = num - 1;
2298
2299        qm_port->owed_tokens = 0;
2300}
2301
2302static inline void
2303dlb2_event_build_hcws(struct dlb2_port *qm_port,
2304                      const struct rte_event ev[],
2305                      int num,
2306                      uint8_t *sched_type,
2307                      uint8_t *queue_id)
2308{
2309        struct dlb2_enqueue_qe *qe;
2310        uint16_t sched_word[4];
2311        __m128i sse_qe[2];
2312        int i;
2313
2314        qe = qm_port->qe4;
2315
2316        sse_qe[0] = _mm_setzero_si128();
2317        sse_qe[1] = _mm_setzero_si128();
2318
2319        switch (num) {
2320        case 4:
2321                /* Construct the metadata portion of two HCWs in one 128b SSE
2322                 * register. HCW metadata is constructed in the SSE registers
2323                 * like so:
2324                 * sse_qe[0][63:0]:   qe[0]'s metadata
2325                 * sse_qe[0][127:64]: qe[1]'s metadata
2326                 * sse_qe[1][63:0]:   qe[2]'s metadata
2327                 * sse_qe[1][127:64]: qe[3]'s metadata
2328                 */
2329
2330                /* Convert the event operation into a command byte and store it
2331                 * in the metadata:
2332                 * sse_qe[0][63:56]   = cmd_byte_map[is_directed][ev[0].op]
2333                 * sse_qe[0][127:120] = cmd_byte_map[is_directed][ev[1].op]
2334                 * sse_qe[1][63:56]   = cmd_byte_map[is_directed][ev[2].op]
2335                 * sse_qe[1][127:120] = cmd_byte_map[is_directed][ev[3].op]
2336                 */
2337#define DLB2_QE_CMD_BYTE 7
2338                sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2339                                cmd_byte_map[qm_port->is_directed][ev[0].op],
2340                                DLB2_QE_CMD_BYTE);
2341                sse_qe[0] = _mm_insert_epi8(sse_qe[0],
2342                                cmd_byte_map[qm_port->is_directed][ev[1].op],
2343                                DLB2_QE_CMD_BYTE + 8);
2344                sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2345                                cmd_byte_map[qm_port->is_directed][ev[2].op],
2346                                DLB2_QE_CMD_BYTE);
2347                sse_qe[1] = _mm_insert_epi8(sse_qe[1],
2348                                cmd_byte_map[qm_port->is_directed][ev[3].op],
2349                                DLB2_QE_CMD_BYTE + 8);
2350
2351                /* Store priority, scheduling type, and queue ID in the sched
2352                 * word array because these values are re-used when the
2353                 * destination is a directed queue.
2354                 */
2355                sched_word[0] = EV_TO_DLB2_PRIO(ev[0].priority) << 10 |
2356                                sched_type[0] << 8 |
2357                                queue_id[0];
2358                sched_word[1] = EV_TO_DLB2_PRIO(ev[1].priority) << 10 |
2359                                sched_type[1] << 8 |
2360                                queue_id[1];
2361                sched_word[2] = EV_TO_DLB2_PRIO(ev[2].priority) << 10 |
2362                                sched_type[2] << 8 |
2363                                queue_id[2];
2364                sched_word[3] = EV_TO_DLB2_PRIO(ev[3].priority) << 10 |
2365                                sched_type[3] << 8 |
2366                                queue_id[3];
2367
2368                /* Store the event priority, scheduling type, and queue ID in
2369                 * the metadata:
2370                 * sse_qe[0][31:16] = sched_word[0]
2371                 * sse_qe[0][95:80] = sched_word[1]
2372                 * sse_qe[1][31:16] = sched_word[2]
2373                 * sse_qe[1][95:80] = sched_word[3]
2374                 */
2375#define DLB2_QE_QID_SCHED_WORD 1
2376                sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2377                                             sched_word[0],
2378                                             DLB2_QE_QID_SCHED_WORD);
2379                sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2380                                             sched_word[1],
2381                                             DLB2_QE_QID_SCHED_WORD + 4);
2382                sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2383                                             sched_word[2],
2384                                             DLB2_QE_QID_SCHED_WORD);
2385                sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2386                                             sched_word[3],
2387                                             DLB2_QE_QID_SCHED_WORD + 4);
2388
2389                /* If the destination is a load-balanced queue, store the lock
2390                 * ID. If it is a directed queue, DLB places this field in
2391                 * bytes 10-11 of the received QE, so we format it accordingly:
2392                 * sse_qe[0][47:32]  = dir queue ? sched_word[0] : flow_id[0]
2393                 * sse_qe[0][111:96] = dir queue ? sched_word[1] : flow_id[1]
2394                 * sse_qe[1][47:32]  = dir queue ? sched_word[2] : flow_id[2]
2395                 * sse_qe[1][111:96] = dir queue ? sched_word[3] : flow_id[3]
2396                 */
2397#define DLB2_QE_LOCK_ID_WORD 2
2398                sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2399                                (sched_type[0] == DLB2_SCHED_DIRECTED) ?
2400                                        sched_word[0] : ev[0].flow_id,
2401                                DLB2_QE_LOCK_ID_WORD);
2402                sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2403                                (sched_type[1] == DLB2_SCHED_DIRECTED) ?
2404                                        sched_word[1] : ev[1].flow_id,
2405                                DLB2_QE_LOCK_ID_WORD + 4);
2406                sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2407                                (sched_type[2] == DLB2_SCHED_DIRECTED) ?
2408                                        sched_word[2] : ev[2].flow_id,
2409                                DLB2_QE_LOCK_ID_WORD);
2410                sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2411                                (sched_type[3] == DLB2_SCHED_DIRECTED) ?
2412                                        sched_word[3] : ev[3].flow_id,
2413                                DLB2_QE_LOCK_ID_WORD + 4);
2414
2415                /* Store the event type and sub event type in the metadata:
2416                 * sse_qe[0][15:0]  = sub_event_type[0] << 8 | event_type[0]
2417                 * sse_qe[0][79:64] = sub_event_type[1] << 8 | event_type[1]
2418                 * sse_qe[1][15:0]  = sub_event_type[2] << 8 | event_type[2]
2419                 * sse_qe[1][79:64] = sub_event_type[3] << 8 | event_type[3]
2420                 */
2421#define DLB2_QE_EV_TYPE_WORD 0
2422                sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2423                                             ev[0].sub_event_type << 8 |
2424                                                ev[0].event_type,
2425                                             DLB2_QE_EV_TYPE_WORD);
2426                sse_qe[0] = _mm_insert_epi16(sse_qe[0],
2427                                             ev[1].sub_event_type << 8 |
2428                                                ev[1].event_type,
2429                                             DLB2_QE_EV_TYPE_WORD + 4);
2430                sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2431                                             ev[2].sub_event_type << 8 |
2432                                                ev[2].event_type,
2433                                             DLB2_QE_EV_TYPE_WORD);
2434                sse_qe[1] = _mm_insert_epi16(sse_qe[1],
2435                                             ev[3].sub_event_type << 8 |
2436                                                ev[3].event_type,
2437                                             DLB2_QE_EV_TYPE_WORD + 4);
2438
2439                /* Store the metadata to memory (use the double-precision
2440                 * _mm_storeh_pd because there is no integer function for
2441                 * storing the upper 64b):
2442                 * qe[0] metadata = sse_qe[0][63:0]
2443                 * qe[1] metadata = sse_qe[0][127:64]
2444                 * qe[2] metadata = sse_qe[1][63:0]
2445                 * qe[3] metadata = sse_qe[1][127:64]
2446                 */
2447                _mm_storel_epi64((__m128i *)&qe[0].u.opaque_data, sse_qe[0]);
2448                _mm_storeh_pd((double *)&qe[1].u.opaque_data,
2449                              (__m128d)sse_qe[0]);
2450                _mm_storel_epi64((__m128i *)&qe[2].u.opaque_data, sse_qe[1]);
2451                _mm_storeh_pd((double *)&qe[3].u.opaque_data,
2452                              (__m128d)sse_qe[1]);
2453
2454                qe[0].data = ev[0].u64;
2455                qe[1].data = ev[1].u64;
2456                qe[2].data = ev[2].u64;
2457                qe[3].data = ev[3].u64;
2458
2459                break;
2460        case 3:
2461        case 2:
2462        case 1:
2463                for (i = 0; i < num; i++) {
2464                        qe[i].cmd_byte =
2465                                cmd_byte_map[qm_port->is_directed][ev[i].op];
2466                        qe[i].sched_type = sched_type[i];
2467                        qe[i].data = ev[i].u64;
2468                        qe[i].qid = queue_id[i];
2469                        qe[i].priority = EV_TO_DLB2_PRIO(ev[i].priority);
2470                        qe[i].lock_id = ev[i].flow_id;
2471                        if (sched_type[i] == DLB2_SCHED_DIRECTED) {
2472                                struct dlb2_msg_info *info =
2473                                        (struct dlb2_msg_info *)&qe[i].lock_id;
2474
2475                                info->qid = queue_id[i];
2476                                info->sched_type = DLB2_SCHED_DIRECTED;
2477                                info->priority = qe[i].priority;
2478                        }
2479                        qe[i].u.event_type.major = ev[i].event_type;
2480                        qe[i].u.event_type.sub = ev[i].sub_event_type;
2481                }
2482                break;
2483        case 0:
2484                break;
2485        }
2486}
2487
2488static inline int
2489dlb2_event_enqueue_prep(struct dlb2_eventdev_port *ev_port,
2490                        struct dlb2_port *qm_port,
2491                        const struct rte_event ev[],
2492                        uint8_t *sched_type,
2493                        uint8_t *queue_id)
2494{
2495        struct dlb2_eventdev *dlb2 = ev_port->dlb2;
2496        struct dlb2_eventdev_queue *ev_queue;
2497        uint16_t *cached_credits = NULL;
2498        struct dlb2_queue *qm_queue;
2499
2500        ev_queue = &dlb2->ev_queues[ev->queue_id];
2501        qm_queue = &ev_queue->qm_queue;
2502        *queue_id = qm_queue->id;
2503
2504        /* Ignore sched_type and hardware credits on release events */
2505        if (ev->op == RTE_EVENT_OP_RELEASE)
2506                goto op_check;
2507
2508        if (!qm_queue->is_directed) {
2509                /* Load balanced destination queue */
2510
2511                if (dlb2_check_enqueue_hw_ldb_credits(qm_port)) {
2512                        rte_errno = -ENOSPC;
2513                        return 1;
2514                }
2515                cached_credits = &qm_port->cached_ldb_credits;
2516
2517                switch (ev->sched_type) {
2518                case RTE_SCHED_TYPE_ORDERED:
2519                        DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ORDERED\n");
2520                        if (qm_queue->sched_type != RTE_SCHED_TYPE_ORDERED) {
2521                                DLB2_LOG_ERR("dlb2: tried to send ordered event to unordered queue %d\n",
2522                                             *queue_id);
2523                                rte_errno = -EINVAL;
2524                                return 1;
2525                        }
2526                        *sched_type = DLB2_SCHED_ORDERED;
2527                        break;
2528                case RTE_SCHED_TYPE_ATOMIC:
2529                        DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_ATOMIC\n");
2530                        *sched_type = DLB2_SCHED_ATOMIC;
2531                        break;
2532                case RTE_SCHED_TYPE_PARALLEL:
2533                        DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_PARALLEL\n");
2534                        if (qm_queue->sched_type == RTE_SCHED_TYPE_ORDERED)
2535                                *sched_type = DLB2_SCHED_ORDERED;
2536                        else
2537                                *sched_type = DLB2_SCHED_UNORDERED;
2538                        break;
2539                default:
2540                        DLB2_LOG_ERR("Unsupported LDB sched type in put_qe\n");
2541                        DLB2_INC_STAT(ev_port->stats.tx_invalid, 1);
2542                        rte_errno = -EINVAL;
2543                        return 1;
2544                }
2545        } else {
2546                /* Directed destination queue */
2547
2548                if (dlb2_check_enqueue_hw_dir_credits(qm_port)) {
2549                        rte_errno = -ENOSPC;
2550                        return 1;
2551                }
2552                cached_credits = &qm_port->cached_dir_credits;
2553
2554                DLB2_LOG_DBG("dlb2: put_qe: RTE_SCHED_TYPE_DIRECTED\n");
2555
2556                *sched_type = DLB2_SCHED_DIRECTED;
2557        }
2558
2559op_check:
2560        switch (ev->op) {
2561        case RTE_EVENT_OP_NEW:
2562                /* Check that a sw credit is available */
2563                if (dlb2_check_enqueue_sw_credits(dlb2, ev_port)) {
2564                        rte_errno = -ENOSPC;
2565                        return 1;
2566                }
2567                ev_port->inflight_credits--;
2568                (*cached_credits)--;
2569                break;
2570        case RTE_EVENT_OP_FORWARD:
2571                /* Check for outstanding_releases underflow. If this occurs,
2572                 * the application is not using the EVENT_OPs correctly; for
2573                 * example, forwarding or releasing events that were not
2574                 * dequeued.
2575                 */
2576                RTE_ASSERT(ev_port->outstanding_releases > 0);
2577                ev_port->outstanding_releases--;
2578                qm_port->issued_releases++;
2579                (*cached_credits)--;
2580                break;
2581        case RTE_EVENT_OP_RELEASE:
2582                ev_port->inflight_credits++;
2583                /* Check for outstanding_releases underflow. If this occurs,
2584                 * the application is not using the EVENT_OPs correctly; for
2585                 * example, forwarding or releasing events that were not
2586                 * dequeued.
2587                 */
2588                RTE_ASSERT(ev_port->outstanding_releases > 0);
2589                ev_port->outstanding_releases--;
2590                qm_port->issued_releases++;
2591
2592                /* Replenish s/w credits if enough are cached */
2593                dlb2_replenish_sw_credits(dlb2, ev_port);
2594                break;
2595        }
2596
2597        DLB2_INC_STAT(ev_port->stats.tx_op_cnt[ev->op], 1);
2598        DLB2_INC_STAT(ev_port->stats.traffic.tx_ok, 1);
2599
2600#ifndef RTE_LIBRTE_PMD_DLB2_QUELL_STATS
2601        if (ev->op != RTE_EVENT_OP_RELEASE) {
2602                DLB2_INC_STAT(ev_port->stats.queue[ev->queue_id].enq_ok, 1);
2603                DLB2_INC_STAT(ev_port->stats.tx_sched_cnt[*sched_type], 1);
2604        }
2605#endif
2606
2607        return 0;
2608}
2609
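/* Common enqueue path. Events are processed in groups of up to four (one
 * cache line of QEs per MOVDIR64B write). Each event is checked for
 * credits and scheduling validity; in DELAYED_POP mode a token-pop QE may
 * be inserted into the batch once enough releases have been issued. The
 * store fence is only issued before the first write of the burst. Returns
 * the number of events enqueued, which may be less than num if credits
 * run out (rte_errno is set by the prep step).
 *
 * Example (application side, illustrative only):
 *     uint16_t n = rte_event_enqueue_burst(dev_id, port_id, evs, nb);
 *     if (n < nb) { ...retry or drop the remaining events... }
 */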
2610static inline uint16_t
2611__dlb2_event_enqueue_burst(void *event_port,
2612                           const struct rte_event events[],
2613                           uint16_t num,
2614                           bool use_delayed)
2615{
2616        struct dlb2_eventdev_port *ev_port = event_port;
2617        struct dlb2_port *qm_port = &ev_port->qm_port;
2618        struct process_local_port_data *port_data;
2619        int i;
2620
2621        RTE_ASSERT(ev_port->enq_configured);
2622        RTE_ASSERT(events != NULL);
2623
2624        i = 0;
2625
2626        port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2627
2628        while (i < num) {
2629                uint8_t sched_types[DLB2_NUM_QES_PER_CACHE_LINE];
2630                uint8_t queue_ids[DLB2_NUM_QES_PER_CACHE_LINE];
2631                int pop_offs = 0;
2632                int j = 0;
2633
2634                memset(qm_port->qe4,
2635                       0,
2636                       DLB2_NUM_QES_PER_CACHE_LINE *
2637                       sizeof(struct dlb2_enqueue_qe));
2638
2639                for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < num; j++) {
2640                        const struct rte_event *ev = &events[i + j];
2641                        int16_t thresh = qm_port->token_pop_thresh;
2642
2643                        if (use_delayed &&
2644                            qm_port->token_pop_mode == DELAYED_POP &&
2645                            (ev->op == RTE_EVENT_OP_FORWARD ||
2646                             ev->op == RTE_EVENT_OP_RELEASE) &&
2647                            qm_port->issued_releases >= thresh - 1) {
2648                                /* Insert the token pop QE and break out. This
2649                                 * may result in a partial HCW, but that is
2650                                 * simpler than supporting arbitrary QE
2651                                 * insertion.
2652                                 */
2653                                dlb2_construct_token_pop_qe(qm_port, j);
2654
2655                                /* Reset the releases for the next QE batch */
2656                                qm_port->issued_releases -= thresh;
2657
2658                                pop_offs = 1;
2659                                j++;
2660                                break;
2661                        }
2662
2663                        if (dlb2_event_enqueue_prep(ev_port, qm_port, ev,
2664                                                    &sched_types[j],
2665                                                    &queue_ids[j]))
2666                                break;
2667                }
2668
2669                if (j == 0)
2670                        break;
2671
2672                dlb2_event_build_hcws(qm_port, &events[i], j - pop_offs,
2673                                      sched_types, queue_ids);
2674
2675                dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2676
2677                /* Don't include the token pop QE in the enqueue count */
2678                i += j - pop_offs;
2679
2680                /* Don't interpret j < DLB2_NUM_... as out-of-credits if
2681                 * pop_offs != 0
2682                 */
2683                if (j < DLB2_NUM_QES_PER_CACHE_LINE && pop_offs == 0)
2684                        break;
2685        }
2686
2687        return i;
2688}
2689
2690static uint16_t
2691dlb2_event_enqueue_burst(void *event_port,
2692                             const struct rte_event events[],
2693                             uint16_t num)
2694{
2695        return __dlb2_event_enqueue_burst(event_port, events, num, false);
2696}
2697
2698static uint16_t
2699dlb2_event_enqueue_burst_delayed(void *event_port,
2700                                     const struct rte_event events[],
2701                                     uint16_t num)
2702{
2703        return __dlb2_event_enqueue_burst(event_port, events, num, true);
2704}
2705
2706static inline uint16_t
2707dlb2_event_enqueue(void *event_port,
2708                   const struct rte_event events[])
2709{
2710        return __dlb2_event_enqueue_burst(event_port, events, 1, false);
2711}
2712
2713static inline uint16_t
2714dlb2_event_enqueue_delayed(void *event_port,
2715                           const struct rte_event events[])
2716{
2717        return __dlb2_event_enqueue_burst(event_port, events, 1, true);
2718}
2719
2720static uint16_t
2721dlb2_event_enqueue_new_burst(void *event_port,
2722                             const struct rte_event events[],
2723                             uint16_t num)
2724{
2725        return __dlb2_event_enqueue_burst(event_port, events, num, false);
2726}
2727
2728static uint16_t
2729dlb2_event_enqueue_new_burst_delayed(void *event_port,
2730                                     const struct rte_event events[],
2731                                     uint16_t num)
2732{
2733        return __dlb2_event_enqueue_burst(event_port, events, num, true);
2734}
2735
2736static uint16_t
2737dlb2_event_enqueue_forward_burst(void *event_port,
2738                                 const struct rte_event events[],
2739                                 uint16_t num)
2740{
2741        return __dlb2_event_enqueue_burst(event_port, events, num, false);
2742}
2743
2744static uint16_t
2745dlb2_event_enqueue_forward_burst_delayed(void *event_port,
2746                                         const struct rte_event events[],
2747                                         uint16_t num)
2748{
2749        return __dlb2_event_enqueue_burst(event_port, events, num, true);
2750}
2751
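/* Issue completion (release) HCWs for n previously dequeued events without
 * enqueueing new ones. Directed ports skip the hardware write entirely and
 * only perform the software credit accounting; load-balanced ports emit
 * batches of DLB2_COMP_CMD_BYTE QEs, inserting delayed token-pop QEs as
 * needed.
 */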
2752static void
2753dlb2_event_release(struct dlb2_eventdev *dlb2,
2754                   uint8_t port_id,
2755                   int n)
2756{
2757        struct process_local_port_data *port_data;
2758        struct dlb2_eventdev_port *ev_port;
2759        struct dlb2_port *qm_port;
2760        int i;
2761
2762        if (port_id > dlb2->num_ports) {
2763                DLB2_LOG_ERR("Invalid port id %d in dlb2_event_release\n",
2764                             port_id);
2765                rte_errno = -EINVAL;
2766                return;
2767        }
2768
2769        ev_port = &dlb2->ev_ports[port_id];
2770        qm_port = &ev_port->qm_port;
2771        port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2772
2773        i = 0;
2774
2775        if (qm_port->is_directed) {
2776                i = n;
2777                goto sw_credit_update;
2778        }
2779
2780        while (i < n) {
2781                int pop_offs = 0;
2782                int j = 0;
2783
2784                /* Zero-out QEs */
2785                qm_port->qe4[0].cmd_byte = 0;
2786                qm_port->qe4[1].cmd_byte = 0;
2787                qm_port->qe4[2].cmd_byte = 0;
2788                qm_port->qe4[3].cmd_byte = 0;
2789
2790                for (; j < DLB2_NUM_QES_PER_CACHE_LINE && (i + j) < n; j++) {
2791                        int16_t thresh = qm_port->token_pop_thresh;
2792
2793                        if (qm_port->token_pop_mode == DELAYED_POP &&
2794                            qm_port->issued_releases >= thresh - 1) {
2795                                /* Insert the token pop QE */
2796                                dlb2_construct_token_pop_qe(qm_port, j);
2797
2798                                /* Reset the releases for the next QE batch */
2799                                qm_port->issued_releases -= thresh;
2800
2801                                pop_offs = 1;
2802                                j++;
2803                                break;
2804                        }
2805
2806                        qm_port->qe4[j].cmd_byte = DLB2_COMP_CMD_BYTE;
2807                        qm_port->issued_releases++;
2808                }
2809
2810                dlb2_hw_do_enqueue(qm_port, i == 0, port_data);
2811
2812                /* Don't include the token pop QE in the release count */
2813                i += j - pop_offs;
2814        }
2815
2816sw_credit_update:
2817        /* each release returns one credit */
2818        if (!ev_port->outstanding_releases) {
2819                DLB2_LOG_ERR("%s: Outstanding releases underflowed.\n",
2820                             __func__);
2821                return;
2822        }
2823        ev_port->outstanding_releases -= i;
2824        ev_port->inflight_credits += i;
2825
2826        /* Replenish s/w credits if enough releases are performed */
2827        dlb2_replenish_sw_credits(dlb2, ev_port);
2828}
2829
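/* Return num credits to the port's local cache, and flush a batch back to
 * the shared pool once the cache holds at least twice the batch size.
 */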
2830static inline void
2831dlb2_port_credits_inc(struct dlb2_port *qm_port, int num)
2832{
2833        uint32_t batch_size = DLB2_SW_CREDIT_BATCH_SZ;
2834
2835        /* increment port credits, and return to pool if exceeds threshold */
2836        if (!qm_port->is_directed) {
2837                qm_port->cached_ldb_credits += num;
2838                if (qm_port->cached_ldb_credits >= 2 * batch_size) {
2839                        __atomic_fetch_add(
2840                                qm_port->credit_pool[DLB2_LDB_QUEUE],
2841                                batch_size, __ATOMIC_SEQ_CST);
2842                        qm_port->cached_ldb_credits -= batch_size;
2843                }
2844        } else {
2845                qm_port->cached_dir_credits += num;
2846                if (qm_port->cached_dir_credits >= 2 * batch_size) {
2847                        __atomic_fetch_add(
2848                                qm_port->credit_pool[DLB2_DIR_QUEUE],
2849                                batch_size, __ATOMIC_SEQ_CST);
2850                        qm_port->cached_dir_credits -= batch_size;
2851                }
2852        }
2853}
2854
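/* Wait for CQ occupancy during a blocking dequeue. Returns 1 when the
 * timeout has expired. Otherwise, if umwait is allowed, arm
 * rte_power_monitor() on the generation bit of the next CQ entry; if not,
 * busy-poll for up to RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL cycles. Returns 0
 * so the caller re-polls the CQ.
 */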
2855static inline int
2856dlb2_dequeue_wait(struct dlb2_eventdev *dlb2,
2857                  struct dlb2_eventdev_port *ev_port,
2858                  struct dlb2_port *qm_port,
2859                  uint64_t timeout,
2860                  uint64_t start_ticks)
2861{
2862        struct process_local_port_data *port_data;
2863        uint64_t elapsed_ticks;
2864
2865        port_data = &dlb2_port[qm_port->id][PORT_TYPE(qm_port)];
2866
2867        elapsed_ticks = rte_get_timer_cycles() - start_ticks;
2868
2869        /* Wait/poll time expired */
2870        if (elapsed_ticks >= timeout) {
2871                return 1;
2872        } else if (dlb2->umwait_allowed) {
2873                struct rte_power_monitor_cond pmc;
2874                volatile struct dlb2_dequeue_qe *cq_base;
2875                union {
2876                        uint64_t raw_qe[2];
2877                        struct dlb2_dequeue_qe qe;
2878                } qe_mask;
2879                uint64_t expected_value;
2880                volatile uint64_t *monitor_addr;
2881
2882                qe_mask.qe.cq_gen = 1; /* set mask */
2883
2884                cq_base = port_data->cq_base;
2885                monitor_addr = (volatile uint64_t *)(volatile void *)
2886                        &cq_base[qm_port->cq_idx];
2887                monitor_addr++; /* cq_gen bit is in second 64bit location */
2888
2889                if (qm_port->gen_bit)
2890                        expected_value = qe_mask.raw_qe[1];
2891                else
2892                        expected_value = 0;
2893
2894                pmc.addr = monitor_addr;
2895                pmc.val = expected_value;
2896                pmc.mask = qe_mask.raw_qe[1];
2897                pmc.size = sizeof(uint64_t);
2898
2899                rte_power_monitor(&pmc, timeout + start_ticks);
2900
2901                DLB2_INC_STAT(ev_port->stats.traffic.rx_umonitor_umwait, 1);
2902        } else {
2903                uint64_t poll_interval = RTE_LIBRTE_PMD_DLB2_POLL_INTERVAL;
2904                uint64_t curr_ticks = rte_get_timer_cycles();
2905                uint64_t init_ticks = curr_ticks;
2906
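                    /* Busy-poll for at most poll_interval cycles (and never
                     * past the overall timeout), then return so the caller
                     * can re-check the CQ.
                     */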
2907                while ((curr_ticks - start_ticks < timeout) &&
2908                       (curr_ticks - init_ticks < poll_interval))
2909                        curr_ticks = rte_get_timer_cycles();
2910        }
2911
2912        return 0;
2913}
2914
2915static inline int
2916dlb2_process_dequeue_qes(struct dlb2_eventdev_port *ev_port,
2917                         struct dlb2_port *qm_port,
2918                         struct rte_event *events,
2919                         struct dlb2_dequeue_qe *qes,
2920                         int cnt)
2921{
2922        uint8_t *qid_mappings = qm_port->qid_mappings;
2923        int i, num, evq_id;
2924
2925        for (i = 0, num = 0; i < cnt; i++) {
2926                struct dlb2_dequeue_qe *qe = &qes[i];
2927                int sched_type_map[DLB2_NUM_HW_SCHED_TYPES] = {
2928                        [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2929                        [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2930                        [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2931                        [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2932                };
2933
2934                /* Fill in event information.
2935                 * Note that flow_id must be embedded in the data by
2936                 * the app, such as the mbuf RSS hash field if the data
2937                 * buffer is an mbuf.
2938                 */
2939                if (unlikely(qe->error)) {
2940                        DLB2_LOG_ERR("QE error bit ON\n");
2941                        DLB2_INC_STAT(ev_port->stats.traffic.rx_drop, 1);
2942                        dlb2_consume_qe_immediate(qm_port, 1);
2943                        continue; /* Ignore */
2944                }
2945
2946                events[num].u64 = qe->data;
2947                events[num].flow_id = qe->flow_id;
2948                events[num].priority = DLB2_TO_EV_PRIO((uint8_t)qe->priority);
2949                events[num].event_type = qe->u.event_type.major;
2950                events[num].sub_event_type = qe->u.event_type.sub;
2951                events[num].sched_type = sched_type_map[qe->sched_type];
2952                events[num].impl_opaque = qe->qid_depth;
2953
2954                /* qid not preserved for directed queues */
2955                if (qm_port->is_directed)
2956                        evq_id = ev_port->link[0].queue_id;
2957                else
2958                        evq_id = qid_mappings[qe->qid];
2959
2960                events[num].queue_id = evq_id;
2961                DLB2_INC_STAT(
2962                        ev_port->stats.queue[evq_id].qid_depth[qe->qid_depth],
2963                        1);
2964                DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qe->sched_type], 1);
2965                num++;
2966        }
2967
2968        DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num);
2969
2970        return num;
2971}
2972
2973static inline int
2974dlb2_process_dequeue_four_qes(struct dlb2_eventdev_port *ev_port,
2975                              struct dlb2_port *qm_port,
2976                              struct rte_event *events,
2977                              struct dlb2_dequeue_qe *qes)
2978{
2979        int sched_type_map[] = {
2980                [DLB2_SCHED_ATOMIC] = RTE_SCHED_TYPE_ATOMIC,
2981                [DLB2_SCHED_UNORDERED] = RTE_SCHED_TYPE_PARALLEL,
2982                [DLB2_SCHED_ORDERED] = RTE_SCHED_TYPE_ORDERED,
2983                [DLB2_SCHED_DIRECTED] = RTE_SCHED_TYPE_ATOMIC,
2984        };
2985        const int num_events = DLB2_NUM_QES_PER_CACHE_LINE;
2986        uint8_t *qid_mappings = qm_port->qid_mappings;
2987        __m128i sse_evt[2];
2988
2989        /* In the unlikely case that any of the QE error bits are set, process
2990         * them one at a time.
2991         */
2992        if (unlikely(qes[0].error || qes[1].error ||
2993                     qes[2].error || qes[3].error))
2994                return dlb2_process_dequeue_qes(ev_port, qm_port, events,
2995                                                 qes, num_events);
2996
2997        events[0].u64 = qes[0].data;
2998        events[1].u64 = qes[1].data;
2999        events[2].u64 = qes[2].data;
3000        events[3].u64 = qes[3].data;
3001
3002        /* Construct the metadata portion of four struct rte_events,
3003         * two per 128b SSE register. Event metadata is laid out in the
3004         * SSE registers like so:
3005         * sse_evt[0][63:0]:   event[0]'s metadata
3006         * sse_evt[0][127:64]: event[1]'s metadata
3007         * sse_evt[1][63:0]:   event[2]'s metadata
3008         * sse_evt[1][127:64]: event[3]'s metadata
3009         */
3010        sse_evt[0] = _mm_setzero_si128();
3011        sse_evt[1] = _mm_setzero_si128();
3012
3013        /* Convert the hardware queue ID to an event queue ID and store it in
3014         * the metadata:
3015         * sse_evt[0][47:40]   = qid_mappings[qes[0].qid]
3016         * sse_evt[0][111:104] = qid_mappings[qes[1].qid]
3017         * sse_evt[1][47:40]   = qid_mappings[qes[2].qid]
3018         * sse_evt[1][111:104] = qid_mappings[qes[3].qid]
3019         */
3020#define DLB_EVENT_QUEUE_ID_BYTE 5
3021        sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3022                                     qid_mappings[qes[0].qid],
3023                                     DLB_EVENT_QUEUE_ID_BYTE);
3024        sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3025                                     qid_mappings[qes[1].qid],
3026                                     DLB_EVENT_QUEUE_ID_BYTE + 8);
3027        sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3028                                     qid_mappings[qes[2].qid],
3029                                     DLB_EVENT_QUEUE_ID_BYTE);
3030        sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3031                                     qid_mappings[qes[3].qid],
3032                                     DLB_EVENT_QUEUE_ID_BYTE + 8);
3033
3034        /* Convert the hardware priority to an event priority and store it in
3035         * the metadata, and store the queue depth status value captured by
3036         * the hardware in impl_opaque, which the application can read but
3037         * should not modify.
3038         * sse_evt[0][55:48]   = DLB2_TO_EV_PRIO(qes[0].priority)
3039         * sse_evt[0][63:56]   = qes[0].qid_depth
3040         * sse_evt[0][119:112] = DLB2_TO_EV_PRIO(qes[1].priority)
3041         * sse_evt[0][127:120] = qes[1].qid_depth
3042         * sse_evt[1][55:48]   = DLB2_TO_EV_PRIO(qes[2].priority)
3043         * sse_evt[1][63:56]   = qes[2].qid_depth
3044         * sse_evt[1][119:112] = DLB2_TO_EV_PRIO(qes[3].priority)
3045         * sse_evt[1][127:120] = qes[3].qid_depth
3046         */
3047#define DLB_EVENT_PRIO_IMPL_OPAQUE_WORD 3
3048#define DLB_BYTE_SHIFT 8
3049        sse_evt[0] =
3050                _mm_insert_epi16(sse_evt[0],
3051                        DLB2_TO_EV_PRIO((uint8_t)qes[0].priority) |
3052                        (qes[0].qid_depth << DLB_BYTE_SHIFT),
3053                        DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3054        sse_evt[0] =
3055                _mm_insert_epi16(sse_evt[0],
3056                        DLB2_TO_EV_PRIO((uint8_t)qes[1].priority) |
3057                        (qes[1].qid_depth << DLB_BYTE_SHIFT),
3058                        DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3059        sse_evt[1] =
3060                _mm_insert_epi16(sse_evt[1],
3061                        DLB2_TO_EV_PRIO((uint8_t)qes[2].priority) |
3062                        (qes[2].qid_depth << DLB_BYTE_SHIFT),
3063                        DLB_EVENT_PRIO_IMPL_OPAQUE_WORD);
3064        sse_evt[1] =
3065                _mm_insert_epi16(sse_evt[1],
3066                        DLB2_TO_EV_PRIO((uint8_t)qes[3].priority) |
3067                        (qes[3].qid_depth << DLB_BYTE_SHIFT),
3068                        DLB_EVENT_PRIO_IMPL_OPAQUE_WORD + 4);
3069
3070        /* Write the event type, sub event type, and flow_id to the event
3071         * metadata.
3072         * sse_evt[0][31:0]   = qes[0].flow_id |
3073         *                      qes[0].u.event_type.major << 28 |
3074         *                      qes[0].u.event_type.sub << 20;
3075         * sse_evt[0][95:64]  = qes[1].flow_id |
3076         *                      qes[1].u.event_type.major << 28 |
3077         *                      qes[1].u.event_type.sub << 20;
3078         * sse_evt[1][31:0]   = qes[2].flow_id |
3079         *                      qes[2].u.event_type.major << 28 |
3080         *                      qes[2].u.event_type.sub << 20;
3081         * sse_evt[1][95:64]  = qes[3].flow_id |
3082         *                      qes[3].u.event_type.major << 28 |
3083         *                      qes[3].u.event_type.sub << 20;
3084         */
3085#define DLB_EVENT_EV_TYPE_DW 0
3086#define DLB_EVENT_EV_TYPE_SHIFT 28
3087#define DLB_EVENT_SUB_EV_TYPE_SHIFT 20
3088        sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3089                        qes[0].flow_id |
3090                        qes[0].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3091                        qes[0].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3092                        DLB_EVENT_EV_TYPE_DW);
3093        sse_evt[0] = _mm_insert_epi32(sse_evt[0],
3094                        qes[1].flow_id |
3095                        qes[1].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3096                        qes[1].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3097                        DLB_EVENT_EV_TYPE_DW + 2);
3098        sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3099                        qes[2].flow_id |
3100                        qes[2].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT |
3101                        qes[2].u.event_type.sub <<  DLB_EVENT_SUB_EV_TYPE_SHIFT,
3102                        DLB_EVENT_EV_TYPE_DW);
3103        sse_evt[1] = _mm_insert_epi32(sse_evt[1],
3104                        qes[3].flow_id |
3105                        qes[3].u.event_type.major << DLB_EVENT_EV_TYPE_SHIFT  |
3106                        qes[3].u.event_type.sub << DLB_EVENT_SUB_EV_TYPE_SHIFT,
3107                        DLB_EVENT_EV_TYPE_DW + 2);
3108
3109        /* Write the sched type to the event metadata. 'op' and 'rsvd' are not
3110         * set:
3111         * sse_evt[0][39:32]  = sched_type_map[qes[0].sched_type] << 6
3112         * sse_evt[0][103:96] = sched_type_map[qes[1].sched_type] << 6
3113         * sse_evt[1][39:32]  = sched_type_map[qes[2].sched_type] << 6
3114         * sse_evt[1][103:96] = sched_type_map[qes[3].sched_type] << 6
3115         */
3116#define DLB_EVENT_SCHED_TYPE_BYTE 4
3117#define DLB_EVENT_SCHED_TYPE_SHIFT 6
3118        sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3119                sched_type_map[qes[0].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3120                DLB_EVENT_SCHED_TYPE_BYTE);
3121        sse_evt[0] = _mm_insert_epi8(sse_evt[0],
3122                sched_type_map[qes[1].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3123                DLB_EVENT_SCHED_TYPE_BYTE + 8);
3124        sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3125                sched_type_map[qes[2].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3126                DLB_EVENT_SCHED_TYPE_BYTE);
3127        sse_evt[1] = _mm_insert_epi8(sse_evt[1],
3128                sched_type_map[qes[3].sched_type] << DLB_EVENT_SCHED_TYPE_SHIFT,
3129                DLB_EVENT_SCHED_TYPE_BYTE + 8);
3130
3131        /* Store the metadata to the event (use the double-precision
3132         * _mm_storeh_pd because there is no integer function for storing the
3133         * upper 64b):
3134         * events[0].event = sse_evt[0][63:0]
3135         * events[1].event = sse_evt[0][127:64]
3136         * events[2].event = sse_evt[1][63:0]
3137         * events[3].event = sse_evt[1][127:64]
3138         */
3139        _mm_storel_epi64((__m128i *)&events[0].event, sse_evt[0]);
3140        _mm_storeh_pd((double *)&events[1].event, (__m128d) sse_evt[0]);
3141        _mm_storel_epi64((__m128i *)&events[2].event, sse_evt[1]);
3142        _mm_storeh_pd((double *)&events[3].event, (__m128d) sse_evt[1]);
3143
3144        DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[0].sched_type], 1);
3145        DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[1].sched_type], 1);
3146        DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[2].sched_type], 1);
3147        DLB2_INC_STAT(ev_port->stats.rx_sched_cnt[qes[3].sched_type], 1);
3148
3149        DLB2_INC_STAT(
3150                ev_port->stats.queue[events[0].queue_id].
3151                        qid_depth[qes[0].qid_depth],
3152                1);
3153        DLB2_INC_STAT(
3154                ev_port->stats.queue[events[1].queue_id].
3155                        qid_depth[qes[1].qid_depth],
3156                1);
3157        DLB2_INC_STAT(
3158                ev_port->stats.queue[events[2].queue_id].
3159                        qid_depth[qes[2].qid_depth],
3160                1);
3161        DLB2_INC_STAT(
3162                ev_port->stats.queue[events[3].queue_id].
3163                        qid_depth[qes[3].qid_depth],
3164                1);
3165
3166        DLB2_INC_STAT(ev_port->stats.traffic.rx_ok, num_events);
3167
3168        return num_events;
3169}
3170
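    /* Peek at the next four CQ entries without advancing the CQ index. QEs
     * are read at a stride of four entries (one QE per 64B cache line in
     * sparse CQ mode). Returns how many of the four QEs are valid, judged by
     * their gen bits.
     */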
3171static __rte_always_inline int
3172dlb2_recv_qe_sparse(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe)
3173{
3174        volatile struct dlb2_dequeue_qe *cq_addr;
3175        uint8_t xor_mask[2] = {0x0F, 0x00};
3176        const uint8_t and_mask = 0x0F;
3177        __m128i *qes = (__m128i *)qe;
3178        uint8_t gen_bits, gen_bit;
3179        uintptr_t addr[4];
3180        uint16_t idx;
3181
3182        cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3183
3184        idx = qm_port->cq_idx;
3185
3186        /* Load the next 4 QEs */
3187        addr[0] = (uintptr_t)&cq_addr[idx];
3188        addr[1] = (uintptr_t)&cq_addr[(idx +  4) & qm_port->cq_depth_mask];
3189        addr[2] = (uintptr_t)&cq_addr[(idx +  8) & qm_port->cq_depth_mask];
3190        addr[3] = (uintptr_t)&cq_addr[(idx + 12) & qm_port->cq_depth_mask];
3191
3192        /* Prefetch next batch of QEs (all CQs occupy at least 8 cache lines) */
3193        rte_prefetch0(&cq_addr[(idx + 16) & qm_port->cq_depth_mask]);
3194        rte_prefetch0(&cq_addr[(idx + 20) & qm_port->cq_depth_mask]);
3195        rte_prefetch0(&cq_addr[(idx + 24) & qm_port->cq_depth_mask]);
3196        rte_prefetch0(&cq_addr[(idx + 28) & qm_port->cq_depth_mask]);
3197
3198        /* Correct the xor_mask for wrapped QEs, whose expected gen bit is inverted */
3199        gen_bit = qm_port->gen_bit;
3200        xor_mask[gen_bit] ^= !!((idx +  4) > qm_port->cq_depth_mask) << 1;
3201        xor_mask[gen_bit] ^= !!((idx +  8) > qm_port->cq_depth_mask) << 2;
3202        xor_mask[gen_bit] ^= !!((idx + 12) > qm_port->cq_depth_mask) << 3;
3203
3204        /* Read the cache lines backwards to ensure that if QE[N] (N > 0) is
3205         * valid, then QEs[0:N-1] are too.
3206         */
3207        qes[3] = _mm_load_si128((__m128i *)(void *)addr[3]);
3208        rte_compiler_barrier();
3209        qes[2] = _mm_load_si128((__m128i *)(void *)addr[2]);
3210        rte_compiler_barrier();
3211        qes[1] = _mm_load_si128((__m128i *)(void *)addr[1]);
3212        rte_compiler_barrier();
3213        qes[0] = _mm_load_si128((__m128i *)(void *)addr[0]);
3214
3215        /* Extract and combine the gen bits */
3216        gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3217                   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3218                   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3219                   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3220
3221        /* XOR the combined bits such that a 1 represents a valid QE */
3222        gen_bits ^= xor_mask[gen_bit];
3223
3224        /* Mask off gen bits we don't care about */
3225        gen_bits &= and_mask;
3226
3227        return __builtin_popcount(gen_bits);
3228}
3229
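    /* Advance the CQ consumer index by cnt entries. The gen bit is derived
     * from the unmasked index and toggles once per pass over the CQ, which
     * is how newly written QEs are distinguished from stale ones.
     */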
3230static inline void
3231dlb2_inc_cq_idx(struct dlb2_port *qm_port, int cnt)
3232{
3233        uint16_t idx = qm_port->cq_idx_unmasked + cnt;
3234
3235        qm_port->cq_idx_unmasked = idx;
3236        qm_port->cq_idx = idx & qm_port->cq_depth_mask;
3237        qm_port->gen_bit = (~(idx >> qm_port->gen_bit_shift)) & 0x1;
3238}
3239
3240static inline int16_t
3241dlb2_hw_dequeue_sparse(struct dlb2_eventdev *dlb2,
3242                       struct dlb2_eventdev_port *ev_port,
3243                       struct rte_event *events,
3244                       uint16_t max_num,
3245                       uint64_t dequeue_timeout_ticks)
3246{
3247        uint64_t timeout;
3248        uint64_t start_ticks = 0ULL;
3249        struct dlb2_port *qm_port;
3250        int num = 0;
3251
3252        qm_port = &ev_port->qm_port;
3253
3254        /* Waiting is handled specially here. The wait behavior can be:
3255         * 1) no waiting at all
3256         * 2) busy poll only
3257         * 3) wait for an interrupt; once woken, or once the poll time
3258         *    has expired, return to the caller
3259         * 4) umonitor/umwait repeatedly, up to the poll time
3260         */
3261
3262        /* If configured for per dequeue wait, then use wait value provided
3263         * to this API. Otherwise we must use the global
3264         * value from eventdev config time.
3265         */
3266        if (!dlb2->global_dequeue_wait)
3267                timeout = dequeue_timeout_ticks;
3268        else
3269                timeout = dlb2->global_dequeue_wait_ticks;
3270
3271        start_ticks = rte_get_timer_cycles();
3272
3273        while (num < max_num) {
3274                struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3275                int num_avail;
3276
3277                /* Copy up to 4 QEs from the current cache line into qes */
3278                num_avail = dlb2_recv_qe_sparse(qm_port, qes);
3279
3280                /* But don't process more than the user requested */
3281                num_avail = RTE_MIN(num_avail, max_num - num);
3282
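                    /* In sparse mode each QE occupies a full cache line
                     * (four CQ entries), so advance the index by
                     * num_avail * 4.
                     */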
3283                dlb2_inc_cq_idx(qm_port, num_avail << 2);
3284
3285                if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3286                        num += dlb2_process_dequeue_four_qes(ev_port,
3287                                                              qm_port,
3288                                                              &events[num],
3289                                                              &qes[0]);
3290                else if (num_avail)
3291                        num += dlb2_process_dequeue_qes(ev_port,
3292                                                         qm_port,
3293                                                         &events[num],
3294                                                         &qes[0],
3295                                                         num_avail);
3296                else if ((timeout == 0) || (num > 0))
3297                        /* Not waiting in any form, or 1+ events received? */
3298                        break;
3299                else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3300                                           timeout, start_ticks))
3301                        break;
3302        }
3303
3304        qm_port->owed_tokens += num;
3305
3306        if (num) {
3307                if (qm_port->token_pop_mode == AUTO_POP)
3308                        dlb2_consume_qe_immediate(qm_port, num);
3309
3310                ev_port->outstanding_releases += num;
3311
3312                dlb2_port_credits_inc(qm_port, num);
3313        }
3314
3315        return num;
3316}
3317
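    /* Read the 64B cache line containing the current CQ entry and report how
     * many QEs, starting at *offset within that line, are valid according to
     * their gen bits. The CQ index is not advanced here.
     */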
3318static __rte_always_inline int
3319dlb2_recv_qe(struct dlb2_port *qm_port, struct dlb2_dequeue_qe *qe,
3320             uint8_t *offset)
3321{
3322        uint8_t xor_mask[2][4] = { {0x0F, 0x0E, 0x0C, 0x08},
3323                                   {0x00, 0x01, 0x03, 0x07} };
3324        uint8_t and_mask[4] = {0x0F, 0x0E, 0x0C, 0x08};
3325        volatile struct dlb2_dequeue_qe *cq_addr;
3326        __m128i *qes = (__m128i *)qe;
3327        uint64_t *cache_line_base;
3328        uint8_t gen_bits;
3329
3330        cq_addr = dlb2_port[qm_port->id][PORT_TYPE(qm_port)].cq_base;
3331        cq_addr = &cq_addr[qm_port->cq_idx];
3332
3333        cache_line_base = (void *)(((uintptr_t)cq_addr) & ~0x3F);
3334        *offset = ((uintptr_t)cq_addr & 0x30) >> 4;
3335
3336        /* Load the next CQ cache line from memory. Pack these reads as tight
3337         * as possible to reduce the chance that DLB invalidates the line while
3338         * the CPU is reading it. Read the cache line backwards to ensure that
3339         * if QE[N] (N > 0) is valid, then QEs[0:N-1] are too.
3340         *
3341         * (Valid QEs start at &qe[offset])
3342         */
3343        qes[3] = _mm_load_si128((__m128i *)&cache_line_base[6]);
3344        qes[2] = _mm_load_si128((__m128i *)&cache_line_base[4]);
3345        qes[1] = _mm_load_si128((__m128i *)&cache_line_base[2]);
3346        qes[0] = _mm_load_si128((__m128i *)&cache_line_base[0]);
3347
3348        /* Evict the cache line ASAP */
3349        rte_cldemote(cache_line_base);
3350
3351        /* Extract and combine the gen bits */
3352        gen_bits = ((_mm_extract_epi8(qes[0], 15) & 0x1) << 0) |
3353                   ((_mm_extract_epi8(qes[1], 15) & 0x1) << 1) |
3354                   ((_mm_extract_epi8(qes[2], 15) & 0x1) << 2) |
3355                   ((_mm_extract_epi8(qes[3], 15) & 0x1) << 3);
3356
3357        /* XOR the combined bits such that a 1 represents a valid QE */
3358        gen_bits ^= xor_mask[qm_port->gen_bit][*offset];
3359
3360        /* Mask off gen bits we don't care about */
3361        gen_bits &= and_mask[*offset];
3362
3363        return __builtin_popcount(gen_bits);
3364}
3365
3366static inline int16_t
3367dlb2_hw_dequeue(struct dlb2_eventdev *dlb2,
3368                struct dlb2_eventdev_port *ev_port,
3369                struct rte_event *events,
3370                uint16_t max_num,
3371                uint64_t dequeue_timeout_ticks)
3372{
3373        uint64_t timeout;
3374        uint64_t start_ticks = 0ULL;
3375        struct dlb2_port *qm_port;
3376        int num = 0;
3377
3378        qm_port = &ev_port->qm_port;
3379
3380        /* Waiting is handled specially here. The wait behavior can be:
3381         * 1) no waiting at all
3382         * 2) busy poll only
3383         * 3) wait for an interrupt; once woken, or once the poll time
3384         *    has expired, return to the caller
3385         * 4) umonitor/umwait repeatedly, up to the poll time
3386         */
3387
3388        /* If configured for per dequeue wait, then use wait value provided
3389         * to this API. Otherwise we must use the global
3390         * value from eventdev config time.
3391         */
3392        if (!dlb2->global_dequeue_wait)
3393                timeout = dequeue_timeout_ticks;
3394        else
3395                timeout = dlb2->global_dequeue_wait_ticks;
3396
3397        start_ticks = rte_get_timer_cycles();
3398
3399        while (num < max_num) {
3400                struct dlb2_dequeue_qe qes[DLB2_NUM_QES_PER_CACHE_LINE];
3401                uint8_t offset;
3402                int num_avail;
3403
3404                /* Copy up to 4 QEs from the current cache line into qes */
3405                num_avail = dlb2_recv_qe(qm_port, qes, &offset);
3406
3407                /* But don't process more than the user requested */
3408                num_avail = RTE_MIN(num_avail, max_num - num);
3409
3410                dlb2_inc_cq_idx(qm_port, num_avail);
3411
3412                if (num_avail == DLB2_NUM_QES_PER_CACHE_LINE)
3413                        num += dlb2_process_dequeue_four_qes(ev_port,
3414                                                             qm_port,
3415                                                             &events[num],
3416                                                             &qes[offset]);
3417                else if (num_avail)
3418                        num += dlb2_process_dequeue_qes(ev_port,
3419                                                        qm_port,
3420                                                        &events[num],
3421                                                        &qes[offset],
3422                                                        num_avail);
3423                else if ((timeout == 0) || (num > 0))
3424                        /* Not waiting in any form, or 1+ events received? */
3425                        break;
3426                else if (dlb2_dequeue_wait(dlb2, ev_port, qm_port,
3427                                           timeout, start_ticks))
3428                        break;
3429        }
3430
3431        qm_port->owed_tokens += num;
3432
3433        if (num) {
3434                if (qm_port->token_pop_mode == AUTO_POP)
3435                        dlb2_consume_qe_immediate(qm_port, num);
3436
3437                ev_port->outstanding_releases += num;
3438
3439                dlb2_port_credits_inc(qm_port, num);
3440        }
3441
3442        return num;
3443}
3444
3445static uint16_t
3446dlb2_event_dequeue_burst(void *event_port, struct rte_event *ev, uint16_t num,
3447                         uint64_t wait)
3448{
3449        struct dlb2_eventdev_port *ev_port = event_port;
3450        struct dlb2_port *qm_port = &ev_port->qm_port;
3451        struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3452        uint16_t cnt;
3453
3454        RTE_ASSERT(ev_port->setup_done);
3455        RTE_ASSERT(ev != NULL);
3456
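            /* With implicit release enabled, release every event delivered
             * by the previous dequeue before handing out new ones.
             */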
3457        if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3458                uint16_t out_rels = ev_port->outstanding_releases;
3459
3460                dlb2_event_release(dlb2, ev_port->id, out_rels);
3461
3462                DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3463        }
3464
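            /* In deferred token pop mode, return the CQ tokens owed from
             * earlier dequeues before polling again.
             */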
3465        if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3466                dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3467
3468        cnt = dlb2_hw_dequeue(dlb2, ev_port, ev, num, wait);
3469
3470        DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3471        DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3472
3473        return cnt;
3474}
3475
3476static uint16_t
3477dlb2_event_dequeue(void *event_port, struct rte_event *ev, uint64_t wait)
3478{
3479        return dlb2_event_dequeue_burst(event_port, ev, 1, wait);
3480}
3481
3482static uint16_t
3483dlb2_event_dequeue_burst_sparse(void *event_port, struct rte_event *ev,
3484                                uint16_t num, uint64_t wait)
3485{
3486        struct dlb2_eventdev_port *ev_port = event_port;
3487        struct dlb2_port *qm_port = &ev_port->qm_port;
3488        struct dlb2_eventdev *dlb2 = ev_port->dlb2;
3489        uint16_t cnt;
3490
3491        RTE_ASSERT(ev_port->setup_done);
3492        RTE_ASSERT(ev != NULL);
3493
3494        if (ev_port->implicit_release && ev_port->outstanding_releases > 0) {
3495                uint16_t out_rels = ev_port->outstanding_releases;
3496
3497                dlb2_event_release(dlb2, ev_port->id, out_rels);
3498
3499                DLB2_INC_STAT(ev_port->stats.tx_implicit_rel, out_rels);
3500        }
3501
3502        if (qm_port->token_pop_mode == DEFERRED_POP && qm_port->owed_tokens)
3503                dlb2_consume_qe_immediate(qm_port, qm_port->owed_tokens);
3504
3505        cnt = dlb2_hw_dequeue_sparse(dlb2, ev_port, ev, num, wait);
3506
3507        DLB2_INC_STAT(ev_port->stats.traffic.total_polls, 1);
3508        DLB2_INC_STAT(ev_port->stats.traffic.zero_polls, ((cnt == 0) ? 1 : 0));
3509        return cnt;
3510}
3511
3512static uint16_t
3513dlb2_event_dequeue_sparse(void *event_port, struct rte_event *ev,
3514                          uint64_t wait)
3515{
3516        return dlb2_event_dequeue_burst_sparse(event_port, ev, 1, wait);
3517}
3518
3519static void
3520dlb2_flush_port(struct rte_eventdev *dev, int port_id)
3521{
3522        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3523        eventdev_stop_flush_t flush;
3524        struct rte_event ev;
3525        uint8_t dev_id;
3526        void *arg;
3527        int i;
3528
3529        flush = dev->dev_ops->dev_stop_flush;
3530        dev_id = dev->data->dev_id;
3531        arg = dev->data->dev_stop_flush_arg;
3532
3533        while (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0)) {
3534                if (flush)
3535                        flush(dev_id, ev, arg);
3536
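                    /* Releases only apply to load-balanced traffic, so
                     * directed ports skip the RELEASE enqueue.
                     */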
3537                if (dlb2->ev_ports[port_id].qm_port.is_directed)
3538                        continue;
3539
3540                ev.op = RTE_EVENT_OP_RELEASE;
3541
3542                rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3543        }
3544
3545        /* Enqueue any additional outstanding releases */
3546        ev.op = RTE_EVENT_OP_RELEASE;
3547
3548        for (i = dlb2->ev_ports[port_id].outstanding_releases; i > 0; i--)
3549                rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
3550}
3551
3552static uint32_t
3553dlb2_get_ldb_queue_depth(struct dlb2_eventdev *dlb2,
3554                         struct dlb2_eventdev_queue *queue)
3555{
3556        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3557        struct dlb2_get_ldb_queue_depth_args cfg;
3558        int ret;
3559
3560        cfg.queue_id = queue->qm_queue.id;
3561
3562        ret = dlb2_iface_get_ldb_queue_depth(handle, &cfg);
3563        if (ret < 0) {
3564                DLB2_LOG_ERR("dlb2: get_ldb_queue_depth ret=%d (driver status: %s)\n",
3565                             ret, dlb2_error_strings[cfg.response.status]);
3566                return ret;
3567        }
3568
3569        return cfg.response.id;
3570}
3571
3572static uint32_t
3573dlb2_get_dir_queue_depth(struct dlb2_eventdev *dlb2,
3574                         struct dlb2_eventdev_queue *queue)
3575{
3576        struct dlb2_hw_dev *handle = &dlb2->qm_instance;
3577        struct dlb2_get_dir_queue_depth_args cfg;
3578        int ret;
3579
3580        cfg.queue_id = queue->qm_queue.id;
3581
3582        ret = dlb2_iface_get_dir_queue_depth(handle, &cfg);
3583        if (ret < 0) {
3584                DLB2_LOG_ERR("dlb2: get_dir_queue_depth ret=%d (driver status: %s)\n",
3585                             ret, dlb2_error_strings[cfg.response.status]);
3586                return ret;
3587        }
3588
3589        return cfg.response.id;
3590}
3591
3592uint32_t
3593dlb2_get_queue_depth(struct dlb2_eventdev *dlb2,
3594                     struct dlb2_eventdev_queue *queue)
3595{
3596        if (queue->qm_queue.is_directed)
3597                return dlb2_get_dir_queue_depth(dlb2, queue);
3598        else
3599                return dlb2_get_ldb_queue_depth(dlb2, queue);
3600}
3601
3602static bool
3603dlb2_queue_is_empty(struct dlb2_eventdev *dlb2,
3604                    struct dlb2_eventdev_queue *queue)
3605{
3606        return dlb2_get_queue_depth(dlb2, queue) == 0;
3607}
3608
3609static bool
3610dlb2_linked_queues_empty(struct dlb2_eventdev *dlb2)
3611{
3612        int i;
3613
3614        for (i = 0; i < dlb2->num_queues; i++) {
3615                if (dlb2->ev_queues[i].num_links == 0)
3616                        continue;
3617                if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3618                        return false;
3619        }
3620
3621        return true;
3622}
3623
3624static bool
3625dlb2_queues_empty(struct dlb2_eventdev *dlb2)
3626{
3627        int i;
3628
3629        for (i = 0; i < dlb2->num_queues; i++) {
3630                if (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3631                        return false;
3632        }
3633
3634        return true;
3635}
3636
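    /* Drain all events from the scheduling domain: flush every port until
     * the linked queues are empty, then temporarily link any remaining
     * non-empty (unlinked) load-balanced queues to a load-balanced port and
     * flush them through it.
     */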
3637static void
3638dlb2_drain(struct rte_eventdev *dev)
3639{
3640        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3641        struct dlb2_eventdev_port *ev_port = NULL;
3642        uint8_t dev_id;
3643        int i;
3644
3645        dev_id = dev->data->dev_id;
3646
3647        while (!dlb2_linked_queues_empty(dlb2)) {
3648                /* Flush all the ev_ports, which will drain all their connected
3649                 * queues.
3650                 */
3651                for (i = 0; i < dlb2->num_ports; i++)
3652                        dlb2_flush_port(dev, i);
3653        }
3654
3655        /* The queues are empty, but there may be events left in the ports. */
3656        for (i = 0; i < dlb2->num_ports; i++)
3657                dlb2_flush_port(dev, i);
3658
3659        /* If the domain's queues are empty, we're done. */
3660        if (dlb2_queues_empty(dlb2))
3661                return;
3662
3663        /* Else, there must be at least one unlinked load-balanced queue.
3664         * Select a load-balanced port with which to drain the unlinked
3665         * queue(s).
3666         */
3667        for (i = 0; i < dlb2->num_ports; i++) {
3668                ev_port = &dlb2->ev_ports[i];
3669
3670                if (!ev_port->qm_port.is_directed)
3671                        break;
3672        }
3673
3674        if (i == dlb2->num_ports) {
3675                DLB2_LOG_ERR("internal error: no LDB ev_ports\n");
3676                return;
3677        }
3678
3679        rte_errno = 0;
3680        rte_event_port_unlink(dev_id, ev_port->id, NULL, 0);
3681
3682        if (rte_errno) {
3683                DLB2_LOG_ERR("internal error: failed to unlink ev_port %d\n",
3684                             ev_port->id);
3685                return;
3686        }
3687
3688        for (i = 0; i < dlb2->num_queues; i++) {
3689                uint8_t qid, prio;
3690                int ret;
3691
3692                if (dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3693                        continue;
3694
3695                qid = i;
3696                prio = 0;
3697
3698                /* Link the ev_port to the queue */
3699                ret = rte_event_port_link(dev_id, ev_port->id, &qid, &prio, 1);
3700                if (ret != 1) {
3701                        DLB2_LOG_ERR("internal error: failed to link ev_port %d to queue %d\n",
3702                                     ev_port->id, qid);
3703                        return;
3704                }
3705
3706                /* Flush the queue */
3707                while (!dlb2_queue_is_empty(dlb2, &dlb2->ev_queues[i]))
3708                        dlb2_flush_port(dev, ev_port->id);
3709
3710                /* Drain any extant events in the ev_port. */
3711                dlb2_flush_port(dev, ev_port->id);
3712
3713                /* Unlink the ev_port from the queue */
3714                ret = rte_event_port_unlink(dev_id, ev_port->id, &qid, 1);
3715                if (ret != 1) {
3716                        DLB2_LOG_ERR("internal error: failed to unlink ev_port %d to queue %d\n",
3717                                     ev_port->id, qid);
3718                        return;
3719                }
3720        }
3721}
3722
3723static void
3724dlb2_eventdev_stop(struct rte_eventdev *dev)
3725{
3726        struct dlb2_eventdev *dlb2 = dlb2_pmd_priv(dev);
3727
3728        rte_spinlock_lock(&dlb2->qm_instance.resource_lock);
3729
3730        if (dlb2->run_state == DLB2_RUN_STATE_STOPPED) {
3731                DLB2_LOG_DBG("Internal error: already stopped\n");
3732                rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3733                return;
3734        } else if (dlb2->run_state != DLB2_RUN_STATE_STARTED) {
3735                DLB2_LOG_ERR("Internal error: bad state %d for dev_stop\n",
3736                             (int)dlb2->run_state);
3737                rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3738                return;
3739        }
3740
3741        dlb2->run_state = DLB2_RUN_STATE_STOPPING;
3742
3743        rte_spinlock_unlock(&dlb2->qm_instance.resource_lock);
3744
3745        dlb2_drain(dev);
3746
3747        dlb2->run_state = DLB2_RUN_STATE_STOPPED;
3748}
3749
3750static int
3751dlb2_eventdev_close(struct rte_eventdev *dev)
3752{
3753        dlb2_hw_reset_sched_domain(dev, false);
3754
3755        return 0;
3756}
3757
3758static void
3759dlb2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t id)
3760{
3761        RTE_SET_USED(dev);
3762        RTE_SET_USED(id);
3763
3764        /* This function intentionally left blank. */
3765}
3766
3767static void
3768dlb2_eventdev_port_release(void *port)
3769{
3770        struct dlb2_eventdev_port *ev_port = port;
3771        struct dlb2_port *qm_port;
3772
3773        if (ev_port) {
3774                qm_port = &ev_port->qm_port;
3775                if (qm_port->config_state == DLB2_CONFIGURED)
3776                        dlb2_free_qe_mem(qm_port);
3777        }
3778}
3779
3780static int
3781dlb2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
3782                            uint64_t *timeout_ticks)
3783{
3784        RTE_SET_USED(dev);
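            /* Note: the cycles-per-ns factor below is truncated to an
             * integer, so the conversion is approximate for clock rates that
             * are not a whole number of GHz.
             */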
3785        uint64_t cycles_per_ns = rte_get_timer_hz() / 1E9;
3786
3787        *timeout_ticks = ns * cycles_per_ns;
3788
3789        return 0;
3790}
3791
3792static void
3793dlb2_entry_points_init(struct rte_eventdev *dev)
3794{
3795        struct dlb2_eventdev *dlb2;
3796
3797        /* Expose PMD's eventdev interface */
3798        static struct rte_eventdev_ops dlb2_eventdev_entry_ops = {
3799                .dev_infos_get    = dlb2_eventdev_info_get,
3800                .dev_configure    = dlb2_eventdev_configure,
3801                .dev_start        = dlb2_eventdev_start,
3802                .dev_stop         = dlb2_eventdev_stop,
3803                .dev_close        = dlb2_eventdev_close,
3804                .queue_def_conf   = dlb2_eventdev_queue_default_conf_get,
3805                .queue_setup      = dlb2_eventdev_queue_setup,
3806                .queue_release    = dlb2_eventdev_queue_release,
3807                .port_def_conf    = dlb2_eventdev_port_default_conf_get,
3808                .port_setup       = dlb2_eventdev_port_setup,
3809                .port_release     = dlb2_eventdev_port_release,
3810                .port_link        = dlb2_eventdev_port_link,
3811                .port_unlink      = dlb2_eventdev_port_unlink,
3812                .port_unlinks_in_progress =
3813                                    dlb2_eventdev_port_unlinks_in_progress,
3814                .timeout_ticks    = dlb2_eventdev_timeout_ticks,
3815                .dump             = dlb2_eventdev_dump,
3816                .xstats_get       = dlb2_eventdev_xstats_get,
3817                .xstats_get_names = dlb2_eventdev_xstats_get_names,
3818                .xstats_get_by_name = dlb2_eventdev_xstats_get_by_name,
3819                .xstats_reset       = dlb2_eventdev_xstats_reset,
3820                .dev_selftest     = test_dlb2_eventdev,
3821        };
3822
3823        /* Install the eventdev ops and fast-path entry points */
3824
3825        dev->dev_ops = &dlb2_eventdev_entry_ops;
3826        dev->enqueue = dlb2_event_enqueue;
3827        dev->enqueue_burst = dlb2_event_enqueue_burst;
3828        dev->enqueue_new_burst = dlb2_event_enqueue_new_burst;
3829        dev->enqueue_forward_burst = dlb2_event_enqueue_forward_burst;
3830
3831        dlb2 = dev->data->dev_private;
3832        if (dlb2->poll_mode == DLB2_CQ_POLL_MODE_SPARSE) {
3833                dev->dequeue = dlb2_event_dequeue_sparse;
3834                dev->dequeue_burst = dlb2_event_dequeue_burst_sparse;
3835        } else {
3836                dev->dequeue = dlb2_event_dequeue;
3837                dev->dequeue_burst = dlb2_event_dequeue_burst;
3838        }
3839}
3840
3841int
3842dlb2_primary_eventdev_probe(struct rte_eventdev *dev,
3843                            const char *name,
3844                            struct dlb2_devargs *dlb2_args)
3845{
3846        struct dlb2_eventdev *dlb2;
3847        int err, i;
3848
3849        dlb2 = dev->data->dev_private;
3850
3851        dlb2->event_dev = dev; /* backlink */
3852
3853        evdev_dlb2_default_info.driver_name = name;
3854
3855        dlb2->max_num_events_override = dlb2_args->max_num_events;
3856        dlb2->num_dir_credits_override = dlb2_args->num_dir_credits_override;
3857        dlb2->qm_instance.cos_id = dlb2_args->cos_id;
3858
3859        err = dlb2_iface_open(&dlb2->qm_instance, name);
3860        if (err < 0) {
3861                DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3862                             err);
3863                return err;
3864        }
3865
3866        err = dlb2_iface_get_device_version(&dlb2->qm_instance,
3867                                            &dlb2->revision);
3868        if (err < 0) {
3869                DLB2_LOG_ERR("dlb2: failed to get the device version, err=%d\n",
3870                             err);
3871                return err;
3872        }
3873
3874        err = dlb2_hw_query_resources(dlb2);
3875        if (err) {
3876                DLB2_LOG_ERR("get resources err=%d for %s\n",
3877                             err, name);
3878                return err;
3879        }
3880
3881        dlb2_iface_hardware_init(&dlb2->qm_instance);
3882
3883        err = dlb2_iface_get_cq_poll_mode(&dlb2->qm_instance, &dlb2->poll_mode);
3884        if (err < 0) {
3885                DLB2_LOG_ERR("dlb2: failed to get the poll mode, err=%d\n",
3886                             err);
3887                return err;
3888        }
3889
3890        /* Complete xstats runtime initialization */
3891        err = dlb2_xstats_init(dlb2);
3892        if (err) {
3893                DLB2_LOG_ERR("dlb2: failed to init xstats, err=%d\n", err);
3894                return err;
3895        }
3896
3897        /* Initialize each port's token pop mode */
3898        for (i = 0; i < DLB2_MAX_NUM_PORTS; i++)
3899                dlb2->ev_ports[i].qm_port.token_pop_mode = AUTO_POP;
3900
3901        rte_spinlock_init(&dlb2->qm_instance.resource_lock);
3902
3903        dlb2_iface_low_level_io_init();
3904
3905        dlb2_entry_points_init(dev);
3906
3907        dlb2_init_queue_depth_thresholds(dlb2,
3908                                         dlb2_args->qid_depth_thresholds.val);
3909
3910        return 0;
3911}
3912
3913int
3914dlb2_secondary_eventdev_probe(struct rte_eventdev *dev,
3915                              const char *name)
3916{
3917        struct dlb2_eventdev *dlb2;
3918        int err;
3919
3920        dlb2 = dev->data->dev_private;
3921
3922        evdev_dlb2_default_info.driver_name = name;
3923
3924        err = dlb2_iface_open(&dlb2->qm_instance, name);
3925        if (err < 0) {
3926                DLB2_LOG_ERR("could not open event hardware device, err=%d\n",
3927                             err);
3928                return err;
3929        }
3930
3931        err = dlb2_hw_query_resources(dlb2);
3932        if (err) {
3933                DLB2_LOG_ERR("get resources err=%d for %s\n",
3934                             err, name);
3935                return err;
3936        }
3937
3938        dlb2_iface_low_level_io_init();
3939
3940        dlb2_entry_points_init(dev);
3941
3942        return 0;
3943}
3944
3945int
3946dlb2_parse_params(const char *params,
3947                  const char *name,
3948                  struct dlb2_devargs *dlb2_args)
3949{
3950        int ret = 0;
3951        static const char * const args[] = { NUMA_NODE_ARG,
3952                                             DLB2_MAX_NUM_EVENTS,
3953                                             DLB2_NUM_DIR_CREDITS,
3954                                             DEV_ID_ARG,
3955                                             DLB2_QID_DEPTH_THRESH_ARG,
3956                                             DLB2_COS_ARG,
3957                                             NULL };
3958
3959        if (params != NULL && params[0] != '\0') {
3960                struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
3961
3962                if (kvlist == NULL) {
3963                        RTE_LOG(INFO, PMD,
3964                                "Ignoring unsupported parameters when creating device '%s'\n",
3965                                name);
3966                } else {
3967                        ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
3968                                                 set_numa_node,
3969                                                 &dlb2_args->socket_id);
3970                        if (ret != 0) {
3971                                DLB2_LOG_ERR("%s: Error parsing numa node parameter",
3972                                             name);
3973                                rte_kvargs_free(kvlist);
3974                                return ret;
3975                        }
3976
3977                        ret = rte_kvargs_process(kvlist, DLB2_MAX_NUM_EVENTS,
3978                                                 set_max_num_events,
3979                                                 &dlb2_args->max_num_events);
3980                        if (ret != 0) {
3981                                DLB2_LOG_ERR("%s: Error parsing max_num_events parameter",
3982                                             name);
3983                                rte_kvargs_free(kvlist);
3984                                return ret;
3985                        }
3986
3987                        ret = rte_kvargs_process(kvlist,
3988                                        DLB2_NUM_DIR_CREDITS,
3989                                        set_num_dir_credits,
3990                                        &dlb2_args->num_dir_credits_override);
3991                        if (ret != 0) {
3992                                DLB2_LOG_ERR("%s: Error parsing num_dir_credits parameter",
3993                                             name);
3994                                rte_kvargs_free(kvlist);
3995                                return ret;
3996                        }
3997
3998                        ret = rte_kvargs_process(kvlist, DEV_ID_ARG,
3999                                                 set_dev_id,
4000                                                 &dlb2_args->dev_id);
4001                        if (ret != 0) {
4002                                DLB2_LOG_ERR("%s: Error parsing dev_id parameter",
4003                                             name);
4004                                rte_kvargs_free(kvlist);
4005                                return ret;
4006                        }
4007
4008                        ret = rte_kvargs_process(
4009                                        kvlist,
4010                                        DLB2_QID_DEPTH_THRESH_ARG,
4011                                        set_qid_depth_thresh,
4012                                        &dlb2_args->qid_depth_thresholds);
4013                        if (ret != 0) {
4014                                DLB2_LOG_ERR("%s: Error parsing qid_depth_thresh parameter",
4015                                             name);
4016                                rte_kvargs_free(kvlist);
4017                                return ret;
4018                        }
4019
4020                        ret = rte_kvargs_process(kvlist, DLB2_COS_ARG,
4021                                                 set_cos,
4022                                                 &dlb2_args->cos_id);
4023                        if (ret != 0) {
4024                                DLB2_LOG_ERR("%s: Error parsing cos parameter",
4025                                             name);
4026                                rte_kvargs_free(kvlist);
4027                                return ret;
4028                        }
4029
4030                        rte_kvargs_free(kvlist);
4031                }
4032        }
4033        return ret;
4034}
4035RTE_LOG_REGISTER(eventdev_dlb2_log_level, pmd.event.dlb2, NOTICE);
4036