dpdk/app/test-eventdev/test_perf_common.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <math.h>

#include "test_perf_common.h"

#define NB_CRYPTODEV_DESCRIPTORS 1024
#define DATA_SIZE               512
struct modex_test_data {
        enum rte_crypto_asym_xform_type xform_type;
        struct {
                uint8_t data[DATA_SIZE];
                uint16_t len;
        } base;
        struct {
                uint8_t data[DATA_SIZE];
                uint16_t len;
        } exponent;
        struct {
                uint8_t data[DATA_SIZE];
                uint16_t len;
        } modulus;
        struct {
                uint8_t data[DATA_SIZE];
                uint16_t len;
        } reminder;
        uint16_t result_len;
};

static struct modex_test_data modex_test_case = {
        .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX,
        .base = {
                .data = {
                        0xF8, 0xBA, 0x1A, 0x55, 0xD0, 0x2F, 0x85,
                        0xAE, 0x96, 0x7B, 0xB6, 0x2F, 0xB6, 0xCD,
                        0xA8, 0xEB, 0x7E, 0x78, 0xA0, 0x50
                },
                .len = 20,
        },
        .exponent = {
                .data = {
                        0x01, 0x00, 0x01
                },
                .len = 3,
        },
        .reminder = {
                .data = {
                        0x2C, 0x60, 0x75, 0x45, 0x98, 0x9D, 0xE0, 0x72,
                        0xA0, 0x9D, 0x3A, 0x9E, 0x03, 0x38, 0x73, 0x3C,
                        0x31, 0x83, 0x04, 0xFE, 0x75, 0x43, 0xE6, 0x17,
                        0x5C, 0x01, 0x29, 0x51, 0x69, 0x33, 0x62, 0x2D,
                        0x78, 0xBE, 0xAE, 0xC4, 0xBC, 0xDE, 0x7E, 0x2C,
                        0x77, 0x84, 0xF2, 0xC5, 0x14, 0xB5, 0x2F, 0xF7,
                        0xC5, 0x94, 0xEF, 0x86, 0x75, 0x75, 0xB5, 0x11,
                        0xE5, 0x0E, 0x0A, 0x29, 0x76, 0xE2, 0xEA, 0x32,
                        0x0E, 0x43, 0x77, 0x7E, 0x2C, 0x27, 0xAC, 0x3B,
                        0x86, 0xA5, 0xDB, 0xC9, 0x48, 0x40, 0xE8, 0x99,
                        0x9A, 0x0A, 0x3D, 0xD6, 0x74, 0xFA, 0x2E, 0x2E,
                        0x5B, 0xAF, 0x8C, 0x99, 0x44, 0x2A, 0x67, 0x38,
                        0x27, 0x41, 0x59, 0x9D, 0xB8, 0x51, 0xC9, 0xF7,
                        0x43, 0x61, 0x31, 0x6E, 0xF1, 0x25, 0x38, 0x7F,
                        0xAE, 0xC6, 0xD0, 0xBB, 0x29, 0x76, 0x3F, 0x46,
                        0x2E, 0x1B, 0xE4, 0x67, 0x71, 0xE3, 0x87, 0x5A
                },
                .len = 128,
        },
        .modulus = {
                .data = {
                        0xb3, 0xa1, 0xaf, 0xb7, 0x13, 0x08, 0x00, 0x0a,
                        0x35, 0xdc, 0x2b, 0x20, 0x8d, 0xa1, 0xb5, 0xce,
                        0x47, 0x8a, 0xc3, 0x80, 0xf4, 0x7d, 0x4a, 0xa2,
                        0x62, 0xfd, 0x61, 0x7f, 0xb5, 0xa8, 0xde, 0x0a,
                        0x17, 0x97, 0xa0, 0xbf, 0xdf, 0x56, 0x5a, 0x3d,
                        0x51, 0x56, 0x4f, 0x70, 0x70, 0x3f, 0x63, 0x6a,
                        0x44, 0x5b, 0xad, 0x84, 0x0d, 0x3f, 0x27, 0x6e,
                        0x3b, 0x34, 0x91, 0x60, 0x14, 0xb9, 0xaa, 0x72,
                        0xfd, 0xa3, 0x64, 0xd2, 0x03, 0xa7, 0x53, 0x87,
                        0x9e, 0x88, 0x0b, 0xc1, 0x14, 0x93, 0x1a, 0x62,
                        0xff, 0xb1, 0x5d, 0x74, 0xcd, 0x59, 0x63, 0x18,
                        0x11, 0x3d, 0x4f, 0xba, 0x75, 0xd4, 0x33, 0x4e,
                        0x23, 0x6b, 0x7b, 0x57, 0x44, 0xe1, 0xd3, 0x03,
                        0x13, 0xa6, 0xf0, 0x8b, 0x60, 0xb0, 0x9e, 0xee,
                        0x75, 0x08, 0x9d, 0x71, 0x63, 0x13, 0xcb, 0xa6,
                        0x81, 0x92, 0x14, 0x03, 0x22, 0x2d, 0xde, 0x55
                },
                .len = 128,
        },
        .result_len = 128,
};

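/* Print the per-worker packet distribution and return the overall
 * test result.
 */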
int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
        RTE_SET_USED(opt);
        int i;
        uint64_t total = 0;
        struct test_perf *t = evt_test_priv(test);

        printf("Packet distribution across worker cores:\n");
        for (i = 0; i < t->nb_workers; i++)
                total += t->worker[i].processed_pkts;
        for (i = 0; i < t->nb_workers; i++)
                printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
                                CLGRN" %3.2f"CLNRM"\n", i,
                                t->worker[i].processed_pkts,
                                (((double)t->worker[i].processed_pkts)/total)
                                * 100);

        return t->result;
}

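/* Synthetic producer: take events from the mempool and enqueue them
 * one at a time, timestamping each event for latency measurement.
 */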
static inline int
perf_producer(void *arg)
{
        int i;
        struct prod_data *p = arg;
        struct test_perf *t = p->t;
        struct evt_options *opt = t->opt;
        const uint8_t dev_id = p->dev_id;
        const uint8_t port = p->port_id;
        struct rte_mempool *pool = t->pool;
        const uint64_t nb_pkts = t->nb_pkts;
        const uint32_t nb_flows = t->nb_flows;
        uint32_t flow_counter = 0;
        uint64_t count = 0;
        struct perf_elt *m[BURST_SIZE + 1] = {NULL};
        struct rte_event ev;

        if (opt->verbose_level > 1)
                printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
                                rte_lcore_id(), dev_id, port, p->queue_id);

        ev.event = 0;
        ev.op = RTE_EVENT_OP_NEW;
        ev.queue_id = p->queue_id;
        ev.sched_type = t->opt->sched_type_list[0];
        ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
        ev.event_type = RTE_EVENT_TYPE_CPU;
        ev.sub_event_type = 0; /* stage 0 */

        while (count < nb_pkts && t->done == false) {
                if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
                        continue;
                for (i = 0; i < BURST_SIZE; i++) {
                        ev.flow_id = flow_counter++ % nb_flows;
                        ev.event_ptr = m[i];
                        m[i]->timestamp = rte_get_timer_cycles();
                        while (rte_event_enqueue_burst(dev_id,
                                                       port, &ev, 1) != 1) {
                                if (t->done)
                                        break;
                                rte_pause();
                                m[i]->timestamp = rte_get_timer_cycles();
                        }
                }
                count += BURST_SIZE;
        }

        return 0;
}

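/* Synthetic burst producer: enqueue prod_enq_burst_sz events at a time,
 * clamped to the device's maximum enqueue depth.
 */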
static inline int
perf_producer_burst(void *arg)
{
        uint32_t i;
        uint64_t timestamp;
        struct rte_event_dev_info dev_info;
        struct prod_data *p = arg;
        struct test_perf *t = p->t;
        struct evt_options *opt = t->opt;
        const uint8_t dev_id = p->dev_id;
        const uint8_t port = p->port_id;
        struct rte_mempool *pool = t->pool;
        const uint64_t nb_pkts = t->nb_pkts;
        const uint32_t nb_flows = t->nb_flows;
        uint32_t flow_counter = 0;
        uint16_t enq = 0;
        uint64_t count = 0;
        struct perf_elt *m[MAX_PROD_ENQ_BURST_SIZE + 1];
        struct rte_event ev[MAX_PROD_ENQ_BURST_SIZE + 1];
        uint32_t burst_size = opt->prod_enq_burst_sz;

        memset(m, 0, sizeof(*m) * (MAX_PROD_ENQ_BURST_SIZE + 1));
        rte_event_dev_info_get(dev_id, &dev_info);
        if (dev_info.max_event_port_enqueue_depth < burst_size)
                burst_size = dev_info.max_event_port_enqueue_depth;

        if (opt->verbose_level > 1)
                printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
                                rte_lcore_id(), dev_id, port, p->queue_id);

        for (i = 0; i < burst_size; i++) {
                ev[i].op = RTE_EVENT_OP_NEW;
                ev[i].queue_id = p->queue_id;
                ev[i].sched_type = t->opt->sched_type_list[0];
                ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
                ev[i].event_type = RTE_EVENT_TYPE_CPU;
                ev[i].sub_event_type = 0; /* stage 0 */
        }

        while (count < nb_pkts && t->done == false) {
                if (rte_mempool_get_bulk(pool, (void **)m, burst_size) < 0)
                        continue;
                timestamp = rte_get_timer_cycles();
                for (i = 0; i < burst_size; i++) {
                        ev[i].flow_id = flow_counter++ % nb_flows;
                        ev[i].event_ptr = m[i];
                        m[i]->timestamp = timestamp;
                }
                enq = rte_event_enqueue_burst(dev_id, port, ev, burst_size);
                while (enq < burst_size) {
                        enq += rte_event_enqueue_burst(dev_id, port,
                                                        ev + enq,
                                                        burst_size - enq);
                        if (t->done)
                                break;
                        rte_pause();
                        timestamp = rte_get_timer_cycles();
                        for (i = enq; i < burst_size; i++)
                                m[i]->timestamp = timestamp;
                }
                count += burst_size;
        }
        return 0;
}

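/* Timer producer: arm event timers one at a time and accumulate the
 * measured arm latency.
 */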
static inline int
perf_event_timer_producer(void *arg)
{
        int i;
        struct prod_data *p = arg;
        struct test_perf *t = p->t;
        struct evt_options *opt = t->opt;
        uint32_t flow_counter = 0;
        uint64_t count = 0;
        uint64_t arm_latency = 0;
        const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
        const uint32_t nb_flows = t->nb_flows;
        const uint64_t nb_timers = opt->nb_timers;
        struct rte_mempool *pool = t->pool;
        struct perf_elt *m[BURST_SIZE + 1] = {NULL};
        struct rte_event_timer_adapter **adptr = t->timer_adptr;
        struct rte_event_timer tim;
        uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

        memset(&tim, 0, sizeof(struct rte_event_timer));
        timeout_ticks =
                opt->optm_timer_tick_nsec
                        ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
                               opt->optm_timer_tick_nsec)
                        : timeout_ticks;
        timeout_ticks += timeout_ticks ? 0 : 1;
        tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
        tim.ev.op = RTE_EVENT_OP_NEW;
        tim.ev.sched_type = t->opt->sched_type_list[0];
        tim.ev.queue_id = p->queue_id;
        tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
        tim.state = RTE_EVENT_TIMER_NOT_ARMED;
        tim.timeout_ticks = timeout_ticks;

        if (opt->verbose_level > 1)
                printf("%s(): lcore %d\n", __func__, rte_lcore_id());

        while (count < nb_timers && t->done == false) {
                if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
                        continue;
                for (i = 0; i < BURST_SIZE; i++) {
                        rte_prefetch0(m[i + 1]);
                        m[i]->tim = tim;
                        m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
                        m[i]->tim.ev.event_ptr = m[i];
                        m[i]->timestamp = rte_get_timer_cycles();
                        while (rte_event_timer_arm_burst(
                               adptr[flow_counter % nb_timer_adptrs],
                               (struct rte_event_timer **)&m[i], 1) != 1) {
                                if (t->done)
                                        break;
                                m[i]->timestamp = rte_get_timer_cycles();
                        }
                        arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
                }
                count += BURST_SIZE;
        }
        fflush(stdout);
        rte_delay_ms(1000);
        printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
                        __func__, rte_lcore_id(),
                        count ? (float)(arm_latency / count) /
                        (rte_get_timer_hz() / 1000000) : 0);
        return 0;
}

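/* Timer burst producer: arm BURST_SIZE event timers per iteration with
 * a common timeout tick.
 */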
static inline int
perf_event_timer_producer_burst(void *arg)
{
        int i;
        struct prod_data *p = arg;
        struct test_perf *t = p->t;
        struct evt_options *opt = t->opt;
        uint32_t flow_counter = 0;
        uint64_t count = 0;
        uint64_t arm_latency = 0;
        const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
        const uint32_t nb_flows = t->nb_flows;
        const uint64_t nb_timers = opt->nb_timers;
        struct rte_mempool *pool = t->pool;
        struct perf_elt *m[BURST_SIZE + 1] = {NULL};
        struct rte_event_timer_adapter **adptr = t->timer_adptr;
        struct rte_event_timer tim;
        uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;

        memset(&tim, 0, sizeof(struct rte_event_timer));
        timeout_ticks =
                opt->optm_timer_tick_nsec
                        ? ceil((double)(timeout_ticks * opt->timer_tick_nsec) /
                               opt->optm_timer_tick_nsec)
                        : timeout_ticks;
        timeout_ticks += timeout_ticks ? 0 : 1;
        tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
        tim.ev.op = RTE_EVENT_OP_NEW;
        tim.ev.sched_type = t->opt->sched_type_list[0];
        tim.ev.queue_id = p->queue_id;
        tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
        tim.state = RTE_EVENT_TIMER_NOT_ARMED;
        tim.timeout_ticks = timeout_ticks;

        if (opt->verbose_level > 1)
                printf("%s(): lcore %d\n", __func__, rte_lcore_id());

        while (count < nb_timers && t->done == false) {
                if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
                        continue;
                for (i = 0; i < BURST_SIZE; i++) {
                        rte_prefetch0(m[i + 1]);
                        m[i]->tim = tim;
                        m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
                        m[i]->tim.ev.event_ptr = m[i];
                        m[i]->timestamp = rte_get_timer_cycles();
                }
                rte_event_timer_arm_tmo_tick_burst(
                                adptr[flow_counter % nb_timer_adptrs],
                                (struct rte_event_timer **)m,
                                tim.timeout_ticks,
                                BURST_SIZE);
                arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
                count += BURST_SIZE;
        }
        fflush(stdout);
        rte_delay_ms(1000);
        printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
                        __func__, rte_lcore_id(),
                        count ? (float)(arm_latency / count) /
                        (rte_get_timer_hz() / 1000000) : 0);
        return 0;
}

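/* OP_NEW mode: enqueue crypto ops directly to the cryptodev queue pair;
 * completions come back to the workers as events.
 */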
static inline void
crypto_adapter_enq_op_new(struct prod_data *p)
{
        struct test_perf *t = p->t;
        const uint32_t nb_flows = t->nb_flows;
        const uint64_t nb_pkts = t->nb_pkts;
        struct rte_mempool *pool = t->pool;
        struct evt_options *opt = t->opt;
        uint16_t qp_id = p->ca.cdev_qp_id;
        uint8_t cdev_id = p->ca.cdev_id;
        uint64_t alloc_failures = 0;
        uint32_t flow_counter = 0;
        struct rte_crypto_op *op;
        struct rte_mbuf *m;
        uint64_t count = 0;
        uint16_t len;

        if (opt->verbose_level > 1)
                printf("%s(): lcore %d queue %d cdev_id %u cdev_qp_id %u\n",
                       __func__, rte_lcore_id(), p->queue_id, p->ca.cdev_id,
                       p->ca.cdev_qp_id);

        len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;

        while (count < nb_pkts && t->done == false) {
                if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        struct rte_crypto_sym_op *sym_op;

                        op = rte_crypto_op_alloc(t->ca_op_pool,
                                         RTE_CRYPTO_OP_TYPE_SYMMETRIC);
                        if (unlikely(op == NULL)) {
                                alloc_failures++;
                                continue;
                        }

                        m = rte_pktmbuf_alloc(pool);
                        if (unlikely(m == NULL)) {
                                alloc_failures++;
                                rte_crypto_op_free(op);
                                continue;
                        }

                        rte_pktmbuf_append(m, len);
                        sym_op = op->sym;
                        sym_op->m_src = m;
                        sym_op->cipher.data.offset = 0;
                        sym_op->cipher.data.length = len;
                        rte_crypto_op_attach_sym_session(
                                op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
                } else {
                        struct rte_crypto_asym_op *asym_op;
                        uint8_t *result = rte_zmalloc(NULL,
                                        modex_test_case.result_len, 0);

                        op = rte_crypto_op_alloc(t->ca_op_pool,
                                         RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
                        if (unlikely(op == NULL)) {
                                alloc_failures++;
                                rte_free(result);
                                continue;
                        }

                        asym_op = op->asym;
                        asym_op->modex.base.data = modex_test_case.base.data;
                        asym_op->modex.base.length = modex_test_case.base.len;
                        asym_op->modex.result.data = result;
                        asym_op->modex.result.length = modex_test_case.result_len;
                        rte_crypto_op_attach_asym_session(
                                op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
                }
                while (rte_cryptodev_enqueue_burst(cdev_id, qp_id, &op, 1) != 1 &&
                                t->done == false)
                        rte_pause();

                count++;
        }

        if (opt->verbose_level > 1 && alloc_failures)
                printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
                       __func__, rte_lcore_id(), alloc_failures);
}

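/* OP_FORWARD mode: wrap each crypto op in an event and enqueue it through
 * the adapter; the event device forwards it to the cryptodev.
 */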
static inline void
crypto_adapter_enq_op_fwd(struct prod_data *p)
{
        const uint8_t dev_id = p->dev_id;
        const uint8_t port = p->port_id;
        struct test_perf *t = p->t;
        const uint32_t nb_flows = t->nb_flows;
        const uint64_t nb_pkts = t->nb_pkts;
        struct rte_mempool *pool = t->pool;
        struct evt_options *opt = t->opt;
        uint64_t alloc_failures = 0;
        uint32_t flow_counter = 0;
        struct rte_crypto_op *op;
        struct rte_event ev;
        struct rte_mbuf *m;
        uint64_t count = 0;
        uint16_t len;

        if (opt->verbose_level > 1)
                printf("%s(): lcore %d port %d queue %d cdev_id %u cdev_qp_id %u\n",
                       __func__, rte_lcore_id(), port, p->queue_id,
                       p->ca.cdev_id, p->ca.cdev_qp_id);

        ev.event = 0;
        ev.op = RTE_EVENT_OP_NEW;
        ev.queue_id = p->queue_id;
        ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
        ev.event_type = RTE_EVENT_TYPE_CPU;
        len = opt->mbuf_sz ? opt->mbuf_sz : RTE_ETHER_MIN_LEN;

        while (count < nb_pkts && t->done == false) {
                if (opt->crypto_op_type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        struct rte_crypto_sym_op *sym_op;

                        op = rte_crypto_op_alloc(t->ca_op_pool,
                                         RTE_CRYPTO_OP_TYPE_SYMMETRIC);
                        if (unlikely(op == NULL)) {
                                alloc_failures++;
                                continue;
                        }

                        m = rte_pktmbuf_alloc(pool);
                        if (unlikely(m == NULL)) {
                                alloc_failures++;
                                rte_crypto_op_free(op);
                                continue;
                        }

                        rte_pktmbuf_append(m, len);
                        sym_op = op->sym;
                        sym_op->m_src = m;
                        sym_op->cipher.data.offset = 0;
                        sym_op->cipher.data.length = len;
                        rte_crypto_op_attach_sym_session(
                                op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
                } else {
                        struct rte_crypto_asym_op *asym_op;
                        uint8_t *result = rte_zmalloc(NULL,
                                        modex_test_case.result_len, 0);

                        op = rte_crypto_op_alloc(t->ca_op_pool,
                                         RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
                        if (unlikely(op == NULL)) {
                                alloc_failures++;
                                rte_free(result);
                                continue;
                        }

                        asym_op = op->asym;
                        asym_op->modex.base.data = modex_test_case.base.data;
                        asym_op->modex.base.length = modex_test_case.base.len;
                        asym_op->modex.result.data = result;
                        asym_op->modex.result.length = modex_test_case.result_len;
                        rte_crypto_op_attach_asym_session(
                                op, p->ca.crypto_sess[flow_counter++ % nb_flows]);
                }
                ev.event_ptr = op;

                while (rte_event_crypto_adapter_enqueue(dev_id, port, &ev, 1) != 1 &&
                       t->done == false)
                        rte_pause();

                count++;
        }

        if (opt->verbose_level > 1 && alloc_failures)
                printf("%s(): lcore %d allocation failures: %"PRIu64"\n",
                       __func__, rte_lcore_id(), alloc_failures);
}

static inline int
perf_event_crypto_producer(void *arg)
{
        struct prod_data *p = arg;
        struct evt_options *opt = p->t->opt;

        if (opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
                crypto_adapter_enq_op_new(p);
        else
                crypto_adapter_enq_op_fwd(p);

        return 0;
}

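/* Select the producer loop matching the configured producer type. */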
static int
perf_producer_wrapper(void *arg)
{
        struct prod_data *p = arg;
        struct test_perf *t = p->t;
        bool burst = evt_has_burst_mode(p->dev_id);

        /* In case of synthetic producer, launch perf_producer or
         * perf_producer_burst depending on producer enqueue burst size
         */
        if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
                        t->opt->prod_enq_burst_sz == 1)
                return perf_producer(arg);
        else if (t->opt->prod_type == EVT_PROD_TYPE_SYNT &&
                        t->opt->prod_enq_burst_sz > 1) {
                if (!burst)
                        evt_err("This event device does not support burst mode");
                else
                        return perf_producer_burst(arg);
        } else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
                        !t->opt->timdev_use_burst)
                return perf_event_timer_producer(arg);
        else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
                        t->opt->timdev_use_burst)
                return perf_event_timer_producer_burst(arg);
        else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
                return perf_event_crypto_producer(arg);
        return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
        uint8_t i;
        uint64_t total = 0;

        for (i = 0; i < t->nb_workers; i++)
                total += t->worker[i].processed_pkts;

        return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
        uint8_t i;
        uint64_t total = 0;

        for (i = 0; i < t->nb_workers; i++)
                total += t->worker[i].latency;

        return total;
}

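/* Launch worker and producer lcores, then poll roughly once per second to
 * print throughput/latency and detect a scheduler deadlock (no progress
 * for five seconds).
 */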
int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                int (*worker)(void *))
{
        int ret, lcore_id;
        struct test_perf *t = evt_test_priv(test);

        int port_idx = 0;
        /* launch workers */
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (!(opt->wlcores[lcore_id]))
                        continue;

                ret = rte_eal_remote_launch(worker,
                                 &t->worker[port_idx], lcore_id);
                if (ret) {
                        evt_err("failed to launch worker %d", lcore_id);
                        return ret;
                }
                port_idx++;
        }

        /* launch producers */
        RTE_LCORE_FOREACH_WORKER(lcore_id) {
                if (!(opt->plcores[lcore_id]))
                        continue;

                ret = rte_eal_remote_launch(perf_producer_wrapper,
                                &t->prod[port_idx], lcore_id);
                if (ret) {
                        evt_err("failed to launch perf_producer %d", lcore_id);
                        return ret;
                }
                port_idx++;
        }

        const uint64_t total_pkts = t->outstand_pkts;

        uint64_t dead_lock_cycles = rte_get_timer_cycles();
        int64_t dead_lock_remaining = total_pkts;
        const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

        uint64_t perf_cycles = rte_get_timer_cycles();
        int64_t perf_remaining = total_pkts;
        const uint64_t perf_sample = rte_get_timer_hz();

        static float total_mpps;
        static uint64_t samples;

        const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
        int64_t remaining = t->outstand_pkts - processed_pkts(t);

        while (t->done == false) {
                const uint64_t new_cycles = rte_get_timer_cycles();

                if ((new_cycles - perf_cycles) > perf_sample) {
                        const uint64_t latency = total_latency(t);
                        const uint64_t pkts = processed_pkts(t);

                        remaining = t->outstand_pkts - pkts;
                        float mpps = (float)(perf_remaining-remaining)/1000000;

                        perf_remaining = remaining;
                        perf_cycles = new_cycles;
                        total_mpps += mpps;
                        ++samples;
                        if (opt->fwd_latency && pkts > 0) {
                                printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
                                        mpps, total_mpps/samples,
                                        (float)(latency/pkts)/freq_mhz);
                        } else {
                                printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
                                        mpps, total_mpps/samples);
                        }
                        fflush(stdout);

                        if (remaining <= 0) {
                                t->result = EVT_TEST_SUCCESS;
                                if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
                                    opt->prod_type ==
                                            EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
                                    opt->prod_type ==
                                            EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
                                        t->done = true;
                                        break;
                                }
                        }
                }

                if (new_cycles - dead_lock_cycles > dead_lock_sample &&
                    (opt->prod_type == EVT_PROD_TYPE_SYNT ||
                     opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
                     opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)) {
                        remaining = t->outstand_pkts - processed_pkts(t);
                        if (dead_lock_remaining == remaining) {
                                rte_event_dev_dump(opt->dev_id, stdout);
                                evt_err("No schedules for 5 seconds, deadlock");
                                t->done = true;
                                break;
                        }
                        dead_lock_remaining = remaining;
                        dead_lock_cycles = new_cycles;
                }
        }
        printf("\n");
        return 0;
}

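/* Create one Rx adapter per ethdev, add all of its Rx queues, and set up
 * a service core when the adapter has no internal port.
 */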
static int
perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                struct rte_event_port_conf prod_conf)
{
        int ret = 0;
        uint16_t prod;
        struct rte_event_eth_rx_adapter_queue_conf queue_conf;

        memset(&queue_conf, 0,
                        sizeof(struct rte_event_eth_rx_adapter_queue_conf));
        queue_conf.ev.sched_type = opt->sched_type_list[0];
        RTE_ETH_FOREACH_DEV(prod) {
                uint32_t cap;

                ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
                                prod, &cap);
                if (ret) {
                        evt_err("failed to get event rx adapter[%d]"
                                        " capabilities",
                                        opt->dev_id);
                        return ret;
                }
                queue_conf.ev.queue_id = prod * stride;
                ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
                                &prod_conf);
                if (ret) {
                        evt_err("failed to create rx adapter[%d]", prod);
                        return ret;
                }
                ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
                                &queue_conf);
                if (ret) {
                        evt_err("failed to add rx queues to adapter[%d]", prod);
                        return ret;
                }

                if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
                        uint32_t service_id;

                        rte_event_eth_rx_adapter_service_id_get(prod,
                                        &service_id);
                        ret = evt_service_setup(service_id);
                        if (ret) {
                                evt_err("Failed to setup service core"
                                                " for Rx adapter\n");
                                return ret;
                        }
                }
        }

        return ret;
}

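/* Create the requested number of event timer adapters and record the
 * best achievable tick resolution.
 */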
static int
perf_event_timer_adapter_setup(struct test_perf *t)
{
        int i;
        int ret;
        struct rte_event_timer_adapter_info adapter_info;
        struct rte_event_timer_adapter *wl;
        uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
        uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;

        if (nb_producers == 1)
                flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;

        for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
                struct rte_event_timer_adapter_conf config = {
                        .event_dev_id = t->opt->dev_id,
                        .timer_adapter_id = i,
                        .timer_tick_ns = t->opt->timer_tick_nsec,
                        .max_tmo_ns = t->opt->max_tmo_nsec,
                        .nb_timers = t->opt->pool_sz,
                        .flags = flags,
                };

                wl = rte_event_timer_adapter_create(&config);
                if (wl == NULL) {
                        evt_err("failed to create event timer ring %d", i);
                        return rte_errno;
                }

                memset(&adapter_info, 0,
                                sizeof(struct rte_event_timer_adapter_info));
                rte_event_timer_adapter_get_info(wl, &adapter_info);
                t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;

                if (!(adapter_info.caps &
                                RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
                        uint32_t service_id = -1U;

                        rte_event_timer_adapter_service_id_get(wl,
                                        &service_id);
                        ret = evt_service_setup(service_id);
                        if (ret) {
                                evt_err("Failed to setup service core"
                                                " for timer adapter\n");
                                return ret;
                        }
                        rte_service_runstate_set(service_id, 1);
                }
                t->timer_adptr[i] = wl;
        }
        return 0;
}

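/* Verify the crypto adapter capabilities for the selected mode and add
 * the queue pair, passing response info when the device requires the
 * binding.
 */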
static int
perf_event_crypto_adapter_setup(struct test_perf *t, struct prod_data *p)
{
        struct evt_options *opt = t->opt;
        uint32_t cap;
        int ret;

        ret = rte_event_crypto_adapter_caps_get(p->dev_id, p->ca.cdev_id, &cap);
        if (ret) {
                evt_err("Failed to get crypto adapter capabilities");
                return ret;
        }

        if (((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) &&
             !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW)) ||
            ((opt->crypto_adptr_mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) &&
             !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD))) {
                evt_err("crypto adapter %s mode unsupported\n",
                        opt->crypto_adptr_mode ? "OP_FORWARD" : "OP_NEW");
                return -ENOTSUP;
        } else if (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA)) {
                evt_err("Storing crypto session not supported");
                return -ENOTSUP;
        }

        if (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) {
                struct rte_event response_info;

                response_info.event = 0;
                response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
                response_info.queue_id = p->queue_id;
                ret = rte_event_crypto_adapter_queue_pair_add(
                        TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id,
                        &response_info);
        } else {
                ret = rte_event_crypto_adapter_queue_pair_add(
                        TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id, NULL);
        }

        return ret;
}

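/* Create a NULL-cipher symmetric session for the perf test. */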
static struct rte_cryptodev_sym_session *
cryptodev_sym_sess_create(struct prod_data *p, struct test_perf *t)
{
        struct rte_crypto_sym_xform cipher_xform;
        struct rte_cryptodev_sym_session *sess;

        cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_NULL;
        cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
        cipher_xform.next = NULL;

        sess = rte_cryptodev_sym_session_create(t->ca_sess_pool);
        if (sess == NULL) {
                evt_err("Failed to create sym session");
                return NULL;
        }

        if (rte_cryptodev_sym_session_init(p->ca.cdev_id, sess, &cipher_xform,
                                           t->ca_sess_priv_pool)) {
                evt_err("Failed to init session");
                return NULL;
        }

        return sess;
}

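/* Create a MODEX asymmetric session from the static test vectors above. */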
static void *
cryptodev_asym_sess_create(struct prod_data *p, struct test_perf *t)
{
        const struct rte_cryptodev_asymmetric_xform_capability *capability;
        struct rte_cryptodev_asym_capability_idx cap_idx;
        struct rte_crypto_asym_xform xform;
        void *sess;

        xform.next = NULL;
        xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
        cap_idx.type = xform.xform_type;
        capability = rte_cryptodev_asym_capability_get(p->ca.cdev_id, &cap_idx);
        if (capability == NULL) {
                evt_err("Device doesn't support MODEX. Test Skipped\n");
                return NULL;
        }

        xform.modex.modulus.data = modex_test_case.modulus.data;
        xform.modex.modulus.length = modex_test_case.modulus.len;
        xform.modex.exponent.data = modex_test_case.exponent.data;
        xform.modex.exponent.length = modex_test_case.exponent.len;

        if (rte_cryptodev_asym_session_create(p->ca.cdev_id, &xform,
                        t->ca_asym_sess_pool, &sess)) {
                evt_err("Failed to create asym session");
                return NULL;
        }

        return sess;
}

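/* Set up one event port per worker linked to all queues, plus producer
 * ports according to the configured producer type.
 */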
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                                uint8_t stride, uint8_t nb_queues,
                                const struct rte_event_port_conf *port_conf)
{
        struct test_perf *t = evt_test_priv(test);
        uint16_t port, prod;
        int ret = -1;

        /* setup one port per worker, linking to all queues */
        for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
                                port++) {
                struct worker_data *w = &t->worker[port];

                w->dev_id = opt->dev_id;
                w->port_id = port;
                w->t = t;
                w->processed_pkts = 0;
                w->latency = 0;

                struct rte_event_port_conf conf = *port_conf;
                conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

                ret = rte_event_port_setup(opt->dev_id, port, &conf);
                if (ret) {
                        evt_err("failed to setup port %d", port);
                        return ret;
                }

                ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
                if (ret != nb_queues) {
                        evt_err("failed to link all queues to port %d", port);
                        return -EINVAL;
                }
        }

        /* port for producers, no links */
        if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
                for ( ; port < perf_nb_event_ports(opt); port++) {
                        struct prod_data *p = &t->prod[port];
                        p->t = t;
                }

                struct rte_event_port_conf conf = *port_conf;
                conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_PRODUCER;

                ret = perf_event_rx_adapter_setup(opt, stride, conf);
                if (ret)
                        return ret;
        } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
                prod = 0;
                for ( ; port < perf_nb_event_ports(opt); port++) {
                        struct prod_data *p = &t->prod[port];
                        p->queue_id = prod * stride;
                        p->t = t;
                        prod++;
                }

                ret = perf_event_timer_adapter_setup(t);
                if (ret)
                        return ret;
        } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
                struct rte_event_port_conf conf = *port_conf;
                uint8_t cdev_id = 0;
                uint16_t qp_id = 0;

                ret = rte_event_crypto_adapter_create(TEST_PERF_CA_ID,
                                                      opt->dev_id, &conf, 0);
                if (ret) {
                        evt_err("Failed to create crypto adapter");
                        return ret;
                }

                prod = 0;
                for (; port < perf_nb_event_ports(opt); port++) {
                        union rte_event_crypto_metadata m_data;
                        struct prod_data *p = &t->prod[port];
                        uint32_t flow_id;

                        if (qp_id == rte_cryptodev_queue_pair_count(cdev_id)) {
                                cdev_id++;
                                qp_id = 0;
                        }

                        p->dev_id = opt->dev_id;
                        p->port_id = port;
                        p->queue_id = prod * stride;
                        p->ca.cdev_id = cdev_id;
                        p->ca.cdev_qp_id = qp_id;
                        p->ca.crypto_sess = rte_zmalloc_socket(
                                NULL, sizeof(void *) * t->nb_flows,
                                RTE_CACHE_LINE_SIZE, opt->socket_id);
                        p->t = t;

                        m_data.request_info.cdev_id = p->ca.cdev_id;
                        m_data.request_info.queue_pair_id = p->ca.cdev_qp_id;
                        m_data.response_info.sched_type = RTE_SCHED_TYPE_ATOMIC;
                        m_data.response_info.queue_id = p->queue_id;

                        for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
                                m_data.response_info.flow_id = flow_id;
                                if (opt->crypto_op_type ==
                                                RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                                        struct rte_cryptodev_sym_session *sess;

                                        sess = cryptodev_sym_sess_create(p, t);
                                        if (sess == NULL)
                                                return -ENOMEM;

                                        rte_cryptodev_session_event_mdata_set(
                                                cdev_id,
                                                sess,
                                                RTE_CRYPTO_OP_TYPE_SYMMETRIC,
                                                RTE_CRYPTO_OP_WITH_SESSION,
                                                &m_data, sizeof(m_data));
                                        p->ca.crypto_sess[flow_id] = sess;
                                } else {
                                        void *sess;

                                        sess = cryptodev_asym_sess_create(p, t);
                                        if (sess == NULL)
                                                return -ENOMEM;
                                        rte_cryptodev_session_event_mdata_set(
                                                cdev_id,
                                                sess,
                                                RTE_CRYPTO_OP_TYPE_ASYMMETRIC,
                                                RTE_CRYPTO_OP_WITH_SESSION,
                                                &m_data, sizeof(m_data));
                                        p->ca.crypto_sess[flow_id] = sess;
                                }
                        }

                        conf.event_port_cfg |=
                                RTE_EVENT_PORT_CFG_HINT_PRODUCER |
                                RTE_EVENT_PORT_CFG_HINT_CONSUMER;

                        ret = rte_event_port_setup(opt->dev_id, port, &conf);
                        if (ret) {
                                evt_err("failed to setup port %d", port);
                                return ret;
                        }

                        ret = perf_event_crypto_adapter_setup(t, p);
                        if (ret)
                                return ret;

                        qp_id++;
                        prod++;
                }
        } else {
                prod = 0;
                for ( ; port < perf_nb_event_ports(opt); port++) {
                        struct prod_data *p = &t->prod[port];

                        p->dev_id = opt->dev_id;
                        p->port_id = port;
                        p->queue_id = prod * stride;
                        p->t = t;

                        struct rte_event_port_conf conf = *port_conf;
                        conf.event_port_cfg |=
                                RTE_EVENT_PORT_CFG_HINT_PRODUCER |
                                RTE_EVENT_PORT_CFG_HINT_CONSUMER;

                        ret = rte_event_port_setup(opt->dev_id, port, &conf);
                        if (ret) {
                                evt_err("failed to setup port %d", port);
                                return ret;
                        }
                        prod++;
                }
        }

        return ret;
}

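/* Validate lcore, stage and queue/port options before the test starts. */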
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
        unsigned int lcores;

        /* N producer + N worker + main when producer cores are used
         * Else N worker + main when Rx adapter is used
         */
        lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

        if (rte_lcore_count() < lcores) {
                evt_err("test needs minimum %d lcores", lcores);
                return -1;
        }

        /* Validate worker lcores */
        if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
                evt_err("worker lcores overlap with main lcore");
                return -1;
        }
        if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
                evt_err("worker lcores overlap producer lcores");
                return -1;
        }
        if (evt_has_disabled_lcore(opt->wlcores)) {
                evt_err("one or more worker lcores are not enabled");
                return -1;
        }
        if (!evt_has_active_lcore(opt->wlcores)) {
                evt_err("minimum one worker is required");
                return -1;
        }

        if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
            opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ||
            opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
                /* Validate producer lcores */
                if (evt_lcores_has_overlap(opt->plcores,
                                        rte_get_main_lcore())) {
                        evt_err("producer lcores overlap with main lcore");
                        return -1;
                }
                if (evt_has_disabled_lcore(opt->plcores)) {
                        evt_err("one or more producer lcores are not enabled");
                        return -1;
                }
                if (!evt_has_active_lcore(opt->plcores)) {
                        evt_err("minimum one producer is required");
                        return -1;
                }
        }

        if (evt_has_invalid_stage(opt))
                return -1;

        if (evt_has_invalid_sched_type(opt))
                return -1;

        if (nb_queues > EVT_MAX_QUEUES) {
                evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
                return -1;
        }
        if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
                evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
                return -1;
        }

        /* Fixups */
        if ((opt->nb_stages == 1 &&
                        opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
                        opt->fwd_latency) {
                evt_info("fwd_latency is valid when nb_stages > 1, disabling");
                opt->fwd_latency = 0;
        }

        if (opt->fwd_latency && !opt->q_priority) {
                evt_info("enabled queue priority for latency measurement");
                opt->q_priority = 1;
        }
        if (opt->nb_pkts == 0)
                opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

        return 0;
}

void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
        evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
        evt_dump_producer_lcores(opt);
        evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
        evt_dump_worker_lcores(opt);
        evt_dump_nb_stages(opt);
        evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
        evt_dump("nb_evdev_queues", "%d", nb_queues);
        evt_dump_queue_priority(opt);
        evt_dump_sched_type_list(opt);
        evt_dump_producer_type(opt);
        evt_dump("prod_enq_burst_sz", "%d", opt->prod_enq_burst_sz);
}

static void
perf_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
                      void *args)
{
        rte_mempool_put(args, ev.event_ptr);
}

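/* Return events that were dequeued but never forwarded to the mempool,
 * release the scheduling contexts of the rest, and flush the port.
 */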
1197void
1198perf_worker_cleanup(struct rte_mempool *const pool, uint8_t dev_id,
1199                    uint8_t port_id, struct rte_event events[], uint16_t nb_enq,
1200                    uint16_t nb_deq)
1201{
1202        int i;
1203
1204        if (nb_deq) {
1205                for (i = nb_enq; i < nb_deq; i++)
1206                        rte_mempool_put(pool, events[i].event_ptr);
1207
1208                for (i = 0; i < nb_deq; i++)
1209                        events[i].op = RTE_EVENT_OP_RELEASE;
1210                rte_event_enqueue_burst(dev_id, port_id, events, nb_deq);
1211        }
1212        rte_event_port_quiesce(dev_id, port_id, perf_event_port_flush, pool);
1213}
1214
1215void
1216perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
1217{
1218        int i;
1219        struct test_perf *t = evt_test_priv(test);
1220
1221        if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
1222                for (i = 0; i < opt->nb_timer_adptrs; i++)
1223                        rte_event_timer_adapter_stop(t->timer_adptr[i]);
1224        }
1225        rte_event_dev_stop(opt->dev_id);
1226        rte_event_dev_close(opt->dev_id);
1227}
1228
1229static inline void
1230perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
1231            void *obj, unsigned i __rte_unused)
1232{
1233        memset(obj, 0, mp->elt_size);
1234}
1235
1236#define NB_RX_DESC                      128
1237#define NB_TX_DESC                      512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
			.split_hdr_size = 0,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR)
		return 0;

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"\n",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

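/*
 * Quiesce the Rx side: stop each port's Rx adapter, remove its queues from
 * the adapter and stop the underlying Rx queue.
 */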
void
perf_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			rte_eth_dev_rx_queue_stop(i, 0);
		}
	}
}

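/*
 * Mirror of the Rx teardown for the Tx side: stop each port's Tx adapter,
 * detach its queues, stop the Tx queue and finally stop the port itself.
 */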
void
perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_tx_adapter_stop(i);
			rte_event_eth_tx_adapter_queue_del(i, i, -1);
			rte_eth_dev_tx_queue_stop(i, 0);
			rte_eth_dev_stop(i);
		}
	}
}

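/*
 * Prepare all crypto devices for the crypto adapter producer: create the
 * crypto op and session mempools, then configure each cryptodev with enough
 * queue pairs to spread the producer lcores across the available devices.
 */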
int
perf_cryptodev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_count, cdev_id, nb_plcores, nb_qps;
	struct test_perf *t = evt_test_priv(test);
	unsigned int max_session_size;
	uint32_t nb_sessions;
	int ret;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return 0;

	cdev_count = rte_cryptodev_count();
	if (cdev_count == 0) {
		evt_err("No crypto devices available\n");
		return -ENODEV;
	}

	t->ca_op_pool = rte_crypto_op_pool_create(
		"crypto_op_pool", opt->crypto_op_type, opt->pool_sz,
		128, sizeof(union rte_event_crypto_metadata),
		rte_socket_id());
	if (t->ca_op_pool == NULL) {
		evt_err("Failed to create crypto op pool");
		return -ENOMEM;
	}

	nb_sessions = evt_nr_active_lcores(opt->plcores) * t->nb_flows;
	t->ca_asym_sess_pool = rte_cryptodev_asym_session_pool_create(
		"ca_asym_sess_pool", nb_sessions, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_asym_sess_pool == NULL) {
		evt_err("Failed to create asym session pool");
		ret = -ENOMEM;
		goto err;
	}

	t->ca_sess_pool = rte_cryptodev_sym_session_pool_create(
		"ca_sess_pool", nb_sessions, 0, 0,
		sizeof(union rte_event_crypto_metadata), SOCKET_ID_ANY);
	if (t->ca_sess_pool == NULL) {
		evt_err("Failed to create sym session pool");
		ret = -ENOMEM;
		goto err;
	}

	max_session_size = 0;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		unsigned int session_size;

		session_size =
			rte_cryptodev_sym_get_private_session_size(cdev_id);
		if (session_size > max_session_size)
			max_session_size = session_size;
	}

	max_session_size += sizeof(union rte_event_crypto_metadata);
	t->ca_sess_priv_pool = rte_mempool_create(
		"ca_sess_priv_pool", nb_sessions, max_session_size, 0, 0, NULL,
		NULL, NULL, NULL, SOCKET_ID_ANY, 0);
	if (t->ca_sess_priv_pool == NULL) {
		evt_err("Failed to create sym session private pool");
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * Calculate the number of queue pairs needed, based on the number of
	 * available logical cores and crypto devices. For instance, with
	 * 4 cores and 2 crypto devices, 2 queue pairs are set up per device.
	 */
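	/* Equivalent to nb_qps = ceil(nb_plcores / cdev_count). */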
	nb_plcores = evt_nr_active_lcores(opt->plcores);
	nb_qps = (nb_plcores % cdev_count) ? (nb_plcores / cdev_count) + 1 :
					     nb_plcores / cdev_count;
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		struct rte_cryptodev_qp_conf qp_conf;
		struct rte_cryptodev_config conf;
		struct rte_cryptodev_info info;
		int qp_id;

		rte_cryptodev_info_get(cdev_id, &info);
		if (nb_qps > info.max_nb_queue_pairs) {
			evt_err("Not enough queue pairs per cryptodev (%u)",
				nb_qps);
			ret = -EINVAL;
			goto err;
		}

		conf.nb_queue_pairs = nb_qps;
		conf.socket_id = SOCKET_ID_ANY;
		conf.ff_disable = RTE_CRYPTODEV_FF_SECURITY;

		ret = rte_cryptodev_configure(cdev_id, &conf);
		if (ret) {
			evt_err("Failed to configure cryptodev (%u)", cdev_id);
			goto err;
		}

		qp_conf.nb_descriptors = NB_CRYPTODEV_DESCRIPTORS;
		qp_conf.mp_session = t->ca_sess_pool;
		qp_conf.mp_session_private = t->ca_sess_priv_pool;

		for (qp_id = 0; qp_id < conf.nb_queue_pairs; qp_id++) {
			ret = rte_cryptodev_queue_pair_setup(
				cdev_id, qp_id, &qp_conf,
				rte_cryptodev_socket_id(cdev_id));
			if (ret) {
				evt_err("Failed to setup queue pairs on cryptodev %u\n",
					cdev_id);
				goto err;
			}
		}
	}

	return 0;
err:
	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++)
		rte_cryptodev_close(cdev_id);

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_sess_priv_pool);
	rte_mempool_free(t->ca_asym_sess_pool);

	return ret;
}

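/*
 * Release every crypto resource taken in perf_cryptodev_setup(): per-flow
 * sessions and adapter queue pairs of each producer port, the adapter
 * itself, the cryptodevs and finally the mempools.
 */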
void
perf_cryptodev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint8_t cdev_id, cdev_count = rte_cryptodev_count();
	struct test_perf *t = evt_test_priv(test);
	uint16_t port;

	if (opt->prod_type != EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR)
		return;

	for (port = t->nb_workers; port < perf_nb_event_ports(opt); port++) {
		struct rte_cryptodev_sym_session *sess;
		struct prod_data *p = &t->prod[port];
		uint32_t flow_id;

		for (flow_id = 0; flow_id < t->nb_flows; flow_id++) {
			sess = p->ca.crypto_sess[flow_id];
			cdev_id = p->ca.cdev_id;
			rte_cryptodev_sym_session_clear(cdev_id, sess);
			rte_cryptodev_sym_session_free(sess);
		}

		rte_event_crypto_adapter_queue_pair_del(
			TEST_PERF_CA_ID, p->ca.cdev_id, p->ca.cdev_qp_id);
	}

	rte_event_crypto_adapter_free(TEST_PERF_CA_ID);

	for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
		rte_cryptodev_stop(cdev_id);
		rte_cryptodev_close(cdev_id);
	}

	rte_mempool_free(t->ca_op_pool);
	rte_mempool_free(t->ca_sess_pool);
	rte_mempool_free(t->ca_sess_priv_pool);
	rte_mempool_free(t->ca_asym_sess_pool);
}

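/*
 * Create the element pool for the test. Synthetic and timer producers use a
 * plain mempool of zero-initialized perf elements; every other producer
 * type works on mbufs and gets a pktmbuf pool instead.
 */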
int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				sizeof(struct perf_elt), /* element size */
				512, /* cache size */
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements */
				512, /* cache size */
				0, /* private data size */
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("Failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

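/* Release the element/mbuf pool created by perf_mempool_setup(). */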
void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	RTE_SET_USED(opt);
	rte_mempool_free(t->pool);
}

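/*
 * Allocate and initialize the per-test private state: expected packet
 * counts (timer producers count timers instead of packets), worker count,
 * flow count and the schedule type list copied from the options.
 */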
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t;
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("Failed to allocate test_perf memory");
		return -ENOMEM;
	}
	test->test_priv = test_perf;

	t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		t->outstand_pkts = opt->nb_timers *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_timers;
	} else {
		t->outstand_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);
		t->nb_pkts = opt->nb_pkts;
	}

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
}

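/* Free the private state allocated in perf_test_setup(). */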
void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}