dpdk/app/test-eventdev/test_order_common.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"

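/* Return the verdict recorded for this run: EVT_TEST_SUCCESS is set by
 * order_launch_lcores() once all packets drain, EVT_TEST_FAILED otherwise.
 */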
int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

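/* Synthetic producer loop: allocate an mbuf, derive its flow id from the
 * mbuf address, stamp the next per-flow sequence number into the mbuf
 * dynamic field (helpers from test_order_common.h) and enqueue it as a
 * new ORDERED event on stage 0. The enqueue is retried until it succeeds
 * or the test is flagged as failed.
 */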
static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			__func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const flow_id_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		*order_mbuf_seqn(t, m) = producer_flow_seq[flow]++;
		order_flow_id_save(t, flow, m, &ev);

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

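/* Validate the command line options: only the synthetic producer is
 * supported, at least three lcores are needed (1 producer + N workers +
 * main), exactly one producer lcore must be selected, and the producer,
 * worker and main lcores must not overlap.
 */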
int
order_opt_check(struct evt_options *opt)
{
	if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
		evt_err("Invalid producer type '%s', valid producer '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
		return -1;
	}

	/* 1 producer + N workers + main */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with main lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("exactly one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find active producer");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_main_lcore()) {
		evt_err("producer lcore and main lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

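/* Allocate the test private data and the per-flow sequence arrays, and
 * register the mbuf dynamic fields that carry the flow id and sequence
 * number alongside each event.
 */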
int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;
	struct test_order *t;
	static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
		.name = "test_event_dynfield_flow_id",
		.size = sizeof(flow_id_t),
		.align = __alignof__(flow_id_t),
	};
	static const struct rte_mbuf_dynfield seqn_dynfield_desc = {
		.name = "test_event_dynfield_seqn",
		.size = sizeof(seqn_t),
		.align = __alignof__(seqn_t),
	};

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;
	t = evt_test_priv(test);

	t->flow_id_dynfield_offset =
		rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
	if (t->flow_id_dynfield_offset < 0) {
		evt_err("failed to register flow id mbuf field");
		rte_free(test->test_priv);
		return -rte_errno;
	}

	t->seqn_dynfield_offset =
		rte_mbuf_dynfield_register(&seqn_dynfield_desc);
	if (t->seqn_dynfield_offset < 0) {
		evt_err("failed to register seqn mbuf field");
		rte_free(test->test_priv);
		return -rte_errno;
	}

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

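/* Release the per-flow sequence arrays and the test private data. */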
void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

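/* Create the pktmbuf pool the producer allocates its events from. */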
int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
					256 /* cache size */, 0,
					512 /* use very small mbufs */,
					opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

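/* Free the pool created by order_mempool_setup(). */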
void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

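/* Stop and close the event device under test. */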
void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

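/* Dump the producer/worker lcore selection and the event port count. */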
void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

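/* Launch the given worker function on every selected worker lcore and
 * order_producer() on the producer lcore, then poll outstand_pkts once
 * per second. The test passes when the count drains to zero; if it
 * stops moving between two polls, a deadlock is assumed and the test
 * is aborted.
 */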
int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
					lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		if (new_cycles - cycles > rte_get_timer_hz()) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no schedule progress for over a second, deadlock suspected");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

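/* Set up one event port per worker, each linked to all queues, plus one
 * extra port (the last one) for the producer, which feeds queue 0.
 */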
int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);
	struct rte_event_dev_info dev_info;

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration: the enqueue depth mirrors the device's
	 * maximum dequeue depth
	 */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* the remaining port is reserved for the producer */
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}