dpdk/app/test-eventdev/test_perf_common.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_PERF_COMMON_
#define _TEST_PERF_COMMON_

#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_timer_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

struct test_perf;

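/* Per-worker lcore state. processed_pkts and latency are accumulated by the
 * worker and read by the main lcore to report throughput and average latency.
 */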
struct worker_data {
        uint64_t processed_pkts;
        uint64_t latency;
        uint8_t dev_id;
        uint8_t port_id;
        struct test_perf *t;
} __rte_cache_aligned;

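/* Per-producer lcore state: the event device and port to enqueue on, and the
 * queue this producer injects events into.
 */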
struct prod_data {
        uint8_t dev_id;
        uint8_t port_id;
        uint8_t queue_id;
        struct test_perf *t;
} __rte_cache_aligned;

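/* Global test context shared by all producer and worker lcores. */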
struct test_perf {
        /* Don't change the offset of "done". The signal handler uses this
         * field to tell all lcores to terminate their work.
         */
        int done;
        uint64_t outstand_pkts;
        uint8_t nb_workers;
        enum evt_test_result result;
        uint32_t nb_flows;
        uint64_t nb_pkts;
        struct rte_mempool *pool;
        struct prod_data prod[EVT_MAX_PORTS];
        struct worker_data worker[EVT_MAX_PORTS];
        struct evt_options *opt;
        uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
        struct rte_event_timer_adapter *timer_adptr[
                RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
} __rte_cache_aligned;

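/* Mempool element carried through the pipeline. The anonymous union overlays
 * a timestamp at the offset of struct rte_event_timer's user_meta area, so
 * the same element serves both as an event timer (tim) when the producer is
 * the timer adapter and as a plain object stamped with its production time
 * (timestamp) for latency measurement, without the two uses clobbering each
 * other.
 */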
struct perf_elt {
        union {
                struct rte_event_timer tim;
                struct {
                        char pad[offsetof(struct rte_event_timer, user_meta)];
                        uint64_t timestamp;
                };
        };
} __rte_cache_aligned;

#define BURST_SIZE 16

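/* Common prologue for the perf worker loops: unpacks the worker_data
 * argument into the locals every worker needs (device, port, stage list,
 * mempool, burst buffer). A minimal usage sketch, with a hypothetical worker
 * body for illustration only:
 *
 *	static int
 *	worker(void *arg)
 *	{
 *		PERF_WORKER_INIT;
 *		struct rte_event ev;
 *
 *		while (t->done == 0) {
 *			if (rte_event_dequeue_burst(dev, port, &ev, 1, 0) == 0)
 *				continue;
 *			... run the pipeline stages on ev, freeing the element
 *			    on the last stage ...
 *		}
 *		return 0;
 *	}
 */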
#define PERF_WORKER_INIT\
        struct worker_data *w = arg;\
        struct test_perf *t = w->t;\
        struct evt_options *opt = t->opt;\
        const uint8_t dev = w->dev_id;\
        const uint8_t port = w->port_id;\
        const uint8_t prod_timer_type = \
                opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
        uint8_t *const sched_type_list = &t->sched_type_list[0];\
        struct rte_mempool *const pool = t->pool;\
        const uint8_t nb_stages = t->opt->nb_stages;\
        const uint8_t laststage = nb_stages - 1;\
        uint8_t cnt = 0;\
        void *bufs[BURST_SIZE] __rte_cache_aligned;\
        int const sz = RTE_DIM(bufs);\
        if (opt->verbose_level > 1)\
                printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
                                rte_lcore_id(), dev, port)

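/* Return the event's element to the mempool in bursts of buf_sz and account
 * one processed packet. The caller owns the buffer index and must feed the
 * return value back in, e.g. (illustrative only, using the locals declared
 * by PERF_WORKER_INIT):
 *
 *	cnt = perf_process_last_stage(pool, &ev, w, bufs, sz, cnt);
 */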
static __rte_always_inline int
perf_process_last_stage(struct rte_mempool *const pool,
                struct rte_event *const ev, struct worker_data *const w,
                void *bufs[], int const buf_sz, uint8_t count)
{
        bufs[count++] = ev->event_ptr;

        /* The release fence ensures event_ptr is stored before the
         * worker lcore's processed packet count is updated.
         */
        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        w->processed_pkts++;

        if (unlikely(count == buf_sz)) {
                count = 0;
                rte_mempool_put_bulk(pool, bufs, buf_sz);
        }
        return count;
}

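/* Same as perf_process_last_stage(), but additionally accumulates the
 * per-event latency, measured in timer cycles from the timestamp the
 * producer wrote into the perf_elt.
 */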
static __rte_always_inline uint8_t
perf_process_last_stage_latency(struct rte_mempool *const pool,
                struct rte_event *const ev, struct worker_data *const w,
                void *bufs[], int const buf_sz, uint8_t count)
{
        uint64_t latency;
        struct perf_elt *const m = ev->event_ptr;

        bufs[count++] = ev->event_ptr;

        /* The release fence ensures event_ptr is stored before the
         * worker lcore's processed packet count is updated.
         */
        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        w->processed_pkts++;

        /* Read the timestamp before the element may be returned to the
         * mempool.
         */
        latency = rte_get_timer_cycles() - m->timestamp;

        if (unlikely(count == buf_sz)) {
                count = 0;
                rte_mempool_put_bulk(pool, bufs, buf_sz);
        }

        w->latency += latency;
        return count;
}

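/* Total event ports required: one per worker lcore plus one per producer
 * lcore.
 */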
static inline int
perf_nb_event_ports(struct evt_options *opt)
{
        return evt_nr_active_lcores(opt->wlcores) +
                        evt_nr_active_lcores(opt->plcores);
}

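/* Setup, launch and teardown helpers shared by the perf_queue and perf_atq
 * tests, implemented in test_perf_common.c.
 */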
int perf_test_result(struct evt_test *test, struct evt_options *opt);
int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                                uint8_t stride, uint8_t nb_queues,
                                const struct rte_event_port_conf *port_conf);
int perf_event_dev_service_setup(uint8_t dev_id);
int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
                int (*worker)(void *));
void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_PERF_COMMON_ */