dpdk/app/test-eventdev/test_pipeline_common.h
/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#ifndef _TEST_PIPELINE_COMMON_
#define _TEST_PIPELINE_COMMON_

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_spinlock.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

struct test_pipeline;

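/* Per-worker state passed to each worker lcore as its argument. */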
struct worker_data {
        uint64_t processed_pkts;
        uint8_t dev_id;
        uint8_t port_id;
        struct test_pipeline *t;
} __rte_cache_aligned;

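/* Shared test context; every worker holds a pointer to the same instance. */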
struct test_pipeline {
        /* Don't change the offset of "done". The signal handler uses this
         * memory location to tell all worker lcores to stop.
         */
        int done;
        uint8_t nb_workers;
        uint8_t internal_port;
        uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
        enum evt_test_result result;
        uint32_t nb_flows;
        uint64_t outstand_pkts;
        struct rte_mempool *pool[RTE_MAX_ETHPORTS];
        struct worker_data worker[EVT_MAX_PORTS];
        struct evt_options *opt;
        uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
} __rte_cache_aligned;

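/* Maximum number of events handled per burst in the *_BURST worker variants. */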
#define BURST_SIZE 16

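/*
 * Worker-loop prologue macros: expand to the local variables each worker
 * needs (device/port ids, test context and event storage), derived from the
 * worker_data pointer passed as the lcore argument.
 */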
#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
        struct worker_data *w  = arg;     \
        struct test_pipeline *t = w->t;   \
        const uint8_t dev = w->dev_id;    \
        const uint8_t port = w->port_id;  \
        struct rte_event ev __rte_cache_aligned

#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
        int i;                                  \
        struct worker_data *w  = arg;           \
        struct test_pipeline *t = w->t;         \
        const uint8_t dev = w->dev_id;          \
        const uint8_t port = w->port_id;        \
        struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned

#define PIPELINE_WORKER_MULTI_STAGE_INIT                         \
        struct worker_data *w  = arg;                            \
        struct test_pipeline *t = w->t;                          \
        uint8_t cq_id;                                           \
        const uint8_t dev = w->dev_id;                           \
        const uint8_t port = w->port_id;                         \
        const uint8_t last_queue = t->opt->nb_stages - 1;        \
        uint8_t *const sched_type_list = &t->sched_type_list[0]; \
        const uint8_t nb_stages = t->opt->nb_stages + 1;         \
        struct rte_event ev __rte_cache_aligned

#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT                   \
        int i;                                                   \
        struct worker_data *w  = arg;                            \
        struct test_pipeline *t = w->t;                          \
        uint8_t cq_id;                                           \
        const uint8_t dev = w->dev_id;                           \
        const uint8_t port = w->port_id;                         \
        const uint8_t last_queue = t->opt->nb_stages - 1;        \
        uint8_t *const sched_type_list = &t->sched_type_list[0]; \
        const uint8_t nb_stages = t->opt->nb_stages + 1;         \
        struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned

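/* Re-mark a dequeued event for forwarding to the next stage. */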
static __rte_always_inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
{
        ev->event_type = RTE_EVENT_TYPE_CPU;
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->sched_type = sched;
}

static __rte_always_inline void
pipeline_fwd_event_vector(struct rte_event *ev, uint8_t sched)
{
        ev->event_type = RTE_EVENT_TYPE_CPU_VECTOR;
        ev->op = RTE_EVENT_OP_FORWARD;
        ev->sched_type = sched;
}

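/* Transmit a single event via the Tx adapter, retrying until it is accepted. */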
static __rte_always_inline void
pipeline_event_tx(const uint8_t dev, const uint8_t port,
                struct rte_event * const ev)
{
        rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
        while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
                rte_pause();
}

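/* Transmit an event vector via the Tx adapter, retrying until it is accepted. */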
static __rte_always_inline void
pipeline_event_tx_vector(const uint8_t dev, const uint8_t port,
                         struct rte_event *const ev)
{
        ev->vec->queue = 0;

        while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
                rte_pause();
}

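/* Transmit a burst of events via the Tx adapter, retrying the remainder. */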
static __rte_always_inline void
pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
                struct rte_event *ev, const uint16_t nb_rx)
{
        uint16_t enq;

        enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
        while (enq < nb_rx) {
                enq += rte_event_eth_tx_adapter_enqueue(dev, port,
                                ev + enq, nb_rx - enq, 0);
        }
}

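/* Enqueue a single event back to the event device, retrying until accepted. */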
static __rte_always_inline void
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
                struct rte_event *ev)
{
        while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
                rte_pause();
}

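/* Enqueue a burst of events back to the event device, retrying the remainder. */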
static __rte_always_inline void
pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
                struct rte_event *ev, const uint16_t nb_rx)
{
        uint16_t enq;

        enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
        while (enq < nb_rx) {
                enq += rte_event_enqueue_burst(dev, port,
                                                ev + enq, nb_rx - enq);
        }
}

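/* One event port is needed per active worker lcore. */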
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
        return evt_nr_active_lcores(opt->wlcores);
}

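/* Setup, launch and teardown helpers shared by the pipeline test variants. */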
int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
                struct rte_event_port_conf prod_conf);
int pipeline_event_tx_adapter_setup(struct evt_options *opt,
                struct rte_event_port_conf prod_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
                uint8_t *queue_arr, uint8_t nb_queues,
                const struct rte_event_port_conf p_conf);
int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
                int (*worker)(void *));
void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_PIPELINE_COMMON_ */