/* dpdk/app/test-eventdev/evt_common.h */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2017 Cavium, Inc
   3 */
   4
   5#ifndef _EVT_COMMON_
   6#define _EVT_COMMON_
   7
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eventdev.h>
#include <rte_service.h>
  12
/* ANSI terminal color escape sequences used by the log helpers below. */
#define CLNRM  "\x1b[0m"
#define CLRED  "\x1b[31m"
#define CLGRN  "\x1b[32m"
#define CLYEL  "\x1b[33m"

/* Log an error (red) to stderr, prefixed with the calling function's name. */
#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

/* Log an informational message (yellow) to stdout. */
#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

/* Field width used to left-align the key column in evt_dump() output. */
#define EVT_STR_FMT 20

/* Print one "key : value" line, key padded to EVT_STR_FMT columns. */
#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

/* Open a "key : {" aggregate line; pair with evt_dump_end. */
#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

/* Backspace over the trailing separator and close the aggregate with "}". */
#define evt_dump_end printf("\b}\n")

/* Upper bounds used to size per-test arrays. */
#define EVT_MAX_STAGES           64
#define EVT_MAX_PORTS            256
#define EVT_MAX_QUEUES           256
  36
/* Event producer types supported by the test application. */
enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,          /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,  /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_MAX,           /* Sentinel; number of producer types. */
};
  44
/* Per-run test options; presumably populated from the command line by the
 * option parser elsewhere in the app — confirm against evt_options.c. */
struct evt_options {
#define EVT_TEST_NAME_MAX_LEN     32
	char test_name[EVT_TEST_NAME_MAX_LEN]; /* Name of the test to run. */
	bool plcores[RTE_MAX_LCORE];  /* Producer lcore map (true = in use). */
	bool wlcores[RTE_MAX_LCORE];  /* Worker lcore map (true = in use). */
	int pool_sz;                  /* Mempool size. */
	int socket_id;                /* NUMA socket for allocations. */
	int nb_stages;                /* Pipeline stage count (<= EVT_MAX_STAGES). */
	int verbose_level;            /* Logging verbosity. */
	uint8_t dev_id;               /* Event device id under test. */
	uint8_t timdev_cnt;           /* Number of timer devices. */
	uint8_t nb_timer_adptrs;      /* Number of event timer adapters. */
	uint8_t timdev_use_burst;     /* Use burst mode for timer adapter. */
	uint8_t per_port_pool;        /* Use a separate mempool per port. */
	uint8_t sched_type_list[EVT_MAX_STAGES]; /* Sched type per stage. */
	uint16_t mbuf_sz;             /* mbuf data room size. */
	uint16_t wkr_deq_dep;         /* Worker dequeue depth. */
	uint16_t vector_size;         /* Event vector size. */
	uint16_t eth_queues;          /* Rx queues per ethdev port. */
	uint32_t nb_flows;            /* Number of flows. */
	uint32_t tx_first;            /* Packets to transmit before measuring. */
	uint32_t max_pkt_sz;          /* Maximum packet size. */
	uint32_t deq_tmo_nsec;        /* Dequeue timeout in nanoseconds. */
	uint32_t q_priority:1;        /* Enable queue priority. */
	uint32_t fwd_latency:1;       /* Measure forward latency. */
	uint32_t ena_vector : 1;      /* Enable event vectorization. */
	uint64_t nb_pkts;             /* Total packets to process. */
	uint64_t nb_timers;           /* Total timers to arm. */
	uint64_t expiry_nsec;         /* Timer expiry in nanoseconds. */
	uint64_t max_tmo_nsec;        /* Maximum timeout in nanoseconds. */
	uint64_t vector_tmo_nsec;     /* Vector aggregation timeout. */
	uint64_t timer_tick_nsec;     /* Requested timer tick interval. */
	uint64_t optm_timer_tick_nsec; /* Optimized/actual tick interval. */
	enum evt_prod_type prod_type; /* Selected producer type. */
};
  80
  81static inline bool
  82evt_has_distributed_sched(uint8_t dev_id)
  83{
  84        struct rte_event_dev_info dev_info;
  85
  86        rte_event_dev_info_get(dev_id, &dev_info);
  87        return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
  88                        true : false;
  89}
  90
  91static inline bool
  92evt_has_burst_mode(uint8_t dev_id)
  93{
  94        struct rte_event_dev_info dev_info;
  95
  96        rte_event_dev_info_get(dev_id, &dev_info);
  97        return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
  98                        true : false;
  99}
 100
 101
 102static inline bool
 103evt_has_all_types_queue(uint8_t dev_id)
 104{
 105        struct rte_event_dev_info dev_info;
 106
 107        rte_event_dev_info_get(dev_id, &dev_info);
 108        return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
 109                        true : false;
 110}
 111
 112static inline bool
 113evt_has_flow_id(uint8_t dev_id)
 114{
 115        struct rte_event_dev_info dev_info;
 116
 117        rte_event_dev_info_get(dev_id, &dev_info);
 118        return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
 119                        true : false;
 120}
 121
 122static inline int
 123evt_service_setup(uint32_t service_id)
 124{
 125        int32_t core_cnt;
 126        unsigned int lcore = 0;
 127        uint32_t core_array[RTE_MAX_LCORE];
 128        uint8_t cnt;
 129        uint8_t min_cnt = UINT8_MAX;
 130
 131        if (!rte_service_lcore_count())
 132                return -ENOENT;
 133
 134        core_cnt = rte_service_lcore_list(core_array,
 135                        RTE_MAX_LCORE);
 136        if (core_cnt < 0)
 137                return -ENOENT;
 138        /* Get the core which has least number of services running. */
 139        while (core_cnt--) {
 140                /* Reset default mapping */
 141                rte_service_map_lcore_set(service_id,
 142                                core_array[core_cnt], 0);
 143                cnt = rte_service_lcore_count_services(
 144                                core_array[core_cnt]);
 145                if (cnt < min_cnt) {
 146                        lcore = core_array[core_cnt];
 147                        min_cnt = cnt;
 148                }
 149        }
 150        if (rte_service_map_lcore_set(service_id, lcore, 1))
 151                return -ENOENT;
 152
 153        return 0;
 154}
 155
 156static inline int
 157evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
 158                uint8_t nb_ports)
 159{
 160        struct rte_event_dev_info info;
 161        int ret;
 162
 163        memset(&info, 0, sizeof(struct rte_event_dev_info));
 164        ret = rte_event_dev_info_get(opt->dev_id, &info);
 165        if (ret) {
 166                evt_err("failed to get eventdev info %d", opt->dev_id);
 167                return ret;
 168        }
 169
 170        if (opt->deq_tmo_nsec) {
 171                if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
 172                        opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
 173                        evt_info("dequeue_timeout_ns too low, using %d",
 174                                        opt->deq_tmo_nsec);
 175                }
 176                if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
 177                        opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
 178                        evt_info("dequeue_timeout_ns too high, using %d",
 179                                        opt->deq_tmo_nsec);
 180                }
 181        }
 182
 183        const struct rte_event_dev_config config = {
 184                        .dequeue_timeout_ns = opt->deq_tmo_nsec,
 185                        .nb_event_queues = nb_queues,
 186                        .nb_event_ports = nb_ports,
 187                        .nb_single_link_event_port_queues = 0,
 188                        .nb_events_limit  = info.max_num_events,
 189                        .nb_event_queue_flows = opt->nb_flows,
 190                        .nb_event_port_dequeue_depth =
 191                                info.max_event_port_dequeue_depth,
 192                        .nb_event_port_enqueue_depth =
 193                                info.max_event_port_enqueue_depth,
 194        };
 195
 196        return rte_event_dev_configure(opt->dev_id, &config);
 197}
 198
 199#endif /*  _EVT_COMMON_*/
 200