1
2
3
4
5#ifndef _TEST_ORDER_COMMON_
6#define _TEST_ORDER_COMMON_
7
8#include <stdio.h>
9#include <stdbool.h>
10
11#include <rte_cycles.h>
12#include <rte_eventdev.h>
13#include <rte_lcore.h>
14#include <rte_malloc.h>
15#include <rte_mbuf.h>
16#include <rte_mbuf_dyn.h>
17
18#include "evt_common.h"
19#include "evt_options.h"
20#include "evt_test.h"
21
22#define BURST_SIZE 16
23
24typedef uint32_t flow_id_t;
25typedef uint32_t seqn_t;
26
27struct test_order;
28
/* Per-worker-lcore context handed to each worker thread as its argument. */
struct worker_data {
	uint8_t dev_id;		/* event device id the worker dequeues from */
	uint8_t port_id;	/* event port owned by this worker */
	struct test_order *t;	/* back-pointer to the shared test state */
};
34
/* Producer-lcore context: where the generated events are injected. */
struct prod_data {
	uint8_t dev_id;		/* event device id to enqueue into */
	uint8_t port_id;	/* event port owned by the producer */
	uint8_t queue_id;	/* queue the producer enqueues new events to */
	struct test_order *t;	/* back-pointer to the shared test state */
};
41
/*
 * Shared state of one ordered-queue test run. Cache-line aligned so the
 * hot members below do not straddle a line with unrelated data.
 */
struct test_order {
	/*
	 * NOTE(review): "err" sits first in the struct; it is written from
	 * the worker path (order_process_stage_1/_invalid). Presumably other
	 * code depends on its offset — confirm before reordering members.
	 */
	int err;
	/*
	 * Number of packets produced but not yet consumed; workers decrement
	 * it in order_process_stage_1 as each mbuf is freed.
	 */
	rte_atomic64_t outstand_pkts;
	enum evt_test_result result;	/* final pass/fail verdict of the run */
	uint32_t nb_flows;	/* number of distinct flows under test */
	uint64_t nb_pkts;	/* total packet budget — TODO confirm producer use */
	struct rte_mempool *pool;	/* mbuf pool (allocation site not in this header) */
	int flow_id_dynfield_offset;	/* mbuf dynfield offset holding flow_id_t */
	int seqn_dynfield_offset;	/* mbuf dynfield offset holding seqn_t */
	struct prod_data prod;		/* single producer context */
	struct worker_data worker[EVT_MAX_PORTS];	/* per-worker contexts */
	uint32_t *producer_flow_seq;	/* per-flow next seqn to produce — presumably; verify against producer */
	uint32_t *expected_flow_seq;	/* per-flow next seqn a worker expects to see */
	struct evt_options *opt;	/* parsed command-line options */
} __rte_cache_aligned;
64
65static inline void
66order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
67{
68 event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
69 t->flow_id_dynfield_offset, flow_id_t *);
70}
71
72static inline void
73order_flow_id_save(struct test_order *t, flow_id_t flow_id,
74 struct rte_mbuf *mbuf, struct rte_event *event)
75{
76 *RTE_MBUF_DYNFIELD(mbuf,
77 t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
78 event->flow_id = flow_id;
79 event->mbuf = mbuf;
80}
81
82static inline seqn_t *
83order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
84{
85 return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
86}
87
88static inline int
89order_nb_event_ports(struct evt_options *opt)
90{
91 return evt_nr_active_lcores(opt->wlcores) + 1 ;
92}
93
94static __rte_always_inline void
95order_process_stage_1(struct test_order *const t,
96 struct rte_event *const ev, const uint32_t nb_flows,
97 uint32_t *const expected_flow_seq,
98 rte_atomic64_t *const outstand_pkts)
99{
100 const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
101
102 if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
103 evt_err("flow=%x seqn mismatch got=%x expected=%x",
104 flow, *order_mbuf_seqn(t, ev->mbuf),
105 expected_flow_seq[flow]);
106 t->err = true;
107 }
108
109
110
111
112
113
114 expected_flow_seq[flow]++;
115 rte_pktmbuf_free(ev->mbuf);
116 rte_atomic64_sub(outstand_pkts, 1);
117}
118
119static __rte_always_inline void
120order_process_stage_invalid(struct test_order *const t,
121 struct rte_event *const ev)
122{
123 evt_err("invalid queue %d", ev->queue_id);
124 t->err = true;
125}
126
/*
 * Common prologue for worker loop functions. Expects a `void *arg`
 * parameter in scope; unpacks it into the locals every worker body uses
 * (dev/port ids, flow count, expected-seqn array, outstanding-packet
 * counter) and prints launch info when verbose_level > 1. Must appear
 * at the top of the worker function body, before any statement that
 * uses these names.
 */
#define ORDER_WORKER_INIT\
	struct worker_data *w  = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)
139
/*
 * Shared setup/teardown and run helpers implemented in the ordered-test
 * common .c file; the individual order_* tests plug these into their
 * evt_test_ops. Definitions are not in this header.
 */
int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
/* launches `worker` on each worker lcore — TODO confirm producer launch too */
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
			uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
152
153#endif
154