1
2
3
4
5#include <string.h>
6
7#include <rte_bus_vdev.h>
8#include <rte_common.h>
9#include <rte_ethdev.h>
10#include <rte_eth_ring.h>
11#include <rte_eventdev.h>
12#include <rte_event_eth_tx_adapter.h>
13#include <rte_mbuf.h>
14#include <rte_mempool.h>
15#include <rte_service.h>
16
17#include "test.h"
18
19#define MAX_NUM_QUEUE RTE_PMD_RING_MAX_RX_RINGS
20#define TEST_INST_ID 0
21#define TEST_DEV_ID 0
22#define SOCKET0 0
23#define RING_SIZE 256
24#define ETH_NAME_LEN 32
25#define NUM_ETH_PAIR 1
26#define NUM_ETH_DEV (2 * NUM_ETH_PAIR)
27#define NB_MBUF 512
28#define PAIR_PORT_INDEX(p) ((p) + NUM_ETH_PAIR)
29#define PORT(p) default_params.port[(p)]
30#define TEST_ETHDEV_ID PORT(0)
31#define TEST_ETHDEV_PAIR_ID PORT(PAIR_PORT_INDEX(0))
32
33#define EDEV_RETRY 0xffff
34
/* Shared state for the Tx adapter test suite. */
struct event_eth_tx_adapter_test_params {
	struct rte_mempool *mp;		/* mbuf pool shared by all test ports */
	uint16_t rx_rings, tx_rings;	/* queue counts used in port_init_common() */
	/* backing rings for the ring PMD ports, one set per ethdev */
	struct rte_ring *r[NUM_ETH_DEV][MAX_NUM_QUEUE];
	/* ethdev port ids; a port's pair lives at PAIR_PORT_INDEX() */
	int port[NUM_ETH_DEV];
};

/* Set when the suite created event_sw0 itself and must remove it on teardown */
static int event_dev_delete;
static struct event_eth_tx_adapter_test_params default_params;
/* Event device scheduler service id; ~0ULL means "no service needed" */
static uint64_t eid = ~0ULL;
/* Tx adapter service id, filled in by tx_adapter_service() */
static uint32_t tid;
46
47static inline int
48port_init_common(uint16_t port, const struct rte_eth_conf *port_conf,
49 struct rte_mempool *mp)
50{
51 const uint16_t rx_ring_size = RING_SIZE, tx_ring_size = RING_SIZE;
52 int retval;
53 uint16_t q;
54
55 if (!rte_eth_dev_is_valid_port(port))
56 return -1;
57
58 default_params.rx_rings = MAX_NUM_QUEUE;
59 default_params.tx_rings = MAX_NUM_QUEUE;
60
61
62 retval = rte_eth_dev_configure(port, default_params.rx_rings,
63 default_params.tx_rings, port_conf);
64 if (retval != 0)
65 return retval;
66
67 for (q = 0; q < default_params.rx_rings; q++) {
68 retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
69 rte_eth_dev_socket_id(port), NULL, mp);
70 if (retval < 0)
71 return retval;
72 }
73
74 for (q = 0; q < default_params.tx_rings; q++) {
75 retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
76 rte_eth_dev_socket_id(port), NULL);
77 if (retval < 0)
78 return retval;
79 }
80
81
82 retval = rte_eth_dev_start(port);
83 if (retval < 0)
84 return retval;
85
86
87 struct rte_ether_addr addr;
88 retval = rte_eth_macaddr_get(port, &addr);
89 if (retval < 0)
90 return retval;
91 printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
92 " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
93 (unsigned int)port,
94 addr.addr_bytes[0], addr.addr_bytes[1],
95 addr.addr_bytes[2], addr.addr_bytes[3],
96 addr.addr_bytes[4], addr.addr_bytes[5]);
97
98
99 retval = rte_eth_promiscuous_enable(port);
100 if (retval != 0)
101 return retval;
102
103 return 0;
104}
105
106static inline int
107port_init(uint16_t port, struct rte_mempool *mp)
108{
109 struct rte_eth_conf conf = { 0 };
110 return port_init_common(port, &conf, mp);
111}
112
113#define RING_NAME_LEN 20
114#define DEV_NAME_LEN 20
115
116static int
117init_ports(void)
118{
119 char ring_name[ETH_NAME_LEN];
120 unsigned int i, j;
121 struct rte_ring * const *c1;
122 struct rte_ring * const *c2;
123 int err;
124
125 if (!default_params.mp)
126 default_params.mp = rte_pktmbuf_pool_create("mbuf_pool",
127 NB_MBUF, 32,
128 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
129
130 if (!default_params.mp)
131 return -ENOMEM;
132
133 for (i = 0; i < NUM_ETH_DEV; i++) {
134 for (j = 0; j < MAX_NUM_QUEUE; j++) {
135 snprintf(ring_name, sizeof(ring_name), "R%u%u", i, j);
136 default_params.r[i][j] = rte_ring_create(ring_name,
137 RING_SIZE,
138 SOCKET0,
139 RING_F_SP_ENQ | RING_F_SC_DEQ);
140 TEST_ASSERT((default_params.r[i][j] != NULL),
141 "Failed to allocate ring");
142 }
143 }
144
145
146
147
148
149
150 for (i = 0; i < NUM_ETH_PAIR; i++) {
151 char dev_name[DEV_NAME_LEN];
152 int p;
153
154 c1 = default_params.r[i];
155 c2 = default_params.r[PAIR_PORT_INDEX(i)];
156
157 snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i, i + NUM_ETH_PAIR);
158 p = rte_eth_from_rings(dev_name, c1, MAX_NUM_QUEUE,
159 c2, MAX_NUM_QUEUE, SOCKET0);
160 TEST_ASSERT(p >= 0, "Port creation failed %s", dev_name);
161 err = port_init(p, default_params.mp);
162 TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
163 default_params.port[i] = p;
164
165 snprintf(dev_name, DEV_NAME_LEN, "%u-%u", i + NUM_ETH_PAIR, i);
166 p = rte_eth_from_rings(dev_name, c2, MAX_NUM_QUEUE,
167 c1, MAX_NUM_QUEUE, SOCKET0);
168 TEST_ASSERT(p > 0, "Port creation failed %s", dev_name);
169 err = port_init(p, default_params.mp);
170 TEST_ASSERT(err == 0, "Port init failed %s", dev_name);
171 default_params.port[PAIR_PORT_INDEX(i)] = p;
172 }
173
174 return 0;
175}
176
177static void
178deinit_ports(void)
179{
180 uint16_t i, j;
181 char name[ETH_NAME_LEN];
182
183 for (i = 0; i < RTE_DIM(default_params.port); i++) {
184 rte_eth_dev_stop(default_params.port[i]);
185 rte_eth_dev_get_name_by_port(default_params.port[i], name);
186 rte_vdev_uninit(name);
187 for (j = 0; j < RTE_DIM(default_params.r[i]); j++)
188 rte_ring_free(default_params.r[i][j]);
189 }
190}
191
192static int
193testsuite_setup(void)
194{
195 const char *vdev_name = "event_sw0";
196
197 int err = init_ports();
198 TEST_ASSERT(err == 0, "Port initialization failed err %d\n", err);
199
200 if (rte_event_dev_count() == 0) {
201 printf("Failed to find a valid event device,"
202 " testing with event_sw0 device\n");
203 err = rte_vdev_init(vdev_name, NULL);
204 TEST_ASSERT(err == 0, "vdev %s creation failed %d\n",
205 vdev_name, err);
206 event_dev_delete = 1;
207 }
208 return err;
209}
210
211#define DEVICE_ID_SIZE 64
212
213static void
214testsuite_teardown(void)
215{
216 deinit_ports();
217 rte_mempool_free(default_params.mp);
218 default_params.mp = NULL;
219 if (event_dev_delete)
220 rte_vdev_uninit("event_sw0");
221}
222
223static int
224tx_adapter_create(void)
225{
226 int err;
227 struct rte_event_dev_info dev_info;
228 struct rte_event_port_conf tx_p_conf;
229 uint8_t priority;
230 uint8_t queue_id;
231
232 struct rte_event_dev_config config = {
233 .nb_event_queues = 1,
234 .nb_event_ports = 1,
235 };
236
237 struct rte_event_queue_conf wkr_q_conf = {
238 .schedule_type = RTE_SCHED_TYPE_ORDERED,
239 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
240 .nb_atomic_flows = 1024,
241 .nb_atomic_order_sequences = 1024,
242 };
243
244 memset(&tx_p_conf, 0, sizeof(tx_p_conf));
245 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
246 config.nb_event_queue_flows = dev_info.max_event_queue_flows;
247 config.nb_event_port_dequeue_depth =
248 dev_info.max_event_port_dequeue_depth;
249 config.nb_event_port_enqueue_depth =
250 dev_info.max_event_port_enqueue_depth;
251 config.nb_events_limit =
252 dev_info.max_num_events;
253
254 err = rte_event_dev_configure(TEST_DEV_ID, &config);
255 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
256 err);
257
258 queue_id = 0;
259 err = rte_event_queue_setup(TEST_DEV_ID, 0, &wkr_q_conf);
260 TEST_ASSERT(err == 0, "Event queue setup failed %d\n", err);
261
262 err = rte_event_port_setup(TEST_DEV_ID, 0, NULL);
263 TEST_ASSERT(err == 0, "Event port setup failed %d\n", err);
264
265 priority = RTE_EVENT_DEV_PRIORITY_LOWEST;
266 err = rte_event_port_link(TEST_DEV_ID, 0, &queue_id, &priority, 1);
267 TEST_ASSERT(err == 1, "Error linking port %s\n",
268 rte_strerror(rte_errno));
269 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
270 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
271
272 tx_p_conf.new_event_threshold = dev_info.max_num_events;
273 tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
274 tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
275 err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
276 &tx_p_conf);
277 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
278
279 return err;
280}
281
/* Test fixture: destroy the adapter created by tx_adapter_create(). */
static void
tx_adapter_free(void)
{
	rte_event_eth_tx_adapter_free(TEST_INST_ID);
}
287
288static int
289tx_adapter_create_free(void)
290{
291 int err;
292 struct rte_event_dev_info dev_info;
293 struct rte_event_port_conf tx_p_conf;
294
295 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
296 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
297
298 tx_p_conf.new_event_threshold = dev_info.max_num_events;
299 tx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
300 tx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
301
302 err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
303 NULL);
304 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
305
306 err = rte_event_eth_tx_adapter_create(TEST_INST_ID, TEST_DEV_ID,
307 &tx_p_conf);
308 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
309
310 err = rte_event_eth_tx_adapter_create(TEST_INST_ID,
311 TEST_DEV_ID, &tx_p_conf);
312 TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
313
314 err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
315 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
316
317 err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
318 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
319
320 err = rte_event_eth_tx_adapter_free(1);
321 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
322
323 return TEST_SUCCESS;
324}
325
/*
 * Exercise queue add/delete on the adapter: invalid ethdev id, a
 * single queue, all queues (-1), and an invalid adapter instance id.
 */
static int
tx_adapter_queue_add_del(void)
{
	int err;
	uint32_t cap;

	/* cap is fetched but not used below; this only verifies the call
	 * succeeds. NOTE(review): presumably behavior could branch on the
	 * internal-port capability here — confirm against other adapters.
	 */
	err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
					&cap);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Adding queues of a non-existent ethdev must fail */
	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
						rte_eth_dev_count_total(),
						-1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	/* Add a single queue (id 0) */
	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Add all queues (-1) on top of the one added above */
	err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Delete a single queue */
	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						0);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Delete all remaining queues */
	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Deleting again with no queues present still succeeds */
	err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
						TEST_ETHDEV_ID,
						-1);
	TEST_ASSERT(err == 0, "Expected 0 got %d", err);

	/* Invalid adapter instance id must fail for both operations */
	err = rte_event_eth_tx_adapter_queue_add(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	err = rte_event_eth_tx_adapter_queue_del(1, TEST_ETHDEV_ID, -1);
	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);

	return TEST_SUCCESS;
}
375
376static int
377tx_adapter_start_stop(void)
378{
379 int err;
380
381 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
382 -1);
383 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
384
385 err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
386 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
387
388 err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
389 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
390
391 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
392 -1);
393 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
394
395 err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
396 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
397
398 err = rte_event_eth_tx_adapter_stop(TEST_INST_ID);
399 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
400
401 err = rte_event_eth_tx_adapter_start(1);
402
403 err = rte_event_eth_tx_adapter_stop(1);
404 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
405
406 return TEST_SUCCESS;
407}
408
409
/*
 * Enqueue one mbuf destined for (port, tx_queue_id) into the event
 * device on queue qid, then run the scheduler/adapter services and poll
 * the paired ethdev until the same mbuf pointer comes back.
 *
 * @return 0 on success, -1 (after a failed TEST_ASSERT) on timeout or
 *         mismatch.
 */
static int
tx_adapter_single(uint16_t port, uint16_t tx_queue_id,
		struct rte_mbuf *m, uint8_t qid,
		uint8_t sched_type)
{
	struct rte_event event;
	struct rte_mbuf *r;
	int ret;
	unsigned int l;

	event.queue_id = qid;
	event.op = RTE_EVENT_OP_NEW;
	event.event_type = RTE_EVENT_TYPE_CPU;
	event.sched_type = sched_type;
	event.mbuf = m;

	/* Direct the mbuf to the requested port and Tx queue */
	m->port = port;
	rte_event_eth_tx_adapter_txq_set(m, tx_queue_id);

	/* Retry the enqueue up to EDEV_RETRY times */
	l = 0;
	while (rte_event_enqueue_burst(TEST_DEV_ID, 0, &event, 1) != 1) {
		l++;
		if (l > EDEV_RETRY)
			break;
	}

	TEST_ASSERT(l < EDEV_RETRY, "Unable to enqueue to eventdev");
	l = 0;
	while (l++ < EDEV_RETRY) {

		/* Run the scheduler service if the device uses one
		 * (eid stays ~0ULL for distributed-sched devices)
		 */
		if (eid != ~0ULL) {
			ret = rte_service_run_iter_on_app_lcore(eid, 0);
			TEST_ASSERT(ret == 0, "failed to run service %d", ret);
		}

		/* Run one iteration of the Tx adapter service */
		ret = rte_service_run_iter_on_app_lcore(tid, 0);
		TEST_ASSERT(ret == 0, "failed to run service %d", ret);

		/* The pair port loops the rings back: expect the exact
		 * same mbuf pointer on the matching Rx queue
		 */
		if (rte_eth_rx_burst(TEST_ETHDEV_PAIR_ID, tx_queue_id,
				&r, 1)) {
			TEST_ASSERT_EQUAL(r, m, "mbuf comparison failed"
					" expected %p received %p", m, r);
			return 0;
		}
	}

	TEST_ASSERT(0, "Failed to receive packet");
	return -1;
}
459
460static int
461tx_adapter_service(void)
462{
463 struct rte_event_eth_tx_adapter_stats stats;
464 uint32_t i;
465 int err;
466 uint8_t ev_port, ev_qid;
467 struct rte_mbuf bufs[RING_SIZE];
468 struct rte_mbuf *pbufs[RING_SIZE];
469 struct rte_event_dev_info dev_info;
470 struct rte_event_dev_config dev_conf;
471 struct rte_event_queue_conf qconf;
472 uint32_t qcnt, pcnt;
473 uint16_t q;
474 int internal_port;
475 uint32_t cap;
476
477 memset(&dev_conf, 0, sizeof(dev_conf));
478 err = rte_event_eth_tx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
479 &cap);
480 TEST_ASSERT(err == 0, "Failed to get adapter cap err %d\n", err);
481
482 internal_port = !!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
483 if (internal_port)
484 return TEST_SUCCESS;
485
486 err = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID, TEST_ETHDEV_ID,
487 -1);
488 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
489
490 err = rte_event_eth_tx_adapter_event_port_get(TEST_INST_ID,
491 &ev_port);
492 TEST_ASSERT_SUCCESS(err, "Failed to get event port %d", err);
493
494 err = rte_event_dev_attr_get(TEST_DEV_ID, RTE_EVENT_DEV_ATTR_PORT_COUNT,
495 &pcnt);
496 TEST_ASSERT_SUCCESS(err, "Port count get failed");
497
498 err = rte_event_dev_attr_get(TEST_DEV_ID,
499 RTE_EVENT_DEV_ATTR_QUEUE_COUNT, &qcnt);
500 TEST_ASSERT_SUCCESS(err, "Queue count get failed");
501
502 err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
503 TEST_ASSERT_SUCCESS(err, "Dev info failed");
504
505 dev_conf.nb_event_queue_flows = dev_info.max_event_queue_flows;
506 dev_conf.nb_event_port_dequeue_depth =
507 dev_info.max_event_port_dequeue_depth;
508 dev_conf.nb_event_port_enqueue_depth =
509 dev_info.max_event_port_enqueue_depth;
510 dev_conf.nb_events_limit =
511 dev_info.max_num_events;
512 dev_conf.nb_event_queues = qcnt + 1;
513 dev_conf.nb_event_ports = pcnt;
514 err = rte_event_dev_configure(TEST_DEV_ID, &dev_conf);
515 TEST_ASSERT(err == 0, "Event device initialization failed err %d\n",
516 err);
517
518 ev_qid = qcnt;
519 qconf.nb_atomic_flows = dev_info.max_event_queue_flows;
520 qconf.nb_atomic_order_sequences = 32;
521 qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
522 qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
523 qconf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
524 err = rte_event_queue_setup(TEST_DEV_ID, ev_qid, &qconf);
525 TEST_ASSERT_SUCCESS(err, "Failed to setup queue %u", ev_qid);
526
527
528
529
530
531 for (i = 0; i < pcnt; i++) {
532
533 int n_links;
534 uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
535 uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
536
537 if (i == ev_port)
538 continue;
539
540 n_links = rte_event_port_links_get(TEST_DEV_ID, i, queues,
541 priorities);
542 TEST_ASSERT(n_links > 0, "Failed to get port links %d\n",
543 n_links);
544 err = rte_event_port_setup(TEST_DEV_ID, i, NULL);
545 TEST_ASSERT(err == 0, "Failed to setup port err %d\n", err);
546 err = rte_event_port_link(TEST_DEV_ID, i, queues, priorities,
547 n_links);
548 TEST_ASSERT(n_links == err, "Failed to link all queues"
549 " err %s\n", rte_strerror(rte_errno));
550 }
551
552 err = rte_event_port_link(TEST_DEV_ID, ev_port, &ev_qid, NULL, 1);
553 TEST_ASSERT(err == 1, "Failed to link queue port %u",
554 ev_port);
555
556 err = rte_event_eth_tx_adapter_start(TEST_INST_ID);
557 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
558
559 if (!(dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
560 err = rte_event_dev_service_id_get(0, (uint32_t *)&eid);
561 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
562
563 err = rte_service_runstate_set(eid, 1);
564 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
565
566 err = rte_service_set_runstate_mapped_check(eid, 0);
567 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
568 }
569
570 err = rte_event_eth_tx_adapter_service_id_get(TEST_INST_ID, &tid);
571 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
572
573 err = rte_service_runstate_set(tid, 1);
574 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
575
576 err = rte_service_set_runstate_mapped_check(tid, 0);
577 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
578
579 err = rte_event_dev_start(TEST_DEV_ID);
580 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
581
582 for (q = 0; q < MAX_NUM_QUEUE; q++) {
583 for (i = 0; i < RING_SIZE; i++)
584 pbufs[i] = &bufs[i];
585 for (i = 0; i < RING_SIZE; i++) {
586 pbufs[i] = &bufs[i];
587 err = tx_adapter_single(TEST_ETHDEV_ID, q, pbufs[i],
588 ev_qid,
589 RTE_SCHED_TYPE_ORDERED);
590 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
591 }
592 for (i = 0; i < RING_SIZE; i++) {
593 TEST_ASSERT_EQUAL(pbufs[i], &bufs[i],
594 "Error: received data does not match"
595 " that transmitted");
596 }
597 }
598
599 err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, NULL);
600 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
601
602 err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
603 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
604 TEST_ASSERT_EQUAL(stats.tx_packets, MAX_NUM_QUEUE * RING_SIZE,
605 "stats.tx_packets expected %u got %"PRIu64,
606 MAX_NUM_QUEUE * RING_SIZE,
607 stats.tx_packets);
608
609 err = rte_event_eth_tx_adapter_stats_reset(TEST_INST_ID);
610 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
611
612 err = rte_event_eth_tx_adapter_stats_get(TEST_INST_ID, &stats);
613 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
614 TEST_ASSERT_EQUAL(stats.tx_packets, 0,
615 "stats.tx_packets expected %u got %"PRIu64,
616 0,
617 stats.tx_packets);
618
619 err = rte_event_eth_tx_adapter_stats_get(1, &stats);
620 TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
621
622 err = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID, TEST_ETHDEV_ID,
623 -1);
624 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
625
626 err = rte_event_eth_tx_adapter_free(TEST_INST_ID);
627 TEST_ASSERT(err == 0, "Expected 0 got %d", err);
628
629 rte_event_dev_stop(TEST_DEV_ID);
630
631 return TEST_SUCCESS;
632}
633
/*
 * Verify that queues of ethdevs created after the adapter exists can be
 * added to and removed from it (hotplug-style, using null PMD vdevs).
 */
static int
tx_adapter_dynamic_device(void)
{
	/* assumes the new vdevs get ids starting at the current available
	 * count — TODO confirm this holds when ports were detached earlier
	 */
	uint16_t port_id = rte_eth_dev_count_avail();
	const char *null_dev[2] = { "eth_null0", "eth_null1" };
	struct rte_eth_conf dev_conf;
	int ret;
	size_t i;

	memset(&dev_conf, 0, sizeof(dev_conf));
	for (i = 0; i < RTE_DIM(null_dev); i++) {
		ret = rte_vdev_init(null_dev[i], NULL);
		TEST_ASSERT_SUCCESS(ret, "%s Port creation failed %d",
				null_dev[i], ret);

		/* Create the adapter once, after the first device exists */
		if (i == 0) {
			ret = tx_adapter_create();
			TEST_ASSERT_SUCCESS(ret, "Adapter create failed %d",
					ret);
		}

		ret = rte_eth_dev_configure(port_id + i, MAX_NUM_QUEUE,
					MAX_NUM_QUEUE, &dev_conf);
		TEST_ASSERT_SUCCESS(ret, "Failed to configure device %d", ret);

		/* Add queue 0 of the newly created device to the adapter */
		ret = rte_event_eth_tx_adapter_queue_add(TEST_INST_ID,
							port_id + i, 0);
		TEST_ASSERT_SUCCESS(ret, "Failed to add queues %d", ret);

	}

	/* Remove all queues of both hotplugged devices again */
	for (i = 0; i < RTE_DIM(null_dev); i++) {
		ret = rte_event_eth_tx_adapter_queue_del(TEST_INST_ID,
							port_id + i, -1);
		TEST_ASSERT_SUCCESS(ret, "Failed to delete queues %d", ret);
	}

	tx_adapter_free();

	for (i = 0; i < RTE_DIM(null_dev); i++)
		rte_vdev_uninit(null_dev[i]);

	return TEST_SUCCESS;
}
678
/* Suite definition: setup creates the ports (and event_sw0 if no event
 * device exists); most cases use tx_adapter_create/free as fixtures.
 */
static struct unit_test_suite event_eth_tx_tests = {
	.setup = testsuite_setup,
	.teardown = testsuite_teardown,
	.suite_name = "tx event eth adapter test suite",
	.unit_test_cases = {
		TEST_CASE_ST(NULL, NULL, tx_adapter_create_free),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
				tx_adapter_queue_add_del),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
				tx_adapter_start_stop),
		TEST_CASE_ST(tx_adapter_create, tx_adapter_free,
				tx_adapter_service),
		TEST_CASE_ST(NULL, NULL, tx_adapter_dynamic_device),
		TEST_CASES_END()
	}
};
695
/* Entry point invoked by the test framework for this suite. */
static int
test_event_eth_tx_adapter_common(void)
{
	return unit_test_suite_runner(&event_eth_tx_tests);
}

/* Register the suite as the "event_eth_tx_adapter_autotest" command */
REGISTER_TEST_COMMAND(event_eth_tx_adapter_autotest,
		test_event_eth_tx_adapter_common);
704