#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_bus_vdev.h>
#include <rte_pause.h>

#include "opdl_evdev.h"
#include "opdl_log.h"

#define MAX_PORTS 16
#define MAX_QIDS 16
#define NUM_PACKETS (1<<18)
#define NUM_EVENTS 256
#define BURST_SIZE 32

static int evdev;

struct test {
	struct rte_mempool *mbuf_pool;
	uint8_t port[MAX_PORTS];
	uint8_t qid[MAX_QIDS];
	int nb_qids;
};

static struct rte_mempool *eventdev_func_mempool;

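/*
 * Generate a test packet from a canned ARP request frame. The payload
 * is irrelevant to the scheduler; the tests only need a valid mbuf to
 * attach to events.
 */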
static __rte_always_inline struct rte_mbuf *
rte_gen_arp(int portid, struct rte_mempool *mp)
{
	static const uint8_t arp_request[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
		0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
	};
	struct rte_mbuf *m;
	int pkt_len = sizeof(arp_request) - 1;

	m = rte_pktmbuf_alloc(mp);
	if (!m)
		return NULL;

	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
		arp_request, pkt_len);
	rte_pktmbuf_pkt_len(m) = pkt_len;
	rte_pktmbuf_data_len(m) = pkt_len;

	RTE_SET_USED(portid);

	return m;
}
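/*
 * (Re)configure the device under test with the given queue and port
 * counts; the rest of the config is fixed test defaults. The mbuf pool
 * pointer survives the memset of the test structure.
 */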
static __rte_always_inline int
init(struct test *t, int nb_queues, int nb_ports)
{
	struct rte_event_dev_config config = {
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_event_queue_flows = 1024,
		.nb_events_limit = 4096,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128,
	};
	int ret;

	void *temp = t->mbuf_pool; /* save and restore mbuf pool */

	memset(t, 0, sizeof(*t));
	t->mbuf_pool = temp;

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%d: Error configuring device\n", __LINE__);
	return ret;
}

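/*
 * Set up the requested number of event ports with fixed depths and
 * record their ids in the test structure.
 */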
static __rte_always_inline int
create_ports(struct test *t, int num_ports)
{
	int i;
	static const struct rte_event_port_conf conf = {
		.new_event_threshold = 1024,
		.dequeue_depth = 32,
		.enqueue_depth = 32,
	};
	if (num_ports > MAX_PORTS)
		return -1;

	for (i = 0; i < num_ports; i++) {
		if (rte_event_port_setup(evdev, i, &conf) < 0) {
			PMD_DRV_LOG(ERR, "Error setting up port %d\n", i);
			return -1;
		}
		t->port[i] = i;
	}

	return 0;
}

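/*
 * Append num_qids queues of the given OPDL queue type, mapping the OPDL
 * type onto the corresponding eventdev schedule type (single-link
 * queues are flagged via event_queue_cfg instead).
 */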
static __rte_always_inline int
create_queues_type(struct test *t, int num_qids, enum queue_type flags)
{
	int i;
	uint8_t type;

	switch (flags) {
	case OPDL_Q_TYPE_ORDERED:
		type = RTE_SCHED_TYPE_ORDERED;
		break;
	case OPDL_Q_TYPE_ATOMIC:
		type = RTE_SCHED_TYPE_ATOMIC;
		break;
	default:
		type = 0;
	}

	/* Check bounds before writing into t->qid[] */
	if (t->nb_qids + num_qids > MAX_QIDS)
		return -1;

	const struct rte_event_queue_conf conf = {
		.event_queue_cfg =
			(flags == OPDL_Q_TYPE_SINGLE_LINK ?
			RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
		.schedule_type = type,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			PMD_DRV_LOG(ERR, "%d: error creating qid %d\n",
					__LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}

	t->nb_qids += num_qids;

	return 0;
}
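/* Stop and close the device under test after each test case. */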
static __rte_always_inline int
cleanup(struct test *t __rte_unused)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
	PMD_DRV_LOG(ERR, "clean up for test done\n");
	return 0;
}

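/*
 * Ordered queue test: an rx port feeds qid[0], three worker ports each
 * take one event and forward it to qid[1] in reverse order, and the tx
 * port must still see the events in their original sequence.
 */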
static int
ordered_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	uint32_t i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];

	const uint32_t MAGIC_SEQN = 1234;

	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, tx_port+1) < 0 ||
			create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}
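	/*
	 * CQ mapping to QID:
	 * Map three workers to the same ordered qid[0] and the tx port to
	 * qid[1]. One packet is taken out per worker, re-enqueued in
	 * reverse order, and the reordering must be undone at tx.
	 */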
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;

		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
					__LINE__, i, err);
			return -1;
		}
	}

	struct rte_event deq_ev[w3_port + 1];

	uint32_t seq = 0;

	/* Dequeue one event at each worker port and verify the sequence */
	for (i = w1_port; i <= w3_port; i++) {
		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				&deq_ev[i], 1, 0);
		if (deq_pkts != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to deq\n", __LINE__);
			rte_event_dev_dump(evdev, stdout);
			return -1;
		}
		seq = *rte_event_pmd_selftest_seqn(deq_ev[i].mbuf) - MAGIC_SEQN;

		if (seq != (i-1)) {
			PMD_DRV_LOG(ERR, "%d: seq test failed: seq is %u, "
					"port number is %u\n", __LINE__, seq, i);
			return -1;
		}
	}

	/* Re-enqueue the events in reverse order to qid[1] */
	for (i = w3_port; i >= w1_port; i--) {
		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
		deq_ev[i].queue_id = t->qid[1];
		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue\n", __LINE__);
			return -1;
		}
	}

	/* All three events must appear at tx with ordering restored */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %u from port %u\n",
				__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	cleanup(t);

	return 0;
}
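/*
 * Atomic queue test: three events are enqueued on the same flow_id, so
 * the atomic qid[0] must deliver all of them to a single worker port
 * while the other workers dequeue nothing.
 */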
static int
atomic_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	int i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];
	const uint32_t MAGIC_SEQN = 1234;

	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, tx_port+1) < 0 ||
			create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}
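	/*
	 * CQ mapping to QID:
	 * Map three workers to the same atomic qid[0]; with a single flow,
	 * only one of them (port 2 in this layout) should receive events.
	 */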
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call\n", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets on the same atomic flow */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.flow_id = 1;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;

		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
					__LINE__, i, err);
			return -1;
		}
	}

	struct rte_event deq_ev[w3_port + 1];

	/* Only the port owning the atomic flow should receive events */
	for (i = w1_port; i <= w3_port; i++) {
		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				deq_ev, 3, 0);

		if (t->port[i] != 2) {
			if (deq_pkts != 0) {
				PMD_DRV_LOG(ERR, "%d: dequeued non-zero count!\n",
						__LINE__);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}
		} else {
			if (deq_pkts != 3) {
				PMD_DRV_LOG(ERR, "%d: dequeue count not equal to 3: %u!\n",
						__LINE__, deq_pkts);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}

			/* Forward all three events to qid[1] */
			int j;
			for (j = 0; j < 3; j++) {
				deq_ev[j].op = RTE_EVENT_OP_FORWARD;
				deq_ev[j].queue_id = t->qid[1];
			}

			err = rte_event_enqueue_burst(evdev, t->port[i],
					deq_ev, 3);

			if (err != 3) {
				PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
						"retval = %u\n",
						t->port[i], 3, err);
				return -1;
			}
		}
	}

	/* All three events must appear at the tx port */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %u from port %u\n",
				__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	cleanup(t);

	return 0;
}
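
/*
 * Compare the xstat ids returned for each of the first three ports
 * against hard-coded expected values; these are tied to the xstats
 * layout of the OPDL PMD in the single_link_w_stats scenario below.
 */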
static __rte_always_inline int
check_qid_stats(uint32_t id[], int index)
{
	if (index == 0) {
		if (id[0] != 3 || id[1] != 3 || id[2] != 3)
			return -1;
	} else if (index == 1) {
		if (id[0] != 5 || id[1] != 5 || id[2] != 2)
			return -1;
	} else if (index == 2) {
		if (id[0] != 3 || id[1] != 1 || id[2] != 1)
			return -1;
	}

	return 0;
}
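/*
 * Fetch the per-port xstats for the first three ports and validate
 * them via check_qid_stats().
 */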
static int
check_statistics(void)
{
	int num_ports = 3; /* Hard-coded for this test */
	int i;

	for (i = 0; i < num_ports; i++) {
		int num_stats, num_stats_returned;

		num_stats = rte_event_dev_xstats_names_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				NULL,
				NULL,
				0);
		if (num_stats <= 0)
			return -1;

		uint32_t id[num_stats];
		struct rte_event_dev_xstats_name names[num_stats];
		uint64_t values[num_stats];

		num_stats_returned = rte_event_dev_xstats_names_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				names,
				id,
				num_stats);
		if (num_stats != num_stats_returned)
			return -1;

		num_stats_returned = rte_event_dev_xstats_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				id,
				values,
				num_stats);
		if (num_stats != num_stats_returned)
			return -1;

		int err = check_qid_stats(id, i);
		if (err)
			return err;
	}
	return 0;
}

#define OLD_NUM_PACKETS 3
#define NEW_NUM_PACKETS 2
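/*
 * Single-link queue test with statistics: rx feeds the single-link
 * qid[0], one worker forwards only NEW_NUM_PACKETS of the
 * OLD_NUM_PACKETS it dequeues to the ordered qid[1], and the xstats
 * are verified at the end.
 */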
static int
single_link_w_stats(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t tx_port = 2;
	int err;
	int i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];
	RTE_SET_USED(mbufs);

	if (init(t, 2, tx_port + 1) < 0 ||
			create_ports(t, 3) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}
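	/*
	 * Link port 1 (worker) to the single-link qid[0] and port 2 (tx)
	 * to the ordered qid[1].
	 */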
	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
				__LINE__,
				t->port[1],
				t->qid[0]);
		cleanup(t);
		return -1;
	}

	err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]\n",
				__LINE__,
				t->port[2],
				t->qid[1]);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) != 0) {
		PMD_DRV_LOG(ERR, "%d: failed to start device\n", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed\n", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = 1234 + i;

		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u\n",
					__LINE__,
					i,
					err);
			return -1;
		}
	}

	/* Dequeue the 3 packets on the worker port */
	struct rte_event deq_ev[3];

	deq_pkts = rte_event_dequeue_burst(evdev,
			t->port[w1_port],
			deq_ev, 3, 0);

	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: deq not 3!\n", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Forward only NEW_NUM_PACKETS of them to the ordered queue */
	for (i = 0; i < NEW_NUM_PACKETS; i++)
		deq_ev[i].queue_id = t->qid[1];

	deq_pkts = rte_event_enqueue_burst(evdev,
			t->port[w1_port],
			deq_ev,
			NEW_NUM_PACKETS);

	if (deq_pkts != 2) {
		PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!\n", __LINE__, deq_pkts);
		cleanup(t);
		return -1;
	}

	/* Only the forwarded packets should arrive at the tx port */
	deq_pkts = rte_event_dequeue_burst(evdev,
			t->port[tx_port],
			deq_ev,
			3,
			0);

	if (deq_pkts != 2) {
		PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %u from port %u\n",
				__LINE__, deq_pkts, tx_port);
		cleanup(t);
		return -1;
	}

	/* check_statistics() returns 0 on success */
	if (check_statistics() < 0) {
		PMD_DRV_LOG(ERR, "xstats check failed\n");
		cleanup(t);
		return -1;
	}

	cleanup(t);

	return 0;
}
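/*
 * Negative test: linking two ports to the same SINGLE_LINK queue must
 * cause rte_event_dev_start() to fail.
 */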
static int
single_link(struct test *t)
{
	const uint8_t tx_port = 2;
	int err;
	struct rte_mbuf *mbufs[3];
	RTE_SET_USED(mbufs);

	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, 3) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}
	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Link a second port to the same single-link queue */
	err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping lb qid\n", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) == 0) {
		PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
				"SINGLE_LINK PORT\n", __LINE__);
		cleanup(t);
		return -1;
	}

	cleanup(t);

	return 0;
}
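/*
 * Fill a burst of NEW, ordered events destined for the given queue.
 * The mbuf pointer is a dummy value; qid_basic() never touches the
 * packet data.
 */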
static __rte_always_inline void
populate_event_burst(struct rte_event ev[],
		uint8_t qid,
		uint16_t num_events)
{
	uint16_t i;
	for (i = 0; i < num_events; i++) {
		ev[i].flow_id = 1;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
		ev[i].queue_id = qid;
		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
		ev[i].sub_event_type = 0;
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
	}
}

#define NUM_QUEUES 3
#define BATCH_SIZE 32

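/*
 * Pipeline test: push a batch of events through NUM_QUEUES ordered
 * queues in sequence, with negative checks for invalid link calls and
 * for enqueue/dequeue on a stopped device along the way.
 */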
static int
qid_basic(struct test *t)
{
	int err = 0;

	uint8_t q_id = 0;
	uint8_t p_id = 0;

	uint32_t num_events;
	uint32_t i;

	struct rte_event ev[BATCH_SIZE];

	if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
			create_ports(t, NUM_QUEUES+1) < 0 ||
			create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device\n", __LINE__);
		return -1;
	}

	/* Link port i+1 to queue i, forming the pipeline */
	for (i = 0; i < NUM_QUEUES; i++) {
		int nb_linked;
		q_id = i;

		nb_linked = rte_event_port_link(evdev,
				i+1, /* port = queue id + 1 */
				&q_id,
				NULL,
				1);

		if (nb_linked != 1) {
			PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u\n",
					__FILE__,
					__LINE__,
					i + 1,
					q_id);

			err = -1;
			break;
		}
	}

	/* A second link call on an already-linked port must fail */
	if (!err) {
		uint8_t t_qid = 0;
		if (rte_event_port_link(evdev,
					1,
					&t_qid,
					NULL,
					1) > 0) {
			PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail\n",
					__FILE__,
					__LINE__);
			err = -1;
		}

		uint32_t test_num_events;

		/* Dequeue on a stopped device must return 0 events */
		if (!err) {
			test_num_events = rte_event_dequeue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE,
					0);
			if (test_num_events != 0) {
				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing 0 packets from port %u on stopped device\n",
						__FILE__,
						__LINE__,
						p_id);
				err = -1;
			}
		}

		/* Enqueue on a stopped device must return 0 events */
		if (!err) {
			test_num_events = rte_event_enqueue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE);
			if (test_num_events != 0) {
				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing 0 packets to port %u on stopped device\n",
						__FILE__,
						__LINE__,
						p_id);
				err = -1;
			}
		}
	}

	/* Start the device */
	if (!err) {
		if (rte_event_dev_start(evdev) < 0) {
			PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
					__FILE__,
					__LINE__);
			err = -1;
		}
	}

	/* Linking on a started device must fail */
	if (!err) {
		uint8_t t_qid = 0;
		if (rte_event_port_link(evdev,
					1,
					&t_qid,
					NULL,
					1) > 0) {
			PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail\n",
					__FILE__,
					__LINE__);
			err = -1;
		}
	}

	/* Enqueue a full batch at the rx port (port 0, queue 0) */
	if (!err) {
		q_id = 0;

		populate_event_burst(ev,
				q_id,
				BATCH_SIZE);

		num_events = rte_event_enqueue_burst(evdev,
				p_id,
				ev,
				BATCH_SIZE);
		if (num_events != BATCH_SIZE) {
			PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets\n",
					__FILE__,
					__LINE__);
			err = -1;
		}
	}

	/* Pass the batch through each worker port/queue in turn */
	if (!err) {
		while (++p_id < NUM_QUEUES) {
			num_events = rte_event_dequeue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE,
					0);

			if (num_events != BATCH_SIZE) {
				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u\n",
						__FILE__,
						__LINE__,
						p_id);
				err = -1;
				break;
			}

			if (ev[0].queue_id != q_id) {
				PMD_DRV_LOG(ERR, "%s:%d: Error event portid[%u] q_id:[%u] does not match expected:[%u]\n",
						__FILE__,
						__LINE__,
						p_id,
						ev[0].queue_id,
						q_id);
				err = -1;
				break;
			}

			populate_event_burst(ev,
					++q_id,
					BATCH_SIZE);

			num_events = rte_event_enqueue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE);
			if (num_events != BATCH_SIZE) {
				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u\n",
						__FILE__,
						__LINE__,
						p_id,
						q_id);
				err = -1;
				break;
			}
		}
	}

	/* Dequeue the final batch at the tx port */
	if (!err) {
		num_events = rte_event_dequeue_burst(evdev,
				p_id,
				ev,
				BATCH_SIZE,
				0);
		if (num_events != BATCH_SIZE) {
			PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u\n",
					__FILE__,
					__LINE__,
					p_id);
			err = -1;
		}
	}

	cleanup(t);

	return err;
}
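/*
 * Selftest entry point: find or create the event_opdl0 vdev (with
 * do_validation set, which the stats test above relies on), set up a
 * shared mbuf pool, and run each test case in turn.
 */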
int
opdl_selftest(void)
{
	struct test *t = malloc(sizeof(struct test));
	int ret;

	const char *eventdev_name = "event_opdl0";

	if (t == NULL)
		return -1;

	evdev = rte_event_dev_get_dev_id(eventdev_name);

	if (evdev < 0) {
		PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.\n",
				__LINE__, eventdev_name);

		/* do_validation enables the extra checks and statistics */
		if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
			PMD_DRV_LOG(ERR, "Error creating eventdev\n");
			free(t);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			PMD_DRV_LOG(ERR, "Error finding newly created eventdev\n");
			free(t);
			return -1;
		}
	}

	/* Only create the mbuf pool once; it is shared by all tests */
	if (!eventdev_func_mempool) {
		eventdev_func_mempool = rte_pktmbuf_pool_create(
				"EVENTDEV_SW_SA_MBUF_POOL",
				(1<<12), /* 4k mbufs */
				32, /* cache size */
				0, /* private data size */
				512, /* data room size */
				rte_socket_id());
		if (!eventdev_func_mempool) {
			PMD_DRV_LOG(ERR, "ERROR creating mempool\n");
			free(t);
			return -1;
		}
	}
	t->mbuf_pool = eventdev_func_mempool;

	PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...\n");
	ret = ordered_basic(t);

	PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...\n");
	ret |= atomic_basic(t);

	PMD_DRV_LOG(ERR, "*** Running QID Basic test...\n");
	ret |= qid_basic(t);

	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...\n");
	ret |= single_link(t);

	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...\n");
	ret |= single_link_w_stats(t);

	/*
	 * Accumulated results above, so an early failure is not masked by
	 * a later pass. Release the shared mempool and clear the static
	 * pointer so a later selftest run recreates it.
	 */
	rte_mempool_free(t->mbuf_pool);
	eventdev_func_mempool = NULL;
	free(t);

	return ret;
}