#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

#include "l3fwd_route.h"

#define IPV4_L3FWD_LPM_MAX_RULES 1024
#define IPV4_L3FWD_LPM_NUMBER_TBL8S (1 << 8)
#define IPV6_L3FWD_LPM_MAX_RULES 1024
#define IPV6_L3FWD_LPM_NUMBER_TBL8S (1 << 16)

static struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];

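/*
 * Look up the IPv4 destination address in the socket-local LPM table.
 * Returns the next-hop port on a hit, or the RX port on a miss.
 */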
static inline uint16_t
lpm_get_ipv4_dst_port(const struct rte_ipv4_hdr *ipv4_hdr,
		      uint16_t portid,
		      struct rte_lpm *ipv4_l3fwd_lookup_struct)
{
	uint32_t dst_ip = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
	uint32_t next_hop;

	if (rte_lpm_lookup(ipv4_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}

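/*
 * IPv6 counterpart of the lookup above: longest-prefix match on the
 * 128-bit destination address, falling back to the RX port on a miss.
 */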
static inline uint16_t
lpm_get_ipv6_dst_port(const struct rte_ipv6_hdr *ipv6_hdr,
		      uint16_t portid,
		      struct rte_lpm6 *ipv6_l3fwd_lookup_struct)
{
	const uint8_t *dst_ip = ipv6_hdr->dst_addr;
	uint32_t next_hop;

	if (rte_lpm6_lookup(ipv6_l3fwd_lookup_struct, dst_ip, &next_hop) == 0)
		return next_hop;
	else
		return portid;
}

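/*
 * Resolve the destination port for a single mbuf from its packet type:
 * IPv4 and IPv6 use their respective LPM tables; anything else is sent
 * back out of the RX port.
 */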
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
		 uint16_t portid)
{
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);

		return lpm_get_ipv4_dst_port(ipv4_hdr, portid,
					     qconf->ipv4_lookup_struct);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return lpm_get_ipv6_dst_port(ipv6_hdr, portid,
					     qconf->ipv6_lookup_struct);
	}

	return portid;
}

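/*
 * Variant used by the vectorized send paths: the caller passes the IPv4
 * destination address already extracted (host byte order, as expected by
 * rte_lpm_lookup()), so only the IPv6 case still reads the header from
 * the mbuf.
 */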
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
			   uint32_t dst_ipv4, uint16_t portid)
{
	uint32_t next_hop;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_ether_hdr *eth_hdr;

	if (RTE_ETH_IS_IPV4_HDR(pkt->packet_type)) {
		return (uint16_t) ((rte_lpm_lookup(qconf->ipv4_lookup_struct,
				dst_ipv4, &next_hop) == 0)
				? next_hop : portid);
	} else if (RTE_ETH_IS_IPV6_HDR(pkt->packet_type)) {
		eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);

		return (uint16_t) ((rte_lpm6_lookup(qconf->ipv6_lookup_struct,
				ipv6_hdr->dst_addr, &next_hop) == 0)
				? next_hop : portid);
	}

	return portid;
}

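/* Pick the SIMD-optimized burst send path for the current architecture,
 * or the scalar fallback when none is available.
 */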
#if defined(RTE_ARCH_X86)
#include "l3fwd_lpm_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_lpm_neon.h"
#elif defined(RTE_ARCH_PPC_64)
#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif

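/* Poll-mode main processing loop: RX burst, LPM lookup, TX burst drain. */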
int
lpm_main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
		US_PER_S * BURST_TX_DRAIN_US;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	const uint16_t n_rx_q = qconf->n_rx_queue;
	const uint16_t n_tx_p = qconf->n_tx_port;
	if (n_rx_q == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < n_rx_q; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
			" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
			lcore_id, portid, queueid);
	}

	cur_tsc = rte_rdtsc();
	prev_tsc = cur_tsc;

	while (!force_quit) {
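		/*
		 * TX burst queue drain
		 */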
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (i = 0; i < n_tx_p; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

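		/*
		 * Read packets from RX queues
		 */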
		for (i = 0; i < n_rx_q; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
				MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
	|| defined RTE_ARCH_PPC_64
			l3fwd_lpm_send_packets(nb_rx, pkts_burst,
					       portid, qconf);
#else
			l3fwd_lpm_no_opt_send_packets(nb_rx, pkts_burst,
						      portid, qconf);
#endif
		}

		cur_tsc = rte_rdtsc();
	}

	return 0;
}

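/*
 * Forward a single event-mode packet: resolve the destination port and
 * rewrite the Ethernet header (with optional RFC 1812 validation on the
 * scalar path). Returns the resolved port, or BAD_PORT to drop.
 */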
static __rte_always_inline uint16_t
lpm_process_event_pkt(const struct lcore_conf *lconf, struct rte_mbuf *mbuf)
{
	mbuf->port = lpm_get_dst_port(lconf, mbuf, mbuf->port);

#if defined RTE_ARCH_X86 || defined __ARM_NEON \
	|| defined RTE_ARCH_PPC_64
	process_packet(mbuf, &mbuf->port);
#else
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(mbuf,
			struct rte_ether_hdr *);

#ifdef DO_RFC_1812_CHECKS
	struct rte_ipv4_hdr *ipv4_hdr;
	if (RTE_ETH_IS_IPV4_HDR(mbuf->packet_type)) {
		/* Check to make sure the packet is valid (RFC 1812) */
		ipv4_hdr = rte_pktmbuf_mtod_offset(mbuf,
				struct rte_ipv4_hdr *,
				sizeof(struct rte_ether_hdr));

		if (is_valid_ipv4_pkt(ipv4_hdr, mbuf->pkt_len) < 0) {
			mbuf->port = BAD_PORT;
			return BAD_PORT;
		}

		/* Update time to live and header checksum */
		--(ipv4_hdr->time_to_live);
		++(ipv4_hdr->hdr_checksum);
	}
#endif
	/* dst addr */
	*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[mbuf->port];

	/* src addr */
	rte_ether_addr_copy(&ports_eth_addr[mbuf->port],
			&eth_hdr->src_addr);
#endif
	return mbuf->port;
}

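/* Event-mode worker that dequeues and processes one event at a time. */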
static __rte_always_inline void
lpm_event_loop_single(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	struct rte_event ev;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);
	while (!force_quit) {
		if (!rte_event_dequeue_burst(event_d_id, event_p_id, &ev, 1, 0))
			continue;

		if (lpm_process_event_pkt(lconf, ev.mbuf) == BAD_PORT) {
			rte_pktmbuf_free(ev.mbuf);
			continue;
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			ev.queue_id = tx_q_id;
			ev.op = RTE_EVENT_OP_FORWARD;
			/* Retry until the event is accepted or we must quit */
			while (!rte_event_enqueue_burst(event_d_id, event_p_id,
					&ev, 1) && !force_quit)
				;
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
			while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, &ev, 1, 0) &&
					!force_quit)
				;
		}
	}
}

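/* Event-mode worker that dequeues and processes bursts of events. */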
static __rte_always_inline void
lpm_event_loop_burst(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
		evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
								 0);

			lpm_process_event_pkt(lconf, events[i].mbuf);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}
}

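/* Dispatch to the single-event or burst worker, as selected by flags. */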
static __rte_always_inline void
lpm_event_loop(struct l3fwd_event_resources *evt_rsrc,
	       const uint8_t flags)
{
	if (flags & L3FWD_EVENT_SINGLE)
		lpm_event_loop_single(evt_rsrc, flags);
	if (flags & L3FWD_EVENT_BURST)
		lpm_event_loop_burst(evt_rsrc, flags);
}

int __rte_noinline
lpm_event_main_loop_tx_d(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
		l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_d_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
		l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT | L3FWD_EVENT_BURST);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
		l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_SINGLE);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
		l3fwd_get_eventdev_rsrc();

	lpm_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ | L3FWD_EVENT_BURST);
	return 0;
}

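/*
 * Process all mbufs of an event vector; the vector's port attribute stays
 * valid only while every packet resolves to the same destination port.
 */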
static __rte_always_inline void
lpm_process_event_vector(struct rte_event_vector *vec, struct lcore_conf *lconf)
{
	struct rte_mbuf **mbufs = vec->mbufs;
	int i;

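	/* Process the first packet to initialize the vector attributes */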
	lpm_process_event_pkt(lconf, mbufs[0]);
	if (vec->attr_valid) {
		if (mbufs[0]->port != BAD_PORT)
			vec->port = mbufs[0]->port;
		else
			vec->attr_valid = 0;
	}

	for (i = 1; i < vec->nb_elem; i++) {
		lpm_process_event_pkt(lconf, mbufs[i]);
		event_vector_attr_validate(vec, mbufs[i]);
	}
}

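/* Event-mode worker operating on event vectors instead of single mbufs. */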
static __rte_always_inline void
lpm_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
		      const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	struct lcore_conf *lconf;
	unsigned int lcore_id;
	int i, nb_enq, nb_deq;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
						 deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			lpm_process_event_vector(events[i].vec, lconf);

			if (flags & L3FWD_EVENT_TX_DIRECT)
				event_vector_txq_set(events[i].vec, 0);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
							 events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(
					event_d_id, event_p_id, events,
					nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}
}

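/*
 * Vector-mode entry points. Note that the single and burst variants
 * currently invoke the vector loop with identical flags; the burst size
 * comes from evt_rsrc->deq_depth in both cases.
 */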
int __rte_noinline
lpm_event_main_loop_tx_d_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

int __rte_noinline
lpm_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	lpm_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

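/*
 * Create the per-socket IPv4 and IPv6 LPM tables and populate them from
 * the static route arrays declared in l3fwd_route.h, skipping routes
 * whose output port is not in enabled_port_mask.
 */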
void
setup_lpm(const int socketid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_lpm6_config config;
	struct rte_lpm_config config_ipv4;
	unsigned i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

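	/* create the LPM table */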
	config_ipv4.max_rules = IPV4_L3FWD_LPM_MAX_RULES;
	config_ipv4.number_tbl8s = IPV4_L3FWD_LPM_NUMBER_TBL8S;
	config_ipv4.flags = 0;
	snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
	ipv4_l3fwd_lpm_lookup_struct[socketid] =
			rte_lpm_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM table on socket %d\n",
			socketid);

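	/* populate the LPM table */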
	for (i = 0; i < RTE_DIM(ipv4_l3fwd_route_array); i++) {
		struct in_addr in;

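		/* skip unused ports */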
		if ((1 << ipv4_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		rte_eth_dev_info_get(ipv4_l3fwd_route_array[i].if_out,
				     &dev_info);
		ret = rte_lpm_add(ipv4_l3fwd_lpm_lookup_struct[socketid],
			ipv4_l3fwd_route_array[i].ip,
			ipv4_l3fwd_route_array[i].depth,
			ipv4_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM table on socket %d\n",
				i, socketid);
		}

		in.s_addr = htonl(ipv4_l3fwd_route_array[i].ip);
		printf("LPM: Adding route %s / %d (%d) [%s]\n",
		       inet_ntop(AF_INET, &in, abuf, sizeof(abuf)),
		       ipv4_l3fwd_route_array[i].depth,
		       ipv4_l3fwd_route_array[i].if_out, dev_info.device->name);
	}

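	/* create the LPM6 table */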
	snprintf(s, sizeof(s), "IPV6_L3FWD_LPM_%d", socketid);

	config.max_rules = IPV6_L3FWD_LPM_MAX_RULES;
	config.number_tbl8s = IPV6_L3FWD_LPM_NUMBER_TBL8S;
	config.flags = 0;
	ipv6_l3fwd_lpm_lookup_struct[socketid] = rte_lpm6_create(s, socketid,
			&config);
	if (ipv6_l3fwd_lpm_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd LPM6 table on socket %d\n",
			socketid);

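	/* populate the LPM6 table */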
	for (i = 0; i < RTE_DIM(ipv6_l3fwd_route_array); i++) {

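		/* skip unused ports */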
		if ((1 << ipv6_l3fwd_route_array[i].if_out &
				enabled_port_mask) == 0)
			continue;

		rte_eth_dev_info_get(ipv6_l3fwd_route_array[i].if_out,
				     &dev_info);
		ret = rte_lpm6_add(ipv6_l3fwd_lpm_lookup_struct[socketid],
			ipv6_l3fwd_route_array[i].ip,
			ipv6_l3fwd_route_array[i].depth,
			ipv6_l3fwd_route_array[i].if_out);

		if (ret < 0) {
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd LPM6 table on socket %d\n",
				i, socketid);
		}

		printf("LPM: Adding route %s / %d (%d) [%s]\n",
		       inet_ntop(AF_INET6, ipv6_l3fwd_route_array[i].ip, abuf,
				 sizeof(abuf)),
		       ipv6_l3fwd_route_array[i].depth,
		       ipv6_l3fwd_route_array[i].if_out, dev_info.device->name);
	}
}

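/*
 * Check whether the port can classify IPv4/IPv6 L3 headers in hardware;
 * returns 1 when both ptypes are supported, 0 otherwise.
 */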
int
lpm_check_ptype(int portid)
{
	int i, ret;
	int ptype_l3_ipv4 = 0, ptype_l3_ipv6 = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK;

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, NULL, 0);
	if (ret <= 0)
		return 0;

	uint32_t ptypes[ret];

	ret = rte_eth_dev_get_supported_ptypes(portid, ptype_mask, ptypes, ret);
	for (i = 0; i < ret; ++i) {
		if (ptypes[i] & RTE_PTYPE_L3_IPV4)
			ptype_l3_ipv4 = 1;
		if (ptypes[i] & RTE_PTYPE_L3_IPV6)
			ptype_l3_ipv6 = 1;
	}

	if (ptype_l3_ipv4 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV4\n", portid);

	if (ptype_l3_ipv6 == 0)
		printf("port %d cannot parse RTE_PTYPE_L3_IPV6\n", portid);

	if (ptype_l3_ipv4 && ptype_l3_ipv6)
		return 1;

	return 0;
}

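/* Software fallback: derive the L3 packet type from the Ethernet header. */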
static inline void
lpm_parse_ptype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth_hdr;
	uint32_t packet_type = RTE_PTYPE_UNKNOWN;
	uint16_t ether_type;

	eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	ether_type = eth_hdr->ether_type;
	if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))
		packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;

	m->packet_type = packet_type;
}

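/* RX callback that tags every received mbuf with its parsed packet type. */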
uint16_t
lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
		   struct rte_mbuf *pkts[], uint16_t nb_pkts,
		   uint16_t max_pkts __rte_unused,
		   void *user_param __rte_unused)
{
	unsigned int i;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;
	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], struct rte_ether_hdr *));
	for (i = 0; i < (unsigned int) (nb_pkts - 1); ++i) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1],
				struct rte_ether_hdr *));
		lpm_parse_ptype(pkts[i]);
	}
	lpm_parse_ptype(pkts[i]);

	return nb_pkts;
}

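/* Accessors used by the common l3fwd code to fetch the per-socket tables. */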
void *
lpm_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_lpm_lookup_struct[socketid];
}

void *
lpm_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_lpm_lookup_struct[socketid];
}