#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#include <rte_fib.h>
#include <rte_fib6.h>

#include "l3fwd.h"
#if defined RTE_ARCH_X86
#include "l3fwd_sse.h"
#elif defined __ARM_NEON
#include "l3fwd_neon.h"
#elif defined RTE_ARCH_PPC_64
#include "l3fwd_altivec.h"
#else
#include "l3fwd_common.h"
#endif
#include "l3fwd_event.h"
#include "l3fwd_route.h"

/* How many packets ahead to prefetch while parsing a received burst. */
#define FIB_PREFETCH_OFFSET 4

/*
 * Next hop returned by the FIB on a lookup miss; assumed not to collide
 * with any real port id.
 */
#define FIB_DEFAULT_HOP 999

/*
 * Use the architecture-specific multi-packet TX path (SSE, NEON or
 * AltiVec) when one is available; otherwise send packets one at a time.
 */
#if defined RTE_ARCH_X86 || defined __ARM_NEON \
		|| defined RTE_ARCH_PPC_64
#define FIB_SEND_MULTI
#endif

static struct rte_fib *ipv4_l3fwd_fib_lookup_struct[NB_SOCKETS];
static struct rte_fib6 *ipv6_l3fwd_fib_lookup_struct[NB_SOCKETS];
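
/*
 * Parse the destination address of a packet into the pending IPv4 or
 * IPv6 lookup batch and record the packet type (IPv4 = 1, IPv6 = 0).
 */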
static inline void
fib_parse_packet(struct rte_mbuf *mbuf,
		uint32_t *ipv4, uint32_t *ipv4_cnt,
		uint8_t ipv6[RTE_FIB6_IPV6_ADDR_SIZE],
		uint32_t *ipv6_cnt, uint8_t *ip_type)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;

	eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);

	/* IPv4 */
	if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
		ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
		*ipv4 = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
		*ip_type = 1;
		(*ipv4_cnt)++;
	}
	/* IPv6 */
	else {
		ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
		rte_mov16(ipv6, (const uint8_t *)ipv6_hdr->dst_addr);
		*ip_type = 0;
		(*ipv6_cnt)++;
	}
}
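
/*
 * Without a vector TX path, rewrite the Ethernet header of each packet
 * and send the packets out one at a time.
 */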
#if !defined FIB_SEND_MULTI
static inline void
fib_send_single(int nb_tx, struct lcore_conf *qconf,
		struct rte_mbuf **pkts_burst, uint16_t hops[nb_tx])
{
	int32_t j;
	struct rte_ether_hdr *eth_hdr;

	for (j = 0; j < nb_tx; j++) {
		/* Run rfc1812 checks on IPv4 packets, if enabled. */
#if defined DO_RFC_1812_CHECKS
		rfc1812_process((struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(
				pkts_burst[j], struct rte_ether_hdr *) + 1),
				&hops[j], pkts_burst[j]->packet_type);
#endif

		/* Set MAC addresses. */
		eth_hdr = rte_pktmbuf_mtod(pkts_burst[j],
				struct rte_ether_hdr *);
		*(uint64_t *)&eth_hdr->dst_addr = dest_eth_addr[hops[j]];
		rte_ether_addr_copy(&ports_eth_addr[hops[j]],
				&eth_hdr->src_addr);

		send_single_packet(qconf, pkts_burst[j], hops[j]);
	}
}
#endif
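
/* Bulk parse, FIB lookup and send. */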
static inline void
fib_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
		uint16_t portid, struct lcore_conf *qconf)
{
	uint32_t ipv4_arr[nb_rx];
	uint8_t ipv6_arr[nb_rx][RTE_FIB6_IPV6_ADDR_SIZE];
	uint16_t hops[nb_rx];
	uint64_t hopsv4[nb_rx], hopsv6[nb_rx];
	uint8_t type_arr[nb_rx];
	uint32_t ipv4_cnt = 0, ipv6_cnt = 0;
	uint32_t ipv4_arr_assem = 0, ipv6_arr_assem = 0;
	uint16_t nh;
	int32_t i;

	/* Prefetch first packets. */
	for (i = 0; i < FIB_PREFETCH_OFFSET && i < nb_rx; i++)
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i], void *));

	/* Parse packet info and prefetch the rest of the burst. */
	for (i = 0; i < (nb_rx - FIB_PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
				i + FIB_PREFETCH_OFFSET], void *));
		fib_parse_packet(pkts_burst[i],
				&ipv4_arr[ipv4_cnt], &ipv4_cnt,
				ipv6_arr[ipv6_cnt], &ipv6_cnt,
				&type_arr[i]);
	}

	/* Parse remaining packet info. */
	for (; i < nb_rx; i++)
		fib_parse_packet(pkts_burst[i],
				&ipv4_arr[ipv4_cnt], &ipv4_cnt,
				ipv6_arr[ipv6_cnt], &ipv6_cnt,
				&type_arr[i]);

	/* Lookup IPv4 hops if IPv4 packets are present. */
	if (likely(ipv4_cnt > 0))
		rte_fib_lookup_bulk(qconf->ipv4_lookup_struct,
				ipv4_arr, hopsv4, ipv4_cnt);

	/* Lookup IPv6 hops if IPv6 packets are present. */
	if (ipv6_cnt > 0)
		rte_fib6_lookup_bulk(qconf->ipv6_lookup_struct,
				ipv6_arr, hopsv6, ipv6_cnt);

	/*
	 * Merge the IPv4 and IPv6 next hops into one array in packet
	 * order; on a lookup miss, send the packet back out on the
	 * port it arrived on.
	 */
	for (i = 0; i < nb_rx; i++) {
		if (type_arr[i])
			nh = (uint16_t)hopsv4[ipv4_arr_assem++];
		else
			nh = (uint16_t)hopsv6[ipv6_arr_assem++];
		hops[i] = nh != FIB_DEFAULT_HOP ? nh : portid;
	}

#if defined FIB_SEND_MULTI
	send_packets_multi(qconf, pkts_burst, hops, nb_rx);
#else
	fib_send_single(nb_rx, qconf, pkts_burst, hops);
#endif
}
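
/* Main polling-mode loop: drain TX buffers and forward received bursts. */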
int
fib_main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned int lcore_id;
	uint64_t prev_tsc, diff_tsc, cur_tsc;
	int i, nb_rx;
	uint16_t portid;
	uint8_t queueid;
	struct lcore_conf *qconf;
	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
			US_PER_S * BURST_TX_DRAIN_US;

	lcore_id = rte_lcore_id();
	qconf = &lcore_conf[lcore_id];

	const uint16_t n_rx_q = qconf->n_rx_queue;
	const uint16_t n_tx_p = qconf->n_tx_port;
	if (n_rx_q == 0) {
		RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
		return 0;
	}

	RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);

	for (i = 0; i < n_rx_q; i++) {
		portid = qconf->rx_queue_list[i].port_id;
		queueid = qconf->rx_queue_list[i].queue_id;
		RTE_LOG(INFO, L3FWD,
				" -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
				lcore_id, portid, queueid);
	}

	cur_tsc = rte_rdtsc();
	prev_tsc = cur_tsc;

	while (!force_quit) {
		/* TX burst queue drain. */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > drain_tsc)) {
			for (i = 0; i < n_tx_p; ++i) {
				portid = qconf->tx_port_id[i];
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				send_burst(qconf,
					qconf->tx_mbufs[portid].len,
					portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			prev_tsc = cur_tsc;
		}

		/* Read packets from RX queues. */
		for (i = 0; i < n_rx_q; ++i) {
			portid = qconf->rx_queue_list[i].port_id;
			queueid = qconf->rx_queue_list[i].queue_id;
			nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst,
					MAX_PKT_BURST);
			if (nb_rx == 0)
				continue;

			fib_send_packets(nb_rx, pkts_burst, portid, qconf);
		}

		cur_tsc = rte_rdtsc();
	}

	return 0;
}
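
/* One eventdev loop for single and burst packet processing using the FIB. */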
static __rte_always_inline void
fib_event_loop(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
			evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	int i, nb_enq = 0, nb_deq = 0;
	struct lcore_conf *lconf;
	unsigned int lcore_id;

	uint32_t ipv4_arr[MAX_PKT_BURST];
	uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
	uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
	uint16_t nh;
	uint8_t type_arr[MAX_PKT_BURST];
	uint32_t ipv4_cnt, ipv6_cnt;
	uint32_t ipv4_arr_assem, ipv6_arr_assem;

	if (event_p_id < 0)
		return;

	lcore_id = rte_lcore_id();
	lconf = &lcore_conf[lcore_id];

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__, lcore_id);

	while (!force_quit) {
		/* Read events from RX queues. */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id,
				events, deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		/* Reset the per-burst counters. */
		ipv4_cnt = 0;
		ipv6_cnt = 0;
		ipv4_arr_assem = 0;
		ipv6_arr_assem = 0;

		/* Prefetch first packets. */
		for (i = 0; i < FIB_PREFETCH_OFFSET && i < nb_deq; i++)
			rte_prefetch0(rte_pktmbuf_mtod(events[i].mbuf, void *));

		/* Parse packet info and prefetch the rest of the burst. */
		for (i = 0; i < (nb_deq - FIB_PREFETCH_OFFSET); i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
						0);

			rte_prefetch0(rte_pktmbuf_mtod(events[
					i + FIB_PREFETCH_OFFSET].mbuf,
					void *));

			fib_parse_packet(events[i].mbuf,
					&ipv4_arr[ipv4_cnt], &ipv4_cnt,
					ipv6_arr[ipv6_cnt], &ipv6_cnt,
					&type_arr[i]);
		}

		/* Parse remaining packet info. */
		for (; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			if (flags & L3FWD_EVENT_TX_DIRECT)
				rte_event_eth_tx_adapter_txq_set(events[i].mbuf,
						0);

			fib_parse_packet(events[i].mbuf,
					&ipv4_arr[ipv4_cnt], &ipv4_cnt,
					ipv6_arr[ipv6_cnt], &ipv6_cnt,
					&type_arr[i]);
		}

		/* Lookup IPv4 hops if IPv4 packets are present. */
		if (likely(ipv4_cnt > 0))
			rte_fib_lookup_bulk(lconf->ipv4_lookup_struct,
					ipv4_arr, hopsv4, ipv4_cnt);

		/* Lookup IPv6 hops if IPv6 packets are present. */
		if (ipv6_cnt > 0)
			rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct,
					ipv6_arr, hopsv6, ipv6_cnt);

		/* Assign the looked-up egress port to each packet, in order. */
		for (i = 0; i < nb_deq; i++) {
			if (type_arr[i])
				nh = (uint16_t)hopsv4[ipv4_arr_assem++];
			else
				nh = (uint16_t)hopsv6[ipv6_arr_assem++];
			if (nh != FIB_DEFAULT_HOP)
				events[i].mbuf->port = nh;
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(event_d_id,
						event_p_id, events + nb_enq,
						nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(event_d_id,
					event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
						event_d_id, event_p_id,
						events + nb_enq,
						nb_deq - nb_enq, 0);
		}
	}

	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
			nb_deq, 0);
}
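
/*
 * Eventdev worker entry points; each binds fib_event_loop() to a TX mode,
 * either direct via the TX adapter or forwarded to the TX event queue.
 */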
int __rte_noinline
fib_event_main_loop_tx_d(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_d_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q_burst(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc =
			l3fwd_get_eventdev_rsrc();

	fib_event_loop(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}
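
/* Do a FIB lookup for every mbuf in an event vector and set egress ports. */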
static __rte_always_inline void
fib_process_event_vector(struct rte_event_vector *vec)
{
	uint8_t ipv6_arr[MAX_PKT_BURST][RTE_FIB6_IPV6_ADDR_SIZE];
	uint64_t hopsv4[MAX_PKT_BURST], hopsv6[MAX_PKT_BURST];
	uint32_t ipv4_arr_assem, ipv6_arr_assem;
	struct rte_mbuf **mbufs = vec->mbufs;
	uint32_t ipv4_arr[MAX_PKT_BURST];
	uint8_t type_arr[MAX_PKT_BURST];
	uint32_t ipv4_cnt, ipv6_cnt;
	struct lcore_conf *lconf;
	uint16_t nh;
	int i;

	lconf = &lcore_conf[rte_lcore_id()];

	/* Reset the per-vector counters. */
	ipv4_cnt = 0;
	ipv6_cnt = 0;
	ipv4_arr_assem = 0;
	ipv6_arr_assem = 0;

	/* Prefetch first packets. */
	for (i = 0; i < FIB_PREFETCH_OFFSET && i < vec->nb_elem; i++)
		rte_prefetch0(rte_pktmbuf_mtod(mbufs[i], void *));

	/* Parse packet info and prefetch the rest of the vector. */
	for (i = 0; i < (vec->nb_elem - FIB_PREFETCH_OFFSET); i++) {
		rte_prefetch0(rte_pktmbuf_mtod(mbufs[i + FIB_PREFETCH_OFFSET],
				void *));
		fib_parse_packet(mbufs[i], &ipv4_arr[ipv4_cnt], &ipv4_cnt,
				ipv6_arr[ipv6_cnt], &ipv6_cnt, &type_arr[i]);
	}

	/* Parse remaining packet info. */
	for (; i < vec->nb_elem; i++)
		fib_parse_packet(mbufs[i], &ipv4_arr[ipv4_cnt], &ipv4_cnt,
				ipv6_arr[ipv6_cnt], &ipv6_cnt, &type_arr[i]);

	/* Lookup IPv4 hops if IPv4 packets are present. */
	if (likely(ipv4_cnt > 0))
		rte_fib_lookup_bulk(lconf->ipv4_lookup_struct, ipv4_arr, hopsv4,
				ipv4_cnt);

	/* Lookup IPv6 hops if IPv6 packets are present. */
	if (ipv6_cnt > 0)
		rte_fib6_lookup_bulk(lconf->ipv6_lookup_struct, ipv6_arr,
				hopsv6, ipv6_cnt);

	/* Seed the vector's port attribute from the first packet's next hop. */
	if (vec->attr_valid) {
		nh = type_arr[0] ? (uint16_t)hopsv4[0] : (uint16_t)hopsv6[0];
		if (nh != FIB_DEFAULT_HOP)
			vec->port = nh;
		else
			vec->attr_valid = 0;
	}

	/* Assign ports; any mismatch invalidates the vector attribute. */
	for (i = 0; i < vec->nb_elem; i++) {
		if (type_arr[i])
			nh = (uint16_t)hopsv4[ipv4_arr_assem++];
		else
			nh = (uint16_t)hopsv6[ipv6_arr_assem++];
		if (nh != FIB_DEFAULT_HOP)
			mbufs[i]->port = nh;
		event_vector_attr_validate(vec, mbufs[i]);
	}
}
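
/* Eventdev loop for event vector processing using the FIB. */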
static __rte_always_inline void
fib_event_loop_vector(struct l3fwd_event_resources *evt_rsrc,
		const uint8_t flags)
{
	const int event_p_id = l3fwd_get_free_event_port(evt_rsrc);
	const uint8_t tx_q_id =
		evt_rsrc->evq.event_q_id[evt_rsrc->evq.nb_queues - 1];
	const uint8_t event_d_id = evt_rsrc->event_d_id;
	const uint16_t deq_len = evt_rsrc->deq_depth;
	struct rte_event events[MAX_PKT_BURST];
	int nb_enq = 0, nb_deq = 0, i;

	if (event_p_id < 0)
		return;

	RTE_LOG(INFO, L3FWD, "entering %s on lcore %u\n", __func__,
		rte_lcore_id());

	while (!force_quit) {
		/* Read events from RX queues. */
		nb_deq = rte_event_dequeue_burst(event_d_id, event_p_id, events,
				deq_len, 0);
		if (nb_deq == 0) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_deq; i++) {
			if (flags & L3FWD_EVENT_TX_ENQ) {
				events[i].queue_id = tx_q_id;
				events[i].op = RTE_EVENT_OP_FORWARD;
			}

			fib_process_event_vector(events[i].vec);

			if (flags & L3FWD_EVENT_TX_DIRECT)
				event_vector_txq_set(events[i].vec, 0);
		}

		if (flags & L3FWD_EVENT_TX_ENQ) {
			nb_enq = rte_event_enqueue_burst(event_d_id, event_p_id,
					events, nb_deq);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_enqueue_burst(
					event_d_id, event_p_id, events + nb_enq,
					nb_deq - nb_enq);
		}

		if (flags & L3FWD_EVENT_TX_DIRECT) {
			nb_enq = rte_event_eth_tx_adapter_enqueue(
				event_d_id, event_p_id, events, nb_deq, 0);
			while (nb_enq < nb_deq && !force_quit)
				nb_enq += rte_event_eth_tx_adapter_enqueue(
					event_d_id, event_p_id, events + nb_enq,
					nb_deq - nb_enq, 0);
		}
	}

	l3fwd_event_worker_cleanup(event_d_id, event_p_id, events, nb_enq,
			nb_deq, 1);
}
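
/* Eventdev worker entry points for the vector datapath. */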
int __rte_noinline
fib_event_main_loop_tx_d_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_d_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_DIRECT);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}

int __rte_noinline
fib_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();

	fib_event_loop_vector(evt_rsrc, L3FWD_EVENT_TX_ENQ);
	return 0;
}
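
/*
 * Create the IPv4 (DIR24_8) and IPv6 (TRIE) FIB tables for a socket and
 * populate them from the route arrays, skipping routes whose egress port
 * is not in enabled_port_mask.
 */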
void
setup_fib(const int socketid)
{
	struct rte_eth_dev_info dev_info;
	struct rte_fib6_conf config;
	struct rte_fib_conf config_ipv4;
	int i;
	int ret;
	char s[64];
	char abuf[INET6_ADDRSTRLEN];

	/* Create the IPv4 FIB table. */
	config_ipv4.type = RTE_FIB_DIR24_8;
	config_ipv4.max_routes = (1 << 16);
	config_ipv4.rib_ext_sz = 0;
	config_ipv4.default_nh = FIB_DEFAULT_HOP;
	config_ipv4.dir24_8.nh_sz = RTE_FIB_DIR24_8_4B;
	config_ipv4.dir24_8.num_tbl8 = (1 << 15);
	snprintf(s, sizeof(s), "IPV4_L3FWD_FIB_%d", socketid);
	ipv4_l3fwd_fib_lookup_struct[socketid] =
			rte_fib_create(s, socketid, &config_ipv4);
	if (ipv4_l3fwd_fib_lookup_struct[socketid] == NULL)
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd FIB table on socket %d\n",
			socketid);

	/* Populate the IPv4 FIB table. */
	for (i = 0; i < route_num_v4; i++) {
		struct in_addr in;

		/* Skip unused ports. */
		if ((1 << route_base_v4[i].if_out &
				enabled_port_mask) == 0)
			continue;

		rte_eth_dev_info_get(route_base_v4[i].if_out,
				&dev_info);
		ret = rte_fib_add(ipv4_l3fwd_fib_lookup_struct[socketid],
			route_base_v4[i].ip,
			route_base_v4[i].depth,
			route_base_v4[i].if_out);

		if (ret < 0) {
			free(route_base_v4);
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd FIB table on socket %d\n",
				i, socketid);
		}

		in.s_addr = htonl(route_base_v4[i].ip);
		if (inet_ntop(AF_INET, &in, abuf, sizeof(abuf)) != NULL) {
			printf("FIB: Adding route %s / %d (%d) [%s]\n", abuf,
			       route_base_v4[i].depth,
			       route_base_v4[i].if_out,
			       dev_info.device->name);
		} else {
			printf("FIB: IPv4 route added to port %d [%s]\n",
			       route_base_v4[i].if_out,
			       dev_info.device->name);
		}
	}

	/* Create the IPv6 FIB table. */
	snprintf(s, sizeof(s), "IPV6_L3FWD_FIB_%d", socketid);

	config.type = RTE_FIB6_TRIE;
	config.max_routes = (1 << 16) - 1;
	config.rib_ext_sz = 0;
	config.default_nh = FIB_DEFAULT_HOP;
	config.trie.nh_sz = RTE_FIB6_TRIE_4B;
	config.trie.num_tbl8 = (1 << 15);
	ipv6_l3fwd_fib_lookup_struct[socketid] = rte_fib6_create(s, socketid,
			&config);
	if (ipv6_l3fwd_fib_lookup_struct[socketid] == NULL) {
		free(route_base_v4);
		rte_exit(EXIT_FAILURE,
			"Unable to create the l3fwd FIB table on socket %d\n",
			socketid);
	}

	/* Populate the IPv6 FIB table. */
	for (i = 0; i < route_num_v6; i++) {
		/* Skip unused ports. */
		if ((1 << route_base_v6[i].if_out &
				enabled_port_mask) == 0)
			continue;

		rte_eth_dev_info_get(route_base_v6[i].if_out,
				&dev_info);
		ret = rte_fib6_add(ipv6_l3fwd_fib_lookup_struct[socketid],
			route_base_v6[i].ip_8,
			route_base_v6[i].depth,
			route_base_v6[i].if_out);

		if (ret < 0) {
			free(route_base_v4);
			free(route_base_v6);
			rte_exit(EXIT_FAILURE,
				"Unable to add entry %u to the l3fwd FIB table on socket %d\n",
				i, socketid);
		}

		if (inet_ntop(AF_INET6, route_base_v6[i].ip_8,
				abuf, sizeof(abuf)) != NULL) {
			printf("FIB: Adding route %s / %d (%d) [%s]\n", abuf,
			       route_base_v6[i].depth,
			       route_base_v6[i].if_out,
			       dev_info.device->name);
		} else {
			printf("FIB: IPv6 route added to port %d [%s]\n",
			       route_base_v6[i].if_out,
			       dev_info.device->name);
		}
	}
}
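
/* Return the IPv4 FIB lookup struct for a socket. */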
void *
fib_get_ipv4_l3fwd_lookup_struct(const int socketid)
{
	return ipv4_l3fwd_fib_lookup_struct[socketid];
}
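
/* Return the IPv6 FIB lookup struct for a socket. */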
void *
fib_get_ipv6_l3fwd_lookup_struct(const int socketid)
{
	return ipv6_l3fwd_fib_lookup_struct[socketid];
}