#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

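/*
 * Classify a packet by its Ethernet type and next-layer protocol. On
 * return, *nlp points at the L4 protocol field inside the IP header
 * (ip_p for IPv4, ip6_nxt for IPv6), which is the byte the SP ACL
 * lookup later classifies on.
 */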
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if (**nlp == IPPROTO_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown or unsupported Ethernet type */
	return PKT_TYPE_INVALID;
}

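/*
 * Overwrite the Ethernet source and destination addresses with the
 * addresses configured for the egress port in ethaddr_tbl.
 */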
static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

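/*
 * Prepare an mbuf for transmission via the event eth Tx adapter. All
 * traffic is sent on Tx queue 0; these workers do not spread Tx
 * across multiple queues.
 */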
static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save the eth queue to be used by the Tx adapter */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

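/*
 * Build a per-port table of outbound security sessions for driver
 * mode. Driver mode performs no SP/SA lookup, so each port is mapped
 * to the first inline protocol session configured for it.
 */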
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
		struct rte_security_session **sess_tbl, uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d >= session table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only the first session found for a given port */
		if (sess_tbl[sa->portid])
			continue;
		sess_tbl[sa->portid] = pri_sess->security.ses;
	}
}

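/*
 * Run the packet through the SP (security policy) ACL. Returns 0 when
 * the packet must be dropped (DISCARD policy or no SP context) and 1
 * otherwise, with *sa_idx set to BYPASS for bypass traffic or to the
 * index of the SA that must protect the packet. PROTECT rules are
 * assumed to store the SA index offset by one in the ACL userdata,
 * since a classify result of 0 means the ACL found no match.
 */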
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

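/*
 * Route an IPv4 packet by looking up its destination address in the
 * LPM table. Returns the next-hop port id, or RTE_MAX_ETHPORTS when
 * there is no matching route.
 */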
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* No match */
	return RTE_MAX_ETHPORTS;
}

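/*
 * IPv6 counterpart of route4_pkt(): copy the destination address out
 * of the packet and look it up in the LPM6 table. Returns the next-hop
 * port id, or RTE_MAX_ETHPORTS when there is no matching route.
 */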
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* No match */
	return RTE_MAX_ETHPORTS;
}

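/* Dispatch to the IPv4 or IPv6 routing helper based on packet type */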
static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

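/*
 * Inbound processing for app mode: verify the inline offload result,
 * run the SP lookup, check that the SPI of the SA which processed the
 * packet matches the SA the policy points at, then route the packet
 * to its egress port.
 */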
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     PKT_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with an SA */

	/* If the packet was IPsec processed, the SA pointer must be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match the one in the SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* No matching route */
		goto drop_pkt_and_exit;
	}

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

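/*
 * Outbound processing for app mode: run the SP lookup and either
 * route the packet in plain text (BYPASS) or attach the matching
 * inline protocol session and mark the packet for Tx security
 * offload, leaving the encryption itself to the inline device.
 */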
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 and IPv6 packets are allowed on a
		 * protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* No matching route */
			goto drop_pkt_and_exit;
		}

		goto send_pkt;
	}

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
		*(struct rte_security_session **)rte_security_dynfield(pkt) =
				sess->security.ses;

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode selected.
 * The worker variants implemented below are registered with the
 * event helper, which picks the best fit at launch time.
 */

/* Number of event mode workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

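/*
 * Event mode worker.
 * Operating parameters: non-burst - Tx internal port - driver mode.
 * Driver mode performs no SP/SA lookup in software: a packet is
 * forwarded back out of the port it arrived on, and traffic seen on a
 * protected port is attached to that port's first inline protocol
 * session and marked for Tx security offload.
 */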
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare the security sessions table. In outbound driver mode
	 * we always use the first session configured for a port.
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
			RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* Check if it's a single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			/* Drop the packet if no session is configured */
			if (unlikely(!sess_tbl[port_id])) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save the security session */
			if (rte_security_dynfield_is_registered())
				*(struct rte_security_session **)
					rte_security_dynfield(pkt) =
						sess_tbl[port_id];

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
		}

		/*
		 * Since a Tx internal port is available, events can be
		 * directly enqueued to the adapter; they will be
		 * submitted to the eth device internally.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

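/*
 * Event mode worker.
 * Operating parameters: non-burst - Tx internal port - app mode.
 * App mode runs the full ipsec-secgw fast path in software for each
 * event: SP lookup, SA validation and routing, with only the crypto
 * itself left to the inline protocol capable ethdev.
 */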
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save the routing table and per-direction contexts */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's a single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
			RTE_LOG(ERR, IPSEC, "Invalid event type %u",
				ev.event_type);
			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
							&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != 1)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since a Tx internal port is available, events can be
		 * directly enqueued to the adapter; they will be
		 * submitted to the eth device internally.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

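/*
 * Register the worker variants implemented above. Each entry pairs a
 * capability set (Rx burst type, Tx port type, ipsec mode) with the
 * worker thread that implements it.
 */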
static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

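/*
 * Populate the worker table and hand it to the event helper, which
 * matches the event device's capabilities against the registered
 * workers and launches the best fit on this lcore.
 */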
static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate the worker parameters */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch the matching worker after checking the event device's
	 * capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

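/*
 * Per-lcore entry point: dispatch to the poll mode or event mode
 * worker based on the configured packet transfer mode. A minimal
 * launch sketch from the main lcore (assuming conf has already been
 * set up by the application):
 *
 *	rte_eal_mp_remote_launch(ipsec_launch_one_lcore, conf, CALL_MAIN);
 *	rte_eal_mp_wait_lcore();
 */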
int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}