dpdk/examples/ipsec-secgw/ipsec_worker.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

struct port_drv_mode_data {
	struct rte_security_session *sess;
	struct rte_security_ctx *ctx;
};

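/*
 * Determine whether a packet is plain IPv4, plain IPv6 or IPsec (ESP),
 * based on the packet type parsed by the Rx path, and return a pointer
 * to the next protocol field for the SP (ACL) lookup.
 */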
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
	struct rte_ether_hdr *eth;
	uint32_t ptype = pkt->packet_type;

	eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	rte_prefetch0(eth);

	if (RTE_ETH_IS_IPV4_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip, ip_p));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV4;
		else
			return PKT_TYPE_PLAIN_IPV4;
	} else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
		*nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
				offsetof(struct ip6_hdr, ip6_nxt));
		if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
			return PKT_TYPE_IPSEC_IPV6;
		else
			return PKT_TYPE_PLAIN_IPV6;
	}

	/* Unknown/Unsupported type */
	return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
	struct rte_ether_hdr *ethhdr;

	ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
	memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
	memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
	/* Save the destination port in the mbuf */
	m->port = port_id;

	/* Save eth queue for Tx */
	rte_event_eth_tx_adapter_txq_set(m, 0);
}

static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
	vec->attr_valid = 1;
	vec->port = 0xFFFF;
	vec->queue = 0;
}

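/*
 * Track the common attributes of an event vector: the port attribute
 * stays valid only as long as every mbuf in the vector comes from the
 * same port; otherwise attr_valid is cleared.
 */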
static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
	if (vec->port == 0xFFFF) {
		vec->port = pkt->port;
		return;
	}
	if (vec->attr_valid && (vec->port != pkt->port))
		vec->attr_valid = 0;
}

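/*
 * Build a per-port table of outbound inline protocol security sessions
 * for driver mode. Only the first inline session found for each port is
 * used; sessions of any other action type are skipped with an error log.
 */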
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
			 struct port_drv_mode_data *data,
			 uint16_t size)
{
	struct rte_ipsec_session *pri_sess;
	struct ipsec_sa *sa;
	uint32_t i;

	if (!sa_out)
		return;

	for (i = 0; i < sa_out->nb_sa; i++) {

		sa = &sa_out->sa[i];
		if (!sa)
			continue;

		pri_sess = ipsec_get_primary_session(sa);
		if (!pri_sess)
			continue;

		if (pri_sess->type !=
			RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

			RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
				pri_sess->type);
			continue;
		}

		if (sa->portid >= size) {
			RTE_LOG(ERR, IPSEC,
				"Port id %d exceeds session table size %d\n",
				sa->portid, size);
			continue;
		}

		/* Use only first inline session found for a given port */
		if (data[sa->portid].sess)
			continue;
		data[sa->portid].sess = pri_sess->security.ses;
		data[sa->portid].ctx = pri_sess->security.ctx;
	}
}

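/*
 * Run an SP (ACL) lookup for a single packet. A DISCARD result rejects
 * the packet, a BYPASS result requests plain forwarding, and any other
 * result is the matching SA index plus one.
 */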
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
	uint32_t res;

	if (unlikely(sp == NULL))
		return 0;

	rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
			DEFAULT_MAX_CATEGORIES);

	if (unlikely(res == DISCARD))
		return 0;
	else if (res == BYPASS) {
		*sa_idx = -1;
		return 1;
	}

	*sa_idx = res - 1;
	return 1;
}

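/*
 * Bulk SP lookup for outbound traffic: DISCARD packets are freed,
 * BYPASS packets are compacted in place in the plain traffic list, and
 * packets that need protection are moved to the ipsec list together
 * with their SA index.
 */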
static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
	      struct traffic_type *ipsec)
{
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			ipsec->res[ipsec->num] = res - 1;
			ipsec->pkts[ipsec->num++] = m;
		}
	}
	ip->num = j;
}

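/*
 * Bulk SP lookup for inbound traffic that was already IPsec processed
 * by the hardware: besides the ACL verdict, verify that the SA
 * recovered from the security dynfield matches the SA the policy
 * points at (by SPI), and drop the packet otherwise.
 */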
static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
		 struct traffic_type *ip)
{
	struct ipsec_sa *sa;
	uint32_t i, j, res;
	struct rte_mbuf *m;

	if (unlikely(sp == NULL || ip->num == 0))
		return;

	rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
			 DEFAULT_MAX_CATEGORIES);

	j = 0;
	for (i = 0; i < ip->num; i++) {
		m = ip->pkts[i];
		res = ip->res[i];
		if (unlikely(res == DISCARD))
			free_pkts(&m, 1);
		else if (res == BYPASS)
			ip->pkts[j++] = m;
		else {
			sa = *(struct ipsec_sa **)rte_security_dynfield(m);
			if (sa == NULL) {
				free_pkts(&m, 1);
				continue;
			}

			/* SPI on the packet should match with the one in SA */
			if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
				free_pkts(&m, 1);
				continue;
			}

			ip->pkts[j++] = m;
		}
	}
	ip->num = j;
}

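/*
 * LPM route lookup for IPv4: returns the egress port for the packet's
 * destination address, or RTE_MAX_ETHPORTS when no route matches.
 */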
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint32_t dst_ip;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
	dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
	dst_ip = rte_be_to_cpu_32(dst_ip);

	ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
	uint8_t dst_ip[16];
	uint8_t *ip6_dst;
	uint16_t offset;
	uint32_t hop;
	int ret;

	offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
	ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
	memcpy(&dst_ip[0], ip6_dst, 16);

	ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

	if (ret == 0) {
		/* We have a hit */
		return hop;
	}

	/* else */
	return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
	if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
		return route4_pkt(pkt, rt->rt4_ctx);
	else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
		return route6_pkt(pkt, rt->rt6_ctx);

	return RTE_MAX_ETHPORTS;
}

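/*
 * App mode inbound path for a single event: classify the packet, check
 * the inbound SP, verify that IPsec processed packets carry an SA whose
 * SPI matches the policy, then route and forward (or drop).
 */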
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct ipsec_sa *sa = NULL;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	case PKT_TYPE_PLAIN_IPV6:
		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				goto drop_pkt_and_exit;
			}
			sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
		}

		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;

	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS)
		goto route_and_send_pkt;

	/* Validate sa_idx */
	if (sa_idx >= ctx->sa_ctx->nb_sa)
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected with SA */

	/* If the packet was IPsec processed, then SA pointer should be set */
	if (sa == NULL)
		goto drop_pkt_and_exit;

	/* SPI on the packet should match with the one in SA */
	if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
		goto drop_pkt_and_exit;

route_and_send_pkt:
	port_id = get_route(pkt, rt, type);
	if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
		/* no match */
		goto drop_pkt_and_exit;
	}
	/* else, we have a matching route */

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

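/*
 * App mode outbound path for a single event: check the outbound SP and
 * either route the packet in plain text (BYPASS) or attach the inline
 * protocol session of the matching SA before forwarding it to the SA's
 * port.
 */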
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
		struct rte_event *ev)
{
	struct rte_ipsec_session *sess;
	struct sa_ctx *sa_ctx;
	struct rte_mbuf *pkt;
	uint16_t port_id = 0;
	struct ipsec_sa *sa;
	enum pkt_type type;
	uint32_t sa_idx;
	uint8_t *nlp;

	/* Get pkt from event */
	pkt = ev->mbuf;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		/* Check if we have a match */
		if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	case PKT_TYPE_PLAIN_IPV6:
		/* Check if we have a match */
		if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
			/* No valid match */
			goto drop_pkt_and_exit;
		}
		break;
	default:
		/*
		 * Only plain IPv4 & IPv6 packets are allowed
		 * on protected port. Drop the rest.
		 */
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		goto drop_pkt_and_exit;
	}

	/* Check if the packet has to be bypassed */
	if (sa_idx == BYPASS) {
		port_id = get_route(pkt, rt, type);
		if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
			/* no match */
			goto drop_pkt_and_exit;
		}
		/* else, we have a matching route */
		goto send_pkt;
	}

	/* Validate sa_idx */
	if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
		goto drop_pkt_and_exit;

	/* Else the packet has to be protected */

	/* Get SA ctx */
	sa_ctx = ctx->sa_ctx;

	/* Get SA */
	sa = &(sa_ctx->sa[sa_idx]);

	/* Get IPsec session */
	sess = ipsec_get_primary_session(sa);

	/* Allow only inline protocol for now */
	if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
		RTE_LOG(ERR, IPSEC, "SA type not supported\n");
		goto drop_pkt_and_exit;
	}

	rte_security_set_pkt_metadata(sess->security.ctx,
				      sess->security.ses, pkt, NULL);

	/* Mark the packet for Tx security offload */
	pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

	/* Get the port to which this pkt needs to be submitted */
	port_id = sa->portid;

send_pkt:
	/* Provide L2 len for Outbound processing */
	pkt->l2_len = RTE_ETHER_HDR_LEN;

	/* Update mac addresses */
	update_mac_addrs(pkt, port_id);

	/* Update the event with the dest port */
	ipsec_event_pre_forward(pkt, port_id);
	return PKT_FORWARDED;

drop_pkt_and_exit:
	RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
	rte_pktmbuf_free(pkt);
	ev->mbuf = NULL;
	return PKT_DROPPED;
}

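/*
 * Route the classified traffic of an event vector: plain IPv4/IPv6
 * packets are routed by LPM lookup, while packets that matched a
 * protect policy get their inline protocol session attached and are
 * directed to the SA's port. Surviving packets are compacted back into
 * the vector and the new element count is returned.
 */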
static inline int
ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
		    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
{
	struct rte_ipsec_session *sess;
	uint32_t sa_idx, i, j = 0;
	uint16_t port_id = 0;
	struct rte_mbuf *pkt;
	struct ipsec_sa *sa;

	/* Route IPv4 packets */
	for (i = 0; i < t->ip4.num; i++) {
		pkt = t->ip4.pkts[i];
		port_id = route4_pkt(pkt, rt->rt4_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			update_mac_addrs(pkt, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route IPv6 packets */
	for (i = 0; i < t->ip6.num; i++) {
		pkt = t->ip6.pkts[i];
		port_id = route6_pkt(pkt, rt->rt6_ctx);
		if (port_id != RTE_MAX_ETHPORTS) {
			/* Update mac addresses */
			update_mac_addrs(pkt, port_id);
			/* Update the event with the dest port */
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		} else
			free_pkts(&pkt, 1);
	}

	/* Route ESP packets */
	for (i = 0; i < t->ipsec.num; i++) {
		/* Validate sa_idx */
		sa_idx = t->ipsec.res[i];
		pkt = t->ipsec.pkts[i];
		if (unlikely(sa_idx >= sa_ctx->nb_sa))
			free_pkts(&pkt, 1);
		else {
			/* Else the packet has to be protected */
			sa = &(sa_ctx->sa[sa_idx]);
			/* Get IPsec session */
			sess = ipsec_get_primary_session(sa);
			/* Allow only inline protocol for now */
			if (unlikely(sess->type !=
				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
				RTE_LOG(ERR, IPSEC, "SA type not supported\n");
				free_pkts(&pkt, 1);
				continue;
			}
			rte_security_set_pkt_metadata(sess->security.ctx,
						sess->security.ses, pkt, NULL);

			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
			port_id = sa->portid;
			update_mac_addrs(pkt, port_id);
			ipsec_event_pre_forward(pkt, port_id);
			ev_vector_attr_update(vec, pkt);
			vec->mbufs[j++] = pkt;
		}
	}

	return j;
}

static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
	enum pkt_type type;
	uint8_t *nlp;

	/* Check the packet type */
	type = process_ipsec_get_pkt_type(pkt, &nlp);

	switch (type) {
	case PKT_TYPE_PLAIN_IPV4:
		t->ip4.data[t->ip4.num] = nlp;
		t->ip4.pkts[(t->ip4.num)++] = pkt;
		break;
	case PKT_TYPE_PLAIN_IPV6:
		t->ip6.data[t->ip6.num] = nlp;
		t->ip6.pkts[(t->ip6.num)++] = pkt;
		break;
	default:
		RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
		free_pkts(&pkt, 1);
		break;
	}
}

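/*
 * App mode inbound path for an event vector: drop packets whose inline
 * processing failed, classify the rest, run the bulk SP+SA checks and
 * route the survivors.
 */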
static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				struct rte_event_vector *vec)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
			if (unlikely(pkt->ol_flags &
				     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
				RTE_LOG(ERR, IPSEC,
					"Inbound security offload failed\n");
				free_pkts(&pkt, 1);
				continue;
			}
		}

		classify_pkt(pkt, &t);
	}

	check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
	check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}

static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
				 struct rte_event_vector *vec)
{
	struct ipsec_traffic t;
	struct rte_mbuf *pkt;
	uint32_t i;

	t.ip4.num = 0;
	t.ip6.num = 0;
	t.ipsec.num = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		/* Get pkt from event */
		pkt = vec->mbufs[i];

		classify_pkt(pkt, &t);

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;
	}

	check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
	check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

	return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}

static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
					  struct port_drv_mode_data *data)
{
	struct rte_mbuf *pkt;
	int16_t port_id;
	uint32_t i;
	int j = 0;

	for (i = 0; i < vec->nb_elem; i++) {
		pkt = vec->mbufs[i];
		port_id = pkt->port;

		if (unlikely(!data[port_id].sess)) {
			free_pkts(&pkt, 1);
			continue;
		}
		ipsec_event_pre_forward(pkt, port_id);
		/* Save security session */
		rte_security_set_pkt_metadata(data[port_id].ctx,
					      data[port_id].sess, pkt,
					      NULL);

		/* Mark the packet for Tx security offload */
		pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

		/* Provide L2 len for Outbound processing */
		pkt->l2_len = RTE_ETHER_HDR_LEN;

		vec->mbufs[j++] = pkt;
	}

	return j;
}

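/*
 * Dispatch an event vector in app mode: the first mbuf's port decides
 * whether the whole vector takes the inbound or the outbound path, and
 * the surviving packets are enqueued to the Tx adapter in one shot.
 */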
static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
			struct eh_event_link_info *links,
			struct rte_event *ev)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;
	int ret;

	pkt = vec->mbufs[0];

	ev_vector_attr_init(vec);
	if (is_unprotected_port(pkt->port))
		ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
						      &lconf->rt, vec);
	else
		ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
						       &lconf->rt, vec);

	if (ret > 0) {
		vec->nb_elem = ret;
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 ev, 1, 0);
	} else {
		/* All packets were dropped; return the vector to its pool */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
	}
}

static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
				 struct rte_event *ev,
				 struct port_drv_mode_data *data)
{
	struct rte_event_vector *vec = ev->vec;
	struct rte_mbuf *pkt;

	pkt = vec->mbufs[0];

	if (!is_unprotected_port(pkt->port))
		vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
									 data);
	if (vec->nb_elem > 0)
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
						 links[0].event_port_id,
						 ev, 1, 0);
	else
		/* Empty vector; return it to its pool instead of leaking it */
		rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS		2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
	unsigned int nb_rx = 0;
	struct rte_mbuf *pkt;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int16_t port_id;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* Clear the whole per-port session table */
	memset(data, 0, sizeof(data));

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/*
	 * Prepare security sessions table. In outbound driver mode
	 * we always use first session configured for a given port
	 */
	prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
				 RTE_MAX_ETHPORTS);

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"driver mode) on lcore %d\n", lcore_id);

	/* We have valid links */

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
			links[0].event_port_id);
	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_drv_mode_process(links, &ev, data);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		pkt = ev.mbuf;
		port_id = pkt->port;

		rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

		/* Process packet */
		ipsec_event_pre_forward(pkt, port_id);

		if (!is_unprotected_port(port_id)) {

			if (unlikely(!data[port_id].sess)) {
				rte_pktmbuf_free(pkt);
				continue;
			}

			/* Save security session */
			rte_security_set_pkt_metadata(data[port_id].ctx,
						      data[port_id].sess, pkt,
						      NULL);

			/* Mark the packet for Tx security offload */
			pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

			/* Provide L2 len for Outbound processing */
			pkt->l2_len = RTE_ETHER_HDR_LEN;
		}

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
		uint8_t nb_links)
{
	struct lcore_conf_ev_tx_int_port_wrkr lconf;
	unsigned int nb_rx = 0;
	struct rte_event ev;
	uint32_t lcore_id;
	int32_t socket_id;
	int ret;

	/* Check if we have links registered for this lcore */
	if (nb_links == 0) {
		/* No links registered - exit */
		return;
	}

	/* We have valid links */

	/* Get core ID */
	lcore_id = rte_lcore_id();

	/* Get socket ID */
	socket_id = rte_lcore_to_socket_id(lcore_id);

	/* Save routing table */
	lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
	lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
	lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
	lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
	lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
	lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.inbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;
	lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
	lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
	lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
	lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
	lconf.outbound.session_priv_pool =
			socket_ctx[socket_id].session_priv_pool;

	RTE_LOG(INFO, IPSEC,
		"Launching event mode worker (non-burst - Tx internal port - "
		"app mode) on lcore %d\n", lcore_id);

	/* Check if it's single link */
	if (nb_links != 1) {
		RTE_LOG(INFO, IPSEC,
			"Multiple links not supported. Using first link\n");
	}

	RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
		links[0].event_port_id);

	while (!force_quit) {
		/* Read packet from event queues */
		nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* timeout_ticks */);

		if (nb_rx == 0)
			continue;

		switch (ev.event_type) {
		case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
		case RTE_EVENT_TYPE_ETHDEV_VECTOR:
			ipsec_ev_vector_process(&lconf, links, &ev);
			continue;
		case RTE_EVENT_TYPE_ETHDEV:
			break;
		default:
			RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
				ev.event_type);
			continue;
		}

		if (is_unprotected_port(ev.mbuf->port))
			ret = process_ipsec_ev_inbound(&lconf.inbound,
							&lconf.rt, &ev);
		else
			ret = process_ipsec_ev_outbound(&lconf.outbound,
							&lconf.rt, &ev);
		if (ret != 1)
			/* The pkt has been dropped */
			continue;

		/*
		 * Since tx internal port is available, events can be
		 * directly enqueued to the adapter and it would be
		 * internally submitted to the eth device.
		 */
		rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
				links[0].event_port_id,
				&ev,	/* events */
				1,	/* nb_events */
				0	/* flags */);
	}
}

static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
	struct eh_app_worker_params *wrkr;
	uint8_t nb_wrkr_param = 0;

	/* Save workers */
	wrkr = wrkrs;

	/* Non-burst - Tx internal port - driver mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
	wrkr++;
	nb_wrkr_param++;

	/* Non-burst - Tx internal port - app mode */
	wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
	wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
	wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
	wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
	nb_wrkr_param++;

	return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
	struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
					{{{0} }, NULL } };
	uint8_t nb_wrkr_param;

	/* Populate ipsec_wrkr params */
	nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

	/*
	 * Launch correct worker after checking
	 * the event device's capabilities.
	 */
	eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

int ipsec_launch_one_lcore(void *args)
{
	struct eh_conf *conf;

	conf = (struct eh_conf *)args;

	if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
		/* Run in poll mode */
		ipsec_poll_mode_worker();
	} else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
		/* Run in event mode */
		ipsec_eventmode_worker(conf);
	}
	return 0;
}