dpdk/examples/ipsec-secgw/ipsec_worker.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

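/* Per-port cache of the security session and context used in driver mode */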
struct port_drv_mode_data {
        struct rte_security_session *sess;
        struct rte_security_ctx *ctx;
};

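/*
 * Classify a packet from its ptype and point *nlp at the next-protocol
 * field of the IP header, which is the key used for the SP (ACL) lookup.
 */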
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
        struct rte_ether_hdr *eth;
        uint32_t ptype = pkt->packet_type;

        eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
        rte_prefetch0(eth);

        if (RTE_ETH_IS_IPV4_HDR(ptype)) {
                *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
                                offsetof(struct ip, ip_p));
                if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
                        return PKT_TYPE_IPSEC_IPV4;
                else
                        return PKT_TYPE_PLAIN_IPV4;
        } else if (RTE_ETH_IS_IPV6_HDR(ptype)) {
                *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
                                offsetof(struct ip6_hdr, ip6_nxt));
                if ((ptype & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_ESP)
                        return PKT_TYPE_IPSEC_IPV6;
                else
                        return PKT_TYPE_PLAIN_IPV6;
        }

        /* Unknown/Unsupported type */
        return PKT_TYPE_INVALID;
}

static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
        struct rte_ether_hdr *ethhdr;

        ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
        memcpy(&ethhdr->src_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
        memcpy(&ethhdr->dst_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
        /* Save the destination port in the mbuf */
        m->port = port_id;

        /* Save eth queue for Tx */
        rte_event_eth_tx_adapter_txq_set(m, 0);
}

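/*
 * Vector attributes are valid only while all pkts in the vector share the
 * same destination port; 0xFFFF marks the port as not yet recorded.
 */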
static inline void
ev_vector_attr_init(struct rte_event_vector *vec)
{
        vec->attr_valid = 1;
        vec->port = 0xFFFF;
        vec->queue = 0;
}

static inline void
ev_vector_attr_update(struct rte_event_vector *vec, struct rte_mbuf *pkt)
{
        if (vec->port == 0xFFFF) {
                vec->port = pkt->port;
                return;
        }
        if (vec->attr_valid && (vec->port != pkt->port))
                vec->attr_valid = 0;
}

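/*
 * Build the per-port session table used by the driver-mode worker: Tx on
 * a protected port uses the first inline-protocol session configured for
 * that port instead of a per-packet SP/SA lookup.
 */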
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
                         struct port_drv_mode_data *data,
                         uint16_t size)
{
        struct rte_ipsec_session *pri_sess;
        struct ipsec_sa *sa;
        uint32_t i;

        if (!sa_out)
                return;

        for (i = 0; i < sa_out->nb_sa; i++) {

                sa = &sa_out->sa[i];
                if (!sa)
                        continue;

                pri_sess = ipsec_get_primary_session(sa);
                if (!pri_sess)
                        continue;

                if (pri_sess->type !=
                        RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

                        RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
                                pri_sess->type);
                        continue;
                }

                if (sa->portid >= size) {
                        RTE_LOG(ERR, IPSEC,
                                "Port id %d >= table size %d\n",
                                sa->portid, size);
                        continue;
                }

                /* Use only first inline session found for a given port */
                if (data[sa->portid].sess)
                        continue;
                data[sa->portid].sess = pri_sess->security.ses;
                data[sa->portid].ctx = pri_sess->security.ctx;
        }
}

static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
        uint32_t res;

        if (unlikely(sp == NULL))
                return 0;

        rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
                        DEFAULT_MAX_CATEGORIES);

        if (unlikely(res == DISCARD))
                return 0;
        else if (res == BYPASS) {
                *sa_idx = -1;
                return 1;
        }

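        /* PROTECT rule matched: the ACL userdata holds SA index + 1 */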
        *sa_idx = res - 1;
        return 1;
}

static inline void
check_sp_bulk(struct sp_ctx *sp, struct traffic_type *ip,
              struct traffic_type *ipsec)
{
        uint32_t i, j, res;
        struct rte_mbuf *m;

        if (unlikely(sp == NULL || ip->num == 0))
                return;

        rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
                         DEFAULT_MAX_CATEGORIES);

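        /* Compact BYPASS pkts in place and divert PROTECT pkts to the ipsec list */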
        j = 0;
        for (i = 0; i < ip->num; i++) {
                m = ip->pkts[i];
                res = ip->res[i];
                if (unlikely(res == DISCARD))
                        free_pkts(&m, 1);
                else if (res == BYPASS)
                        ip->pkts[j++] = m;
                else {
                        ipsec->res[ipsec->num] = res - 1;
                        ipsec->pkts[ipsec->num++] = m;
                }
        }
        ip->num = j;
}

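/*
 * Combined SP lookup and inbound SA verification: keep BYPASS pkts and pkts
 * whose SPI matches the SA selected by the policy; drop everything else.
 */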
static inline void
check_sp_sa_bulk(struct sp_ctx *sp, struct sa_ctx *sa_ctx,
                 struct traffic_type *ip)
{
        struct ipsec_sa *sa;
        uint32_t i, j, res;
        struct rte_mbuf *m;

        if (unlikely(sp == NULL || ip->num == 0))
                return;

        rte_acl_classify((struct rte_acl_ctx *)sp, ip->data, ip->res, ip->num,
                         DEFAULT_MAX_CATEGORIES);

        j = 0;
        for (i = 0; i < ip->num; i++) {
                m = ip->pkts[i];
                res = ip->res[i];
                if (unlikely(res == DISCARD))
                        free_pkts(&m, 1);
                else if (res == BYPASS)
                        ip->pkts[j++] = m;
                else {
                        sa = *(struct ipsec_sa **)rte_security_dynfield(m);
                        if (sa == NULL) {
                                free_pkts(&m, 1);
                                continue;
                        }

                        /* SPI on the packet should match with the one in SA */
                        if (unlikely(sa->spi != sa_ctx->sa[res - 1].spi)) {
                                free_pkts(&m, 1);
                                continue;
                        }

                        ip->pkts[j++] = m;
                }
        }
        ip->num = j;
}

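/* Return the route's egress port, or RTE_MAX_ETHPORTS when no route matches */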
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
        uint32_t dst_ip;
        uint16_t offset;
        uint32_t hop;
        int ret;

        offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
        dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
        dst_ip = rte_be_to_cpu_32(dst_ip);

        ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

        if (ret == 0) {
                /* We have a hit */
                return hop;
        }

        /* else */
        return RTE_MAX_ETHPORTS;
}

/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
        uint8_t dst_ip[16];
        uint8_t *ip6_dst;
        uint16_t offset;
        uint32_t hop;
        int ret;

        offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
        ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
        memcpy(&dst_ip[0], ip6_dst, 16);

        ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

        if (ret == 0) {
                /* We have a hit */
                return hop;
        }

        /* else */
        return RTE_MAX_ETHPORTS;
}

static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
        if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
                return route4_pkt(pkt, rt->rt4_ctx);
        else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
                return route6_pkt(pkt, rt->rt6_ctx);

        return RTE_MAX_ETHPORTS;
}

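/*
 * App mode inbound path: classify the packet, enforce the inbound security
 * policy, verify the inline-processed SA, then route to the egress port.
 */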
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
                struct rte_event *ev)
{
        struct ipsec_sa *sa = NULL;
        struct rte_mbuf *pkt;
        uint16_t port_id = 0;
        enum pkt_type type;
        uint32_t sa_idx;
        uint8_t *nlp;

        /* Get pkt from event */
        pkt = ev->mbuf;

        /* Check the packet type */
        type = process_ipsec_get_pkt_type(pkt, &nlp);

        switch (type) {
        case PKT_TYPE_PLAIN_IPV4:
                if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
                        if (unlikely(pkt->ol_flags &
                                     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
                                RTE_LOG(ERR, IPSEC,
                                        "Inbound security offload failed\n");
                                goto drop_pkt_and_exit;
                        }
                        sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
                }

                /* Check if we have a match */
                if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;

        case PKT_TYPE_PLAIN_IPV6:
                if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
                        if (unlikely(pkt->ol_flags &
                                     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
                                RTE_LOG(ERR, IPSEC,
                                        "Inbound security offload failed\n");
                                goto drop_pkt_and_exit;
                        }
                        sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
                }

                /* Check if we have a match */
                if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;

        default:
                RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
                           type);
                goto drop_pkt_and_exit;
        }

        /* Check if the packet has to be bypassed */
        if (sa_idx == BYPASS)
                goto route_and_send_pkt;

        /* Validate sa_idx */
        if (sa_idx >= ctx->sa_ctx->nb_sa)
                goto drop_pkt_and_exit;

        /* Else the packet has to be protected with SA */

        /* If the packet was IPsec processed, then SA pointer should be set */
        if (sa == NULL)
                goto drop_pkt_and_exit;

        /* SPI on the packet should match with the one in SA */
        if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
                goto drop_pkt_and_exit;

route_and_send_pkt:
        port_id = get_route(pkt, rt, type);
        if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
                /* no match */
                goto drop_pkt_and_exit;
        }
        /* else, we have a matching route */

        /* Update mac addresses */
        update_mac_addrs(pkt, port_id);

        /* Update the event with the dest port */
        ipsec_event_pre_forward(pkt, port_id);
        return PKT_FORWARDED;

drop_pkt_and_exit:
        RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
        rte_pktmbuf_free(pkt);
        ev->mbuf = NULL;
        return PKT_DROPPED;
}

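/*
 * App mode outbound path: the SP lookup decides between BYPASS and PROTECT;
 * protected pkts are tagged with their SA's inline session before routing.
 */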
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
                struct rte_event *ev)
{
        struct rte_ipsec_session *sess;
        struct sa_ctx *sa_ctx;
        struct rte_mbuf *pkt;
        uint16_t port_id = 0;
        struct ipsec_sa *sa;
        enum pkt_type type;
        uint32_t sa_idx;
        uint8_t *nlp;

        /* Get pkt from event */
        pkt = ev->mbuf;

        /* Check the packet type */
        type = process_ipsec_get_pkt_type(pkt, &nlp);

        switch (type) {
        case PKT_TYPE_PLAIN_IPV4:
                /* Check if we have a match */
                if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;
        case PKT_TYPE_PLAIN_IPV6:
                /* Check if we have a match */
                if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;
        default:
                /*
                 * Only plain IPv4 & IPv6 packets are allowed
                 * on a protected port. Drop the rest.
                 */
                RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
                goto drop_pkt_and_exit;
        }

        /* Check if the packet has to be bypassed */
        if (sa_idx == BYPASS) {
                port_id = get_route(pkt, rt, type);
                if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
                        /* no match */
                        goto drop_pkt_and_exit;
                }
                /* else, we have a matching route */
                goto send_pkt;
        }

        /* Validate sa_idx */
        if (unlikely(sa_idx >= ctx->sa_ctx->nb_sa))
                goto drop_pkt_and_exit;

        /* Else the packet has to be protected */

        /* Get SA ctx */
        sa_ctx = ctx->sa_ctx;

        /* Get SA */
        sa = &(sa_ctx->sa[sa_idx]);

        /* Get IPsec session */
        sess = ipsec_get_primary_session(sa);

        /* Allow only inline protocol for now */
        if (unlikely(sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
                RTE_LOG(ERR, IPSEC, "SA type not supported\n");
                goto drop_pkt_and_exit;
        }

        rte_security_set_pkt_metadata(sess->security.ctx,
                                      sess->security.ses, pkt, NULL);

        /* Mark the packet for Tx security offload */
        pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

        /* Get the port to which this pkt needs to be submitted */
        port_id = sa->portid;

send_pkt:
        /* Provide L2 len for Outbound processing */
        pkt->l2_len = RTE_ETHER_HDR_LEN;

        /* Update mac addresses */
        update_mac_addrs(pkt, port_id);

        /* Update the event with the dest port */
        ipsec_event_pre_forward(pkt, port_id);
        return PKT_FORWARDED;

drop_pkt_and_exit:
        RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
        rte_pktmbuf_free(pkt);
        ev->mbuf = NULL;
        return PKT_DROPPED;
}

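/*
 * Move the routed pkts from all three traffic lists back into the event
 * vector, freeing pkts with no route or no usable session; returns the
 * number of pkts kept for Tx.
 */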
static inline int
ipsec_ev_route_pkts(struct rte_event_vector *vec, struct route_table *rt,
                    struct ipsec_traffic *t, struct sa_ctx *sa_ctx)
{
        struct rte_ipsec_session *sess;
        uint32_t sa_idx, i, j = 0;
        uint16_t port_id = 0;
        struct rte_mbuf *pkt;
        struct ipsec_sa *sa;

        /* Route IPv4 packets */
        for (i = 0; i < t->ip4.num; i++) {
                pkt = t->ip4.pkts[i];
                port_id = route4_pkt(pkt, rt->rt4_ctx);
                if (port_id != RTE_MAX_ETHPORTS) {
                        /* Update mac addresses */
                        update_mac_addrs(pkt, port_id);
                        /* Update the event with the dest port */
                        ipsec_event_pre_forward(pkt, port_id);
                        ev_vector_attr_update(vec, pkt);
                        vec->mbufs[j++] = pkt;
                } else
                        free_pkts(&pkt, 1);
        }

        /* Route IPv6 packets */
        for (i = 0; i < t->ip6.num; i++) {
                pkt = t->ip6.pkts[i];
                port_id = route6_pkt(pkt, rt->rt6_ctx);
                if (port_id != RTE_MAX_ETHPORTS) {
                        /* Update mac addresses */
                        update_mac_addrs(pkt, port_id);
                        /* Update the event with the dest port */
                        ipsec_event_pre_forward(pkt, port_id);
                        ev_vector_attr_update(vec, pkt);
                        vec->mbufs[j++] = pkt;
                } else
                        free_pkts(&pkt, 1);
        }

        /* Route ESP packets */
        for (i = 0; i < t->ipsec.num; i++) {
                /* Validate sa_idx */
                sa_idx = t->ipsec.res[i];
                pkt = t->ipsec.pkts[i];
                if (unlikely(sa_idx >= sa_ctx->nb_sa))
                        free_pkts(&pkt, 1);
                else {
                        /* Else the packet has to be protected */
                        sa = &(sa_ctx->sa[sa_idx]);
                        /* Get IPsec session */
                        sess = ipsec_get_primary_session(sa);
                        /* Allow only inline protocol for now */
                        if (unlikely(sess->type !=
                                RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)) {
                                RTE_LOG(ERR, IPSEC, "SA type not supported\n");
                                free_pkts(&pkt, 1);
                                continue;
                        }
                        rte_security_set_pkt_metadata(sess->security.ctx,
                                                sess->security.ses, pkt, NULL);

                        pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;
                        port_id = sa->portid;
                        update_mac_addrs(pkt, port_id);
                        ipsec_event_pre_forward(pkt, port_id);
                        ev_vector_attr_update(vec, pkt);
                        vec->mbufs[j++] = pkt;
                }
        }

        return j;
}

static inline void
classify_pkt(struct rte_mbuf *pkt, struct ipsec_traffic *t)
{
        enum pkt_type type;
        uint8_t *nlp;

        /* Check the packet type */
        type = process_ipsec_get_pkt_type(pkt, &nlp);

        switch (type) {
        case PKT_TYPE_PLAIN_IPV4:
                t->ip4.data[t->ip4.num] = nlp;
                t->ip4.pkts[(t->ip4.num)++] = pkt;
                break;
        case PKT_TYPE_PLAIN_IPV6:
                t->ip6.data[t->ip6.num] = nlp;
                t->ip6.pkts[(t->ip6.num)++] = pkt;
                break;
        default:
                RTE_LOG_DP(DEBUG, IPSEC_ESP, "Unsupported packet type = %d\n",
                           type);
                free_pkts(&pkt, 1);
                break;
        }
}

static inline int
process_ipsec_ev_inbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
                                struct rte_event_vector *vec)
{
        struct ipsec_traffic t;
        struct rte_mbuf *pkt;
        int i;

        t.ip4.num = 0;
        t.ip6.num = 0;
        t.ipsec.num = 0;

        for (i = 0; i < vec->nb_elem; i++) {
                /* Get pkt from event */
                pkt = vec->mbufs[i];

                if (pkt->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD) {
                        if (unlikely(pkt->ol_flags &
                                     RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED)) {
                                RTE_LOG(ERR, IPSEC,
                                        "Inbound security offload failed\n");
                                free_pkts(&pkt, 1);
                                continue;
                        }
                }

                classify_pkt(pkt, &t);
        }

        check_sp_sa_bulk(ctx->sp4_ctx, ctx->sa_ctx, &t.ip4);
        check_sp_sa_bulk(ctx->sp6_ctx, ctx->sa_ctx, &t.ip6);

        return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}

static inline int
process_ipsec_ev_outbound_vector(struct ipsec_ctx *ctx, struct route_table *rt,
                                 struct rte_event_vector *vec)
{
        struct ipsec_traffic t;
        struct rte_mbuf *pkt;
        uint32_t i;

        t.ip4.num = 0;
        t.ip6.num = 0;
        t.ipsec.num = 0;

        for (i = 0; i < vec->nb_elem; i++) {
                /* Get pkt from event */
                pkt = vec->mbufs[i];

                classify_pkt(pkt, &t);

                /* Provide L2 len for Outbound processing */
                pkt->l2_len = RTE_ETHER_HDR_LEN;
        }

        check_sp_bulk(ctx->sp4_ctx, &t.ip4, &t.ipsec);
        check_sp_bulk(ctx->sp6_ctx, &t.ip6, &t.ipsec);

        return ipsec_ev_route_pkts(vec, rt, &t, ctx->sa_ctx);
}

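/*
 * Driver mode outbound vector path: attach each pkt to the first inline
 * session configured for its port; pkts on ports with no session are freed.
 */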
static inline int
process_ipsec_ev_drv_mode_outbound_vector(struct rte_event_vector *vec,
                                          struct port_drv_mode_data *data)
{
        struct rte_mbuf *pkt;
        int16_t port_id;
        uint32_t i;
        int j = 0;

        for (i = 0; i < vec->nb_elem; i++) {
                pkt = vec->mbufs[i];
                port_id = pkt->port;

                if (unlikely(!data[port_id].sess)) {
                        free_pkts(&pkt, 1);
                        continue;
                }
                ipsec_event_pre_forward(pkt, port_id);
                /* Save security session */
                rte_security_set_pkt_metadata(data[port_id].ctx,
                                              data[port_id].sess, pkt,
                                              NULL);

                /* Mark the packet for Tx security offload */
                pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

                /* Provide L2 len for Outbound processing */
                pkt->l2_len = RTE_ETHER_HDR_LEN;

                vec->mbufs[j++] = pkt;
        }

        return j;
}

static inline void
ipsec_ev_vector_process(struct lcore_conf_ev_tx_int_port_wrkr *lconf,
                        struct eh_event_link_info *links,
                        struct rte_event *ev)
{
        struct rte_event_vector *vec = ev->vec;
        struct rte_mbuf *pkt;
        int ret;

        pkt = vec->mbufs[0];

        ev_vector_attr_init(vec);
        if (is_unprotected_port(pkt->port))
                ret = process_ipsec_ev_inbound_vector(&lconf->inbound,
                                                      &lconf->rt, vec);
        else
                ret = process_ipsec_ev_outbound_vector(&lconf->outbound,
                                                       &lconf->rt, vec);

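        /* Enqueue the pruned vector, or return it to its pool if everything was dropped */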
        if (likely(ret > 0)) {
                vec->nb_elem = ret;
                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
                                                 links[0].event_port_id,
                                                 ev, 1, 0);
        } else {
                rte_mempool_put(rte_mempool_from_obj(vec), vec);
        }
}

static inline void
ipsec_ev_vector_drv_mode_process(struct eh_event_link_info *links,
                                 struct rte_event *ev,
                                 struct port_drv_mode_data *data)
{
        struct rte_event_vector *vec = ev->vec;
        struct rte_mbuf *pkt;

        pkt = vec->mbufs[0];

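        /* Vectors from unprotected (inbound) ports are forwarded unchanged */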
        if (!is_unprotected_port(pkt->port))
                vec->nb_elem = process_ipsec_ev_drv_mode_outbound_vector(vec,
                                                                         data);
        if (vec->nb_elem > 0)
                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
                                                 links[0].event_port_id,
                                                 ev, 1, 0);
        else
                rte_mempool_put(rte_mempool_from_obj(vec), vec);
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS         2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
                uint8_t nb_links)
{
        struct port_drv_mode_data data[RTE_MAX_ETHPORTS];
        unsigned int nb_rx = 0;
        struct rte_mbuf *pkt;
        struct rte_event ev;
        uint32_t lcore_id;
        int32_t socket_id;
        int16_t port_id;

        /* Check if we have links registered for this lcore */
        if (nb_links == 0) {
                /* No links registered - exit */
                return;
        }

        /* Zero the entire per-port table, not just its first entry */
        memset(data, 0, sizeof(data));

        /* Get core ID */
        lcore_id = rte_lcore_id();

        /* Get socket ID */
        socket_id = rte_lcore_to_socket_id(lcore_id);

        /*
         * Prepare the security sessions table. In outbound driver mode
         * we always use the first session configured for a given port.
         */
        prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, data,
                                 RTE_MAX_ETHPORTS);

        RTE_LOG(INFO, IPSEC,
                "Launching event mode worker (non-burst - Tx internal port - "
                "driver mode) on lcore %d\n", lcore_id);

        /* We have valid links */

        /* Check if it's single link */
        if (nb_links != 1) {
                RTE_LOG(INFO, IPSEC,
                        "Multiple links not supported. Using first link\n");
        }

        RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
                        links[0].event_port_id);
        while (!force_quit) {
                /* Read packet from event queues */
                nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,    /* events */
                                1,      /* nb_events */
                                0       /* timeout_ticks */);

                if (nb_rx == 0)
                        continue;

                switch (ev.event_type) {
                case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
                case RTE_EVENT_TYPE_ETHDEV_VECTOR:
                        ipsec_ev_vector_drv_mode_process(links, &ev, data);
                        continue;
                case RTE_EVENT_TYPE_ETHDEV:
                        break;
                default:
                        RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
                                ev.event_type);
                        continue;
                }

                pkt = ev.mbuf;
                port_id = pkt->port;

                rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

                /* Process packet */
                ipsec_event_pre_forward(pkt, port_id);

                if (!is_unprotected_port(port_id)) {

                        if (unlikely(!data[port_id].sess)) {
                                rte_pktmbuf_free(pkt);
                                continue;
                        }

                        /* Save security session */
                        rte_security_set_pkt_metadata(data[port_id].ctx,
                                                      data[port_id].sess, pkt,
                                                      NULL);

                        /* Mark the packet for Tx security offload */
                        pkt->ol_flags |= RTE_MBUF_F_TX_SEC_OFFLOAD;

                        /* Provide L2 len for Outbound processing */
                        pkt->l2_len = RTE_ETHER_HDR_LEN;
                }

                /*
                 * Since the tx internal port is available, events can be
                 * directly enqueued to the adapter; they will be internally
                 * submitted to the eth device.
                 */
                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,    /* events */
                                1,      /* nb_events */
                                0       /* flags */);
        }
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
                uint8_t nb_links)
{
        struct lcore_conf_ev_tx_int_port_wrkr lconf;
        unsigned int nb_rx = 0;
        struct rte_event ev;
        uint32_t lcore_id;
        int32_t socket_id;
        int ret;

        /* Check if we have links registered for this lcore */
        if (nb_links == 0) {
                /* No links registered - exit */
                return;
        }

        /* We have valid links */

        /* Get core ID */
        lcore_id = rte_lcore_id();

        /* Get socket ID */
        socket_id = rte_lcore_to_socket_id(lcore_id);

        /* Save routing table */
        lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
        lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
        lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
        lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
        lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
        lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
        lconf.inbound.session_priv_pool =
                        socket_ctx[socket_id].session_priv_pool;
        lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
        lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
        lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
        lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
        lconf.outbound.session_priv_pool =
                        socket_ctx[socket_id].session_priv_pool;

        RTE_LOG(INFO, IPSEC,
                "Launching event mode worker (non-burst - Tx internal port - "
                "app mode) on lcore %d\n", lcore_id);

        /* Check if it's single link */
        if (nb_links != 1) {
                RTE_LOG(INFO, IPSEC,
                        "Multiple links not supported. Using first link\n");
        }

        RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
                links[0].event_port_id);

        while (!force_quit) {
                /* Read packet from event queues */
                nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,     /* events */
                                1,       /* nb_events */
                                0        /* timeout_ticks */);

                if (nb_rx == 0)
                        continue;

                switch (ev.event_type) {
                case RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR:
                case RTE_EVENT_TYPE_ETHDEV_VECTOR:
                        ipsec_ev_vector_process(&lconf, links, &ev);
                        continue;
                case RTE_EVENT_TYPE_ETHDEV:
                        break;
                default:
                        RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
                                ev.event_type);
                        continue;
                }

                if (is_unprotected_port(ev.mbuf->port))
                        ret = process_ipsec_ev_inbound(&lconf.inbound,
                                                        &lconf.rt, &ev);
                else
                        ret = process_ipsec_ev_outbound(&lconf.outbound,
                                                        &lconf.rt, &ev);
                if (ret != PKT_FORWARDED)
                        /* The pkt has been dropped */
                        continue;

                /*
                 * Since the tx internal port is available, events can be
                 * directly enqueued to the adapter; they will be internally
                 * submitted to the eth device.
                 */
                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,    /* events */
                                1,      /* nb_events */
                                0       /* flags */);
        }
}

static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
        struct eh_app_worker_params *wrkr;
        uint8_t nb_wrkr_param = 0;

        /* Save workers */
        wrkr = wrkrs;

        /* Non-burst - Tx internal port - driver mode */
        wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
        wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
        wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
        wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
        wrkr++;
        nb_wrkr_param++;

        /* Non-burst - Tx internal port - app mode */
        wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
        wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
        wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
        wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
        nb_wrkr_param++;

        return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
        struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
                                        {{{0} }, NULL } };
        uint8_t nb_wrkr_param;

        /* Populate ipsec_wrkr params */
        nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

        /*
         * Launch correct worker after checking
         * the event device's capabilities.
         */
        eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

int ipsec_launch_one_lcore(void *args)
{
        struct eh_conf *conf;

        conf = (struct eh_conf *)args;

        if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
                /* Run in poll mode */
                ipsec_poll_mode_worker();
        } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
                /* Run in event mode */
                ipsec_eventmode_worker(conf);
        }
        return 0;
}