dpdk/examples/ipsec-secgw/ipsec_worker.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 * Copyright (C) 2020 Marvell International Ltd.
 */
#include <rte_acl.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lpm.h>
#include <rte_lpm6.h>

#include "event_helper.h"
#include "ipsec.h"
#include "ipsec-secgw.h"
#include "ipsec_worker.h"

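/*
 * Peek at the Ethernet header to classify the packet. On a match, the
 * next-layer protocol field is also returned via *nlp (the ip_p byte for
 * IPv4, ip6_nxt for IPv6) so that callers can feed it straight into the
 * SP ACL lookup without re-parsing the headers.
 */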
static inline enum pkt_type
process_ipsec_get_pkt_type(struct rte_mbuf *pkt, uint8_t **nlp)
{
        struct rte_ether_hdr *eth;

        eth = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
        if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
                *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
                                offsetof(struct ip, ip_p));
                if (**nlp == IPPROTO_ESP)
                        return PKT_TYPE_IPSEC_IPV4;
                else
                        return PKT_TYPE_PLAIN_IPV4;
        } else if (eth->ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
                *nlp = RTE_PTR_ADD(eth, RTE_ETHER_HDR_LEN +
                                offsetof(struct ip6_hdr, ip6_nxt));
                if (**nlp == IPPROTO_ESP)
                        return PKT_TYPE_IPSEC_IPV6;
                else
                        return PKT_TYPE_PLAIN_IPV6;
        }

        /* Unknown/Unsupported type */
        return PKT_TYPE_INVALID;
}

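/*
 * Overwrite the Ethernet source and destination addresses of the packet
 * with the addresses configured for the egress port at initialization.
 */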
static inline void
update_mac_addrs(struct rte_mbuf *pkt, uint16_t portid)
{
        struct rte_ether_hdr *ethhdr;

        ethhdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
        memcpy(&ethhdr->s_addr, &ethaddr_tbl[portid].src, RTE_ETHER_ADDR_LEN);
        memcpy(&ethhdr->d_addr, &ethaddr_tbl[portid].dst, RTE_ETHER_ADDR_LEN);
}

static inline void
ipsec_event_pre_forward(struct rte_mbuf *m, unsigned int port_id)
{
        /* Save the destination port in the mbuf */
        m->port = port_id;

        /* Save eth queue for Tx */
        rte_event_eth_tx_adapter_txq_set(m, 0);
}

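/*
 * Build a per-port table of outbound security sessions for driver mode.
 * Only RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL sessions are usable here,
 * and only the first such session found for a port is recorded; driver
 * mode applies that single session to every packet sent out the port.
 */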
static inline void
prepare_out_sessions_tbl(struct sa_ctx *sa_out,
                struct rte_security_session **sess_tbl, uint16_t size)
{
        struct rte_ipsec_session *pri_sess;
        struct ipsec_sa *sa;
        uint32_t i;

        if (!sa_out)
                return;

        for (i = 0; i < sa_out->nb_sa; i++) {

                sa = &sa_out->sa[i];
                if (!sa)
                        continue;

                pri_sess = ipsec_get_primary_session(sa);
                if (!pri_sess)
                        continue;

                if (pri_sess->type !=
                        RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {

                        RTE_LOG(ERR, IPSEC, "Invalid session type %d\n",
                                pri_sess->type);
                        continue;
                }

                if (sa->portid >= size) {
                        RTE_LOG(ERR, IPSEC,
                                "Port id %d out of range (table size %d)\n",
                                sa->portid, size);
                        continue;
                }

                /* Use only first inline session found for a given port */
                if (sess_tbl[sa->portid])
                        continue;
                sess_tbl[sa->portid] = pri_sess->security.ses;
        }
}

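/*
 * Classify the next-layer protocol pointer against the SP (security
 * policy) ACL. Returns 0 when the packet must be dropped (no SP context,
 * or a DISCARD result) and 1 otherwise. A PROTECT result carries the SA
 * index offset by one, so it is decremented before being returned via
 * *sa_idx; a BYPASS result is passed through for the caller to check.
 */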
static inline int
check_sp(struct sp_ctx *sp, const uint8_t *nlp, uint32_t *sa_idx)
{
        uint32_t res;

        if (unlikely(sp == NULL))
                return 0;

        rte_acl_classify((struct rte_acl_ctx *)sp, &nlp, &res, 1,
                        DEFAULT_MAX_CATEGORIES);

        if (unlikely(res == DISCARD))
                return 0;
        else if (res == BYPASS) {
                *sa_idx = -1;
                return 1;
        }

        *sa_idx = res - 1;
        return 1;
}

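/*
 * Look up the destination IPv4 address in the LPM routing table and
 * return the egress port, or RTE_MAX_ETHPORTS when there is no route.
 */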
static inline uint16_t
route4_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
        uint32_t dst_ip;
        uint16_t offset;
        uint32_t hop;
        int ret;

        offset = RTE_ETHER_HDR_LEN + offsetof(struct ip, ip_dst);
        dst_ip = *rte_pktmbuf_mtod_offset(pkt, uint32_t *, offset);
        dst_ip = rte_be_to_cpu_32(dst_ip);

        ret = rte_lpm_lookup((struct rte_lpm *)rt_ctx, dst_ip, &hop);

        if (ret == 0) {
                /* We have a hit */
                return hop;
        }

        /* else */
        return RTE_MAX_ETHPORTS;
}

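/*
 * IPv6 counterpart of route4_pkt(): look up the destination address in
 * the LPM6 table and return the egress port, or RTE_MAX_ETHPORTS when
 * there is no route.
 */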
/* TODO: To be tested */
static inline uint16_t
route6_pkt(struct rte_mbuf *pkt, struct rt_ctx *rt_ctx)
{
        uint8_t dst_ip[16];
        uint8_t *ip6_dst;
        uint16_t offset;
        uint32_t hop;
        int ret;

        offset = RTE_ETHER_HDR_LEN + offsetof(struct ip6_hdr, ip6_dst);
        ip6_dst = rte_pktmbuf_mtod_offset(pkt, uint8_t *, offset);
        memcpy(&dst_ip[0], ip6_dst, 16);

        ret = rte_lpm6_lookup((struct rte_lpm6 *)rt_ctx, dst_ip, &hop);

        if (ret == 0) {
                /* We have a hit */
                return hop;
        }

        /* else */
        return RTE_MAX_ETHPORTS;
}

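/*
 * Dispatch to the IPv4 or IPv6 routing lookup based on the packet type
 * determined earlier by process_ipsec_get_pkt_type().
 */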
static inline uint16_t
get_route(struct rte_mbuf *pkt, struct route_table *rt, enum pkt_type type)
{
        if (type == PKT_TYPE_PLAIN_IPV4 || type == PKT_TYPE_IPSEC_IPV4)
                return route4_pkt(pkt, rt->rt4_ctx);
        else if (type == PKT_TYPE_PLAIN_IPV6 || type == PKT_TYPE_IPSEC_IPV6)
                return route6_pkt(pkt, rt->rt6_ctx);

        return RTE_MAX_ETHPORTS;
}

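/*
 * Inbound packet processing for app mode:
 *  1. Classify the packet; for packets that were IPsec-processed by the
 *     HW, pick up the SA pointer saved in the security dynfield.
 *  2. Match the packet against the inbound SP ACL.
 *  3. BYPASS traffic is routed as-is; PROTECT traffic must carry a valid
 *     SA whose SPI matches the one configured for the matched policy.
 *  4. Route the packet, rewrite the MAC addresses and prepare the event
 *     for Tx. Returns PKT_FORWARDED on success, PKT_DROPPED otherwise.
 */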
static inline int
process_ipsec_ev_inbound(struct ipsec_ctx *ctx, struct route_table *rt,
                struct rte_event *ev)
{
        struct ipsec_sa *sa = NULL;
        struct rte_mbuf *pkt;
        uint16_t port_id = 0;
        enum pkt_type type;
        uint32_t sa_idx;
        uint8_t *nlp;

        /* Get pkt from event */
        pkt = ev->mbuf;

        /* Check the packet type */
        type = process_ipsec_get_pkt_type(pkt, &nlp);

        switch (type) {
        case PKT_TYPE_PLAIN_IPV4:
                if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
                        if (unlikely(pkt->ol_flags &
                                     PKT_RX_SEC_OFFLOAD_FAILED)) {
                                RTE_LOG(ERR, IPSEC,
                                        "Inbound security offload failed\n");
                                goto drop_pkt_and_exit;
                        }
                        sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
                }

                /* Check if we have a match */
                if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;

        case PKT_TYPE_PLAIN_IPV6:
                if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) {
                        if (unlikely(pkt->ol_flags &
                                     PKT_RX_SEC_OFFLOAD_FAILED)) {
                                RTE_LOG(ERR, IPSEC,
                                        "Inbound security offload failed\n");
                                goto drop_pkt_and_exit;
                        }
                        sa = *(struct ipsec_sa **)rte_security_dynfield(pkt);
                }

                /* Check if we have a match */
                if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;

        default:
                RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
                goto drop_pkt_and_exit;
        }

        /* Check if the packet has to be bypassed */
        if (sa_idx == BYPASS)
                goto route_and_send_pkt;

        /* Validate sa_idx */
        if (sa_idx >= ctx->sa_ctx->nb_sa)
                goto drop_pkt_and_exit;

        /* Else the packet has to be protected with SA */

        /* If the packet was IPsec processed, then SA pointer should be set */
        if (sa == NULL)
                goto drop_pkt_and_exit;

        /* SPI on the packet should match with the one in SA */
        if (unlikely(sa->spi != ctx->sa_ctx->sa[sa_idx].spi))
                goto drop_pkt_and_exit;

route_and_send_pkt:
        port_id = get_route(pkt, rt, type);
        if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
                /* no match */
                goto drop_pkt_and_exit;
        }
        /* else, we have a matching route */

        /* Update mac addresses */
        update_mac_addrs(pkt, port_id);

        /* Update the event with the dest port */
        ipsec_event_pre_forward(pkt, port_id);
        return PKT_FORWARDED;

drop_pkt_and_exit:
        RTE_LOG(ERR, IPSEC, "Inbound packet dropped\n");
        rte_pktmbuf_free(pkt);
        ev->mbuf = NULL;
        return PKT_DROPPED;
}

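/*
 * Outbound packet processing for app mode: only plain IPv4/IPv6 packets
 * are expected on protected ports. The packet is matched against the
 * outbound SP ACL; BYPASS traffic is routed unmodified, while PROTECT
 * traffic is bound to its SA's inline-protocol session (the only type
 * supported here), marked for Tx security offload and sent out the port
 * configured in the SA. Returns PKT_FORWARDED or PKT_DROPPED.
 */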
static inline int
process_ipsec_ev_outbound(struct ipsec_ctx *ctx, struct route_table *rt,
                struct rte_event *ev)
{
        struct rte_ipsec_session *sess;
        struct sa_ctx *sa_ctx;
        struct rte_mbuf *pkt;
        uint16_t port_id = 0;
        struct ipsec_sa *sa;
        enum pkt_type type;
        uint32_t sa_idx;
        uint8_t *nlp;

        /* Get pkt from event */
        pkt = ev->mbuf;

        /* Check the packet type */
        type = process_ipsec_get_pkt_type(pkt, &nlp);

        switch (type) {
        case PKT_TYPE_PLAIN_IPV4:
                /* Check if we have a match */
                if (check_sp(ctx->sp4_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;
        case PKT_TYPE_PLAIN_IPV6:
                /* Check if we have a match */
                if (check_sp(ctx->sp6_ctx, nlp, &sa_idx) == 0) {
                        /* No valid match */
                        goto drop_pkt_and_exit;
                }
                break;
        default:
                /*
                 * Only plain IPv4 & IPv6 packets are allowed
                 * on protected port. Drop the rest.
                 */
                RTE_LOG(ERR, IPSEC, "Unsupported packet type = %d\n", type);
                goto drop_pkt_and_exit;
        }

        /* Check if the packet has to be bypassed */
        if (sa_idx == BYPASS) {
                port_id = get_route(pkt, rt, type);
                if (unlikely(port_id == RTE_MAX_ETHPORTS)) {
                        /* no match */
                        goto drop_pkt_and_exit;
                }
                /* else, we have a matching route */
                goto send_pkt;
        }

        /* Validate sa_idx */
        if (sa_idx >= ctx->sa_ctx->nb_sa)
                goto drop_pkt_and_exit;

        /* Else the packet has to be protected */

        /* Get SA ctx */
        sa_ctx = ctx->sa_ctx;

        /* Get SA */
        sa = &(sa_ctx->sa[sa_idx]);

        /* Get IPsec session */
        sess = ipsec_get_primary_session(sa);

        /* Allow only inline protocol for now */
        if (sess->type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) {
                RTE_LOG(ERR, IPSEC, "SA type not supported\n");
                goto drop_pkt_and_exit;
        }

        if (sess->security.ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
                *(struct rte_security_session **)rte_security_dynfield(pkt) =
                                sess->security.ses;

        /* Mark the packet for Tx security offload */
        pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;

        /* Get the port to which this pkt needs to be submitted */
        port_id = sa->portid;

send_pkt:
        /* Update mac addresses */
        update_mac_addrs(pkt, port_id);

        /* Update the event with the dest port */
        ipsec_event_pre_forward(pkt, port_id);
        return PKT_FORWARDED;

drop_pkt_and_exit:
        RTE_LOG(ERR, IPSEC, "Outbound packet dropped\n");
        rte_pktmbuf_free(pkt);
        ev->mbuf = NULL;
        return PKT_DROPPED;
}

/*
 * Event mode exposes various operating modes depending on the
 * capabilities of the event device and the operating mode
 * selected.
 */

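/*
 * Two worker variants are registered below. In driver mode the worker
 * does the bare minimum needed for inline IPsec: packets on protected
 * ports are attached to the first inline session configured for the
 * egress port and enqueued for Tx as-is. In app mode the worker runs
 * the full SP lookup, SA validation and routing implemented above.
 */
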
/* Workers registered */
#define IPSEC_EVENTMODE_WORKERS         2

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - driver mode
 */
static void
ipsec_wrkr_non_burst_int_port_drv_mode(struct eh_event_link_info *links,
                uint8_t nb_links)
{
        struct rte_security_session *sess_tbl[RTE_MAX_ETHPORTS] = { NULL };
        unsigned int nb_rx = 0;
        struct rte_mbuf *pkt;
        struct rte_event ev;
        uint32_t lcore_id;
        int32_t socket_id;
        int16_t port_id;

        /* Check if we have links registered for this lcore */
        if (nb_links == 0) {
                /* No links registered - exit */
                return;
        }

        /* Get core ID */
        lcore_id = rte_lcore_id();

        /* Get socket ID */
        socket_id = rte_lcore_to_socket_id(lcore_id);

        /*
         * Prepare security sessions table. In outbound driver mode
         * we always use the first session configured for a given port.
         */
        prepare_out_sessions_tbl(socket_ctx[socket_id].sa_out, sess_tbl,
                        RTE_MAX_ETHPORTS);

        RTE_LOG(INFO, IPSEC,
                "Launching event mode worker (non-burst - Tx internal port - "
                "driver mode) on lcore %d\n", lcore_id);

        /* We have valid links */

        /* Check if it's single link */
        if (nb_links != 1) {
                RTE_LOG(INFO, IPSEC,
                        "Multiple links not supported. Using first link\n");
        }

        RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
                        links[0].event_port_id);
        while (!force_quit) {
                /* Read packet from event queues */
                nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,    /* events */
                                1,      /* nb_events */
                                0       /* timeout_ticks */);

                if (nb_rx == 0)
                        continue;

                pkt = ev.mbuf;
                port_id = pkt->port;

                rte_prefetch0(rte_pktmbuf_mtod(pkt, void *));

                /* Process packet */
                ipsec_event_pre_forward(pkt, port_id);

                if (!is_unprotected_port(port_id)) {

                        if (unlikely(!sess_tbl[port_id])) {
                                rte_pktmbuf_free(pkt);
                                continue;
                        }

                        /* Save security session */
                        if (rte_security_dynfield_is_registered())
                                *(struct rte_security_session **)
                                        rte_security_dynfield(pkt) =
                                                sess_tbl[port_id];

                        /* Mark the packet for Tx security offload */
                        pkt->ol_flags |= PKT_TX_SEC_OFFLOAD;
                }

                /*
                 * Since tx internal port is available, events can be
                 * directly enqueued to the adapter and it would be
                 * internally submitted to the eth device.
                 */
                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,    /* events */
                                1,      /* nb_events */
                                0       /* flags */);
        }
}

/*
 * Event mode worker
 * Operating parameters : non-burst - Tx internal port - app mode
 */
static void
ipsec_wrkr_non_burst_int_port_app_mode(struct eh_event_link_info *links,
                uint8_t nb_links)
{
        struct lcore_conf_ev_tx_int_port_wrkr lconf;
        unsigned int nb_rx = 0;
        struct rte_event ev;
        uint32_t lcore_id;
        int32_t socket_id;
        int ret;

        /* Check if we have links registered for this lcore */
        if (nb_links == 0) {
                /* No links registered - exit */
                return;
        }

        /* We have valid links */

        /* Get core ID */
        lcore_id = rte_lcore_id();

        /* Get socket ID */
        socket_id = rte_lcore_to_socket_id(lcore_id);

        /* Save routing table */
        lconf.rt.rt4_ctx = socket_ctx[socket_id].rt_ip4;
        lconf.rt.rt6_ctx = socket_ctx[socket_id].rt_ip6;
        lconf.inbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_in;
        lconf.inbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_in;
        lconf.inbound.sa_ctx = socket_ctx[socket_id].sa_in;
        lconf.inbound.session_pool = socket_ctx[socket_id].session_pool;
        lconf.inbound.session_priv_pool =
                        socket_ctx[socket_id].session_priv_pool;
        lconf.outbound.sp4_ctx = socket_ctx[socket_id].sp_ip4_out;
        lconf.outbound.sp6_ctx = socket_ctx[socket_id].sp_ip6_out;
        lconf.outbound.sa_ctx = socket_ctx[socket_id].sa_out;
        lconf.outbound.session_pool = socket_ctx[socket_id].session_pool;
        lconf.outbound.session_priv_pool =
                        socket_ctx[socket_id].session_priv_pool;

        RTE_LOG(INFO, IPSEC,
                "Launching event mode worker (non-burst - Tx internal port - "
                "app mode) on lcore %d\n", lcore_id);

        /* Check if it's single link */
        if (nb_links != 1) {
                RTE_LOG(INFO, IPSEC,
                        "Multiple links not supported. Using first link\n");
        }

        RTE_LOG(INFO, IPSEC, " -- lcoreid=%u event_port_id=%u\n", lcore_id,
                links[0].event_port_id);

        while (!force_quit) {
                /* Read packet from event queues */
                nb_rx = rte_event_dequeue_burst(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,     /* events */
                                1,       /* nb_events */
                                0        /* timeout_ticks */);

                if (nb_rx == 0)
                        continue;

                if (unlikely(ev.event_type != RTE_EVENT_TYPE_ETHDEV)) {
                        RTE_LOG(ERR, IPSEC, "Invalid event type %u\n",
                                ev.event_type);
                        continue;
                }

                if (is_unprotected_port(ev.mbuf->port))
                        ret = process_ipsec_ev_inbound(&lconf.inbound,
                                                        &lconf.rt, &ev);
                else
                        ret = process_ipsec_ev_outbound(&lconf.outbound,
                                                        &lconf.rt, &ev);
                if (ret != 1)
                        /* The pkt has been dropped */
                        continue;

                /*
                 * Since tx internal port is available, events can be
                 * directly enqueued to the adapter and it would be
                 * internally submitted to the eth device.
                 */
                rte_event_eth_tx_adapter_enqueue(links[0].eventdev_id,
                                links[0].event_port_id,
                                &ev,    /* events */
                                1,      /* nb_events */
                                0       /* flags */);
        }
}

static uint8_t
ipsec_eventmode_populate_wrkr_params(struct eh_app_worker_params *wrkrs)
{
        struct eh_app_worker_params *wrkr;
        uint8_t nb_wrkr_param = 0;

        /* Save workers */
        wrkr = wrkrs;

        /* Non-burst - Tx internal port - driver mode */
        wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
        wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
        wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_DRIVER;
        wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_drv_mode;
        wrkr++;
        nb_wrkr_param++;

        /* Non-burst - Tx internal port - app mode */
        wrkr->cap.burst = EH_RX_TYPE_NON_BURST;
        wrkr->cap.tx_internal_port = EH_TX_TYPE_INTERNAL_PORT;
        wrkr->cap.ipsec_mode = EH_IPSEC_MODE_TYPE_APP;
        wrkr->worker_thread = ipsec_wrkr_non_burst_int_port_app_mode;
        nb_wrkr_param++;

        return nb_wrkr_param;
}

static void
ipsec_eventmode_worker(struct eh_conf *conf)
{
        struct eh_app_worker_params ipsec_wrkr[IPSEC_EVENTMODE_WORKERS] = {
                                        {{{0} }, NULL } };
        uint8_t nb_wrkr_param;

        /* Populate ipsec_wrkr params */
        nb_wrkr_param = ipsec_eventmode_populate_wrkr_params(ipsec_wrkr);

        /*
         * Launch correct worker after checking
         * the event device's capabilities.
         */
        eh_launch_worker(conf, ipsec_wrkr, nb_wrkr_param);
}

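/*
 * Per-lcore entry point: dispatch to the poll mode or event mode worker
 * loop based on the configured packet transfer mode. This is expected to
 * be launched on every worker lcore (e.g. via rte_eal_mp_remote_launch()).
 */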
int ipsec_launch_one_lcore(void *args)
{
        struct eh_conf *conf;

        conf = (struct eh_conf *)args;

        if (conf->mode == EH_PKT_TRANSFER_MODE_POLL) {
                /* Run in poll mode */
                ipsec_poll_mode_worker();
        } else if (conf->mode == EH_PKT_TRANSFER_MODE_EVENT) {
                /* Run in event mode */
                ipsec_eventmode_worker(conf);
        }
        return 0;
}