dpdk/lib/ipsec/sa.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2020 Intel Corporation
 */

#include <rte_ipsec.h>
#include <rte_esp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_errno.h>

#include "sa.h"
#include "ipsec_sqn.h"
#include "crypto.h"
#include "misc.h"

#define MBUF_MAX_L2_LEN		RTE_LEN2MASK(RTE_MBUF_L2_LEN_BITS, uint64_t)
#define MBUF_MAX_L3_LEN		RTE_LEN2MASK(RTE_MBUF_L3_LEN_BITS, uint64_t)

/* some helper structures */
struct crypto_xform {
	struct rte_crypto_auth_xform *auth;
	struct rte_crypto_cipher_xform *cipher;
	struct rte_crypto_aead_xform *aead;
};

/*
 * helper routine, fills internal crypto_xform structure.
 */
static int
fill_crypto_xform(struct crypto_xform *xform, uint64_t type,
	const struct rte_ipsec_sa_prm *prm)
{
	struct rte_crypto_sym_xform *xf, *xfn;

	memset(xform, 0, sizeof(*xform));

	xf = prm->crypto_xform;
	if (xf == NULL)
		return -EINVAL;

	xfn = xf->next;

	/* for AEAD just one xform required */
	if (xf->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
		if (xfn != NULL)
			return -EINVAL;
		xform->aead = &xf->aead;
	/* GMAC has only auth, no cipher xform (xfn is NULL here) */
	} else if (xf->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
			xf->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		if (xfn != NULL)
			return -EINVAL;
		xform->auth = &xf->auth;

	/*
	 * CIPHER+AUTH xforms are expected in strict order,
	 * depending on SA direction:
	 * inbound: AUTH+CIPHER
	 * outbound: CIPHER+AUTH
	 */
	} else if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/* wrong order or no cipher */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_AUTH ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_CIPHER)
			return -EINVAL;

		xform->auth = &xf->auth;
		xform->cipher = &xfn->cipher;

	} else {

		/* wrong order or no auth */
		if (xfn == NULL || xf->type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
				xfn->type != RTE_CRYPTO_SYM_XFORM_AUTH)
			return -EINVAL;

		xform->cipher = &xf->cipher;
		xform->auth = &xfn->auth;
	}

	return 0;
}
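
/*
 * Accepted xform chains, as enforced above:
 * - AEAD: a single AEAD xform, no chaining;
 * - AES-GMAC: a single AUTH xform, no chaining;
 * - inbound ESP: AUTH xform followed by CIPHER xform;
 * - outbound ESP: CIPHER xform followed by AUTH xform.
 */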

uint64_t
rte_ipsec_sa_type(const struct rte_ipsec_sa *sa)
{
	return sa->type;
}

/**
 * Based on the number of buckets, calculate the required size for the
 * structure that holds replay window and sequence number (RSN) information.
 */
static size_t
rsn_size(uint32_t nb_bucket)
{
	size_t sz;
	struct replay_sqn *rsn;

	sz = sizeof(*rsn) + nb_bucket * sizeof(rsn->window[0]);
	sz = RTE_ALIGN_CEIL(sz, RTE_CACHE_LINE_SIZE);
	return sz;
}

/*
 * for a given replay window size, calculate the required number of buckets.
 */
static uint32_t
replay_num_bucket(uint32_t wsz)
{
	uint32_t nb;

	nb = rte_align32pow2(RTE_ALIGN_MUL_CEIL(wsz, WINDOW_BUCKET_SIZE) /
		WINDOW_BUCKET_SIZE);
	nb = RTE_MAX(nb, (uint32_t)WINDOW_BUCKET_MIN);

	return nb;
}
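
/*
 * For example, assuming the 64-bit buckets defined in ipsec_sqn.h, a
 * requested window of 200 packets rounds up to
 * RTE_ALIGN_MUL_CEIL(200, 64) / 64 = 4 buckets; the result is then raised
 * to a power of two and clamped to at least WINDOW_BUCKET_MIN.
 */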

static int32_t
ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)
{
	uint32_t n, sz, wsz;

	wsz = *wnd_sz;
	n = 0;

	if ((type & RTE_IPSEC_SATP_DIR_MASK) == RTE_IPSEC_SATP_DIR_IB) {

		/*
		 * RFC 4303 recommends 64 as the minimum window size.
		 * There is no point in using ESN mode without an SQN window,
		 * so make sure we use a window of at least 64 when ESN is
		 * enabled.
		 */
		wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
			RTE_IPSEC_SATP_ESN_DISABLE) ?
			wsz : RTE_MAX(wsz, (uint32_t)WINDOW_BUCKET_SIZE);
		if (wsz != 0)
			n = replay_num_bucket(wsz);
	}

	if (n > WINDOW_BUCKET_MAX)
		return -EINVAL;

	*wnd_sz = wsz;
	*nb_bucket = n;

	sz = rsn_size(n);
	if ((type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM)
		sz *= REPLAY_SQN_NUM;

	sz += sizeof(struct rte_ipsec_sa);
	return sz;
}
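
/*
 * The size computed above implies the following layout, filled in later by
 * rte_ipsec_sa_init()/fill_sa_replay():
 * struct rte_ipsec_sa, immediately followed by RSN copy #0 and, for
 * RTE_IPSEC_SATP_SQN_ATOM, a second RSN copy.
 */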

void
rte_ipsec_sa_fini(struct rte_ipsec_sa *sa)
{
	memset(sa, 0, sa->size);
}

/*
 * Determine expected SA type based on input parameters.
 */
static int
fill_sa_type(const struct rte_ipsec_sa_prm *prm, uint64_t *type)
{
	uint64_t tp;

	tp = 0;

	if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_AH)
		tp |= RTE_IPSEC_SATP_PROTO_AH;
	else if (prm->ipsec_xform.proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		tp |= RTE_IPSEC_SATP_PROTO_ESP;
	else
		return -EINVAL;

	if (prm->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
		tp |= RTE_IPSEC_SATP_DIR_OB;
	else if (prm->ipsec_xform.direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
		tp |= RTE_IPSEC_SATP_DIR_IB;
	else
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV4;
		else if (prm->ipsec_xform.tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6)
			tp |= RTE_IPSEC_SATP_MODE_TUNLV6;
		else
			return -EINVAL;

		if (prm->tun.next_proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->tun.next_proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else if (prm->ipsec_xform.mode ==
			RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT) {
		tp |= RTE_IPSEC_SATP_MODE_TRANS;
		if (prm->trs.proto == IPPROTO_IPIP)
			tp |= RTE_IPSEC_SATP_IPV4;
		else if (prm->trs.proto == IPPROTO_IPV6)
			tp |= RTE_IPSEC_SATP_IPV6;
		else
			return -EINVAL;
	} else
		return -EINVAL;

	/* check for UDP encapsulation flag */
	if (prm->ipsec_xform.options.udp_encap == 1)
		tp |= RTE_IPSEC_SATP_NATT_ENABLE;

	/* check for ESN flag */
	if (prm->ipsec_xform.options.esn == 0)
		tp |= RTE_IPSEC_SATP_ESN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ESN_ENABLE;

	/* check for ECN flag */
	if (prm->ipsec_xform.options.ecn == 0)
		tp |= RTE_IPSEC_SATP_ECN_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_ECN_ENABLE;

	/* check for DSCP flag */
	if (prm->ipsec_xform.options.copy_dscp == 0)
		tp |= RTE_IPSEC_SATP_DSCP_DISABLE;
	else
		tp |= RTE_IPSEC_SATP_DSCP_ENABLE;

	/* interpret flags */
	if (prm->flags & RTE_IPSEC_SAFLAG_SQN_ATOM)
		tp |= RTE_IPSEC_SATP_SQN_ATOM;
	else
		tp |= RTE_IPSEC_SATP_SQN_RAW;

	*type = tp;
	return 0;
}
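
/*
 * For example, an ingress ESP tunnel SA carrying IPv4 over IPv4 yields
 * RTE_IPSEC_SATP_PROTO_ESP | RTE_IPSEC_SATP_DIR_IB |
 * RTE_IPSEC_SATP_MODE_TUNLV4 | RTE_IPSEC_SATP_IPV4,
 * plus the NATT/ESN/ECN/DSCP/SQN option bits derived above.
 */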

/*
 * Init ESP inbound specific things.
 */
static void
esp_inb_init(struct rte_ipsec_sa *sa)
{
	/* these params may change as new algorithms are added */
	sa->ctp.cipher.offset = sizeof(struct rte_esp_hdr) + sa->iv_len;
	sa->ctp.cipher.length = sa->icv_len + sa->ctp.cipher.offset;

	/*
	 * for AEAD algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (sa->algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CCM:
	case ALGO_TYPE_CHACHA20_POLY1305:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = 0;
		sa->ctp.auth.length = sa->icv_len - sa->sqh_len;
		sa->cofs.ofs.cipher.tail = sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
}
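
/*
 * Note: sa->cofs (union rte_crypto_sym_ofs) describes the cipher region
 * relative to the auth region (bytes to skip at its head/tail); it is
 * consumed by the CPU-crypto path via
 * rte_cryptodev_sym_cpu_crypto_process().
 */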

/*
 * Init ESP inbound tunnel specific things.
 */
static void
esp_inb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	esp_inb_init(sa);
}

/*
 * Init ESP outbound specific things.
 */
static void
esp_outb_init(struct rte_ipsec_sa *sa, uint32_t hlen, uint64_t sqn)
{
	uint8_t algo_type;

	sa->sqn.outb = sqn > 1 ? sqn : 1;

	algo_type = sa->algo_type;

	/*
	 * Set up auth and cipher length and offset.
	 * These params may change as new algorithms are added.
	 */

	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CCM:
	case ALGO_TYPE_CHACHA20_POLY1305:
	case ALGO_TYPE_AES_CTR:
	case ALGO_TYPE_NULL:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr) +
			sa->iv_len;
		sa->ctp.cipher.length = 0;
		break;
	case ALGO_TYPE_AES_CBC:
	case ALGO_TYPE_3DES_CBC:
		sa->ctp.cipher.offset = hlen + sizeof(struct rte_esp_hdr);
		sa->ctp.cipher.length = sa->iv_len;
		break;
	case ALGO_TYPE_AES_GMAC:
		sa->ctp.cipher.offset = 0;
		sa->ctp.cipher.length = 0;
		break;
	}

	/*
	 * for AEAD algorithms we can assume that
	 * auth and cipher offsets would be equal.
	 */
	switch (algo_type) {
	case ALGO_TYPE_AES_GCM:
	case ALGO_TYPE_AES_CCM:
	case ALGO_TYPE_CHACHA20_POLY1305:
		sa->ctp.auth.raw = sa->ctp.cipher.raw;
		break;
	default:
		sa->ctp.auth.offset = hlen;
		sa->ctp.auth.length = sizeof(struct rte_esp_hdr) +
			sa->iv_len + sa->sqh_len;
		break;
	}

	sa->cofs.ofs.cipher.head = sa->ctp.cipher.offset - sa->ctp.auth.offset;
	sa->cofs.ofs.cipher.tail = (sa->ctp.auth.offset + sa->ctp.auth.length) -
			(sa->ctp.cipher.offset + sa->ctp.cipher.length);
}
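
/*
 * Note: ctp holds only the constant part of the crypto auth/cipher offsets
 * and lengths; the per-packet part (payload and header lengths) is added
 * when the actual crypto ops are filled in (see esp_outb.c).
 */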

/*
 * Init ESP outbound tunnel specific things.
 */
static void
esp_outb_tun_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm)
{
	sa->proto = prm->tun.next_proto;
	sa->hdr_len = prm->tun.hdr_len;
	sa->hdr_l3_off = prm->tun.hdr_l3_off;

	memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);

	/* insert UDP header if UDP encapsulation is enabled */
	if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
		struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
				&sa->hdr[prm->tun.hdr_len];
		sa->hdr_len += sizeof(struct rte_udp_hdr);
		udph->src_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.sport);
		udph->dst_port = rte_cpu_to_be_16(prm->ipsec_xform.udp.dport);
		udph->dgram_cksum = 0;
	}

	/* update l2_len and l3_len fields for outbound mbuf */
	sa->tx_offload.val = rte_mbuf_tx_offload(sa->hdr_l3_off,
		sa->hdr_len - sa->hdr_l3_off, 0, 0, 0, 0, 0);

	esp_outb_init(sa, sa->hdr_len, prm->ipsec_xform.esn.value);
}
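
/*
 * The zero dgram_cksum above follows RFC 3948: for UDP-encapsulated ESP
 * the UDP checksum must be transmitted as zero.
 */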

/*
 * helper function, init SA structure.
 */
static int
esp_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	const struct crypto_xform *cxf)
{
	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
				RTE_IPSEC_SATP_MODE_MASK |
				RTE_IPSEC_SATP_NATT_MASK;

	if (prm->ipsec_xform.options.ecn)
		sa->tos_mask |= RTE_IPV4_HDR_ECN_MASK;

	if (prm->ipsec_xform.options.copy_dscp)
		sa->tos_mask |= RTE_IPV4_HDR_DSCP_MASK;

	if (cxf->aead != NULL) {
		switch (cxf->aead->algo) {
		case RTE_CRYPTO_AEAD_AES_GCM:
			/* RFC 4106 */
			sa->aad_len = sizeof(struct aead_gcm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_GCM;
			sa->algo_type = ALGO_TYPE_AES_GCM;
			break;
		case RTE_CRYPTO_AEAD_AES_CCM:
			/* RFC 4309 */
			sa->aad_len = sizeof(struct aead_ccm_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_AES_CCM;
			sa->algo_type = ALGO_TYPE_AES_CCM;
			break;
		case RTE_CRYPTO_AEAD_CHACHA20_POLY1305:
			/* RFC 7634 & 8439 */
			sa->aad_len = sizeof(struct aead_chacha20_poly1305_aad);
			sa->icv_len = cxf->aead->digest_length;
			sa->iv_ofs = cxf->aead->iv.offset;
			sa->iv_len = sizeof(uint64_t);
			sa->pad_align = IPSEC_PAD_CHACHA20_POLY1305;
			sa->algo_type = ALGO_TYPE_CHACHA20_POLY1305;
			break;
		default:
			return -EINVAL;
		}
	} else if (cxf->auth->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
		/* RFC 4543 */
		/* AES-GMAC is a special case of auth that needs IV */
		sa->pad_align = IPSEC_PAD_AES_GMAC;
		sa->iv_len = sizeof(uint64_t);
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->auth->iv.offset;
		sa->algo_type = ALGO_TYPE_AES_GMAC;

	} else {
		sa->icv_len = cxf->auth->digest_length;
		sa->iv_ofs = cxf->cipher->iv.offset;

		switch (cxf->cipher->algo) {
		case RTE_CRYPTO_CIPHER_NULL:
			sa->pad_align = IPSEC_PAD_NULL;
			sa->iv_len = 0;
			sa->algo_type = ALGO_TYPE_NULL;
			break;

		case RTE_CRYPTO_CIPHER_AES_CBC:
			sa->pad_align = IPSEC_PAD_AES_CBC;
			sa->iv_len = IPSEC_MAX_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CBC;
			break;

		case RTE_CRYPTO_CIPHER_AES_CTR:
			/* RFC 3686 */
			sa->pad_align = IPSEC_PAD_AES_CTR;
			sa->iv_len = IPSEC_AES_CTR_IV_SIZE;
			sa->algo_type = ALGO_TYPE_AES_CTR;
			break;

		case RTE_CRYPTO_CIPHER_3DES_CBC:
			/* RFC 1851 */
			sa->pad_align = IPSEC_PAD_3DES_CBC;
			sa->iv_len = IPSEC_3DES_IV_SIZE;
			sa->algo_type = ALGO_TYPE_3DES_CBC;
			break;

		default:
			return -EINVAL;
		}
	}

	sa->sqh_len = IS_ESN(sa) ? sizeof(uint32_t) : 0;
	sa->udata = prm->userdata;
	sa->spi = rte_cpu_to_be_32(prm->ipsec_xform.spi);
	sa->salt = prm->ipsec_xform.salt;

	/* preserve all values except l2_len and l3_len */
	sa->tx_offload.msk =
		~rte_mbuf_tx_offload(MBUF_MAX_L2_LEN, MBUF_MAX_L3_LEN,
				0, 0, 0, 0, 0);

	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_inb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_inb_init(sa);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4 |
			RTE_IPSEC_SATP_NATT_ENABLE):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6 |
			RTE_IPSEC_SATP_NATT_ENABLE):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		esp_outb_tun_init(sa, prm);
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS |
			RTE_IPSEC_SATP_NATT_ENABLE):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		esp_outb_init(sa, 0, prm->ipsec_xform.esn.value);
		break;
	}

	return 0;
}

/*
 * helper function, init SA replay structure.
 */
static void
fill_sa_replay(struct rte_ipsec_sa *sa, uint32_t wnd_sz, uint32_t nb_bucket,
	uint64_t sqn)
{
	sa->replay.win_sz = wnd_sz;
	sa->replay.nb_bucket = nb_bucket;
	sa->replay.bucket_index_mask = nb_bucket - 1;
	sa->sqn.inb.rsn[0] = (struct replay_sqn *)(sa + 1);
	sa->sqn.inb.rsn[0]->sqn = sqn;
	if ((sa->type & RTE_IPSEC_SATP_SQN_MASK) == RTE_IPSEC_SATP_SQN_ATOM) {
		sa->sqn.inb.rsn[1] = (struct replay_sqn *)
			((uintptr_t)sa->sqn.inb.rsn[0] + rsn_size(nb_bucket));
		sa->sqn.inb.rsn[1]->sqn = sqn;
	}
}
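
/*
 * With RTE_IPSEC_SATP_SQN_ATOM two RSN copies are maintained so that
 * readers can keep using one copy while the writer updates the other;
 * see the RSN helpers in ipsec_sqn.h for the switching logic.
 */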

int
rte_ipsec_sa_size(const struct rte_ipsec_sa_prm *prm)
{
	uint64_t type;
	uint32_t nb, wsz;
	int32_t rc;

	if (prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	return ipsec_sa_size(type, &wsz, &nb);
}

int
rte_ipsec_sa_init(struct rte_ipsec_sa *sa, const struct rte_ipsec_sa_prm *prm,
	uint32_t size)
{
	int32_t rc, sz;
	uint32_t nb, wsz;
	uint64_t type;
	struct crypto_xform cxf;

	if (sa == NULL || prm == NULL)
		return -EINVAL;

	/* determine SA type */
	rc = fill_sa_type(prm, &type);
	if (rc != 0)
		return rc;

	/* determine required size */
	wsz = prm->ipsec_xform.replay_win_sz;
	sz = ipsec_sa_size(type, &wsz, &nb);
	if (sz < 0)
		return sz;
	else if (size < (uint32_t)sz)
		return -ENOSPC;

	/* only esp is supported right now */
	if (prm->ipsec_xform.proto != RTE_SECURITY_IPSEC_SA_PROTO_ESP)
		return -EINVAL;

	if (prm->ipsec_xform.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
		uint32_t hlen = prm->tun.hdr_len;
		/* use the local type: *sa is not initialized yet */
		if (type & RTE_IPSEC_SATP_NATT_ENABLE)
			hlen += sizeof(struct rte_udp_hdr);
		if (hlen > sizeof(sa->hdr))
			return -EINVAL;
	}

	rc = fill_crypto_xform(&cxf, type, prm);
	if (rc != 0)
		return rc;

	/* initialize SA */

	memset(sa, 0, sz);
	sa->type = type;
	sa->size = sz;

	/* check for ESN flag */
	sa->sqn_mask = (prm->ipsec_xform.options.esn == 0) ?
		UINT32_MAX : UINT64_MAX;

	rc = esp_sa_init(sa, prm, &cxf);
	if (rc != 0)
		rte_ipsec_sa_fini(sa);

	/* fill replay window related fields */
	if (nb != 0)
		fill_sa_replay(sa, wsz, nb, prm->ipsec_xform.esn.value);

	return sz;
}
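
/*
 * A minimal usage sketch (illustrative only, error handling trimmed;
 * setup_sa_prm() is a hypothetical helper that fills rte_ipsec_sa_prm):
 *
 *	struct rte_ipsec_sa_prm prm;
 *	struct rte_ipsec_sa *sa;
 *	int sz;
 *
 *	setup_sa_prm(&prm);
 *	sz = rte_ipsec_sa_size(&prm);
 *	if (sz > 0) {
 *		sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 *		if (sa != NULL && rte_ipsec_sa_init(sa, &prm, sz) < 0)
 *			rte_free(sa);
 *	}
 */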

/*
 * setup crypto ops for LOOKASIDE_PROTO type of devices.
 */
static inline void
lksd_proto_cop_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	uint32_t i;
	struct rte_crypto_sym_op *sop;

	for (i = 0; i != num; i++) {
		sop = cop[i]->sym;
		cop[i]->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
		cop[i]->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		cop[i]->sess_type = RTE_CRYPTO_OP_SECURITY_SESSION;
		sop->m_src = mb[i];
		__rte_security_attach_session(sop, ss->security.ses);
	}
}
/*
 * setup packets and crypto ops for LOOKASIDE_PROTO type of devices.
 * Note that for LOOKASIDE_PROTO all packet modifications will be
 * performed by PMD/HW.
 * SW only has to prepare the crypto op.
 */
static uint16_t
lksd_proto_prepare(const struct rte_ipsec_session *ss,
	struct rte_mbuf *mb[], struct rte_crypto_op *cop[], uint16_t num)
{
	lksd_proto_cop_prepare(ss, mb, cop, num);
	return num;
}

/*
 * simplest pkt process routine:
 * all actual processing is already done by HW/PMD,
 * just check mbuf ol_flags.
 * used for:
 * - inbound for RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL
 * - inbound/outbound for RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL
 * - outbound for RTE_SECURITY_ACTION_TYPE_NONE when ESN is disabled
 */
uint16_t
pkt_flag_process(const struct rte_ipsec_session *ss,
		struct rte_mbuf *mb[], uint16_t num)
{
	uint32_t i, k, bytes;
	uint32_t dr[num];

	RTE_SET_USED(ss);

	k = 0;
	bytes = 0;
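
	/*
	 * k counts packets that passed HW/PMD processing; dr[] collects the
	 * indexes of the failed ones so move_bad_mbufs() can shift them to
	 * the tail of mb[] below.
	 */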
	for (i = 0; i != num; i++) {
		if ((mb[i]->ol_flags & RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED) == 0) {
			k++;
			bytes += mb[i]->pkt_len;
		} else
			dr[i - k] = i;
	}

	ss->sa->statistics.count += k;
	ss->sa->statistics.bytes += bytes;

	/* handle unprocessed mbufs */
	if (k != num) {
		rte_errno = EBADMSG;
		if (k != 0)
			move_bad_mbufs(mb, dr, num, num - k);
	}

	return k;
}

/*
 * Select the packet processing function for a session on a LOOKASIDE_NONE
 * type of device.
 */
static int
lksd_none_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.async = esp_outb_tun_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.async = esp_outb_trs_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}
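
/*
 * Note: a non-zero sqh_len means ESN is enabled; the high-order 32 bits of
 * the sequence number are appended to the packet for ICV computation, and
 * esp_outb_sqh_process() strips them once the crypto op completes.
 */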

static int
cpu_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_inb_pkt_prepare;
		pf->process = esp_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->prepare.sync = cpu_outb_tun_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->prepare.sync = cpu_outb_trs_pkt_prepare;
		pf->process = (sa->sqh_len != 0) ?
			esp_outb_sqh_process : pkt_flag_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select the packet processing function for a session on an INLINE_CRYPTO
 * type of device.
 */
static int
inline_crypto_pkt_func_select(const struct rte_ipsec_sa *sa,
		struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	static const uint64_t msk = RTE_IPSEC_SATP_DIR_MASK |
			RTE_IPSEC_SATP_MODE_MASK;

	rc = 0;
	switch (sa->type & msk) {
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_inb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_IB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_inb_trs_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV4):
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TUNLV6):
		pf->process = inline_outb_tun_pkt_process;
		break;
	case (RTE_IPSEC_SATP_DIR_OB | RTE_IPSEC_SATP_MODE_TRANS):
		pf->process = inline_outb_trs_pkt_process;
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

/*
 * Select the packet processing function for a given session, based on the
 * SA parameters and the type of device associated with the session.
 */
int
ipsec_sa_pkt_func_select(const struct rte_ipsec_session *ss,
	const struct rte_ipsec_sa *sa, struct rte_ipsec_sa_pkt_func *pf)
{
	int32_t rc;

	rc = 0;
	pf[0] = (struct rte_ipsec_sa_pkt_func) { {NULL}, NULL };

	switch (ss->type) {
	case RTE_SECURITY_ACTION_TYPE_NONE:
		rc = lksd_none_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
		rc = inline_crypto_pkt_func_select(sa, pf);
		break;
	case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
		if ((sa->type & RTE_IPSEC_SATP_DIR_MASK) ==
				RTE_IPSEC_SATP_DIR_IB)
			pf->process = pkt_flag_process;
		else
			pf->process = inline_proto_outb_pkt_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
		pf->prepare.async = lksd_proto_prepare;
		pf->process = pkt_flag_process;
		break;
	case RTE_SECURITY_ACTION_TYPE_CPU_CRYPTO:
		rc = cpu_crypto_pkt_func_select(sa, pf);
		break;
	default:
		rc = -ENOTSUP;
	}

	return rc;
}

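/*
 * The callbacks selected above are invoked through the
 * rte_ipsec_pkt_crypto_prepare(), rte_ipsec_pkt_cpu_prepare() and
 * rte_ipsec_pkt_process() wrappers defined in rte_ipsec.h.
 */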