linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "accel/ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h"

enum {
        MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
        MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
        MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};

struct mlx5e_ipsec_rx_metadata {
        unsigned char   nexthdr;
        __be32          sa_handle;
} __packed;

enum {
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

struct mlx5e_ipsec_tx_metadata {
        __be16 mss_inv;         /* 1/MSS in 16bit fixed point, only for LSO */
        __be16 seq;             /* LSBs of the first TCP seq, only for LSO */
        u8     esp_next_proto;  /* Next protocol of ESP */
} __packed;

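/*
 * Metadata header exchanged with the FPGA in both directions: one syndrome
 * byte, five content bytes (RX or TX specific) and the original packet
 * ethertype.  On TX it is inserted right after the Ethernet header by
 * mlx5e_ipsec_add_metadata(), which rewrites eth->h_proto to
 * MLX5E_METADATA_ETHER_TYPE and leaves the original ethertype in place,
 * where it lines up with the trailing ethertype field below.
 */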
struct mlx5e_ipsec_metadata {
        unsigned char syndrome;
        union {
                unsigned char raw[5];
                /* from FPGA to host, on successful decrypt */
                struct mlx5e_ipsec_rx_metadata rx;
                /* from host to FPGA */
                struct mlx5e_ipsec_tx_metadata tx;
        } __packed content;
        /* packet type ID field */
        __be16 ethertype;
} __packed;

#define MAX_LSO_MSS 2048

/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

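/* Look up 1/gso_size in Q0.16; relies on gso_size being below MAX_LSO_MSS. */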
static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
        return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}

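/*
 * Make room for the metadata header and slide the MAC addresses forward, so
 * the header sits between the Ethernet header and the original ethertype.
 * eth->h_proto is rewritten to MLX5E_METADATA_ETHER_TYPE; the original
 * ethertype is left untouched and becomes mdata->ethertype.
 */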
static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct ethhdr *eth;

        if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
                return ERR_PTR(-ENOMEM);

        eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
        skb->mac_header -= sizeof(*mdata);
        mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

        memmove(skb->data, skb->data + sizeof(*mdata),
                2 * ETH_ALEN);

        eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

        memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
        return mdata;
}

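/*
 * Strip the ESP trailer (padding, pad length, next header and ICV) that the
 * xfrm stack already appended, and shrink the IP length fields accordingly;
 * presumably the device regenerates the trailer on transmit.
 */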
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
        unsigned int alen = crypto_aead_authsize(x->data);
        struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
        struct iphdr *ipv4hdr = ip_hdr(skb);
        unsigned int trailer_len;
        u8 plen;
        int ret;

        ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
        if (unlikely(ret))
                return ret;

        trailer_len = alen + plen + 2;

        pskb_trim(skb, skb->len - trailer_len);
        if (skb->protocol == htons(ETH_P_IP)) {
                ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
                ip_send_check(ipv4hdr);
        } else {
                ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
                                             trailer_len);
        }
        return 0;
}

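/*
 * Fill the WQE software-parser (SWP) offsets.  The offsets are handed to the
 * hardware in units of 2-byte words, hence the division by two below.
 */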
static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                struct xfrm_offload *xo)
{
        /* Tunnel Mode:
         * SWP:      OutL3       InL3  InL4
         * Pkt: MAC  IP     ESP  IP    L4
         *
         * Transport Mode:
         * SWP:      OutL3       OutL4
         * Pkt: MAC  IP     ESP  L4
         *
         * Tunnel(VXLAN TCP/UDP) over Transport Mode
         * SWP:      OutL3                   InL3  InL4
         * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
         */

        /* Shared settings */
        eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
        if (skb->protocol == htons(ETH_P_IPV6))
                eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

        /* Tunnel mode */
        if (mode == XFRM_MODE_TUNNEL) {
                eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
                if (xo->proto == IPPROTO_IPV6)
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
                return;
        }

        /* Transport mode */
        if (mode != XFRM_MODE_TRANSPORT)
                return;

        if (!xo->inner_ipproto) {
                switch (xo->proto) {
                case IPPROTO_UDP:
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
                        fallthrough;
                case IPPROTO_TCP:
                        /* IP | ESP | TCP */
                        eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
                        break;
                default:
                        break;
                }
        } else {
                /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
                switch (xo->inner_ipproto) {
                case IPPROTO_UDP:
                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
                        fallthrough;
                case IPPROTO_TCP:
                        eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
                        eseg->swp_inner_l4_offset =
                                (skb->csum_start + skb->head - skb->data) / 2;
                        if (skb->protocol == htons(ETH_P_IPV6))
                                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
                        break;
                default:
                        break;
                }
        }

}

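/*
 * Place the 64-bit ESN sequence number in the ESP IV field.  For a GSO skb,
 * oseq already covers every segment of the burst; if the low 32 bits wrapped
 * within this burst, the first segments still belong to the previous seq_hi
 * epoch, so seq_hi - 1 is used for the value written into the header.
 */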
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
                            struct xfrm_offload *xo)
{
        struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
        __u32 oseq = replay_esn->oseq;
        int iv_offset;
        __be64 seqno;
        u32 seq_hi;

        if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
                     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
                seq_hi = xo->seq.hi - 1;
        } else {
                seq_hi = xo->seq.hi;
        }

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
                        struct xfrm_offload *xo)
{
        int iv_offset;
        __be64 seqno;

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

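/*
 * Fill the TX metadata: LSO skbs get the "offload with LSO" syndrome plus
 * 1/MSS (Q0.16) and the low 16 bits of the first TCP sequence number, which
 * the accelerator uses to derive per-segment ESP sequence numbers; plain
 * skbs only carry the offload syndrome and the ESP next-protocol value.
 */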
static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
                                     struct mlx5e_ipsec_metadata *mdata,
                                     struct xfrm_offload *xo)
{
        struct ip_esp_hdr *esph;
        struct tcphdr *tcph;

        if (skb_is_gso(skb)) {
                /* Add LSO metadata indication */
                esph = ip_esp_hdr(skb);
                tcph = inner_tcp_hdr(skb);
                netdev_dbg(skb->dev, "   Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
                           skb->network_header,
                           skb->transport_header,
                           skb->inner_network_header,
                           skb->inner_transport_header);
                netdev_dbg(skb->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
                           skb->len, skb_shinfo(skb)->gso_size,
                           ntohs(tcph->source), ntohs(tcph->dest),
                           ntohl(tcph->seq), ntohl(esph->seq_no));
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
                mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
                mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
        } else {
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
        }
        mdata->content.tx.esp_next_proto = xo->proto;

        netdev_dbg(skb->dev, "   TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
                   mdata->syndrome, mdata->content.tx.esp_next_proto,
                   ntohs(mdata->content.tx.mss_inv),
                   ntohs(mdata->content.tx.seq));
}

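/*
 * Write the ESP trailer (padding, pad length, next header) as an inline
 * segment of the WQE; plen and tailen were computed by
 * mlx5e_ipsec_set_state().
 */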
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
                               struct mlx5e_accel_tx_ipsec_state *ipsec_st,
                               struct mlx5_wqe_inline_seg *inlseg)
{
        inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
        esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
}

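/*
 * Cache the xfrm state/offload pointers and, for full-offload devices,
 * compute the ESP trailer geometry: round skb->len plus the two trailer
 * bytes up to the (4-byte aligned) cipher block size, keep at least 4
 * trailer bytes before the ICV, then add the ICV length.  For example, with
 * a 4-byte block size, a 16-byte ICV and skb->len == 103:
 * clen = ALIGN(105, 4) = 108, plen = max(108 - 103, 4) = 5,
 * tailen = 5 + 16 = 21.
 */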
static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
                                 struct sk_buff *skb,
                                 struct xfrm_state *x,
                                 struct xfrm_offload *xo,
                                 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
        unsigned int blksize, clen, alen, plen;
        struct crypto_aead *aead;
        unsigned int tailen;

        ipsec_st->x = x;
        ipsec_st->xo = xo;
        if (mlx5_is_ipsec_device(priv->mdev)) {
                aead = x->data;
                alen = crypto_aead_authsize(aead);
                blksize = ALIGN(crypto_aead_blocksize(aead), 4);
                clen = ALIGN(skb->len + 2, blksize);
                plen = max_t(u32, clen - skb->len, 4);
                tailen = plen + alen;
                ipsec_st->plen = plen;
                ipsec_st->tailen = tailen;
        }

        return 0;
}

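/*
 * Build the IPsec parts of the Ethernet segment: the SWP offsets, plus (on
 * full-offload devices) the IPsec flow-table metadata bit and the
 * trailer-insertion flags, which depend on whether the SA uses UDP
 * encapsulation (NAT-T) and whether the outer IP protocol is ESP.
 */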
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
                               struct mlx5_wqe_eth_seg *eseg)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct xfrm_encap_tmpl  *encap;
        struct xfrm_state *x;
        struct sec_path *sp;
        u8 l3_proto;

        sp = skb_sec_path(skb);
        if (unlikely(sp->len != 1))
                return;

        x = xfrm_input_state(skb);
        if (unlikely(!x))
                return;

        if (unlikely(!x->xso.offload_handle ||
                     (skb->protocol != htons(ETH_P_IP) &&
                      skb->protocol != htons(ETH_P_IPV6))))
                return;

        mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);

        l3_proto = (x->props.family == AF_INET) ?
                   ((struct iphdr *)skb_network_header(skb))->protocol :
                   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;

        if (mlx5_is_ipsec_device(priv->mdev)) {
                eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
                eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
                encap = x->encap;
                if (!encap) {
                        eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
                } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
                        eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
                }
        }
}

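/*
 * Main TX hook.  Validates that the skb carries exactly one offloaded xfrm
 * state, strips the software-built ESP trailer for non-GSO packets, adds the
 * FPGA metadata header when the device needs it, writes the IV and records
 * the per-packet state used later for WQE construction.  Returns false (and
 * frees the skb) on any failure, bumping the matching drop counter.
 */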
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
                               struct sk_buff *skb,
                               struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct mlx5e_ipsec_metadata *mdata;
        struct xfrm_state *x;
        struct sec_path *sp;

        sp = skb_sec_path(skb);
        if (unlikely(sp->len != 1)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
                goto drop;
        }

        x = xfrm_input_state(skb);
        if (unlikely(!x)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
                goto drop;
        }

        if (unlikely(!x->xso.offload_handle ||
                     (skb->protocol != htons(ETH_P_IP) &&
                      skb->protocol != htons(ETH_P_IPV6)))) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
                goto drop;
        }

        if (!skb_is_gso(skb))
                if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
                        atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
                        goto drop;
                }

        if (MLX5_CAP_GEN(priv->mdev, fpga)) {
                mdata = mlx5e_ipsec_add_metadata(skb);
                if (IS_ERR(mdata)) {
                        atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
                        goto drop;
                }
        }

        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        sa_entry->set_iv_op(skb, x, xo);
        if (MLX5_CAP_GEN(priv->mdev, fpga))
                mlx5e_ipsec_set_metadata(skb, mdata, xo);

        mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);

        return true;

drop:
        kfree_skb(skb);
        return false;
}

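/*
 * Rebuild the skb security path on RX from the FPGA metadata header: look up
 * the xfrm state by sa_handle, attach it to a fresh sec_path, and translate
 * the hardware syndrome into a crypto status for the xfrm layer.  Returns
 * NULL on failure; the caller drops the skb.
 */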
static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
                     struct mlx5e_ipsec_metadata *mdata)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
        struct sec_path *sp;
        u32 sa_handle;

        sp = secpath_set(skb);
        if (unlikely(!sp)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return NULL;
        }

        sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
        if (unlikely(!xs)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return NULL;
        }

        sp = skb_sec_path(skb);
        sp->xvec[sp->len++] = xs;
        sp->olen++;

        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;
        switch (mdata->syndrome) {
        case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (likely(priv->ipsec->no_trailer)) {
                        xo->flags |= XFRM_ESP_NO_TRAILER;
                        xo->proto = mdata->content.rx.nexthdr;
                }
                break;
        case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
                break;
        case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
                xo->status = CRYPTO_INVALID_PROTOCOL;
                break;
        default:
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
                return NULL;
        }
        return xs;
}

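/*
 * RX entry point for the metadata-header flavour of the offload: if the
 * packet starts with a MLX5E_METADATA_ETHER_TYPE header, consume it, rebuild
 * the security path and shrink the reported CQE byte count accordingly.
 */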
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                                          struct sk_buff *skb, u32 *cqe_bcnt)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct xfrm_state *xs;

        if (!is_metadata_hdr_valid(skb))
                return skb;

        /* Use the metadata */
        mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
        xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
        if (unlikely(!xs)) {
                kfree_skb(skb);
                return NULL;
        }

        remove_metadata_hdr(skb);
        *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;

        return skb;
}

enum {
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

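/*
 * RX handler for the flow-table metadata flavour of the offload: the SA
 * handle and syndrome arrive in the CQE ft_metadata word instead of an
 * inline metadata header, but the sec_path / xfrm_offload fixup is the same
 * as in mlx5e_ipsec_build_sp().
 */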
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct sk_buff *skb,
                                       struct mlx5_cqe64 *cqe)
{
        u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
        struct mlx5e_priv *priv;
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
        struct sec_path *sp;
        u32  sa_handle;

        sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
        priv = netdev_priv(netdev);
        sp = secpath_set(skb);
        if (unlikely(!sp)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return;
        }

        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
        if (unlikely(!xs)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return;
        }

        sp = skb_sec_path(skb);
        sp->xvec[sp->len++] = xs;
        sp->olen++;

        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;

        switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (WARN_ON_ONCE(priv->ipsec->no_trailer))
                        xo->flags |= XFRM_ESP_NO_TRAILER;
                break;
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
                break;
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
                xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
                break;
        default:
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
        }
}

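/*
 * The table entries are Q0.16 values of 1/mss stored big-endian; for
 * example, mss = 1400 gives 46 (0x002e), since 46/65536 is roughly 1/1400.
 * Entry 0 is never written and stays zero.
 */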
void mlx5e_ipsec_build_inverse_table(void)
{
        u16 mss_inv;
        u32 mss;

        /* Calculate 1/x inverse table for use in GSO data path.
         * Using this table, we provide the IPSec accelerator with the value of
         * 1/gso_size so that it can infer the position of each segment inside
         * the GSO, and increment the ESP sequence number, and generate the IV.
         * The HW needs this value in Q0.16 fixed-point number format
         */
        mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
        for (mss = 2; mss < MAX_LSO_MSS; mss++) {
                mss_inv = div_u64(1ULL << 32, mss) >> 16;
                mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
        }
}