linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>

#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h"

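/* Decrypt status codes reported by the FPGA in the syndrome byte of the
 * RX metadata header; consumed in mlx5e_ipsec_build_sp() below.
 */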
enum {
        MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
        MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
        MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};

struct mlx5e_ipsec_rx_metadata {
        unsigned char   nexthdr;
        __be32          sa_handle;
} __packed;

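/* Offload opcodes passed to the FPGA in the syndrome byte of the
 * TX metadata header; set in mlx5e_ipsec_set_metadata() below.
 */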
enum {
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

struct mlx5e_ipsec_tx_metadata {
        __be16 mss_inv;         /* 1/MSS in 16bit fixed point, only for LSO */
        __be16 seq;             /* LSBs of the first TCP seq, only for LSO */
        u8     esp_next_proto;  /* Next protocol of ESP */
} __packed;

struct mlx5e_ipsec_metadata {
        unsigned char syndrome;
        union {
                unsigned char raw[5];
                /* from FPGA to host, on successful decrypt */
                struct mlx5e_ipsec_rx_metadata rx;
                /* from host to FPGA */
                struct mlx5e_ipsec_tx_metadata tx;
        } __packed content;
        /* packet type ID field */
        __be16 ethertype;
} __packed;
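
/* On the wire these 8 bytes sit between the Ethernet addresses and the
 * original EtherType: mlx5e_ipsec_add_metadata() below slides the MACs
 * forward, writes MLX5E_METADATA_ETHER_TYPE where the EtherType used to
 * be, and lets the struct's trailing ethertype field overlay the packet's
 * original EtherType.
 */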

#define MAX_LSO_MSS 2048

/* Pre-calculated (Q0.16) fixed-point 1/x inverse lookup table */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
        return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}

static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct ethhdr *eth;

        if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
                return ERR_PTR(-ENOMEM);

        eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
        skb->mac_header -= sizeof(*mdata);
        mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

        memmove(skb->data, skb->data + sizeof(*mdata), 2 * ETH_ALEN);

        eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

        memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
        return mdata;
}

static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
        unsigned int alen = crypto_aead_authsize(x->data);
        struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
        struct iphdr *ipv4hdr = ip_hdr(skb);
        unsigned int trailer_len;
        u8 plen;
        int ret;

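        /* Per RFC 4303 the ESP trailer is laid out as
         * [padding (plen bytes)][pad length (1)][next header (1)][ICV (alen)],
         * so the pad-length byte sits at skb->len - alen - 2 and the whole
         * trailer spans alen + plen + 2 bytes.
         */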
        ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
        if (unlikely(ret))
                return ret;

        trailer_len = alen + plen + 2;

        ret = pskb_trim(skb, skb->len - trailer_len);
        if (unlikely(ret))
                return ret;
        if (skb->protocol == htons(ETH_P_IP)) {
                ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
                ip_send_check(ipv4hdr);
        } else {
                ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
                                             trailer_len);
        }
        return 0;
}

static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                struct xfrm_offload *xo)
{
        struct mlx5e_swp_spec swp_spec = {};

        /* Tunnel Mode:
         * SWP:      OutL3       InL3  InL4
         * Pkt: MAC  IP     ESP  IP    L4
         *
         * Transport Mode:
         * SWP:      OutL3       InL4
         *           InL3
         * Pkt: MAC  IP     ESP  L4
         */
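        /* The SWP (software parser) offsets point the HW at the IP and L4
         * headers on both sides of the ESP header, which the device cannot
         * parse by itself, so checksum and LSO offloads keep working on the
         * encrypted packet.
         */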
        swp_spec.l3_proto = skb->protocol;
        swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
        if (swp_spec.is_tun) {
                if (xo->proto == IPPROTO_IPV6) {
                        swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
                        swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
                } else {
                        swp_spec.tun_l3_proto = htons(ETH_P_IP);
                        swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
                }
        } else {
                swp_spec.tun_l3_proto = skb->protocol;
                swp_spec.tun_l4_proto = xo->proto;
        }

        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}

void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
                            struct xfrm_offload *xo)
{
        struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
        __u32 oseq = replay_esn->oseq;
        int iv_offset;
        __be64 seqno;
        u32 seq_hi;

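        /* The IV carries the ESN of the first segment, from which the HW
         * derives the per-segment sequence numbers. oseq already counts past
         * all gso_segs segments of this skb, so if the low 32 bits wrapped
         * inside the batch (oseq is below the scope midpoint while
         * oseq - gso_segs, taken modulo 2^32, is still above it), the first
         * segment belongs to the previous ESN epoch and needs seq_hi - 1.
         */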
        if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
                     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
                seq_hi = xo->seq.hi - 1;
        } else {
                seq_hi = xo->seq.hi;
        }

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
                        struct xfrm_offload *xo)
{
        int iv_offset;
        __be64 seqno;

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
                                     struct mlx5e_ipsec_metadata *mdata,
                                     struct xfrm_offload *xo)
{
        struct ip_esp_hdr *esph;
        struct tcphdr *tcph;

        if (skb_is_gso(skb)) {
                /* Add LSO metadata indication */
                esph = ip_esp_hdr(skb);
                tcph = inner_tcp_hdr(skb);
                netdev_dbg(skb->dev, "   Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
                           skb->network_header,
                           skb->transport_header,
                           skb->inner_network_header,
                           skb->inner_transport_header);
                netdev_dbg(skb->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
                           skb->len, skb_shinfo(skb)->gso_size,
                           ntohs(tcph->source), ntohs(tcph->dest),
                           ntohl(tcph->seq), ntohl(esph->seq_no));
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
                mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
                mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
        } else {
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
        }
        mdata->content.tx.esp_next_proto = xo->proto;

        netdev_dbg(skb->dev, "   TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
                   mdata->syndrome, mdata->content.tx.esp_next_proto,
                   ntohs(mdata->content.tx.mss_inv),
                   ntohs(mdata->content.tx.seq));
}

struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
                                          struct mlx5e_tx_wqe *wqe,
                                          struct sk_buff *skb)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct mlx5e_ipsec_metadata *mdata;
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_state *x;
        struct sec_path *sp;

        if (!xo)
                return skb;

        sp = skb_sec_path(skb);
        if (unlikely(sp->len != 1)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
                goto drop;
        }

        x = xfrm_input_state(skb);
        if (unlikely(!x)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
                goto drop;
        }

        if (unlikely(!x->xso.offload_handle ||
                     (skb->protocol != htons(ETH_P_IP) &&
                      skb->protocol != htons(ETH_P_IPV6)))) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
                goto drop;
        }

        if (!skb_is_gso(skb))
                if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
                        atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
                        goto drop;
                }
        mdata = mlx5e_ipsec_add_metadata(skb);
        if (IS_ERR(mdata)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
                goto drop;
        }
        mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        sa_entry->set_iv_op(skb, x, xo);
        mlx5e_ipsec_set_metadata(skb, mdata, xo);

        return skb;

drop:
        kfree_skb(skb);
        return NULL;
}

static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
                     struct mlx5e_ipsec_metadata *mdata)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
        struct sec_path *sp;
        u32 sa_handle;

        sp = secpath_set(skb);
        if (unlikely(!sp)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return NULL;
        }

        sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
        if (unlikely(!xs)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return NULL;
        }

        sp = skb_sec_path(skb);
        sp->xvec[sp->len++] = xs;
        sp->olen++;

        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;
        switch (mdata->syndrome) {
        case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (likely(priv->ipsec->no_trailer)) {
                        xo->flags |= XFRM_ESP_NO_TRAILER;
                        xo->proto = mdata->content.rx.nexthdr;
                }
                break;
        case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
                break;
        case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
                xo->status = CRYPTO_INVALID_PROTOCOL;
                break;
        default:
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
                return NULL;
        }
        return xs;
}

struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                                          struct sk_buff *skb, u32 *cqe_bcnt)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct xfrm_state *xs;

        if (!is_metadata_hdr_valid(skb))
                return skb;

        /* Use the metadata */
        mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
        xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
        if (unlikely(!xs)) {
                kfree_skb(skb);
                return NULL;
        }

        remove_metadata_hdr(skb);
        *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;

        return skb;
}

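/* Returns true when the skb's xfrm state carries a HW offload handle,
 * i.e. the device itself can handle this IPsec packet.
 */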
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
                               netdev_features_t features)
{
        struct sec_path *sp = skb_sec_path(skb);
        struct xfrm_state *x;

        if (sp && sp->len) {
                x = sp->xvec[0];
                if (x && x->xso.offload_handle)
                        return true;
        }
        return false;
}

void mlx5e_ipsec_build_inverse_table(void)
{
        u16 mss_inv;
        u32 mss;

        /* Calculate the 1/x inverse table for use in the GSO data path.
         * Through this table we provide the IPsec accelerator with
         * 1/gso_size, so that it can infer the position of each segment
         * inside the GSO batch, increment the ESP sequence number
         * accordingly and generate the IV. The HW expects this value in
         * Q0.16 fixed-point number format.
         */
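        /* e.g. mss = 1460: div_u64(1ULL << 32, 1460) = 2941758,
         * 2941758 >> 16 = 44, and 44 / 2^16 ~= 1/1460.
         */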
        mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
        for (mss = 2; mss < MAX_LSO_MSS; mss++) {
                mss_inv = div_u64(1ULL << 32, mss) >> 16;
                mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
        }
}