/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>

#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h"

enum {
        MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
        MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
        MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};

struct mlx5e_ipsec_rx_metadata {
        unsigned char   nexthdr;
        __be32          sa_handle;
} __packed;

enum {
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
        MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

struct mlx5e_ipsec_tx_metadata {
        __be16 mss_inv;         /* 1/MSS in 16bit fixed point, only for LSO */
        __be16 seq;             /* LSBs of the first TCP seq, only for LSO */
        u8     esp_next_proto;  /* Next protocol of ESP */
} __packed;

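/* 8-byte metadata header exchanged with the FPGA: a 1-byte syndrome,
 * 5 bytes of direction-specific content and a 2-byte ethertype field.
 * On TX it is inserted right after the MAC addresses by
 * mlx5e_ipsec_add_metadata(); on RX it arrives in the same position and
 * is stripped in mlx5e_ipsec_handle_rx_skb().
 */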
struct mlx5e_ipsec_metadata {
        unsigned char syndrome;
        union {
                unsigned char raw[5];
                /* from FPGA to host, on successful decrypt */
                struct mlx5e_ipsec_rx_metadata rx;
                /* from host to FPGA */
                struct mlx5e_ipsec_tx_metadata tx;
        } __packed content;
        /* packet type ID field */
        __be16 ethertype;
} __packed;

#define MAX_LSO_MSS 2048

/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

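/* Look up 1/MSS in Q0.16 fixed point for the skb's GSO size; the table
 * is filled by mlx5e_ipsec_build_inverse_table().
 */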
static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
        return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}

static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct ethhdr *eth;

        if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
                return ERR_PTR(-ENOMEM);

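        /* Open an 8-byte gap between the source MAC and the original
         * ethertype: push the frame down by sizeof(*mdata), move the two
         * MAC addresses back to the new head and claim the ethertype slot
         * for MLX5E_METADATA_ETHER_TYPE. The original ethertype ends up
         * aligned with mdata->ethertype, preserved unchanged.
         */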
        eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
        skb->mac_header -= sizeof(*mdata);
        mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

        memmove(skb->data, skb->data + sizeof(*mdata),
                2 * ETH_ALEN);

        eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

        memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
        return mdata;
}

static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
        unsigned int alen = crypto_aead_authsize(x->data);
        struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
        struct iphdr *ipv4hdr = ip_hdr(skb);
        unsigned int trailer_len;
        u8 plen;
        int ret;

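        /* The ESP trailer is [padding][pad length][next header] followed
         * by the ICV (alen bytes); read the pad-length byte that sits two
         * bytes before the ICV to compute the total trailer length.
         */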
        ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
        if (unlikely(ret))
                return ret;

        trailer_len = alen + plen + 2;

        ret = pskb_trim(skb, skb->len - trailer_len);
        if (unlikely(ret))
                return ret;
        if (skb->protocol == htons(ETH_P_IP)) {
                ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
                ip_send_check(ipv4hdr);
        } else {
                ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
                                             trailer_len);
        }
        return 0;
}

static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
                                struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                struct xfrm_offload *xo)
{
        struct mlx5e_swp_spec swp_spec = {};

        /* Tunnel Mode:
         * SWP:      OutL3       InL3  InL4
         * Pkt: MAC  IP     ESP  IP    L4
         *
         * Transport Mode:
         * SWP:      OutL3       InL4
         *           InL3
         * Pkt: MAC  IP     ESP  L4
         */
        swp_spec.l3_proto = skb->protocol;
        swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
        if (swp_spec.is_tun) {
                if (xo->proto == IPPROTO_IPV6) {
                        swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
                        swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
                } else {
                        swp_spec.tun_l3_proto = htons(ETH_P_IP);
                        swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
                }
        } else {
                swp_spec.tun_l3_proto = skb->protocol;
                swp_spec.tun_l4_proto = xo->proto;
        }

        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}

void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
                            struct xfrm_offload *xo)
{
        struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
        __u32 oseq = replay_esn->oseq;
        int iv_offset;
        __be64 seqno;
        u32 seq_hi;

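        /* A GSO burst can straddle the point where the low 32 bits of the
         * ESN wrap: oseq is already past the wrap (below the scope
         * midpoint) while the burst started before it. xo->seq.hi then
         * reflects the new epoch, so use seq.hi - 1 for this packet.
         */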
        if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
                     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
                seq_hi = xo->seq.hi - 1;
        } else {
                seq_hi = xo->seq.hi;
        }

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
                        struct xfrm_offload *xo)
{
        int iv_offset;
        __be64 seqno;

        /* Place the SN in the IV field */
        seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
        iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
        skb_store_bits(skb, iv_offset, &seqno, 8);
}

static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
                                     struct mlx5e_ipsec_metadata *mdata,
                                     struct xfrm_offload *xo)
{
        struct ip_esp_hdr *esph;
        struct tcphdr *tcph;

        if (skb_is_gso(skb)) {
                /* Add LSO metadata indication */
                esph = ip_esp_hdr(skb);
                tcph = inner_tcp_hdr(skb);
                netdev_dbg(skb->dev, "   Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
                           skb->network_header,
                           skb->transport_header,
                           skb->inner_network_header,
                           skb->inner_transport_header);
                netdev_dbg(skb->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
                           skb->len, skb_shinfo(skb)->gso_size,
                           ntohs(tcph->source), ntohs(tcph->dest),
                           ntohl(tcph->seq), ntohl(esph->seq_no));
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
                mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
                mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
        } else {
                mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
        }
        mdata->content.tx.esp_next_proto = xo->proto;

        netdev_dbg(skb->dev, "   TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
                   mdata->syndrome, mdata->content.tx.esp_next_proto,
                   ntohs(mdata->content.tx.mss_inv),
                   ntohs(mdata->content.tx.seq));
}

bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
                               struct mlx5_wqe_eth_seg *eseg,
                               struct sk_buff *skb)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct mlx5e_ipsec_metadata *mdata;
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_state *x;
        struct sec_path *sp;

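        /* TX offload path: validate that the state is offloaded to the
         * device, strip the software-built ESP trailer (non-GSO only),
         * prepend the metadata header, program the SWP offsets and write
         * the IV, then fill in the TX metadata.
         */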
        if (!xo)
                return true;

        sp = skb_sec_path(skb);
        if (unlikely(sp->len != 1)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
                goto drop;
        }

        x = xfrm_input_state(skb);
        if (unlikely(!x)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
                goto drop;
        }

        if (unlikely(!x->xso.offload_handle ||
                     (skb->protocol != htons(ETH_P_IP) &&
                      skb->protocol != htons(ETH_P_IPV6)))) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
                goto drop;
        }

        if (!skb_is_gso(skb))
                if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
                        atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
                        goto drop;
                }
        mdata = mlx5e_ipsec_add_metadata(skb);
        if (IS_ERR(mdata)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
                goto drop;
        }
        mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        sa_entry->set_iv_op(skb, x, xo);
        mlx5e_ipsec_set_metadata(skb, mdata, xo);

        return true;

drop:
        kfree_skb(skb);
        return false;
}

static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
                     struct mlx5e_ipsec_metadata *mdata)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
        struct sec_path *sp;
        u32 sa_handle;

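        /* Attach the offloaded xfrm state identified by the metadata's
         * sa_handle to the skb's sec path, and mark the crypto as done
         * so the stack skips software decryption.
         */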
        sp = secpath_set(skb);
        if (unlikely(!sp)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return NULL;
        }

        sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
        if (unlikely(!xs)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return NULL;
        }

        sp = skb_sec_path(skb);
        sp->xvec[sp->len++] = xs;
        sp->olen++;

        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;
        switch (mdata->syndrome) {
        case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (likely(priv->ipsec->no_trailer)) {
                        xo->flags |= XFRM_ESP_NO_TRAILER;
                        xo->proto = mdata->content.rx.nexthdr;
                }
                break;
        case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
                break;
        case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
                xo->status = CRYPTO_INVALID_PROTOCOL;
                break;
        default:
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
                return NULL;
        }
        return xs;
}

struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                                          struct sk_buff *skb, u32 *cqe_bcnt)
{
        struct mlx5e_ipsec_metadata *mdata;
        struct xfrm_state *xs;

        if (!is_metadata_hdr_valid(skb))
                return skb;

        /* Use the metadata */
        mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
        xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
        if (unlikely(!xs)) {
                kfree_skb(skb);
                return NULL;
        }

        remove_metadata_hdr(skb);
        *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;

        return skb;
}

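/* RX syndromes delivered through the CQE ft_metadata field, as opposed
 * to the inline metadata header handled above.
 */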
enum {
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct sk_buff *skb,
                                       struct mlx5_cqe64 *cqe)
{
        u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
        u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
        struct mlx5e_priv *priv;
        struct xfrm_offload *xo;
        struct xfrm_state *xs;
        struct sec_path *sp;
        u32  sa_handle;

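        /* The CQE's ft_metadata word packs both results: the low byte
         * carries the syndrome and MLX5_IPSEC_METADATA_HANDLE() extracts
         * the SA handle.
         */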
        sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
        priv = netdev_priv(netdev);
        sp = secpath_set(skb);
        if (unlikely(!sp)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return;
        }

        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
        if (unlikely(!xs)) {
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return;
        }

        sp = skb_sec_path(skb);
        sp->xvec[sp->len++] = xs;
        sp->olen++;

        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;

        switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
                xo->status = CRYPTO_SUCCESS;
                if (WARN_ON_ONCE(priv->ipsec->no_trailer))
                        xo->flags |= XFRM_ESP_NO_TRAILER;
                break;
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
                break;
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
                xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
                break;
        default:
                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
        }
}

bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
                               netdev_features_t features)
{
        struct sec_path *sp = skb_sec_path(skb);
        struct xfrm_state *x;

        if (sp && sp->len) {
                x = sp->xvec[0];
                if (x && x->xso.offload_handle)
                        return true;
        }
        return false;
}

void mlx5e_ipsec_build_inverse_table(void)
{
        u16 mss_inv;
        u32 mss;

        /* Calculate the 1/x inverse table for use in the GSO data path.
         * Using this table, we provide the IPsec accelerator with the value
         * of 1/gso_size so that it can infer the position of each segment
         * inside the GSO, increment the ESP sequence number accordingly and
         * generate the IV.
         * The HW needs this value in Q0.16 fixed-point number format.
         */
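        /* Example: for mss == 1460, div_u64(1ULL << 32, 1460) == 2941758
         * and 2941758 >> 16 == 44, i.e. 44/65536 as the Q0.16
         * approximation of 1/1460.
         */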
        mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
        for (mss = 2; mss < MAX_LSO_MSS; mss++) {
                mss_inv = div_u64(1ULL << 32, mss) >> 16;
                mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
        }
}