linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/module.h>

#include "en.h"
#include "accel/ipsec.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"

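/* SW representation of an offloaded xfrm state. RX entries are also
 * published in the sadb_rx hashtable, keyed by handle, so the RX data
 * path can map a handle reported by the device back to its xfrm_state.
 */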
struct mlx5e_ipsec_sa_entry {
        struct hlist_node hlist; /* Item in SADB_RX hashtable */
        unsigned int handle; /* Handle in SADB_RX */
        struct xfrm_state *x;
        struct mlx5e_ipsec *ipsec;
        void *context;
};

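/* Look up an RX SA by its SADB_RX handle. On a hit, a reference is
 * taken on the returned xfrm_state; the caller is responsible for
 * dropping it with xfrm_state_put().
 */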
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
                                              unsigned int handle)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_state *ret = NULL;

        rcu_read_lock();
        hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
                if (sa_entry->handle == handle) {
                        ret = sa_entry->x;
                        xfrm_state_hold(ret);
                        break;
                }
        rcu_read_unlock();

        return ret;
}

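/* Allocate a unique handle for the SA (starting at 1) and publish the
 * entry in the RX hashtable.
 */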
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;
        int ret;

        /* ida_simple_get() may sleep with GFP_KERNEL, so allocate the
         * handle before taking the spinlock.
         */
        ret = ida_simple_get(&ipsec->halloc, 1, 0, GFP_KERNEL);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        sa_entry->handle = ret;
        hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

        return 0;
}

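/* Unpublish the SA from the RX hashtable. The handle itself is released
 * later, by mlx5e_ipsec_sadb_rx_free(), after an RCU grace period.
 */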
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;

        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        hash_del_rcu(&sa_entry->hlist);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        unsigned long flags;

        /* Wait for the hash_del_rcu call in sadb_rx_del to affect data path */
        synchronize_rcu();
        spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
        ida_simple_remove(&ipsec->halloc, sa_entry->handle);
        spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

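/* Derive the HW encryption mode from the AEAD key length. alg_key_len
 * is in bits and includes the 4-byte salt appended to the key material.
 */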
static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
{
        unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;

        switch (key_len) {
        case 16:
                return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
        case 32:
                return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
        default:
                netdev_warn(x->xso.dev, "Bad key len: %u for alg %s\n",
                            key_len, x->aead->alg_name);
                return -1;
        }
}

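/* Translate an xfrm_state into the mlx5_accel_ipsec_sa command layout.
 * For ADD_SA this includes the key material: a 128-bit key is written
 * twice to match the HW key layout, and the 4-byte salt trailing the
 * key is programmed separately from the geniv IV salt.
 */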
static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
                                    struct mlx5_accel_ipsec_sa *hw_sa)
{
        struct xfrm_state *x = sa_entry->x;
        struct aead_geniv_ctx *geniv_ctx;
        unsigned int crypto_data_len;
        struct crypto_aead *aead;
        unsigned int key_len;
        int ivsize;

        memset(hw_sa, 0, sizeof(*hw_sa));

        if (op == MLX5_IPSEC_CMD_ADD_SA) {
                crypto_data_len = (x->aead->alg_key_len + 7) / 8;
                key_len = crypto_data_len - 4; /* 4 bytes salt at end */
                aead = x->data;
                geniv_ctx = crypto_aead_ctx(aead);
                ivsize = crypto_aead_ivsize(aead);

                memcpy(&hw_sa->key_enc, x->aead->alg_key, key_len);
                /* Duplicate 128 bit key twice according to HW layout */
                if (key_len == 16)
                        memcpy(&hw_sa->key_enc[16], x->aead->alg_key, key_len);
                memcpy(&hw_sa->gcm.salt_iv, geniv_ctx->salt, ivsize);
                hw_sa->gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
        }

        hw_sa->cmd = htonl(op);
        hw_sa->flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
        if (x->props.family == AF_INET) {
                hw_sa->sip[3] = x->props.saddr.a4;
                hw_sa->dip[3] = x->id.daddr.a4;
                hw_sa->sip_masklen = 32;
                hw_sa->dip_masklen = 32;
        } else {
                memcpy(hw_sa->sip, x->props.saddr.a6, sizeof(hw_sa->sip));
                memcpy(hw_sa->dip, x->id.daddr.a6, sizeof(hw_sa->dip));
                hw_sa->sip_masklen = 128;
                hw_sa->dip_masklen = 128;
                hw_sa->flags |= MLX5_IPSEC_SADB_IPV6;
        }
        hw_sa->spi = x->id.spi;
        hw_sa->sw_sa_handle = htonl(sa_entry->handle);
        switch (x->id.proto) {
        case IPPROTO_ESP:
                hw_sa->flags |= MLX5_IPSEC_SADB_IP_ESP;
                break;
        case IPPROTO_AH:
                hw_sa->flags |= MLX5_IPSEC_SADB_IP_AH;
                break;
        default:
                break;
        }
        hw_sa->enc_mode = mlx5e_ipsec_enc_mode(x);
        if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
                hw_sa->flags |= MLX5_IPSEC_SADB_DIR_SX;
}

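/* Reject xfrm states the hardware cannot offload: anything other than
 * non-ESN ESP in transport or tunnel mode, over IPv4/IPv6, using
 * AES-GCM with a 128-bit ICV, a 128/256-bit key and seqiv IV
 * generation, without encapsulation or TFC padding.
 */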
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
        struct net_device *netdev = x->xso.dev;
        struct mlx5e_priv *priv;

        priv = netdev_priv(netdev);

        if (x->props.aalgo != SADB_AALG_NONE) {
                netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
                return -EINVAL;
        }
        if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
                netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
                return -EINVAL;
        }
        if (x->props.calgo != SADB_X_CALG_NONE) {
                netdev_info(netdev, "Cannot offload compressed xfrm states\n");
                return -EINVAL;
        }
        if (x->props.flags & XFRM_STATE_ESN) {
                netdev_info(netdev, "Cannot offload ESN xfrm states\n");
                return -EINVAL;
        }
        if (x->props.family != AF_INET &&
            x->props.family != AF_INET6) {
                netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
                return -EINVAL;
        }
        if (x->props.mode != XFRM_MODE_TRANSPORT &&
            x->props.mode != XFRM_MODE_TUNNEL) {
                netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
                return -EINVAL;
        }
        if (x->id.proto != IPPROTO_ESP) {
                netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
                return -EINVAL;
        }
        if (x->encap) {
                netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
                return -EINVAL;
        }
        if (!x->aead) {
                netdev_info(netdev, "Cannot offload xfrm states without aead\n");
                return -EINVAL;
        }
        if (x->aead->alg_icv_len != 128) {
                netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
                return -EINVAL;
        }
        if ((x->aead->alg_key_len != 128 + 32) &&
            (x->aead->alg_key_len != 256 + 32)) {
                netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
                return -EINVAL;
        }
        if (x->tfcpad) {
                netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
                return -EINVAL;
        }
        if (!x->geniv) {
                netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
                return -EINVAL;
        }
        if (strcmp(x->geniv, "seqiv")) {
                netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }
        if (x->props.family == AF_INET6 &&
            !(mlx5_accel_ipsec_device_caps(priv->mdev) & MLX5_ACCEL_IPSEC_IPV6)) {
                netdev_info(netdev, "IPv6 xfrm state offload is not supported by this device\n");
                return -EINVAL;
        }
        return 0;
}

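/* xdo_dev_state_add callback: validate the state, program the SA into
 * the device, and stash the SW entry in xso.offload_handle. RX SAs are
 * published in the SADB before the add command completes, so packets
 * the device marks right away can already be matched to their state.
 */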
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
        struct net_device *netdev = x->xso.dev;
        struct mlx5_accel_ipsec_sa hw_sa;
        struct mlx5e_priv *priv;
        void *context;
        int err;

        priv = netdev_priv(netdev);

        err = mlx5e_xfrm_validate_state(x);
        if (err)
                return err;

        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
                err = -ENOMEM;
                goto out;
        }

        sa_entry->x = x;
        sa_entry->ipsec = priv->ipsec;

        /* Add the SA to handle processed incoming packets before the add SA
         * completion was received
         */
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                err = mlx5e_ipsec_sadb_rx_add(sa_entry);
                if (err) {
                        netdev_info(netdev, "Failed adding to SADB_RX: %d\n", err);
                        goto err_entry;
                }
        }

        mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
        context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
        if (IS_ERR(context)) {
                err = PTR_ERR(context);
                goto err_sadb_rx;
        }

        err = mlx5_accel_ipsec_sa_cmd_wait(context);
        if (err)
                goto err_sadb_rx;

        x->xso.offload_handle = (unsigned long)sa_entry;
        goto out;

err_sadb_rx:
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                mlx5e_ipsec_sadb_rx_del(sa_entry);
                mlx5e_ipsec_sadb_rx_free(sa_entry);
        }
err_entry:
        kfree(sa_entry);
out:
        return err;
}

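/* xdo_dev_state_delete callback: issue the delete-SA command without
 * waiting; its completion is awaited in mlx5e_xfrm_free_state() via the
 * context saved here.
 */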
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct mlx5_accel_ipsec_sa hw_sa;
        void *context;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        WARN_ON(sa_entry->x != x);

        if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
                mlx5e_ipsec_sadb_rx_del(sa_entry);

        mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
        context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
        if (IS_ERR(context))
                return;

        sa_entry->context = context;
}

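/* xdo_dev_state_free callback: wait for the delete command issued in
 * mlx5e_xfrm_del_state() to complete, then release the handle and the
 * SW entry. If the wait fails, the entry is deliberately leaked rather
 * than freed while the device may still reference it.
 */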
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
        struct mlx5e_ipsec_sa_entry *sa_entry;
        int res;

        if (!x->xso.offload_handle)
                return;

        sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
        WARN_ON(sa_entry->x != x);

        res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
        sa_entry->context = NULL;
        if (res) {
                /* Leftover object will leak */
                return;
        }

        if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
                mlx5e_ipsec_sadb_rx_free(sa_entry);

        kfree(sa_entry);
}

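/* Allocate the per-netdev IPsec context if the device supports IPsec
 * offload; otherwise leave priv->ipsec NULL and run without offload.
 */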
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = NULL;

        if (!MLX5_IPSEC_DEV(priv->mdev)) {
                netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
                return 0;
        }

        ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
        if (!ipsec)
                return -ENOMEM;

        hash_init(ipsec->sadb_rx);
        spin_lock_init(&ipsec->sadb_rx_lock);
        ida_init(&ipsec->halloc);
        ipsec->en_priv = priv;
        ipsec->en_priv->ipsec = ipsec;
        netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
        return 0;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_ipsec *ipsec = priv->ipsec;

        if (!ipsec)
                return;

        ida_destroy(&ipsec->halloc);
        kfree(ipsec);
        priv->ipsec = NULL;
}

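/* xdo_dev_offload_ok callback: report whether this skb can take the
 * offload data path. IPv4 options and IPv6 extension headers are not
 * supported yet.
 */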
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        if (x->props.family == AF_INET) {
                /* Offload with IPv4 options is not supported yet */
                if (ip_hdr(skb)->ihl > 5)
                        return false;
        } else {
                /* Offload with IPv6 extension headers is not supported yet */
                if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
                        return false;
        }

        return true;
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_state_add      = mlx5e_xfrm_add_state,
        .xdo_dev_state_delete   = mlx5e_xfrm_del_state,
        .xdo_dev_state_free     = mlx5e_xfrm_free_state,
        .xdo_dev_offload_ok     = mlx5e_ipsec_offload_ok,
};

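/* Advertise ESP offload features on the netdev, gated on device caps:
 * base ESP offload requires the SW parser (swp), TX checksum offload
 * additionally requires swp_csum, and ESP GSO requires LSO support
 * plus swp_lso.
 */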
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct net_device *netdev = priv->netdev;

        if (!priv->ipsec)
                return;

        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_ESP) ||
            !MLX5_CAP_ETH(mdev, swp)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
                return;
        }

        mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
        netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
        netdev->features |= NETIF_F_HW_ESP;
        netdev->hw_enc_features |= NETIF_F_HW_ESP;

        if (!MLX5_CAP_ETH(mdev, swp_csum)) {
                mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
                return;
        }

        netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
        netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

        if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_LSO) ||
            !MLX5_CAP_ETH(mdev, swp_lso)) {
                mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
                return;
        }

        mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
        netdev->features |= NETIF_F_GSO_ESP;
        netdev->hw_features |= NETIF_F_GSO_ESP;
        netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}