linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include "en_accel/tls.h"
#include "en_accel/tls_rxtx.h"
#include "accel/accel.h"

#include <net/inet6_hashtables.h>
#include <linux/ipv6.h>

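/* Syndromes carried in the RX metadata (device to host); see
 * mlx5e_tls_handle_rx_skb_metadata() below.
 */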
#define SYNDROM_DECRYPTED  0x30
#define SYNDROM_RESYNC_REQUEST 0x31
#define SYNDROM_AUTH_FAILED 0x32

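/* Syndromes carried in the TX metadata (host to device), placed in the
 * first byte of send_metadata_content.syndrome_swid.
 */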
#define SYNDROME_OFFLOAD_REQUIRED 32
#define SYNDROME_SYNC 33

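/* Snapshot of the part of a TLS record that precedes an out-of-order
 * packet: the record sequence number, the number of preceding record
 * bytes (sync_len, negative when the packet starts before the record)
 * and referenced copies of the frags that hold those bytes.
 */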
struct sync_info {
        u64 rcd_sn;
        s32 sync_len;
        int nr_frags;
        skb_frag_t frags[MAX_SKB_FRAGS];
};

struct recv_metadata_content {
        u8 syndrome;
        u8 reserved;
        __be32 sync_seq;
} __packed;

struct send_metadata_content {
        /* One byte of syndrome followed by 3 bytes of swid */
        __be32 syndrome_swid;
        __be16 first_seq;
} __packed;

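/* 8-byte metadata header exchanged with the device. It sits right
 * after the MAC addresses: the outer ethertype is rewritten to
 * MLX5E_METADATA_ETHER_TYPE, the 6-byte content union carries the
 * per-direction data, and the trailing ethertype field keeps the
 * packet's original protocol ID.
 */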
struct mlx5e_tls_metadata {
        union {
                /* from fpga to host */
                struct recv_metadata_content recv;
                /* from host to fpga */
                struct send_metadata_content send;
                unsigned char raw[6];
        } __packed content;
        /* packet type ID field */
        __be16 ethertype;
} __packed;

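/* Grow the frame by sizeof(struct mlx5e_tls_metadata) bytes, shift the
 * MAC addresses back to the new start and fill in a TX metadata header
 * holding the SYNDROME_OFFLOAD_REQUIRED syndrome and the 3-byte SW ID
 * of the offload context. Returns -ENOMEM if no headroom could be made.
 */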
static int mlx5e_tls_add_metadata(struct sk_buff *skb, __be32 swid)
{
        struct mlx5e_tls_metadata *pet;
        struct ethhdr *eth;

        if (skb_cow_head(skb, sizeof(struct mlx5e_tls_metadata)))
                return -ENOMEM;

        eth = (struct ethhdr *)skb_push(skb, sizeof(struct mlx5e_tls_metadata));
        skb->mac_header -= sizeof(struct mlx5e_tls_metadata);
        pet = (struct mlx5e_tls_metadata *)(eth + 1);

        memmove(skb->data, skb->data + sizeof(struct mlx5e_tls_metadata),
                2 * ETH_ALEN);

        eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);
        pet->content.send.syndrome_swid =
                htonl(SYNDROME_OFFLOAD_REQUIRED << 24) | swid;

        return 0;
}

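/* Find the TLS record that contains tcp_seq and gather what is needed
 * to replay its start: the record sequence number, the number of
 * record bytes preceding tcp_seq and references to the frags holding
 * them. Returns -EINVAL when no record covers tcp_seq, e.g. because
 * the record was already acked and freed.
 */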
static int mlx5e_tls_get_sync_data(struct mlx5e_tls_offload_context_tx *context,
                                   u32 tcp_seq, struct sync_info *info)
{
        int remaining, i = 0, ret = -EINVAL;
        struct tls_record_info *record;
        unsigned long flags;
        s32 sync_size;

        spin_lock_irqsave(&context->base.lock, flags);
        record = tls_get_record(&context->base, tcp_seq, &info->rcd_sn);

        if (unlikely(!record))
                goto out;

        sync_size = tcp_seq - tls_record_start_seq(record);
        info->sync_len = sync_size;
        if (unlikely(sync_size < 0)) {
                if (tls_record_is_start_marker(record))
                        goto done;

                goto out;
        }

        remaining = sync_size;
        while (remaining > 0) {
                info->frags[i] = record->frags[i];
                __skb_frag_ref(&info->frags[i]);
                remaining -= skb_frag_size(&info->frags[i]);

                if (remaining < 0)
                        skb_frag_size_add(&info->frags[i], remaining);

                i++;
        }
        info->nr_frags = i;
done:
        ret = 0;
out:
        spin_unlock_irqrestore(&context->base.lock, flags);
        return ret;
}

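/* Finish building the resync skb: copy the headers of the original
 * skb, append the record sequence number, rewind the TCP sequence
 * number to the start of the replayed data and set up GSO so the
 * replay may be segmented to the device MTU. The metadata syndrome is
 * overwritten with SYNDROME_SYNC and first_seq holds the low 16 bits
 * of the starting sequence number.
 */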
static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
                                        struct sk_buff *nskb, u32 tcp_seq,
                                        int headln, __be64 rcd_sn)
{
        struct mlx5e_tls_metadata *pet;
        u8 syndrome = SYNDROME_SYNC;
        struct iphdr *iph;
        struct tcphdr *th;
        int data_len, mss;

        nskb->dev = skb->dev;
        skb_reset_mac_header(nskb);
        skb_set_network_header(nskb, skb_network_offset(skb));
        skb_set_transport_header(nskb, skb_transport_offset(skb));
        memcpy(nskb->data, skb->data, headln);
        memcpy(nskb->data + headln, &rcd_sn, sizeof(rcd_sn));

        iph = ip_hdr(nskb);
        iph->tot_len = htons(nskb->len - skb_network_offset(nskb));
        th = tcp_hdr(nskb);
        data_len = nskb->len - headln;
        tcp_seq -= data_len;
        th->seq = htonl(tcp_seq);

        mss = nskb->dev->mtu - (headln - skb_network_offset(nskb));
        skb_shinfo(nskb)->gso_size = 0;
        if (data_len > mss) {
                skb_shinfo(nskb)->gso_size = mss;
                skb_shinfo(nskb)->gso_segs = DIV_ROUND_UP(data_len, mss);
        }
        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;

        pet = (struct mlx5e_tls_metadata *)(nskb->data + sizeof(struct ethhdr));
        memcpy(pet, &syndrome, sizeof(syndrome));
        pet->content.send.first_seq = htons(tcp_seq);

        /* MLX5 devices don't care about the checksum partial start, offset
         * and pseudo header
         */
        nskb->ip_summed = CHECKSUM_PARTIAL;

        nskb->queue_mapping = skb->queue_mapping;
}

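/* Handle a TX packet whose TCP sequence number does not match the
 * context's expected sequence (retransmission or reordering). Either
 * let the packet bypass the offload, drop it, or prepend a resync skb
 * that replays the current record up to the packet's sequence number,
 * apparently so the device can rebuild its crypto state. Returns
 * false if the packet was dropped and must not be sent.
 */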
static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
                                 struct mlx5e_txqsq *sq, struct sk_buff *skb,
                                 struct mlx5e_tls *tls)
{
        u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
        struct sync_info info;
        struct sk_buff *nskb;
        int linear_len = 0;
        int headln;
        int i;

        sq->stats->tls_ooo++;

        if (mlx5e_tls_get_sync_data(context, tcp_seq, &info)) {
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
                 */
                atomic64_inc(&tls->sw_stats.tx_tls_drop_no_sync_data);
                goto err_out;
        }

        if (unlikely(info.sync_len < 0)) {
                u32 payload;

                headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
                payload = skb->len - headln;
                if (likely(payload <= -info.sync_len))
                        /* SKB payload doesn't require offload */
                        return true;

                atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
                goto err_out;
        }

        if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
                atomic64_inc(&tls->sw_stats.tx_tls_drop_metadata);
                goto err_out;
        }

        headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
        linear_len += headln + sizeof(info.rcd_sn);
        nskb = alloc_skb(linear_len, GFP_ATOMIC);
        if (unlikely(!nskb)) {
                atomic64_inc(&tls->sw_stats.tx_tls_drop_resync_alloc);
                goto err_out;
        }

        context->expected_seq = tcp_seq + skb->len - headln;
        skb_put(nskb, linear_len);
        for (i = 0; i < info.nr_frags; i++)
                skb_shinfo(nskb)->frags[i] = info.frags[i];

        skb_shinfo(nskb)->nr_frags = info.nr_frags;
        nskb->data_len = info.sync_len;
        nskb->len += info.sync_len;
        sq->stats->tls_resync_bytes += nskb->len;
        mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
                                    cpu_to_be64(info.rcd_sn));
        mlx5e_sq_xmit_simple(sq, nskb, true);

        return true;

err_out:
        dev_kfree_skb_any(skb);
        return false;
}

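/* TX entry point for TLS offload. Packets without TCP payload are sent
 * as-is, kTLS devices take the kTLS path, and on the FPGA path an
 * in-sequence packet gets a metadata header while an out-of-sequence
 * one goes through the OOO/resync flow. Returns false when the skb
 * was consumed and must not be transmitted.
 */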
bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
                             struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_tls_offload_context_tx *context;
        struct tls_context *tls_ctx;
        u32 expected_seq;
        int datalen;
        u32 skb_seq;

        datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
        if (!datalen)
                return true;

        mlx5e_tx_mpwqe_ensure_complete(sq);

        tls_ctx = tls_get_ctx(skb->sk);
        if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
                goto err_out;

        if (mlx5e_accel_is_ktls_tx(sq->mdev))
                return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);

        /* FPGA */
        skb_seq = ntohl(tcp_hdr(skb)->seq);
        context = mlx5e_get_tls_tx_context(tls_ctx);
        expected_seq = context->expected_seq;

        if (unlikely(expected_seq != skb_seq))
                return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);

        if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
                atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
                dev_kfree_skb_any(skb);
                return false;
        }

        context->expected_seq = skb_seq + datalen;
        return true;

err_out:
        dev_kfree_skb_any(skb);
        return false;
}

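/* Act on a resync request from the device: parse the inner IPv4/IPv6
 * and TCP headers that follow the metadata, look up the established
 * socket, attach it to the skb (early-demux style) and hand the
 * requested resync sequence number to the TLS layer. The request is
 * counted and dropped when no matching socket exists or it is in
 * TIME_WAIT.
 */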
static int tls_update_resync_sn(struct net_device *netdev,
                                struct sk_buff *skb,
                                struct mlx5e_tls_metadata *mdata)
{
        struct sock *sk = NULL;
        struct iphdr *iph;
        struct tcphdr *th;
        __be32 seq;

        if (mdata->ethertype != htons(ETH_P_IP))
                return -EINVAL;

        iph = (struct iphdr *)(mdata + 1);

        th = ((void *)iph) + iph->ihl * 4;

        if (iph->version == 4) {
                sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
                                             iph->saddr, th->source, iph->daddr,
                                             th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

                sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
                                                &ipv6h->saddr, th->source,
                                                &ipv6h->daddr, ntohs(th->dest),
                                                netdev->ifindex, 0);
#endif
        }
        if (!sk || sk->sk_state == TCP_TIME_WAIT) {
                struct mlx5e_priv *priv = netdev_priv(netdev);

                atomic64_inc(&priv->tls->sw_stats.rx_tls_drop_resync_request);
                goto out;
        }

        skb->sk = sk;
        skb->destructor = sock_edemux;

        memcpy(&seq, &mdata->content.recv.sync_seq, sizeof(seq));
        tls_offload_rx_resync_request(sk, seq);
out:
        return 0;
}

/* FPGA tls rx handler */
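/*
 * Acts on the metadata header that the device prepended to the frame:
 * known syndromes mark the skb as decrypted, trigger a resync request
 * or count an authentication failure, after which the header is
 * stripped and the CQE byte count adjusted. Unknown syndromes leave
 * the frame untouched.
 */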
void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
                                      u32 *cqe_bcnt)
{
        struct mlx5e_tls_metadata *mdata;
        struct mlx5e_priv *priv;

        /* Use the metadata */
        mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
        switch (mdata->content.recv.syndrome) {
        case SYNDROM_DECRYPTED:
                skb->decrypted = 1;
                break;
        case SYNDROM_RESYNC_REQUEST:
                tls_update_resync_sn(rq->netdev, skb, mdata);
                priv = netdev_priv(rq->netdev);
                atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
                break;
        case SYNDROM_AUTH_FAILED:
                /* Authentication failure will be observed and verified by kTLS */
                priv = netdev_priv(rq->netdev);
                atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
                break;
        default:
                /* Bypass the metadata header to others */
                return;
        }

        remove_metadata_hdr(skb);
        *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
}

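/* Worst-case send-queue space a single TLS packet may need, reserved
 * so the SQ is stopped before an offload transmission (e.g. the FPGA
 * resync skb) could overflow the ring. Zero when the device has no
 * TLS offload.
 */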
u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
        if (!mlx5e_accel_is_tls_device(mdev))
                return 0;

        if (mlx5e_accel_is_ktls_device(mdev))
                return mlx5e_ktls_get_stop_room(mdev, params);

        /* FPGA */
        /* Resync SKB. */
        return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
}