linux/net/ipv4/esp4_offload.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

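/* GRO receive handler for ESP: parse the SPI and sequence number, look
 * up the xfrm state for the packet, attach it to the sec_path and hand
 * the skb to xfrm_input() for (possibly asynchronous) processing.
 * Returning ERR_PTR(-EINPROGRESS) tells the GRO layer that the skb has
 * been consumed.
 */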
static struct sk_buff *esp4_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        int offset = skb_gro_offset(skb);
        struct xfrm_offload *xo;
        struct xfrm_state *x;
        __be32 seq;
        __be32 spi;

        if (!pskb_pull(skb, offset))
                return NULL;

        if (xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq) != 0)
                goto out;

        xo = xfrm_offload(skb);
        if (!xo || !(xo->flags & CRYPTO_DONE)) {
                struct sec_path *sp = secpath_set(skb);

                if (!sp)
                        goto out;

                if (sp->len == XFRM_MAX_DEPTH)
                        goto out_reset;

                x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
                                      (xfrm_address_t *)&ip_hdr(skb)->daddr,
                                      spi, IPPROTO_ESP, AF_INET);
                if (!x)
                        goto out_reset;

                skb->mark = xfrm_smark_get(skb->mark, x);

                sp->xvec[sp->len++] = x;
                sp->olen++;

                xo = xfrm_offload(skb);
                if (!xo)
                        goto out_reset;
        }

        xo->flags |= XFRM_GRO;

        XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
        XFRM_SPI_SKB_CB(skb)->family = AF_INET;
        XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
        XFRM_SPI_SKB_CB(skb)->seq = seq;

        /* We don't need to handle errors from xfrm_input, it does all
         * the error handling and frees the resources on error. */
        xfrm_input(skb, IPPROTO_ESP, spi, -2);

        return ERR_PTR(-EINPROGRESS);
out_reset:
        secpath_reset(skb);
out:
        skb_push(skb, offset);
        NAPI_GRO_CB(skb)->same_flow = 0;
        NAPI_GRO_CB(skb)->flush = 1;

        return NULL;
}

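/* ->encap callback: rebuild the outer ESP header for a GSO skb. The
 * xfrm output path leaves the mac header pointing at the outer IP
 * protocol field, so the single-byte store below sets that field to
 * IPPROTO_ESP; the original inner protocol is saved in xo->proto for
 * the segmentation callbacks.
 */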
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
        struct ip_esp_hdr *esph;
        struct iphdr *iph = ip_hdr(skb);
        struct xfrm_offload *xo = xfrm_offload(skb);
        int proto = iph->protocol;

        skb_push(skb, -skb_network_offset(skb));
        esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;

        esph->spi = x->id.spi;
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        xo->proto = proto;
}

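/* Tunnel mode: the inner frame still carries its own MAC and IP
 * headers, so push the MAC header back on and let the generic
 * MAC-layer GSO code segment the inner packet.
 */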
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
                                                struct sk_buff *skb,
                                                netdev_features_t features)
{
        __skb_push(skb, skb->mac_len);
        return skb_mac_gso_segment(skb, features);
}

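/* Transport mode: step the transport header over the space reserved
 * for the ESP header and IV (x->props.header_len) and dispatch to the
 * inner protocol's gso_segment callback.
 */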
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
                                                   struct sk_buff *skb,
                                                   netdev_features_t features)
{
        const struct net_offload *ops;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct xfrm_offload *xo = xfrm_offload(skb);

        skb->transport_header += x->props.header_len;
        ops = rcu_dereference(inet_offloads[xo->proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

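/* BEET mode: find the inner transport header, which may sit behind a
 * BEET pseudo header (IPPROTO_BEETPH) or, for IPv6 selectors, behind
 * extension headers, then dispatch to the inner protocol's gso_segment
 * callback.
 */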
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
                                              struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        const struct net_offload *ops;
        u8 proto = xo->proto;

        skb->transport_header += x->props.header_len;

        if (x->sel.family != AF_INET6) {
                if (proto == IPPROTO_BEETPH) {
                        struct ip_beet_phdr *ph =
                                (struct ip_beet_phdr *)skb->data;

                        skb->transport_header += ph->hdrlen * 8;
                        proto = ph->nexthdr;
                } else {
                        skb->transport_header -= IPV4_BEET_PHMAXLEN;
                }
        } else {
                __be16 frag;

                skb->transport_header +=
                        ipv6_skip_exthdr(skb, 0, &proto, &frag);
                if (proto == IPPROTO_TCP)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
        }

        __skb_pull(skb, skb_transport_offset(skb));
        ops = rcu_dereference(inet_offloads[proto]);
        if (likely(ops && ops->callbacks.gso_segment))
                segs = ops->callbacks.gso_segment(skb, features);

        return segs;
}

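/* Dispatch to the segmentation helper matching the state's outer
 * encapsulation mode.
 */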
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
                                                    struct sk_buff *skb,
                                                    netdev_features_t features)
{
        switch (x->outer_mode.encap) {
        case XFRM_MODE_TUNNEL:
                return xfrm4_tunnel_gso_segment(x, skb, features);
        case XFRM_MODE_TRANSPORT:
                return xfrm4_transport_gso_segment(x, skb, features);
        case XFRM_MODE_BEET:
                return xfrm4_beet_gso_segment(x, skb, features);
        }

        return ERR_PTR(-EOPNOTSUPP);
}

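/* GSO segmentation entry point for ESP: validate the offload context
 * and SPI, pull the ESP header and IV, and mask out features (forcing
 * a software fallback for checksumming and scatter-gather) whenever
 * the transmitting device cannot handle ESP for this state.
 */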
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        struct xfrm_state *x;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
        struct sec_path *sp;

        if (!xo)
                return ERR_PTR(-EINVAL);

        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
                return ERR_PTR(-EINVAL);

        sp = skb_sec_path(skb);
        x = sp->xvec[sp->len - 1];
        aead = x->data;
        esph = ip_esp_hdr(skb);

        if (esph->spi != x->id.spi)
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
                return ERR_PTR(-EINVAL);

        __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

        skb->encap_hdr_csum = 1;

        if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
             !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);
        else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
                 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
                esp_features = features & ~(NETIF_F_CSUM_MASK |
                                            NETIF_F_SCTP_CRC);

        xo->flags |= XFRM_GSO_SEGMENT;

        return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

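/* ->input_tail callback: finish receive processing after the crypto
 * layer (hardware or software) is done; esp_input_done2() strips the
 * ESP header, padding and trailer.
 */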
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
        struct crypto_aead *aead = x->data;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
                return -EINVAL;

        if (!(xo->flags & CRYPTO_DONE))
                skb->ip_summed = CHECKSUM_NONE;

        return esp_input_done2(skb, 0);
}

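/* ->xmit callback: build the ESP framing for transmission. If the
 * state's device can do the crypto in hardware, only the plaintext
 * framing and sequence numbers are set up here; otherwise
 * CRYPTO_FALLBACK is flagged and esp_output_tail() encrypts in
 * software.
 */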
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,
                    netdev_features_t features)
{
        int err;
        int alen;
        int blksize;
        struct xfrm_offload *xo;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct esp_info esp;
        bool hw_offload = true;
        __u32 seq;

        esp.inplace = true;

        xo = xfrm_offload(skb);

        if (!xo)
                return -EINVAL;

        if ((!(features & NETIF_F_HW_ESP) &&
             !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
            x->xso.dev != skb->dev) {
                xo->flags |= CRYPTO_FALLBACK;
                hw_offload = false;
        }

        esp.proto = xo->proto;

        /* skb is pure payload to encrypt */

        aead = x->data;
        alen = crypto_aead_authsize(aead);

        esp.tfclen = 0;
        /* XXX: Add support for tfc padding here. */

        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
        esp.plen = esp.clen - skb->len - esp.tfclen;
        esp.tailen = esp.tfclen + esp.plen + alen;

        esp.esph = ip_esp_hdr(skb);

        if (!hw_offload || !skb_is_gso(skb)) {
                esp.nfrags = esp_output_head(x, skb, &esp);
                if (esp.nfrags < 0)
                        return esp.nfrags;
        }

        seq = xo->seq.low;

        esph = esp.esph;
        esph->spi = x->id.spi;

        skb_push(skb, -skb_network_offset(skb));

        if (xo->flags & XFRM_GSO_SEGMENT) {
                esph->seq_no = htonl(seq);

                if (!skb_is_gso(skb))
                        xo->seq.low++;
                else
                        xo->seq.low += skb_shinfo(skb)->gso_segs;
        }

        esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

        ip_hdr(skb)->tot_len = htons(skb->len);
        ip_send_check(ip_hdr(skb));

        if (hw_offload) {
                if (!skb_ext_add(skb, SKB_EXT_SEC_PATH))
                        return -ENOMEM;

                xo = xfrm_offload(skb);
                if (!xo)
                        return -EINVAL;

                xo->flags |= XFRM_XMIT;
                return 0;
        }

        err = esp_output_tail(x, skb, &esp);
        if (err)
                return err;

        secpath_reset(skb);

        return 0;
}

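/* Registration tables: esp4_offload hooks ESP into the inet GRO/GSO
 * callback lists, esp_type_offload provides the per-state offload
 * callbacks to the xfrm layer.
 */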
static const struct net_offload esp4_offload = {
        .callbacks = {
                .gro_receive = esp4_gro_receive,
                .gso_segment = esp4_gso_segment,
        },
};

static const struct xfrm_type_offload esp_type_offload = {
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .input_tail     = esp_input_tail,
        .xmit           = esp_xmit,
        .encap          = esp4_gso_encap,
};

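/* Module init/exit: register and unregister the xfrm type offload and
 * the inet protocol offload for ESP.
 */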
static int __init esp4_offload_init(void)
{
        if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type offload\n", __func__);
                return -EAGAIN;
        }

        return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
        xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
        inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");