linux/net/xfrm/xfrm_device.c
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
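/* validate_xmit_xfrm - prepare an ESP-offloaded skb for transmission.
 *
 * Called on the core TX path for packets that carry xfrm offload state.
 * Runs the software parts of ESP offload (the outer mode and type_offload
 * xmit callbacks) before the packet is handed to the device.  While the
 * per-CPU xfrm backlog still holds earlier packets, new ones are deferred
 * (*again is set) so that queued packets keep their order.
 */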
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
        int err;
        unsigned long flags;
        struct xfrm_state *x;
        struct sk_buff *skb2;
        struct softnet_data *sd;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);

        if (!xo)
                return skb;

        if (!(features & NETIF_F_HW_ESP))
                esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

        x = skb->sp->xvec[skb->sp->len - 1];
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;

        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
        local_irq_restore(flags);

        if (err) {
                *again = true;
                return skb;
        }

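        /* A GSO packet that was rerouted away from the offloading device,
         * or whose state lost its offload handle, can no longer rely on
         * hardware segmentation: drop the ESP offload features and
         * segment it in software here.
         */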
        if (skb_is_gso(skb)) {
                struct net_device *dev = skb->dev;

                if (unlikely(!x->xso.offload_handle || (x->xso.dev != dev))) {
                        struct sk_buff *segs;

                        /* Packet got rerouted, fixup features and segment it. */
                        esp_features = esp_features & ~(NETIF_F_HW_ESP
                                                        | NETIF_F_GSO_ESP);

                        segs = skb_gso_segment(skb, esp_features);
                        if (IS_ERR(segs)) {
                                kfree_skb(skb);
                                atomic_long_inc(&dev->tx_dropped);
                                return NULL;
                        } else {
                                consume_skb(skb);
                                skb = segs;
                        }
                }
        }

        if (!skb->next) {
                x->outer_mode->xmit(x, skb);

                xo->flags |= XFRM_DEV_RESUME;

                err = x->type_offload->xmit(x, skb, esp_features);
                if (err) {
                        if (err == -EINPROGRESS)
                                return NULL;

                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        kfree_skb(skb);
                        return NULL;
                }

                skb_push(skb, skb->data - skb_mac_header(skb));

                return skb;
        }

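        /* Segmented packet: run the outer mode and ESP output handlers on
         * each segment individually.  Segments taken asynchronously
         * (-EINPROGRESS) stay unlinked from the list; any other error
         * drops the failing segment and the remainder of the list.
         */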
        skb2 = skb;

        do {
                struct sk_buff *nskb = skb2->next;
                skb2->next = NULL;

                xo = xfrm_offload(skb2);
                xo->flags |= XFRM_DEV_RESUME;

                x->outer_mode->xmit(x, skb2);

                err = x->type_offload->xmit(x, skb2, esp_features);
                if (!err) {
                        skb2->next = nskb;
                } else if (err != -EINPROGRESS) {
                        XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
                        skb2->next = nskb;
                        kfree_skb_list(skb2);
                        return NULL;
                } else {
                        if (skb == skb2)
                                skb = nskb;

                        if (!skb)
                                return NULL;

                        goto skip_push;
                }

                skb_push(skb2, skb2->data - skb_mac_header(skb2));

skip_push:
                skb2 = nskb;
        } while (skb2);

        return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

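/* xfrm_dev_state_add - bind an xfrm state to an offloading device.
 *
 * Resolves the target netdevice, verifies that it implements the
 * xfrmdev_ops callbacks this state needs, and asks the driver to install
 * the state.  Returns 0 without offloading when no capable device is
 * found, so the SA still works in software.
 */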
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo)
{
        int err;
        struct dst_entry *dst;
        struct net_device *dev;
        struct xfrm_state_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;

        if (!x->type_offload)
                return -EINVAL;

        /* We don't yet support UDP encapsulation and TFC padding. */
        if (x->encap || x->tfcpad)
                return -EINVAL;

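        /* Resolve the device from the given ifindex; if that yields no
         * device, fall back to a route lookup on the state's addresses.
         * For inbound states the lookup runs towards the local address,
         * hence the swapped saddr/daddr.
         */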
        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
                        saddr = &x->props.saddr;
                        daddr = &x->id.daddr;
                } else {
                        saddr = &x->id.daddr;
                        daddr = &x->props.saddr;
                }

                dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
                                        x->props.family, x->props.output_mark);
                if (IS_ERR(dst))
                        return 0;

                dev = dst->dev;

                dev_hold(dev);
                dst_release(dst);
        }

        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
                return 0;
        }

        if (x->props.flags & XFRM_STATE_ESN &&
            !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
                xso->dev = NULL;
                dev_put(dev);
                return -EINVAL;
        }

        xso->dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;

        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->dev = NULL;
                dev_put(dev);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

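/* xfrm_dev_offload_ok - check that a packet can be offloaded as-is.
 *
 * A packet qualifies when it leaves through the device the state was
 * offloaded to, fits the path MTU (or is GSO and will fit after
 * segmentation), and the nested dst carries no further transformation.
 * The driver gets a final veto through xdo_dev_offload_ok.
 */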
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
        int mtu;
        struct dst_entry *dst = skb_dst(skb);
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
        struct net_device *dev = x->xso.dev;

        if (!x->type_offload || x->encap)
                return false;

        if ((!dev || (x->xso.offload_handle && (dev == xfrm_dst_path(dst)->dev))) &&
             (!xdst->child->xfrm && x->type->get_mtu)) {
                mtu = x->type->get_mtu(x, xdst->child_mtu_cached);

                if (skb->len <= mtu)
                        goto ok;

                if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
                        goto ok;
        }

        return false;

ok:
        if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
                return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

        return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

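/* xfrm_dev_resume - transmit a packet after asynchronous ESP processing.
 *
 * Picks a TX queue and tries to send the skb directly.  If the queue is
 * frozen or stopped, or the driver cannot take the packet, the skb is
 * parked on the per-CPU xfrm backlog and NET_TX_SOFTIRQ is raised so
 * xfrm_dev_backlog() retries it.
 */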
void xfrm_dev_resume(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret = NETDEV_TX_BUSY;
        struct netdev_queue *txq;
        struct softnet_data *sd;
        unsigned long flags;

        rcu_read_lock();
        txq = netdev_pick_tx(dev, skb, NULL);

        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &ret);
        HARD_TX_UNLOCK(dev, txq);

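        /* Could not send now: defer to the per-CPU backlog, retried
         * from the TX softirq.
         */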
        if (!dev_xmit_complete(ret)) {
                local_irq_save(flags);
                sd = this_cpu_ptr(&softnet_data);
                skb_queue_tail(&sd->xfrm_backlog, skb);
                raise_softirq_irqoff(NET_TX_SOFTIRQ);
                local_irq_restore(flags);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

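/* xfrm_dev_backlog - retransmit deferred packets from the TX softirq.
 *
 * Splices the per-CPU xfrm backlog onto a private list under the queue
 * lock and feeds every entry back through xfrm_dev_resume().
 */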
void xfrm_dev_backlog(struct softnet_data *sd)
{
        struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
        struct sk_buff_head list;
        struct sk_buff *skb;

        if (skb_queue_empty(xfrm_backlog))
                return;

        __skb_queue_head_init(&list);

        spin_lock(&xfrm_backlog->lock);
        skb_queue_splice_init(xfrm_backlog, &list);
        spin_unlock(&xfrm_backlog->lock);

        while (!skb_queue_empty(&list)) {
                skb = __skb_dequeue(&list);
                xfrm_dev_resume(skb);
        }
}
#endif

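/* xfrm_api_check - sanity-check a device's advertised ESP offload features.
 *
 * A device claiming NETIF_F_HW_ESP must supply xfrmdev_ops with at least
 * xdo_dev_state_add and xdo_dev_state_delete, and may only claim
 * NETIF_F_HW_ESP_TX_CSUM on top of NETIF_F_HW_ESP.  Without
 * CONFIG_XFRM_OFFLOAD, any ESP feature bit is rejected.
 *
 * For illustration, a driver that passes this check would register
 * something along the lines of the sketch below (a minimal, hypothetical
 * example; the foo_* callbacks are not part of this file):
 *
 *	static const struct xfrmdev_ops foo_xfrmdev_ops = {
 *		.xdo_dev_state_add	= foo_xfrm_add_state,
 *		.xdo_dev_state_delete	= foo_xfrm_del_state,
 *		.xdo_dev_offload_ok	= foo_xfrm_offload_ok,
 *	};
 *
 *	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
 *	netdev->features |= NETIF_F_HW_ESP;
 */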
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
        if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
            !(dev->features & NETIF_F_HW_ESP))
                return NOTIFY_BAD;

        if ((dev->features & NETIF_F_HW_ESP) &&
            (!(dev->xfrmdev_ops &&
               dev->xfrmdev_ops->xdo_dev_state_add &&
               dev->xfrmdev_ops->xdo_dev_state_delete)))
                return NOTIFY_BAD;
#else
        if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
                return NOTIFY_BAD;
#endif

        return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_unregister(struct net_device *dev)
{
        xfrm_policy_cache_flush();
        return NOTIFY_DONE;
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
        return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
        if (dev->features & NETIF_F_HW_ESP)
                xfrm_dev_state_flush(dev_net(dev), dev, true);

        xfrm_policy_cache_flush();
        return NOTIFY_DONE;
}

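/* Netdevice notifier: reject inconsistent ESP offload features at
 * register and feature-change time, flush offloaded states when a device
 * goes down, and drop the xfrm policy cache on down and unregister.
 */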
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_REGISTER:
                return xfrm_dev_register(dev);

        case NETDEV_UNREGISTER:
                return xfrm_dev_unregister(dev);

        case NETDEV_FEAT_CHANGE:
                return xfrm_dev_feat_change(dev);

        case NETDEV_DOWN:
                return xfrm_dev_down(dev);
        }
        return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
        .notifier_call  = xfrm_dev_event,
};

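/* Register the netdevice notifier at boot so xfrm tracks device state. */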
void __init xfrm_dev_init(void)
{
        register_netdevice_notifier(&xfrm_dev_notifier);
}