linux/net/8021q/vlan_core.c
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

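/* Hand a VLAN-tagged skb over to the matching VLAN device, if one is
 * configured on the receiving device: retarget skb->dev, re-insert the
 * tag into the packet data when reordering is disabled, and account the
 * packet in the VLAN device's per-cpu RX statistics.  Returns false if
 * no VLAN device matches the tag or the skb cannot be unshared.
 */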
bool vlan_do_receive(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        __be16 vlan_proto = skb->vlan_proto;
        u16 vlan_id = skb_vlan_tag_get_id(skb);
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;

        vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
        if (!vlan_dev)
                return false;

        skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return false;

        skb->dev = vlan_dev;
        if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
        }

        if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
            !netif_is_macvlan_port(vlan_dev) &&
            !netif_is_bridge_port(vlan_dev)) {
                unsigned int offset = skb->data - skb_mac_header(skb);

                /* vlan_insert_tag() expects skb->data to point at the MAC
                 * header, so move skb->data there before calling it and
                 * restore the original position afterwards.
                 */
                skb_push(skb, offset);
                skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
                                              skb->vlan_tci);
                if (!skb)
                        return false;
                skb_pull(skb, offset + VLAN_HLEN);
                skb_reset_mac_len(skb);
        }

        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;

        rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;
        if (skb->pkt_type == PACKET_MULTICAST)
                rx_stats->rx_multicast++;
        u64_stats_update_end(&rx_stats->syncp);

        return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
                                        __be16 vlan_proto, u16 vlan_id)
{
        struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

        if (vlan_info) {
                return vlan_group_get_device(&vlan_info->grp,
                                             vlan_proto, vlan_id);
        } else {
                /*
                 * Lower devices of master uppers (bonding, team) do not have
                 * grp assigned to themselves. Grp is assigned to upper device
                 * instead.
                 */
                struct net_device *upper_dev;

                upper_dev = netdev_master_upper_dev_get_rcu(dev);
                if (upper_dev)
                        return __vlan_find_dev_deep_rcu(upper_dev,
                                                    vlan_proto, vlan_id);
        }

        return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

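/* Walk down through stacked VLAN devices (e.g. QinQ) and return the
 * non-VLAN device at the bottom of the stack.
 */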
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
        struct net_device *ret = vlan_dev_priv(dev)->real_dev;

        while (is_vlan_dev(ret))
                ret = vlan_dev_priv(ret)->real_dev;

        return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
        int i, j;

        for (i = 0; i < VLAN_PROTO_NUM; i++)
                for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
                        kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
        vlan_group_free(&vlan_info->grp);
        kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
        vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
        struct vlan_info *vlan_info;

        vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
        if (!vlan_info)
                return NULL;

        vlan_info->real_dev = dev;
        INIT_LIST_HEAD(&vlan_info->vid_list);
        return vlan_info;
}

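/* One refcounted entry per (protocol, VID) pair registered on the real
 * device; kept on vlan_info->vid_list.
 */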
struct vlan_vid_info {
        struct list_head list;
        __be16 proto;
        u16 vid;
        int refcount;
};

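/* True if the real device can filter this VLAN protocol in hardware:
 * CTAG filtering for 802.1Q, STAG filtering for 802.1ad.
 */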
static bool vlan_hw_filter_capable(const struct net_device *dev,
                                     const struct vlan_vid_info *vid_info)
{
        if (vid_info->proto == htons(ETH_P_8021Q) &&
            dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                return true;
        if (vid_info->proto == htons(ETH_P_8021AD) &&
            dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
                return true;
        return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
                                               __be16 proto, u16 vid)
{
        struct vlan_vid_info *vid_info;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                if (vid_info->proto == proto && vid_info->vid == vid)
                        return vid_info;
        }
        return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
        struct vlan_vid_info *vid_info;

        vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
        if (!vid_info)
                return NULL;
        vid_info->proto = proto;
        vid_info->vid = vid;

        return vid_info;
}

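/* Allocate a vid_info entry for (proto, vid), program the hardware
 * filter when the device supports it, and link the entry into the
 * vid_list.  The caller is responsible for taking a reference on the
 * new entry.
 */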
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
                          struct vlan_vid_info **pvid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        struct vlan_vid_info *vid_info;
        int err;

        vid_info = vlan_vid_info_alloc(proto, vid);
        if (!vid_info)
                return -ENOMEM;

        if (vlan_hw_filter_capable(dev, vid_info)) {
                if (netif_device_present(dev))
                        err = ops->ndo_vlan_rx_add_vid(dev, proto, vid);
                else
                        err = -ENODEV;
                if (err) {
                        kfree(vid_info);
                        return err;
                }
        }
        list_add(&vid_info->list, &vlan_info->vid_list);
        vlan_info->nr_vids++;
        *pvid_info = vid_info;
        return 0;
}

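/* Register (proto, vid) on the device's hardware filter (if any) and
 * take a reference on it.  Allocates the device's vlan_info on first
 * use; callers must hold RTNL and balance with vlan_vid_del().
 */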
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;
        bool vlan_info_created = false;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info) {
                vlan_info = vlan_info_alloc(dev);
                if (!vlan_info)
                        return -ENOMEM;
                vlan_info_created = true;
        }
        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info) {
                err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
                if (err)
                        goto out_free_vlan_info;
        }
        vid_info->refcount++;

        if (vlan_info_created)
                rcu_assign_pointer(dev->vlan_info, vlan_info);

        return 0;

out_free_vlan_info:
        if (vlan_info_created)
                kfree(vlan_info);
        return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
                           struct vlan_vid_info *vid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        __be16 proto = vid_info->proto;
        u16 vid = vid_info->vid;
        int err;

        if (vlan_hw_filter_capable(dev, vid_info)) {
                if (netif_device_present(dev))
                        err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
                else
                        err = -ENODEV;
                if (err) {
                        pr_warn("failed to kill vid %04x/%d for device %s\n",
                                proto, vid, dev->name);
                }
        }
        list_del(&vid_info->list);
        kfree(vid_info);
        vlan_info->nr_vids--;
}

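/* Drop one reference on (proto, vid).  When the last reference goes
 * away the VID is removed from the hardware filter, and the vlan_info
 * itself is freed via RCU once no VIDs remain.
 */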
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return;

        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info)
                return;
        vid_info->refcount--;
        if (vid_info->refcount == 0) {
                __vlan_vid_del(vlan_info, vid_info);
                if (vlan_info->nr_vids == 0) {
                        RCU_INIT_POINTER(dev->vlan_info, NULL);
                        call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
                }
        }
}
EXPORT_SYMBOL(vlan_vid_del);

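/* Replay every VID known on by_dev onto dev, unwinding the additions
 * already made if one of them fails.
 */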
int vlan_vids_add_by_dev(struct net_device *dev,
                         const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return 0;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        list_for_each_entry_continue_reverse(vid_info,
                                             &vlan_info->vid_list,
                                             list) {
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
        }

        return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
                          const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list)
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

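/* True if at least one VLAN device is currently configured on top of
 * dev.  Must be called under RTNL.
 */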
bool vlan_uses_dev(const struct net_device *dev)
{
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return false;
        return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);