linux/net/8021q/vlan_core.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include <net/gro.h>
#include "vlan.h"

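/* vlan_do_receive - deliver an skb carrying a hardware-accelerated VLAN tag
 *
 * Looks up the VLAN device matching the skb's tag on the receiving
 * device.  If one exists, the skb is rehomed to it: the tag is
 * cleared, skb->priority is mapped through the ingress priority
 * table, REORDER_HDR handling may re-insert the tag into the payload,
 * and per-cpu stats are updated.
 *
 * Returns true if the skb now belongs to a VLAN device.  Returns
 * false if no VLAN device matched (the caller keeps the skb) or if
 * the skb was freed, in which case *skbp is set to NULL.
 */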
bool vlan_do_receive(struct sk_buff **skbp)
{
        struct sk_buff *skb = *skbp;
        __be16 vlan_proto = skb->vlan_proto;
        u16 vlan_id = skb_vlan_tag_get_id(skb);
        struct net_device *vlan_dev;
        struct vlan_pcpu_stats *rx_stats;

        vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
        if (!vlan_dev)
                return false;

        skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return false;

        if (unlikely(!(vlan_dev->flags & IFF_UP))) {
                kfree_skb(skb);
                *skbp = NULL;
                return false;
        }

        skb->dev = vlan_dev;
        if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
                /* Our lower layer thinks this is not local, let's make sure.
                 * This allows the VLAN to have a different MAC than the
                 * underlying device, and still route correctly. */
                if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
                        skb->pkt_type = PACKET_HOST;
        }

        if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
            !netif_is_macvlan_port(vlan_dev) &&
            !netif_is_bridge_port(vlan_dev)) {
                unsigned int offset = skb->data - skb_mac_header(skb);

                /*
                 * vlan_insert_inner_tag() expects skb->data to point at the
                 * mac header, so move skb->data there before calling it and
                 * restore the original position afterwards.
                 */
                skb_push(skb, offset);
                skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
                                                    skb->vlan_tci, skb->mac_len);
                if (!skb)
                        return false;
                skb_pull(skb, offset + VLAN_HLEN);
                skb_reset_mac_len(skb);
        }

        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        __vlan_hwaccel_clear_tag(skb);

        rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;
        if (skb->pkt_type == PACKET_MULTICAST)
                rx_stats->rx_multicast++;
        u64_stats_update_end(&rx_stats->syncp);

        return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
                                            __be16 vlan_proto, u16 vlan_id)
{
        struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

        if (vlan_info) {
                return vlan_group_get_device(&vlan_info->grp,
                                             vlan_proto, vlan_id);
        } else {
                /*
                 * Lower devices of master uppers (bonding, team) do not
                 * have a grp assigned to themselves; the grp is assigned
                 * to the upper device instead.
                 */
                struct net_device *upper_dev;

                upper_dev = netdev_master_upper_dev_get_rcu(dev);
                if (upper_dev)
                        return __vlan_find_dev_deep_rcu(upper_dev,
                                                        vlan_proto, vlan_id);
        }

        return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);

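/* Follow a chain of stacked VLAN devices down to the real (non-VLAN)
 * device at the bottom.
 */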
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
        struct net_device *ret = vlan_dev_priv(dev)->real_dev;

        while (is_vlan_dev(ret))
                ret = vlan_dev_priv(ret)->real_dev;

        return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
        return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

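/*
 * Each real device carries at most one struct vlan_info, holding the
 * vlan_group used for VLAN device lookup and a list of vlan_vid_info
 * entries, one per (protocol, vid) pair in use.  Entries are
 * refcounted: the hardware filter is programmed when the first user
 * of a pair appears and torn down again when the last one goes away.
 */
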
static void vlan_group_free(struct vlan_group *grp)
{
        int i, j;

        for (i = 0; i < VLAN_PROTO_NUM; i++)
                for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
                        kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
        vlan_group_free(&vlan_info->grp);
        kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
        vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
        struct vlan_info *vlan_info;

        vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
        if (!vlan_info)
                return NULL;

        vlan_info->real_dev = dev;
        INIT_LIST_HEAD(&vlan_info->vid_list);
        return vlan_info;
}

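/* One refcounted entry per (protocol, vid) pair registered on the
 * real device.
 */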
struct vlan_vid_info {
        struct list_head list;
        __be16 proto;
        u16 vid;
        int refcount;
};

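/* True if @dev can filter VLANs of the given protocol in hardware. */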
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
        if (proto == htons(ETH_P_8021Q) &&
            dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                return true;
        if (proto == htons(ETH_P_8021AD) &&
            dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
                return true;
        return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
                                               __be16 proto, u16 vid)
{
        struct vlan_vid_info *vid_info;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                if (vid_info->proto == proto && vid_info->vid == vid)
                        return vid_info;
        }
        return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
        struct vlan_vid_info *vid_info;

        vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
        if (!vid_info)
                return NULL;
        vid_info->proto = proto;
        vid_info->vid = vid;

        return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
        if (!vlan_hw_filter_capable(dev, proto))
                return 0;

        if (netif_device_present(dev))
                return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
        else
                return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
        if (!vlan_hw_filter_capable(dev, proto))
                return 0;

        if (netif_device_present(dev))
                return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
        else
                return -ENODEV;
}

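/**
 * vlan_for_each - iterate over all VLAN devices on a real device
 * @dev: real device
 * @action: callback invoked with each VLAN device, its vid and @arg
 * @arg: opaque argument passed through to @action
 *
 * Must be called with RTNL held.  Stops at, and returns, the first
 * non-zero value @action returns; returns 0 otherwise.
 */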
int vlan_for_each(struct net_device *dev,
                  int (*action)(struct net_device *dev, int vid, void *arg),
                  void *arg)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;
        struct net_device *vdev;
        int ret;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return 0;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
                                             vid_info->vid);
                ret = action(vdev, vid_info->vid, arg);
                if (ret)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL(vlan_for_each);

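/* Program every vid of @proto tracked in @vlan_info into the real
 * device's hardware filter, unwinding the vids added so far if one
 * of them fails.
 */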
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
        struct net_device *real_dev = vlan_info->real_dev;
        struct vlan_vid_info *vlan_vid_info;
        int err;

        list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
                if (vlan_vid_info->proto == proto) {
                        err = vlan_add_rx_filter_info(real_dev, proto,
                                                      vlan_vid_info->vid);
                        if (err)
                                goto unwind;
                }
        }

        return 0;

unwind:
        list_for_each_entry_continue_reverse(vlan_vid_info,
                                             &vlan_info->vid_list, list) {
                if (vlan_vid_info->proto == proto)
                        vlan_kill_rx_filter_info(real_dev, proto,
                                                 vlan_vid_info->vid);
        }

        return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

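/* Remove every vid of @proto tracked in @vlan_info from the real
 * device's hardware filter.
 */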
void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
        struct vlan_vid_info *vlan_vid_info;

        list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
                if (vlan_vid_info->proto == proto)
                        vlan_kill_rx_filter_info(vlan_info->real_dev,
                                                 vlan_vid_info->proto,
                                                 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

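/* Allocate a vid entry, program the hardware filter and link the new
 * entry into @vlan_info's list.  The caller (vlan_vid_add) holds RTNL
 * and takes the first reference.
 */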
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
                          struct vlan_vid_info **pvid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        struct vlan_vid_info *vid_info;
        int err;

        vid_info = vlan_vid_info_alloc(proto, vid);
        if (!vid_info)
                return -ENOMEM;

        err = vlan_add_rx_filter_info(dev, proto, vid);
        if (err) {
                kfree(vid_info);
                return err;
        }

        list_add(&vid_info->list, &vlan_info->vid_list);
        vlan_info->nr_vids++;
        *pvid_info = vid_info;
        return 0;
}

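/**
 * vlan_vid_add - register use of a (protocol, vid) pair on a device
 * @dev: real device
 * @proto: VLAN protocol, in network byte order
 * @vid: VLAN id
 *
 * Allocates the device's vlan_info on first use and takes a reference
 * on the vid entry, creating it (and programming the hardware filter)
 * if it does not exist yet.  Must be called with RTNL held and paired
 * with vlan_vid_del().
 */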
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;
        bool vlan_info_created = false;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info) {
                vlan_info = vlan_info_alloc(dev);
                if (!vlan_info)
                        return -ENOMEM;
                vlan_info_created = true;
        }
        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info) {
                err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
                if (err)
                        goto out_free_vlan_info;
        }
        vid_info->refcount++;

        if (vlan_info_created)
                rcu_assign_pointer(dev->vlan_info, vlan_info);

        return 0;

out_free_vlan_info:
        if (vlan_info_created)
                kfree(vlan_info);
        return err;
}
EXPORT_SYMBOL(vlan_vid_add);

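/* Unprogram the vid from the hardware filter (warning on failure
 * unless the device is already unregistering), then unlink and free
 * the entry.
 */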
static void __vlan_vid_del(struct vlan_info *vlan_info,
                           struct vlan_vid_info *vid_info)
{
        struct net_device *dev = vlan_info->real_dev;
        __be16 proto = vid_info->proto;
        u16 vid = vid_info->vid;
        int err;

        err = vlan_kill_rx_filter_info(dev, proto, vid);
        if (err && dev->reg_state != NETREG_UNREGISTERING)
                netdev_warn(dev, "failed to kill vid %04x/%d\n",
                            ntohs(proto), vid);

        list_del(&vid_info->list);
        kfree(vid_info);
        vlan_info->nr_vids--;
}

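/**
 * vlan_vid_del - release a reference on a (protocol, vid) pair
 * @dev: real device
 * @proto: VLAN protocol, in network byte order
 * @vid: VLAN id
 *
 * Drops a reference taken by vlan_vid_add().  The entry is removed
 * when its last reference goes away; once no vids remain, the
 * device's vlan_info itself is freed after an RCU grace period.
 * Must be called with RTNL held.
 */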
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
        struct vlan_info *vlan_info;
        struct vlan_vid_info *vid_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return;

        vid_info = vlan_vid_info_get(vlan_info, proto, vid);
        if (!vid_info)
                return;
        vid_info->refcount--;
        if (vid_info->refcount == 0) {
                __vlan_vid_del(vlan_info, vid_info);
                if (vlan_info->nr_vids == 0) {
                        RCU_INIT_POINTER(dev->vlan_info, NULL);
                        call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
                }
        }
}
EXPORT_SYMBOL(vlan_vid_del);

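/* Register on @dev every (protocol, vid) pair currently known on
 * @by_dev, unwinding on failure; typically used when devices are
 * stacked, so the lower device filters the same vids as its upper.
 */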
int vlan_vids_add_by_dev(struct net_device *dev,
                         const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;
        int err;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return 0;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
                err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        list_for_each_entry_continue_reverse(vid_info,
                                             &vlan_info->vid_list,
                                             list) {
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
        }

        return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

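/* Drop from @dev every (protocol, vid) pair known on @by_dev, undoing
 * vlan_vids_add_by_dev().
 */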
void vlan_vids_del_by_dev(struct net_device *dev,
                          const struct net_device *by_dev)
{
        struct vlan_vid_info *vid_info;
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(by_dev->vlan_info);
        if (!vlan_info)
                return;

        list_for_each_entry(vid_info, &vlan_info->vid_list, list)
                vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

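/* With RTNL held: true if at least one VLAN device sits on top of
 * @dev.
 */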
bool vlan_uses_dev(const struct net_device *dev)
{
        struct vlan_info *vlan_info;

        ASSERT_RTNL();

        vlan_info = rtnl_dereference(dev->vlan_info);
        if (!vlan_info)
                return false;
        return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);

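/* GRO receive handler for frames whose VLAN tag is still in the
 * payload: packets are considered the same flow only if their VLAN
 * headers match, then the tag is pulled and the encapsulated
 * protocol's gro_receive callback takes over.
 */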
static struct sk_buff *vlan_gro_receive(struct list_head *head,
                                        struct sk_buff *skb)
{
        const struct packet_offload *ptype;
        unsigned int hlen, off_vlan;
        struct sk_buff *pp = NULL;
        struct vlan_hdr *vhdr;
        struct sk_buff *p;
        __be16 type;
        int flush = 1;

        off_vlan = skb_gro_offset(skb);
        hlen = off_vlan + sizeof(*vhdr);
        vhdr = skb_gro_header_fast(skb, off_vlan);
        if (skb_gro_header_hard(skb, hlen)) {
                vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
                if (unlikely(!vhdr))
                        goto out;
        }

        type = vhdr->h_vlan_encapsulated_proto;

        rcu_read_lock();
        ptype = gro_find_receive_by_type(type);
        if (!ptype)
                goto out_unlock;

        flush = 0;

        list_for_each_entry(p, head, list) {
                struct vlan_hdr *vhdr2;

                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
                if (compare_vlan_header(vhdr, vhdr2))
                        NAPI_GRO_CB(p)->same_flow = 0;
        }

        skb_gro_pull(skb, sizeof(*vhdr));
        skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

        pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
                                            ipv6_gro_receive, inet_gro_receive,
                                            head, skb);

out_unlock:
        rcu_read_unlock();
out:
        skb_gro_flush_final(skb, pp, flush);

        return pp;
}

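/* GRO completion for VLAN: pass the merged skb on to the encapsulated
 * protocol's gro_complete callback, just past the VLAN header.
 */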
static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
        struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
        __be16 type = vhdr->h_vlan_encapsulated_proto;
        struct packet_offload *ptype;
        int err = -ENOENT;

        rcu_read_lock();
        ptype = gro_find_complete_by_type(type);
        if (ptype)
                err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
                                         ipv6_gro_complete, inet_gro_complete,
                                         skb, nhoff + sizeof(*vhdr));

        rcu_read_unlock();
        return err;
}

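/* GRO handlers for 802.1Q and 802.1ad tagged frames, registered early
 * via fs_initcall below.
 */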
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
        {
                .type = cpu_to_be16(ETH_P_8021Q),
                .priority = 10,
                .callbacks = {
                        .gro_receive = vlan_gro_receive,
                        .gro_complete = vlan_gro_complete,
                },
        },
        {
                .type = cpu_to_be16(ETH_P_8021AD),
                .priority = 10,
                .callbacks = {
                        .gro_receive = vlan_gro_receive,
                        .gro_complete = vlan_gro_complete,
                },
        },
};

static int __init vlan_offload_init(void)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
                dev_add_offload(&vlan_packet_offloads[i]);

        return 0;
}

fs_initcall(vlan_offload_init);
 560