linux/net/bridge/br_vlan.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/kernel.h>
   3#include <linux/netdevice.h>
   4#include <linux/rtnetlink.h>
   5#include <linux/slab.h>
   6#include <net/switchdev.h>
   7
   8#include "br_private.h"
   9#include "br_private_tunnel.h"
  10
  11static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
  12
  13static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  14                              const void *ptr)
  15{
  16        const struct net_bridge_vlan *vle = ptr;
  17        u16 vid = *(u16 *)arg->key;
  18
  19        return vle->vid != vid;
  20}
  21
  22static const struct rhashtable_params br_vlan_rht_params = {
  23        .head_offset = offsetof(struct net_bridge_vlan, vnode),
  24        .key_offset = offsetof(struct net_bridge_vlan, vid),
  25        .key_len = sizeof(u16),
  26        .nelem_hint = 3,
  27        .max_size = VLAN_N_VID,
  28        .obj_cmpfn = br_vlan_cmp,
  29        .automatic_shrinking = true,
  30};
  31
  32static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  33{
  34        return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  35}
  36
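/* pvid handling: __vlan_add_pvid()/__vlan_delete_pvid() publish or clear
 * the group's pvid for lockless readers; the smp_wmb() pairs with the
 * read side in br_get_pvid().  Both return true if the pvid changed.
 */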
  37static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  38{
  39        if (vg->pvid == vid)
  40                return false;
  41
  42        smp_wmb();
  43        vg->pvid = vid;
  44
  45        return true;
  46}
  47
  48static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  49{
  50        if (vg->pvid != vid)
  51                return false;
  52
  53        smp_wmb();
  54        vg->pvid = 0;
  55
  56        return true;
  57}
  58
  59/* return true if anything changed, false otherwise */
  60static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  61{
  62        struct net_bridge_vlan_group *vg;
  63        u16 old_flags = v->flags;
  64        bool ret;
  65
  66        if (br_vlan_is_master(v))
  67                vg = br_vlan_group(v->br);
  68        else
  69                vg = nbp_vlan_group(v->port);
  70
  71        if (flags & BRIDGE_VLAN_INFO_PVID)
  72                ret = __vlan_add_pvid(vg, v->vid);
  73        else
  74                ret = __vlan_delete_pvid(vg, v->vid);
  75
  76        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  77                v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  78        else
  79                v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  80
  81        return ret || !!(old_flags ^ v->flags);
  82}
  83
  84static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  85                          struct net_bridge_vlan *v, u16 flags,
  86                          struct netlink_ext_ack *extack)
  87{
  88        int err;
  89
        /* Try switchdev op first. In case it is not supported, fall back to
         * 8021q add.
         */
  93        err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
  94        if (err == -EOPNOTSUPP)
  95                return vlan_vid_add(dev, br->vlan_proto, v->vid);
  96        v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
  97        return err;
  98}
  99
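/* Insert @v into the group's vlan_list.  The list is kept sorted by VID
 * in ascending order and is walked under RCU, hence list_add_rcu().
 */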
 100static void __vlan_add_list(struct net_bridge_vlan *v)
 101{
 102        struct net_bridge_vlan_group *vg;
 103        struct list_head *headp, *hpos;
 104        struct net_bridge_vlan *vent;
 105
 106        if (br_vlan_is_master(v))
 107                vg = br_vlan_group(v->br);
 108        else
 109                vg = nbp_vlan_group(v->port);
 110
 111        headp = &vg->vlan_list;
 112        list_for_each_prev(hpos, headp) {
 113                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 114                if (v->vid < vent->vid)
 115                        continue;
 116                else
 117                        break;
 118        }
 119        list_add_rcu(&v->vlist, hpos);
 120}
 121
 122static void __vlan_del_list(struct net_bridge_vlan *v)
 123{
 124        list_del_rcu(&v->vlist);
 125}
 126
 127static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 128                          const struct net_bridge_vlan *v)
 129{
 130        int err;
 131
        /* Try switchdev op first. In case it is not supported, fall back to
         * 8021q del.
         */
 135        err = br_switchdev_port_vlan_del(dev, v->vid);
 136        if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
 137                vlan_vid_del(dev, br->vlan_proto, v->vid);
 138        return err == -EOPNOTSUPP ? 0 : err;
 139}
 140
/* Returns the master vlan; if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
 144static struct net_bridge_vlan *
 145br_vlan_get_master(struct net_bridge *br, u16 vid,
 146                   struct netlink_ext_ack *extack)
 147{
 148        struct net_bridge_vlan_group *vg;
 149        struct net_bridge_vlan *masterv;
 150
 151        vg = br_vlan_group(br);
 152        masterv = br_vlan_find(vg, vid);
 153        if (!masterv) {
 154                bool changed;
 155
 156                /* missing global ctx, create it now */
 157                if (br_vlan_add(br, vid, 0, &changed, extack))
 158                        return NULL;
 159                masterv = br_vlan_find(vg, vid);
 160                if (WARN_ON(!masterv))
 161                        return NULL;
 162                refcount_set(&masterv->refcnt, 1);
 163                return masterv;
 164        }
 165        refcount_inc(&masterv->refcnt);
 166
 167        return masterv;
 168}
 169
 170static void br_master_vlan_rcu_free(struct rcu_head *rcu)
 171{
 172        struct net_bridge_vlan *v;
 173
 174        v = container_of(rcu, struct net_bridge_vlan, rcu);
 175        WARN_ON(!br_vlan_is_master(v));
 176        free_percpu(v->stats);
 177        v->stats = NULL;
 178        kfree(v);
 179}
 180
 181static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 182{
 183        struct net_bridge_vlan_group *vg;
 184
 185        if (!br_vlan_is_master(masterv))
 186                return;
 187
 188        vg = br_vlan_group(masterv->br);
 189        if (refcount_dec_and_test(&masterv->refcnt)) {
 190                rhashtable_remove_fast(&vg->vlan_hash,
 191                                       &masterv->vnode, br_vlan_rht_params);
 192                __vlan_del_list(masterv);
 193                call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 194        }
 195}
 196
 197static void nbp_vlan_rcu_free(struct rcu_head *rcu)
 198{
 199        struct net_bridge_vlan *v;
 200
 201        v = container_of(rcu, struct net_bridge_vlan, rcu);
 202        WARN_ON(br_vlan_is_master(v));
 203        /* if we had per-port stats configured then free them here */
 204        if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
 205                free_percpu(v->stats);
 206        v->stats = NULL;
 207        kfree(v);
 208}
 209
 210/* This is the shared VLAN add function which works for both ports and bridge
 211 * devices. There are four possible calls to this function in terms of the
 212 * vlan entry type:
 213 * 1. vlan is being added on a port (no master flags, global entry exists)
 214 * 2. vlan is being added on a bridge (both master and brentry flags)
 215 * 3. vlan is being added on a port, but a global entry didn't exist which
 216 *    is being created right now (master flag set, brentry flag unset), the
 217 *    global entry is used for global per-vlan features, but not for filtering
 218 * 4. same as 3 but with both master and brentry flags set so the entry
 219 *    will be used for filtering in both the port and the bridge
 220 */
 221static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 222                      struct netlink_ext_ack *extack)
 223{
 224        struct net_bridge_vlan *masterv = NULL;
 225        struct net_bridge_port *p = NULL;
 226        struct net_bridge_vlan_group *vg;
 227        struct net_device *dev;
 228        struct net_bridge *br;
 229        int err;
 230
 231        if (br_vlan_is_master(v)) {
 232                br = v->br;
 233                dev = br->dev;
 234                vg = br_vlan_group(br);
 235        } else {
 236                p = v->port;
 237                br = p->br;
 238                dev = p->dev;
 239                vg = nbp_vlan_group(p);
 240        }
 241
 242        if (p) {
 243                /* Add VLAN to the device filter if it is supported.
 244                 * This ensures tagged traffic enters the bridge when
 245                 * promiscuous mode is disabled by br_manage_promisc().
 246                 */
 247                err = __vlan_vid_add(dev, br, v, flags, extack);
 248                if (err)
 249                        goto out;
 250
 251                /* need to work on the master vlan too */
 252                if (flags & BRIDGE_VLAN_INFO_MASTER) {
 253                        bool changed;
 254
 255                        err = br_vlan_add(br, v->vid,
 256                                          flags | BRIDGE_VLAN_INFO_BRENTRY,
 257                                          &changed, extack);
 258                        if (err)
 259                                goto out_filt;
 260                }
 261
 262                masterv = br_vlan_get_master(br, v->vid, extack);
 263                if (!masterv)
 264                        goto out_filt;
 265                v->brvlan = masterv;
 266                if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
 267                        v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 268                        if (!v->stats) {
 269                                err = -ENOMEM;
 270                                goto out_filt;
 271                        }
 272                        v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
 273                } else {
 274                        v->stats = masterv->stats;
 275                }
 276        } else {
 277                err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 278                if (err && err != -EOPNOTSUPP)
 279                        goto out;
 280        }
 281
 282        /* Add the dev mac and count the vlan only if it's usable */
 283        if (br_vlan_should_use(v)) {
 284                err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 285                if (err) {
                        br_err(br, "failed to insert local address into bridge forwarding table\n");
 287                        goto out_filt;
 288                }
 289                vg->num_vlans++;
 290        }
 291
 292        err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 293                                            br_vlan_rht_params);
 294        if (err)
 295                goto out_fdb_insert;
 296
 297        __vlan_add_list(v);
 298        __vlan_add_flags(v, flags);
 299
 300        if (p)
 301                nbp_vlan_set_vlan_dev_state(p, v->vid);
 302out:
 303        return err;
 304
 305out_fdb_insert:
 306        if (br_vlan_should_use(v)) {
 307                br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 308                vg->num_vlans--;
 309        }
 310
 311out_filt:
 312        if (p) {
 313                __vlan_vid_del(dev, br, v);
 314                if (masterv) {
 315                        if (v->stats && masterv->stats != v->stats)
 316                                free_percpu(v->stats);
 317                        v->stats = NULL;
 318
 319                        br_vlan_put_master(masterv);
 320                        v->brvlan = NULL;
 321                }
 322        } else {
 323                br_switchdev_port_vlan_del(dev, v->vid);
 324        }
 325
 326        goto out;
 327}
 328
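/* Counterpart of __vlan_add(): remove @v from its vlan group.  For a port
 * vlan this also drops the reference held on the bridge's master vlan;
 * the master entry itself is only freed once its refcount drops to zero
 * in br_vlan_put_master().
 */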
 329static int __vlan_del(struct net_bridge_vlan *v)
 330{
 331        struct net_bridge_vlan *masterv = v;
 332        struct net_bridge_vlan_group *vg;
 333        struct net_bridge_port *p = NULL;
 334        int err = 0;
 335
 336        if (br_vlan_is_master(v)) {
 337                vg = br_vlan_group(v->br);
 338        } else {
 339                p = v->port;
 340                vg = nbp_vlan_group(v->port);
 341                masterv = v->brvlan;
 342        }
 343
 344        __vlan_delete_pvid(vg, v->vid);
 345        if (p) {
 346                err = __vlan_vid_del(p->dev, p->br, v);
 347                if (err)
 348                        goto out;
 349        } else {
 350                err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
 351                if (err && err != -EOPNOTSUPP)
 352                        goto out;
 353                err = 0;
 354        }
 355
 356        if (br_vlan_should_use(v)) {
 357                v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 358                vg->num_vlans--;
 359        }
 360
 361        if (masterv != v) {
 362                vlan_tunnel_info_del(vg, v);
 363                rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 364                                       br_vlan_rht_params);
 365                __vlan_del_list(v);
 366                nbp_vlan_set_vlan_dev_state(p, v->vid);
 367                call_rcu(&v->rcu, nbp_vlan_rcu_free);
 368        }
 369
 370        br_vlan_put_master(masterv);
 371out:
 372        return err;
 373}
 374
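/* Free a vlan group.  Callers must ensure no RCU readers can still see
 * @vg: both br_vlan_flush() and nbp_vlan_flush() clear the vlgrp pointer
 * and synchronize_rcu() before calling this.
 */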
 375static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 376{
 377        WARN_ON(!list_empty(&vg->vlan_list));
 378        rhashtable_destroy(&vg->vlan_hash);
 379        vlan_tunnel_deinit(vg);
 380        kfree(vg);
 381}
 382
 383static void __vlan_flush(struct net_bridge_vlan_group *vg)
 384{
 385        struct net_bridge_vlan *vlan, *tmp;
 386
 387        __vlan_delete_pvid(vg, vg->pvid);
 388        list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
 389                __vlan_del(vlan);
 390}
 391
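/* Egress vlan handling: update per-vlan TX stats, strip the tag for vlans
 * with the untagged flag set and, on BR_VLAN_TUNNEL ports, set up the
 * egress tunnel info.  Returns the (possibly modified) skb, or NULL if
 * the skb was consumed.
 */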
 392struct sk_buff *br_handle_vlan(struct net_bridge *br,
 393                               const struct net_bridge_port *p,
 394                               struct net_bridge_vlan_group *vg,
 395                               struct sk_buff *skb)
 396{
 397        struct br_vlan_stats *stats;
 398        struct net_bridge_vlan *v;
 399        u16 vid;
 400
 401        /* If this packet was not filtered at input, let it pass */
 402        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 403                goto out;
 404
 405        /* At this point, we know that the frame was filtered and contains
 406         * a valid vlan id.  If the vlan id has untagged flag set,
 407         * send untagged; otherwise, send tagged.
 408         */
 409        br_vlan_get_tag(skb, &vid);
 410        v = br_vlan_find(vg, vid);
        /* A vlan entry must be configured at this point.  The only
         * exception is when the bridge is in promisc mode and the packet
         * is destined for the bridge device; in that case pass the packet
         * as is.
         */
 416        if (!v || !br_vlan_should_use(v)) {
 417                if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 418                        goto out;
 419                } else {
 420                        kfree_skb(skb);
 421                        return NULL;
 422                }
 423        }
 424        if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 425                stats = this_cpu_ptr(v->stats);
 426                u64_stats_update_begin(&stats->syncp);
 427                stats->tx_bytes += skb->len;
 428                stats->tx_packets++;
 429                u64_stats_update_end(&stats->syncp);
 430        }
 431
 432        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 433                __vlan_hwaccel_clear_tag(skb);
 434
 435        if (p && (p->flags & BR_VLAN_TUNNEL) &&
 436            br_handle_egress_vlan_tunnel(skb, v)) {
 437                kfree_skb(skb);
 438                return NULL;
 439        }
 440out:
 441        return skb;
 442}
 443
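/* Core of the ingress filter: normalize the vlan tag (pull a
 * non-accelerated tag into the skb metadata, re-tag on protocol mismatch,
 * map untagged and priority-tagged frames to the pvid), then look up the
 * vlan entry and update RX stats.  Consumes the skb and returns false if
 * the frame must be dropped.
 */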
 444/* Called under RCU */
 445static bool __allowed_ingress(const struct net_bridge *br,
 446                              struct net_bridge_vlan_group *vg,
 447                              struct sk_buff *skb, u16 *vid)
 448{
 449        struct br_vlan_stats *stats;
 450        struct net_bridge_vlan *v;
 451        bool tagged;
 452
 453        BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
        /* If vlan tx offload is disabled on the bridge device and the frame
         * was sent from a vlan device on top of the bridge device, it does
         * not have an HW-accelerated vlan tag.
         */
 458        if (unlikely(!skb_vlan_tag_present(skb) &&
 459                     skb->protocol == br->vlan_proto)) {
 460                skb = skb_vlan_untag(skb);
 461                if (unlikely(!skb))
 462                        return false;
 463        }
 464
 465        if (!br_vlan_get_tag(skb, vid)) {
 466                /* Tagged frame */
 467                if (skb->vlan_proto != br->vlan_proto) {
 468                        /* Protocol-mismatch, empty out vlan_tci for new tag */
 469                        skb_push(skb, ETH_HLEN);
 470                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 471                                                        skb_vlan_tag_get(skb));
 472                        if (unlikely(!skb))
 473                                return false;
 474
 475                        skb_pull(skb, ETH_HLEN);
 476                        skb_reset_mac_len(skb);
 477                        *vid = 0;
 478                        tagged = false;
 479                } else {
 480                        tagged = true;
 481                }
 482        } else {
 483                /* Untagged frame */
 484                tagged = false;
 485        }
 486
 487        if (!*vid) {
 488                u16 pvid = br_get_pvid(vg);
 489
 490                /* Frame had a tag with VID 0 or did not have a tag.
 491                 * See if pvid is set on this port.  That tells us which
 492                 * vlan untagged or priority-tagged traffic belongs to.
 493                 */
 494                if (!pvid)
 495                        goto drop;
 496
 497                /* PVID is set on this port.  Any untagged or priority-tagged
 498                 * ingress frame is considered to belong to this vlan.
 499                 */
 500                *vid = pvid;
 501                if (likely(!tagged))
 502                        /* Untagged Frame. */
 503                        __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 504                else
 505                        /* Priority-tagged Frame.
 506                         * At this point, we know that skb->vlan_tci VID
 507                         * field was 0.
 508                         * We update only VID field and preserve PCP field.
 509                         */
 510                        skb->vlan_tci |= pvid;
 511
 512                /* if stats are disabled we can avoid the lookup */
 513                if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
 514                        return true;
 515        }
 516        v = br_vlan_find(vg, *vid);
 517        if (!v || !br_vlan_should_use(v))
 518                goto drop;
 519
 520        if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 521                stats = this_cpu_ptr(v->stats);
 522                u64_stats_update_begin(&stats->syncp);
 523                stats->rx_bytes += skb->len;
 524                stats->rx_packets++;
 525                u64_stats_update_end(&stats->syncp);
 526        }
 527
 528        return true;
 529
 530drop:
 531        kfree_skb(skb);
 532        return false;
 533}
 534
 535bool br_allowed_ingress(const struct net_bridge *br,
 536                        struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 537                        u16 *vid)
 538{
 539        /* If VLAN filtering is disabled on the bridge, all packets are
 540         * permitted.
 541         */
 542        if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
 543                BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 544                return true;
 545        }
 546
 547        return __allowed_ingress(br, vg, skb, vid);
 548}
 549
 550/* Called under RCU. */
 551bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 552                       const struct sk_buff *skb)
 553{
 554        const struct net_bridge_vlan *v;
 555        u16 vid;
 556
 557        /* If this packet was not filtered at input, let it pass */
 558        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 559                return true;
 560
 561        br_vlan_get_tag(skb, &vid);
 562        v = br_vlan_find(vg, vid);
 563        if (v && br_vlan_should_use(v))
 564                return true;
 565
 566        return false;
 567}
 568
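/* Decide whether the source address of @skb may be learned on @p when
 * vlan filtering is enabled: the frame must map to the port's pvid or to
 * a vlan configured on the port.  *vid is set to the resolved vlan id.
 */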
 569/* Called under RCU */
 570bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 571{
 572        struct net_bridge_vlan_group *vg;
 573        struct net_bridge *br = p->br;
 574
        /* If vlan filtering is disabled on the bridge, let it pass. */
 576        if (!br_opt_get(br, BROPT_VLAN_ENABLED))
 577                return true;
 578
 579        vg = nbp_vlan_group_rcu(p);
 580        if (!vg || !vg->num_vlans)
 581                return false;
 582
 583        if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 584                *vid = 0;
 585
 586        if (!*vid) {
 587                *vid = br_get_pvid(vg);
 588                if (!*vid)
 589                        return false;
 590
 591                return true;
 592        }
 593
 594        if (br_vlan_find(vg, *vid))
 595                return true;
 596
 597        return false;
 598}
 599
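/* Update an already existing master vlan entry: offload the new flags via
 * switchdev, optionally promote the entry to a real bridge entry
 * (BRIDGE_VLAN_INFO_BRENTRY) and apply the flags locally.
 */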
 600static int br_vlan_add_existing(struct net_bridge *br,
 601                                struct net_bridge_vlan_group *vg,
 602                                struct net_bridge_vlan *vlan,
 603                                u16 flags, bool *changed,
 604                                struct netlink_ext_ack *extack)
 605{
 606        int err;
 607
 608        err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
 609        if (err && err != -EOPNOTSUPP)
 610                return err;
 611
 612        if (!br_vlan_is_brentry(vlan)) {
 613                /* Trying to change flags of non-existent bridge vlan */
 614                if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
 615                        err = -EINVAL;
 616                        goto err_flags;
 617                }
 618                /* It was only kept for port vlans, now make it real */
 619                err = br_fdb_insert(br, NULL, br->dev->dev_addr,
 620                                    vlan->vid);
 621                if (err) {
 622                        br_err(br, "failed to insert local address into bridge forwarding table\n");
 623                        goto err_fdb_insert;
 624                }
 625
 626                refcount_inc(&vlan->refcnt);
 627                vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 628                vg->num_vlans++;
 629                *changed = true;
 630        }
 631
 632        if (__vlan_add_flags(vlan, flags))
 633                *changed = true;
 634
 635        return 0;
 636
 637err_fdb_insert:
 638err_flags:
 639        br_switchdev_port_vlan_del(br->dev, vlan->vid);
 640        return err;
 641}
 642
 643/* Must be protected by RTNL.
 644 * Must be called with vid in range from 1 to 4094 inclusive.
 645 * changed must be true only if the vlan was created or updated
 646 */
 647int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
 648                struct netlink_ext_ack *extack)
 649{
 650        struct net_bridge_vlan_group *vg;
 651        struct net_bridge_vlan *vlan;
 652        int ret;
 653
 654        ASSERT_RTNL();
 655
 656        *changed = false;
 657        vg = br_vlan_group(br);
 658        vlan = br_vlan_find(vg, vid);
 659        if (vlan)
 660                return br_vlan_add_existing(br, vg, vlan, flags, changed,
 661                                            extack);
 662
 663        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 664        if (!vlan)
 665                return -ENOMEM;
 666
 667        vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 668        if (!vlan->stats) {
 669                kfree(vlan);
 670                return -ENOMEM;
 671        }
 672        vlan->vid = vid;
 673        vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 674        vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 675        vlan->br = br;
 676        if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 677                refcount_set(&vlan->refcnt, 1);
 678        ret = __vlan_add(vlan, flags, extack);
 679        if (ret) {
 680                free_percpu(vlan->stats);
 681                kfree(vlan);
 682        } else {
 683                *changed = true;
 684        }
 685
 686        return ret;
 687}
 688
 689/* Must be protected by RTNL.
 690 * Must be called with vid in range from 1 to 4094 inclusive.
 691 */
 692int br_vlan_delete(struct net_bridge *br, u16 vid)
 693{
 694        struct net_bridge_vlan_group *vg;
 695        struct net_bridge_vlan *v;
 696
 697        ASSERT_RTNL();
 698
 699        vg = br_vlan_group(br);
 700        v = br_vlan_find(vg, vid);
 701        if (!v || !br_vlan_is_brentry(v))
 702                return -ENOENT;
 703
 704        br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 705        br_fdb_delete_by_port(br, NULL, vid, 0);
 706
 707        vlan_tunnel_info_del(vg, v);
 708
 709        return __vlan_del(v);
 710}
 711
 712void br_vlan_flush(struct net_bridge *br)
 713{
 714        struct net_bridge_vlan_group *vg;
 715
 716        ASSERT_RTNL();
 717
 718        vg = br_vlan_group(br);
 719        __vlan_flush(vg);
 720        RCU_INIT_POINTER(br->vlgrp, NULL);
 721        synchronize_rcu();
 722        __vlan_group_free(vg);
 723}
 724
 725struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 726{
 727        if (!vg)
 728                return NULL;
 729
 730        return br_vlan_lookup(&vg->vlan_hash, vid);
 731}
 732
 733/* Must be protected by RTNL. */
 734static void recalculate_group_addr(struct net_bridge *br)
 735{
 736        if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
 737                return;
 738
 739        spin_lock_bh(&br->lock);
 740        if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 741            br->vlan_proto == htons(ETH_P_8021Q)) {
 742                /* Bridge Group Address */
 743                br->group_addr[5] = 0x00;
 744        } else { /* vlan_enabled && ETH_P_8021AD */
 745                /* Provider Bridge Group Address */
 746                br->group_addr[5] = 0x08;
 747        }
 748        spin_unlock_bh(&br->lock);
 749}
 750
 751/* Must be protected by RTNL. */
 752void br_recalculate_fwd_mask(struct net_bridge *br)
 753{
 754        if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 755            br->vlan_proto == htons(ETH_P_8021Q))
 756                br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 757        else /* vlan_enabled && ETH_P_8021AD */
 758                br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 759                                              ~(1u << br->group_addr[5]);
 760}
 761
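/* Toggle vlan filtering on the bridge.  The new state is offloaded via
 * switchdev first; any error other than -EOPNOTSUPP aborts the change.
 * Port promiscuity, the group address and the group forward mask are then
 * recomputed to match.
 */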
 762int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 763{
 764        struct switchdev_attr attr = {
 765                .orig_dev = br->dev,
 766                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 767                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 768                .u.vlan_filtering = val,
 769        };
 770        int err;
 771
 772        if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
 773                return 0;
 774
 775        err = switchdev_port_attr_set(br->dev, &attr);
 776        if (err && err != -EOPNOTSUPP)
 777                return err;
 778
 779        br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
 780        br_manage_promisc(br);
 781        recalculate_group_addr(br);
 782        br_recalculate_fwd_mask(br);
 783
 784        return 0;
 785}
 786
 787int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 788{
 789        return __br_vlan_filter_toggle(br, val);
 790}
 791
 792bool br_vlan_enabled(const struct net_device *dev)
 793{
 794        struct net_bridge *br = netdev_priv(dev);
 795
 796        return br_opt_get(br, BROPT_VLAN_ENABLED);
 797}
 798EXPORT_SYMBOL_GPL(br_vlan_enabled);
 799
 800int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
 801{
 802        struct net_bridge *br = netdev_priv(dev);
 803
 804        *p_proto = ntohs(br->vlan_proto);
 805
 806        return 0;
 807}
 808EXPORT_SYMBOL_GPL(br_vlan_get_proto);
 809
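/* Change the bridge vlan protocol (802.1Q <-> 802.1ad).  VIDs for the new
 * protocol are added to every port's device filter first; only then is
 * the protocol switched and the old-protocol VIDs removed, so a failure
 * can be unwound without losing the current configuration.
 */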
 810int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 811{
 812        int err = 0;
 813        struct net_bridge_port *p;
 814        struct net_bridge_vlan *vlan;
 815        struct net_bridge_vlan_group *vg;
 816        __be16 oldproto;
 817
 818        if (br->vlan_proto == proto)
 819                return 0;
 820
 821        /* Add VLANs for the new proto to the device filter. */
 822        list_for_each_entry(p, &br->port_list, list) {
 823                vg = nbp_vlan_group(p);
 824                list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 825                        err = vlan_vid_add(p->dev, proto, vlan->vid);
 826                        if (err)
 827                                goto err_filt;
 828                }
 829        }
 830
 831        oldproto = br->vlan_proto;
 832        br->vlan_proto = proto;
 833
 834        recalculate_group_addr(br);
 835        br_recalculate_fwd_mask(br);
 836
 837        /* Delete VLANs for the old proto from the device filter. */
 838        list_for_each_entry(p, &br->port_list, list) {
 839                vg = nbp_vlan_group(p);
 840                list_for_each_entry(vlan, &vg->vlan_list, vlist)
 841                        vlan_vid_del(p->dev, oldproto, vlan->vid);
 842        }
 843
 844        return 0;
 845
 846err_filt:
 847        list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 848                vlan_vid_del(p->dev, proto, vlan->vid);
 849
 850        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 851                vg = nbp_vlan_group(p);
 852                list_for_each_entry(vlan, &vg->vlan_list, vlist)
 853                        vlan_vid_del(p->dev, proto, vlan->vid);
 854        }
 855
 856        return err;
 857}
 858
 859int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 860{
 861        if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 862                return -EPROTONOSUPPORT;
 863
 864        return __br_vlan_set_proto(br, htons(val));
 865}
 866
 867int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
 868{
 869        switch (val) {
 870        case 0:
 871        case 1:
 872                br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
 873                break;
 874        default:
 875                return -EINVAL;
 876        }
 877
 878        return 0;
 879}
 880
 881int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
 882{
 883        struct net_bridge_port *p;
 884
        /* only allow changing the option when no port vlans are configured */
 886        list_for_each_entry(p, &br->port_list, list) {
 887                struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
 888
 889                if (vg->num_vlans)
 890                        return -EBUSY;
 891        }
 892
 893        switch (val) {
 894        case 0:
 895        case 1:
 896                br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
 897                break;
 898        default:
 899                return -EINVAL;
 900        }
 901
 902        return 0;
 903}
 904
 905static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 906{
 907        struct net_bridge_vlan *v;
 908
 909        if (vid != vg->pvid)
 910                return false;
 911
 912        v = br_vlan_lookup(&vg->vlan_hash, vid);
 913        if (v && br_vlan_should_use(v) &&
 914            (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 915                return true;
 916
 917        return false;
 918}
 919
 920static void br_vlan_disable_default_pvid(struct net_bridge *br)
 921{
 922        struct net_bridge_port *p;
 923        u16 pvid = br->default_pvid;
 924
        /* Disable default_pvid on the bridge and on all ports where it is
         * still configured.
         */
 928        if (vlan_default_pvid(br_vlan_group(br), pvid))
 929                br_vlan_delete(br, pvid);
 930
 931        list_for_each_entry(p, &br->port_list, list) {
 932                if (vlan_default_pvid(nbp_vlan_group(p), pvid))
 933                        nbp_vlan_delete(p, pvid);
 934        }
 935
 936        br->default_pvid = 0;
 937}
 938
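/* Change the bridge default_pvid.  The old automatically added pvid
 * entries are replaced on the bridge and on every port, but only where
 * they do not conflict with user configuration.  The @changed bitmap
 * (bit 0 for the bridge, port_no for ports) records what was modified so
 * the operation can be rolled back on error.
 */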
 939int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
 940                               struct netlink_ext_ack *extack)
 941{
 942        const struct net_bridge_vlan *pvent;
 943        struct net_bridge_vlan_group *vg;
 944        struct net_bridge_port *p;
 945        unsigned long *changed;
 946        bool vlchange;
 947        u16 old_pvid;
 948        int err = 0;
 949
 950        if (!pvid) {
 951                br_vlan_disable_default_pvid(br);
 952                return 0;
 953        }
 954
 955        changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
 956        if (!changed)
 957                return -ENOMEM;
 958
 959        old_pvid = br->default_pvid;
 960
 961        /* Update default_pvid config only if we do not conflict with
 962         * user configuration.
 963         */
 964        vg = br_vlan_group(br);
 965        pvent = br_vlan_find(vg, pvid);
 966        if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
 967            (!pvent || !br_vlan_should_use(pvent))) {
 968                err = br_vlan_add(br, pvid,
 969                                  BRIDGE_VLAN_INFO_PVID |
 970                                  BRIDGE_VLAN_INFO_UNTAGGED |
 971                                  BRIDGE_VLAN_INFO_BRENTRY,
 972                                  &vlchange, extack);
 973                if (err)
 974                        goto out;
 975                br_vlan_delete(br, old_pvid);
 976                set_bit(0, changed);
 977        }
 978
 979        list_for_each_entry(p, &br->port_list, list) {
 980                /* Update default_pvid config only if we do not conflict with
 981                 * user configuration.
 982                 */
 983                vg = nbp_vlan_group(p);
 984                if ((old_pvid &&
 985                     !vlan_default_pvid(vg, old_pvid)) ||
 986                    br_vlan_find(vg, pvid))
 987                        continue;
 988
 989                err = nbp_vlan_add(p, pvid,
 990                                   BRIDGE_VLAN_INFO_PVID |
 991                                   BRIDGE_VLAN_INFO_UNTAGGED,
 992                                   &vlchange, extack);
 993                if (err)
 994                        goto err_port;
 995                nbp_vlan_delete(p, old_pvid);
 996                set_bit(p->port_no, changed);
 997        }
 998
 999        br->default_pvid = pvid;
1000
1001out:
1002        bitmap_free(changed);
1003        return err;
1004
1005err_port:
1006        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1007                if (!test_bit(p->port_no, changed))
1008                        continue;
1009
1010                if (old_pvid)
1011                        nbp_vlan_add(p, old_pvid,
1012                                     BRIDGE_VLAN_INFO_PVID |
1013                                     BRIDGE_VLAN_INFO_UNTAGGED,
1014                                     &vlchange, NULL);
1015                nbp_vlan_delete(p, pvid);
1016        }
1017
1018        if (test_bit(0, changed)) {
1019                if (old_pvid)
1020                        br_vlan_add(br, old_pvid,
1021                                    BRIDGE_VLAN_INFO_PVID |
1022                                    BRIDGE_VLAN_INFO_UNTAGGED |
1023                                    BRIDGE_VLAN_INFO_BRENTRY,
1024                                    &vlchange, NULL);
1025                br_vlan_delete(br, pvid);
1026        }
1027        goto out;
1028}
1029
1030int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1031{
1032        u16 pvid = val;
1033        int err = 0;
1034
1035        if (val >= VLAN_VID_MASK)
1036                return -EINVAL;
1037
1038        if (pvid == br->default_pvid)
1039                goto out;
1040
1041        /* Only allow default pvid change when filtering is disabled */
1042        if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1043                pr_info_once("Please disable vlan filtering to change default_pvid\n");
1044                err = -EPERM;
1045                goto out;
1046        }
1047        err = __br_vlan_set_default_pvid(br, pvid, NULL);
1048out:
1049        return err;
1050}
1051
1052int br_vlan_init(struct net_bridge *br)
1053{
1054        struct net_bridge_vlan_group *vg;
1055        int ret = -ENOMEM;
1056
1057        vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1058        if (!vg)
1059                goto out;
1060        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1061        if (ret)
1062                goto err_rhtbl;
1063        ret = vlan_tunnel_init(vg);
1064        if (ret)
1065                goto err_tunnel_init;
1066        INIT_LIST_HEAD(&vg->vlan_list);
1067        br->vlan_proto = htons(ETH_P_8021Q);
1068        br->default_pvid = 1;
1069        rcu_assign_pointer(br->vlgrp, vg);
1070
1071out:
1072        return ret;
1073
1074err_tunnel_init:
1075        rhashtable_destroy(&vg->vlan_hash);
1076err_rhtbl:
1077        kfree(vg);
1078
1079        goto out;
1080}
1081
1082int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1083{
1084        struct switchdev_attr attr = {
1085                .orig_dev = p->br->dev,
1086                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1087                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1088                .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1089        };
1090        struct net_bridge_vlan_group *vg;
1091        int ret = -ENOMEM;
1092
1093        vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1094        if (!vg)
1095                goto out;
1096
1097        ret = switchdev_port_attr_set(p->dev, &attr);
1098        if (ret && ret != -EOPNOTSUPP)
1099                goto err_vlan_enabled;
1100
1101        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1102        if (ret)
1103                goto err_rhtbl;
1104        ret = vlan_tunnel_init(vg);
1105        if (ret)
1106                goto err_tunnel_init;
1107        INIT_LIST_HEAD(&vg->vlan_list);
1108        rcu_assign_pointer(p->vlgrp, vg);
1109        if (p->br->default_pvid) {
1110                bool changed;
1111
1112                ret = nbp_vlan_add(p, p->br->default_pvid,
1113                                   BRIDGE_VLAN_INFO_PVID |
1114                                   BRIDGE_VLAN_INFO_UNTAGGED,
1115                                   &changed, extack);
1116                if (ret)
1117                        goto err_vlan_add;
1118        }
1119out:
1120        return ret;
1121
1122err_vlan_add:
1123        RCU_INIT_POINTER(p->vlgrp, NULL);
1124        synchronize_rcu();
1125        vlan_tunnel_deinit(vg);
1126err_tunnel_init:
1127        rhashtable_destroy(&vg->vlan_hash);
1128err_rhtbl:
1129err_vlan_enabled:
1130        kfree(vg);
1131
1132        goto out;
1133}
1134
1135/* Must be protected by RTNL.
1136 * Must be called with vid in range from 1 to 4094 inclusive.
1137 * changed must be true only if the vlan was created or updated
1138 */
1139int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1140                 bool *changed, struct netlink_ext_ack *extack)
1141{
1142        struct net_bridge_vlan *vlan;
1143        int ret;
1144
1145        ASSERT_RTNL();
1146
1147        *changed = false;
1148        vlan = br_vlan_find(nbp_vlan_group(port), vid);
1149        if (vlan) {
1150                /* Pass the flags to the hardware bridge */
1151                ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1152                if (ret && ret != -EOPNOTSUPP)
1153                        return ret;
1154                *changed = __vlan_add_flags(vlan, flags);
1155
1156                return 0;
1157        }
1158
1159        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1160        if (!vlan)
1161                return -ENOMEM;
1162
1163        vlan->vid = vid;
1164        vlan->port = port;
1165        ret = __vlan_add(vlan, flags, extack);
1166        if (ret)
1167                kfree(vlan);
1168        else
1169                *changed = true;
1170
1171        return ret;
1172}
1173
1174/* Must be protected by RTNL.
1175 * Must be called with vid in range from 1 to 4094 inclusive.
1176 */
1177int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1178{
1179        struct net_bridge_vlan *v;
1180
1181        ASSERT_RTNL();
1182
1183        v = br_vlan_find(nbp_vlan_group(port), vid);
1184        if (!v)
1185                return -ENOENT;
1186        br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1187        br_fdb_delete_by_port(port->br, port, vid, 0);
1188
1189        return __vlan_del(v);
1190}
1191
1192void nbp_vlan_flush(struct net_bridge_port *port)
1193{
1194        struct net_bridge_vlan_group *vg;
1195
1196        ASSERT_RTNL();
1197
1198        vg = nbp_vlan_group(port);
1199        __vlan_flush(vg);
1200        RCU_INIT_POINTER(port->vlgrp, NULL);
1201        synchronize_rcu();
1202        __vlan_group_free(vg);
1203}
1204
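/* Sum the per-cpu counters of @v into @stats.  The u64_stats fetch/retry
 * loop provides a consistent snapshot of each CPU's counters, which
 * matters on 32-bit hosts where the 64-bit counters are not updated
 * atomically.
 */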
1205void br_vlan_get_stats(const struct net_bridge_vlan *v,
1206                       struct br_vlan_stats *stats)
1207{
1208        int i;
1209
1210        memset(stats, 0, sizeof(*stats));
1211        for_each_possible_cpu(i) {
1212                u64 rxpackets, rxbytes, txpackets, txbytes;
1213                struct br_vlan_stats *cpu_stats;
1214                unsigned int start;
1215
1216                cpu_stats = per_cpu_ptr(v->stats, i);
1217                do {
1218                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1219                        rxpackets = cpu_stats->rx_packets;
1220                        rxbytes = cpu_stats->rx_bytes;
1221                        txbytes = cpu_stats->tx_bytes;
1222                        txpackets = cpu_stats->tx_packets;
1223                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1224
1225                stats->rx_packets += rxpackets;
1226                stats->rx_bytes += rxbytes;
1227                stats->tx_bytes += txbytes;
1228                stats->tx_packets += txpackets;
1229        }
1230}
1231
1232static int __br_vlan_get_pvid(const struct net_device *dev,
1233                              struct net_bridge_port *p, u16 *p_pvid)
1234{
1235        struct net_bridge_vlan_group *vg;
1236
1237        if (p)
1238                vg = nbp_vlan_group(p);
1239        else if (netif_is_bridge_master(dev))
1240                vg = br_vlan_group(netdev_priv(dev));
1241        else
1242                return -EINVAL;
1243
1244        *p_pvid = br_get_pvid(vg);
1245        return 0;
1246}
1247
1248int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1249{
1250        ASSERT_RTNL();
1251
1252        return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
1253}
1254EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1255
1256int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1257{
1258        return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
1259}
1260EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1261
1262int br_vlan_get_info(const struct net_device *dev, u16 vid,
1263                     struct bridge_vlan_info *p_vinfo)
1264{
1265        struct net_bridge_vlan_group *vg;
1266        struct net_bridge_vlan *v;
1267        struct net_bridge_port *p;
1268
1269        ASSERT_RTNL();
1270        p = br_port_get_check_rtnl(dev);
1271        if (p)
1272                vg = nbp_vlan_group(p);
1273        else if (netif_is_bridge_master(dev))
1274                vg = br_vlan_group(netdev_priv(dev));
1275        else
1276                return -EINVAL;
1277
1278        v = br_vlan_find(vg, vid);
1279        if (!v)
1280                return -ENOENT;
1281
1282        p_vinfo->vid = vid;
1283        p_vinfo->flags = v->flags;
1284        return 0;
1285}
1286EXPORT_SYMBOL_GPL(br_vlan_get_info);
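
/* Typical use from a switchdev driver (illustrative sketch: "br_dev" and
 * "vid" stand for whatever the caller already holds, error handling is
 * omitted).  Must be called with RTNL held:
 *
 *        struct bridge_vlan_info vinfo;
 *
 *        if (!br_vlan_get_info(br_dev, vid, &vinfo) &&
 *            (vinfo.flags & BRIDGE_VLAN_INFO_PVID))
 *                ... vid is the pvid configured on br_dev ...
 */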
1287
1288static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1289{
1290        return is_vlan_dev(dev) &&
1291                !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1292}
1293
1294static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1295                                       __always_unused void *data)
1296{
1297        return br_vlan_is_bind_vlan_dev(dev);
1298}
1299
1300static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1301{
1302        int found;
1303
1304        rcu_read_lock();
1305        found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1306                                              NULL);
1307        rcu_read_unlock();
1308
1309        return !!found;
1310}
1311
1312struct br_vlan_bind_walk_data {
1313        u16 vid;
1314        struct net_device *result;
1315};
1316
1317static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1318                                          void *data_in)
1319{
1320        struct br_vlan_bind_walk_data *data = data_in;
1321        int found = 0;
1322
1323        if (br_vlan_is_bind_vlan_dev(dev) &&
1324            vlan_dev_priv(dev)->vlan_id == data->vid) {
1325                data->result = dev;
1326                found = 1;
1327        }
1328
1329        return found;
1330}
1331
1332static struct net_device *
1333br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1334{
1335        struct br_vlan_bind_walk_data data = {
1336                .vid = vid,
1337        };
1338
1339        rcu_read_lock();
1340        netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1341                                      &data);
1342        rcu_read_unlock();
1343
1344        return data.result;
1345}
1346
1347static bool br_vlan_is_dev_up(const struct net_device *dev)
1348{
        return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1350}
1351
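/* Propagate carrier state to an upper vlan device that uses bridge
 * binding: the vlan device has carrier only if the bridge itself has
 * carrier and at least one bridge port carrying this vlan is up.
 */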
1352static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1353                                       struct net_device *vlan_dev)
1354{
1355        u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1356        struct net_bridge_vlan_group *vg;
1357        struct net_bridge_port *p;
1358        bool has_carrier = false;
1359
1360        if (!netif_carrier_ok(br->dev)) {
1361                netif_carrier_off(vlan_dev);
1362                return;
1363        }
1364
1365        list_for_each_entry(p, &br->port_list, list) {
1366                vg = nbp_vlan_group(p);
1367                if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1368                        has_carrier = true;
1369                        break;
1370                }
1371        }
1372
1373        if (has_carrier)
1374                netif_carrier_on(vlan_dev);
1375        else
1376                netif_carrier_off(vlan_dev);
1377}
1378
1379static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1380{
1381        struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1382        struct net_bridge_vlan *vlan;
1383        struct net_device *vlan_dev;
1384
1385        list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1386                vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1387                                                           vlan->vid);
1388                if (vlan_dev) {
1389                        if (br_vlan_is_dev_up(p->dev)) {
1390                                if (netif_carrier_ok(p->br->dev))
1391                                        netif_carrier_on(vlan_dev);
1392                        } else {
1393                                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1394                        }
1395                }
1396        }
1397}
1398
1399static void br_vlan_upper_change(struct net_device *dev,
1400                                 struct net_device *upper_dev,
1401                                 bool linking)
1402{
1403        struct net_bridge *br = netdev_priv(dev);
1404
1405        if (!br_vlan_is_bind_vlan_dev(upper_dev))
1406                return;
1407
1408        if (linking) {
1409                br_vlan_set_vlan_dev_state(br, upper_dev);
1410                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1411        } else {
1412                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1413                              br_vlan_has_upper_bind_vlan_dev(dev));
1414        }
1415}
1416
1417struct br_vlan_link_state_walk_data {
1418        struct net_bridge *br;
1419};
1420
1421static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1422                                        void *data_in)
1423{
1424        struct br_vlan_link_state_walk_data *data = data_in;
1425
1426        if (br_vlan_is_bind_vlan_dev(vlan_dev))
1427                br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1428
1429        return 0;
1430}
1431
1432static void br_vlan_link_state_change(struct net_device *dev,
1433                                      struct net_bridge *br)
1434{
1435        struct br_vlan_link_state_walk_data data = {
1436                .br = br
1437        };
1438
1439        rcu_read_lock();
1440        netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1441                                      &data);
1442        rcu_read_unlock();
1443}
1444
1445/* Must be protected by RTNL. */
1446static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1447{
1448        struct net_device *vlan_dev;
1449
1450        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1451                return;
1452
1453        vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1454        if (vlan_dev)
1455                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1456}
1457
1458/* Must be protected by RTNL. */
1459int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1460{
1461        struct netdev_notifier_changeupper_info *info;
1462        struct net_bridge *br = netdev_priv(dev);
1463        bool changed;
1464        int ret = 0;
1465
1466        switch (event) {
1467        case NETDEV_REGISTER:
1468                ret = br_vlan_add(br, br->default_pvid,
1469                                  BRIDGE_VLAN_INFO_PVID |
1470                                  BRIDGE_VLAN_INFO_UNTAGGED |
1471                                  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1472                break;
1473        case NETDEV_UNREGISTER:
1474                br_vlan_delete(br, br->default_pvid);
1475                break;
1476        case NETDEV_CHANGEUPPER:
1477                info = ptr;
1478                br_vlan_upper_change(dev, info->upper_dev, info->linking);
1479                break;
1480
1481        case NETDEV_CHANGE:
1482        case NETDEV_UP:
1483                if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1484                        break;
1485                br_vlan_link_state_change(dev, br);
1486                break;
1487        }
1488
1489        return ret;
1490}
1491
1492/* Must be protected by RTNL. */
1493void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1494{
1495        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1496                return;
1497
1498        switch (event) {
1499        case NETDEV_CHANGE:
1500        case NETDEV_DOWN:
1501        case NETDEV_UP:
1502                br_vlan_set_all_vlan_dev_state(p);
1503                break;
1504        }
1505}
1506