linux/net/bridge/br_vlan.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/kernel.h>
   3#include <linux/netdevice.h>
   4#include <linux/rtnetlink.h>
   5#include <linux/slab.h>
   6#include <net/switchdev.h>
   7
   8#include "br_private.h"
   9#include "br_private_tunnel.h"
  10
  11static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
  12
  13static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  14                              const void *ptr)
  15{
  16        const struct net_bridge_vlan *vle = ptr;
  17        u16 vid = *(u16 *)arg->key;
  18
  19        return vle->vid != vid;
  20}
  21
  22static const struct rhashtable_params br_vlan_rht_params = {
  23        .head_offset = offsetof(struct net_bridge_vlan, vnode),
  24        .key_offset = offsetof(struct net_bridge_vlan, vid),
  25        .key_len = sizeof(u16),
  26        .nelem_hint = 3,
  27        .max_size = VLAN_N_VID,
  28        .obj_cmpfn = br_vlan_cmp,
  29        .automatic_shrinking = true,
  30};
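/* Both the bridge-wide vlan group (br->vlgrp) and every per-port group
 * (p->vlgrp) embed an rhashtable with the parameters above, keyed by the
 * 16-bit VID, alongside a VID-sorted vlan_list used for ordered walks
 * (see __vlan_add_list()).
 */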
  31
  32static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  33{
  34        return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  35}
  36
  37static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
  38                            const struct net_bridge_vlan *v)
  39{
  40        if (vg->pvid == v->vid)
  41                return false;
  42
  43        smp_wmb();
  44        br_vlan_set_pvid_state(vg, v->state);
  45        vg->pvid = v->vid;
  46
  47        return true;
  48}
  49
  50static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  51{
  52        if (vg->pvid != vid)
  53                return false;
  54
  55        smp_wmb();
  56        vg->pvid = 0;
  57
  58        return true;
  59}
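/* The pvid is the vlan that untagged and priority-tagged (VID 0) ingress
 * traffic is classified into; pvid 0 means "no pvid" and such traffic is
 * dropped (see __allowed_ingress()). The smp_wmb() calls above presumably
 * pair with a read barrier on the lockless pvid readers (br_get_pvid()).
 */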
  60
  61/* return true if anything changed, false otherwise */
  62static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  63{
  64        struct net_bridge_vlan_group *vg;
  65        u16 old_flags = v->flags;
  66        bool ret;
  67
  68        if (br_vlan_is_master(v))
  69                vg = br_vlan_group(v->br);
  70        else
  71                vg = nbp_vlan_group(v->port);
  72
  73        if (flags & BRIDGE_VLAN_INFO_PVID)
  74                ret = __vlan_add_pvid(vg, v);
  75        else
  76                ret = __vlan_delete_pvid(vg, v->vid);
  77
  78        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  79                v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  80        else
  81                v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  82
  83        return ret || !!(old_flags ^ v->flags);
  84}
  85
  86static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  87                          struct net_bridge_vlan *v, u16 flags,
  88                          struct netlink_ext_ack *extack)
  89{
  90        int err;
  91
  92        /* Try the switchdev op first. If it is not supported, fall back to
  93         * the 8021q add.
  94         */
  95        err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
  96        if (err == -EOPNOTSUPP)
  97                return vlan_vid_add(dev, br->vlan_proto, v->vid);
  98        v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
  99        return err;
 100}
 101
 102static void __vlan_add_list(struct net_bridge_vlan *v)
 103{
 104        struct net_bridge_vlan_group *vg;
 105        struct list_head *headp, *hpos;
 106        struct net_bridge_vlan *vent;
 107
 108        if (br_vlan_is_master(v))
 109                vg = br_vlan_group(v->br);
 110        else
 111                vg = nbp_vlan_group(v->port);
 112
 113        headp = &vg->vlan_list;
 114        list_for_each_prev(hpos, headp) {
 115                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 116                if (v->vid >= vent->vid)
 117                        break;
 118        }
 119        list_add_rcu(&v->vlist, hpos);
 120}
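/* The list is walked backwards and the new entry inserted after the first
 * one whose vid is <= the new vid, so vlan_list stays sorted by VID in
 * ascending order; e.g. inserting vid 20 into {10, 30} yields {10, 20, 30}.
 */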
 121
 122static void __vlan_del_list(struct net_bridge_vlan *v)
 123{
 124        list_del_rcu(&v->vlist);
 125}
 126
 127static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 128                          const struct net_bridge_vlan *v)
 129{
 130        int err;
 131
 132        /* Try the switchdev op first. If it is not supported, fall back to
 133         * the 8021q del.
 134         */
 135        err = br_switchdev_port_vlan_del(dev, v->vid);
 136        if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
 137                vlan_vid_del(dev, br->vlan_proto, v->vid);
 138        return err == -EOPNOTSUPP ? 0 : err;
 139}
 140
 141/* Returns the master vlan; if it doesn't exist, it gets created. In all
 142 * cases a reference is taken to the master vlan before returning.
 143 */
 144static struct net_bridge_vlan *
 145br_vlan_get_master(struct net_bridge *br, u16 vid,
 146                   struct netlink_ext_ack *extack)
 147{
 148        struct net_bridge_vlan_group *vg;
 149        struct net_bridge_vlan *masterv;
 150
 151        vg = br_vlan_group(br);
 152        masterv = br_vlan_find(vg, vid);
 153        if (!masterv) {
 154                bool changed;
 155
 156                /* missing global ctx, create it now */
 157                if (br_vlan_add(br, vid, 0, &changed, extack))
 158                        return NULL;
 159                masterv = br_vlan_find(vg, vid);
 160                if (WARN_ON(!masterv))
 161                        return NULL;
 162                refcount_set(&masterv->refcnt, 1);
 163                return masterv;
 164        }
 165        refcount_inc(&masterv->refcnt);
 166
 167        return masterv;
 168}
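/* Refcounting sketch: every port vlan pins the bridge ("master") vlan with
 * the same VID through v->brvlan; the master is created on demand here
 * (case 3 in the comment above __vlan_add() below). The final
 * br_vlan_put_master() unlinks it from the hash and list and frees it via
 * RCU.
 */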
 169
 170static void br_master_vlan_rcu_free(struct rcu_head *rcu)
 171{
 172        struct net_bridge_vlan *v;
 173
 174        v = container_of(rcu, struct net_bridge_vlan, rcu);
 175        WARN_ON(!br_vlan_is_master(v));
 176        free_percpu(v->stats);
 177        v->stats = NULL;
 178        kfree(v);
 179}
 180
 181static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 182{
 183        struct net_bridge_vlan_group *vg;
 184
 185        if (!br_vlan_is_master(masterv))
 186                return;
 187
 188        vg = br_vlan_group(masterv->br);
 189        if (refcount_dec_and_test(&masterv->refcnt)) {
 190                rhashtable_remove_fast(&vg->vlan_hash,
 191                                       &masterv->vnode, br_vlan_rht_params);
 192                __vlan_del_list(masterv);
 193                br_multicast_toggle_one_vlan(masterv, false);
 194                br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
 195                call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 196        }
 197}
 198
 199static void nbp_vlan_rcu_free(struct rcu_head *rcu)
 200{
 201        struct net_bridge_vlan *v;
 202
 203        v = container_of(rcu, struct net_bridge_vlan, rcu);
 204        WARN_ON(br_vlan_is_master(v));
 205        /* if we had per-port stats configured then free them here */
 206        if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
 207                free_percpu(v->stats);
 208        v->stats = NULL;
 209        kfree(v);
 210}
 211
 212/* This is the shared VLAN add function which works for both ports and bridge
 213 * devices. There are four possible calls to this function in terms of the
 214 * vlan entry type:
 215 * 1. vlan is being added on a port (no master flags, global entry exists)
 216 * 2. vlan is being added on a bridge (both master and brentry flags)
 217 * 3. vlan is being added on a port, but a global entry didn't exist which
 218 *    is being created right now (master flag set, brentry flag unset), the
 219 *    global entry is used for global per-vlan features, but not for filtering
 220 * 4. same as 3 but with both master and brentry flags set so the entry
 221 *    will be used for filtering in both the port and the bridge
 222 */
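/* Concretely: cases 1, 3 and 4 are reached through nbp_vlan_add() on a
 * port (case 4 when the request also carries BRIDGE_VLAN_INFO_MASTER,
 * which installs a filtering entry on the bridge as well); case 2 is
 * reached through br_vlan_add() on the bridge device itself with
 * BRIDGE_VLAN_INFO_BRENTRY set, e.g. for the default_pvid entry created
 * at bridge registration.
 */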
 223static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 224                      struct netlink_ext_ack *extack)
 225{
 226        struct net_bridge_vlan *masterv = NULL;
 227        struct net_bridge_port *p = NULL;
 228        struct net_bridge_vlan_group *vg;
 229        struct net_device *dev;
 230        struct net_bridge *br;
 231        int err;
 232
 233        if (br_vlan_is_master(v)) {
 234                br = v->br;
 235                dev = br->dev;
 236                vg = br_vlan_group(br);
 237        } else {
 238                p = v->port;
 239                br = p->br;
 240                dev = p->dev;
 241                vg = nbp_vlan_group(p);
 242        }
 243
 244        if (p) {
 245                /* Add VLAN to the device filter if it is supported.
 246                 * This ensures tagged traffic enters the bridge when
 247                 * promiscuous mode is disabled by br_manage_promisc().
 248                 */
 249                err = __vlan_vid_add(dev, br, v, flags, extack);
 250                if (err)
 251                        goto out;
 252
 253                /* need to work on the master vlan too */
 254                if (flags & BRIDGE_VLAN_INFO_MASTER) {
 255                        bool changed;
 256
 257                        err = br_vlan_add(br, v->vid,
 258                                          flags | BRIDGE_VLAN_INFO_BRENTRY,
 259                                          &changed, extack);
 260                        if (err)
 261                                goto out_filt;
 262
 263                        if (changed)
 264                                br_vlan_notify(br, NULL, v->vid, 0,
 265                                               RTM_NEWVLAN);
 266                }
 267
 268                masterv = br_vlan_get_master(br, v->vid, extack);
 269                if (!masterv) {
 270                        err = -ENOMEM;
 271                        goto out_filt;
 272                }
 273                v->brvlan = masterv;
 274                if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
 275                        v->stats =
 276                             netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 277                        if (!v->stats) {
 278                                err = -ENOMEM;
 279                                goto out_filt;
 280                        }
 281                        v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
 282                } else {
 283                        v->stats = masterv->stats;
 284                }
 285                br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
 286        } else {
 287                err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 288                if (err && err != -EOPNOTSUPP)
 289                        goto out;
 290                br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
 291                v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
 292        }
 293
 294        /* Add the dev mac and count the vlan only if it's usable */
 295        if (br_vlan_should_use(v)) {
 296                err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 297                if (err) {
 298                        br_err(br, "failed to insert local address into bridge forwarding table\n");
 299                        goto out_filt;
 300                }
 301                vg->num_vlans++;
 302        }
 303
 304        /* set the state before publishing */
 305        v->state = BR_STATE_FORWARDING;
 306
 307        err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 308                                            br_vlan_rht_params);
 309        if (err)
 310                goto out_fdb_insert;
 311
 312        __vlan_add_list(v);
 313        __vlan_add_flags(v, flags);
 314        br_multicast_toggle_one_vlan(v, true);
 315
 316        if (p)
 317                nbp_vlan_set_vlan_dev_state(p, v->vid);
 318out:
 319        return err;
 320
 321out_fdb_insert:
 322        if (br_vlan_should_use(v)) {
 323                br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 324                vg->num_vlans--;
 325        }
 326
 327out_filt:
 328        if (p) {
 329                __vlan_vid_del(dev, br, v);
 330                if (masterv) {
 331                        if (v->stats && masterv->stats != v->stats)
 332                                free_percpu(v->stats);
 333                        v->stats = NULL;
 334
 335                        br_vlan_put_master(masterv);
 336                        v->brvlan = NULL;
 337                }
 338        } else {
 339                br_switchdev_port_vlan_del(dev, v->vid);
 340        }
 341
 342        goto out;
 343}
 344
 345static int __vlan_del(struct net_bridge_vlan *v)
 346{
 347        struct net_bridge_vlan *masterv = v;
 348        struct net_bridge_vlan_group *vg;
 349        struct net_bridge_port *p = NULL;
 350        int err = 0;
 351
 352        if (br_vlan_is_master(v)) {
 353                vg = br_vlan_group(v->br);
 354        } else {
 355                p = v->port;
 356                vg = nbp_vlan_group(v->port);
 357                masterv = v->brvlan;
 358        }
 359
 360        __vlan_delete_pvid(vg, v->vid);
 361        if (p) {
 362                err = __vlan_vid_del(p->dev, p->br, v);
 363                if (err)
 364                        goto out;
 365        } else {
 366                err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
 367                if (err && err != -EOPNOTSUPP)
 368                        goto out;
 369                err = 0;
 370        }
 371
 372        if (br_vlan_should_use(v)) {
 373                v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 374                vg->num_vlans--;
 375        }
 376
 377        if (masterv != v) {
 378                vlan_tunnel_info_del(vg, v);
 379                rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 380                                       br_vlan_rht_params);
 381                __vlan_del_list(v);
 382                nbp_vlan_set_vlan_dev_state(p, v->vid);
 383                br_multicast_toggle_one_vlan(v, false);
 384                br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
 385                call_rcu(&v->rcu, nbp_vlan_rcu_free);
 386        }
 387
 388        br_vlan_put_master(masterv);
 389out:
 390        return err;
 391}
 392
 393static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 394{
 395        WARN_ON(!list_empty(&vg->vlan_list));
 396        rhashtable_destroy(&vg->vlan_hash);
 397        vlan_tunnel_deinit(vg);
 398        kfree(vg);
 399}
 400
 401static void __vlan_flush(const struct net_bridge *br,
 402                         const struct net_bridge_port *p,
 403                         struct net_bridge_vlan_group *vg)
 404{
 405        struct net_bridge_vlan *vlan, *tmp;
 406        u16 v_start = 0, v_end = 0;
 407
 408        __vlan_delete_pvid(vg, vg->pvid);
 409        list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
 410                /* take care of disjoint ranges */
 411                if (!v_start) {
 412                        v_start = vlan->vid;
 413                } else if (vlan->vid - v_end != 1) {
 414                        /* found range end, notify and start next one */
 415                        br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
 416                        v_start = vlan->vid;
 417                }
 418                v_end = vlan->vid;
 419
 420                __vlan_del(vlan);
 421        }
 422
 423        /* notify about the last/whole vlan range */
 424        if (v_start)
 425                br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
 426}
 427
 428struct sk_buff *br_handle_vlan(struct net_bridge *br,
 429                               const struct net_bridge_port *p,
 430                               struct net_bridge_vlan_group *vg,
 431                               struct sk_buff *skb)
 432{
 433        struct pcpu_sw_netstats *stats;
 434        struct net_bridge_vlan *v;
 435        u16 vid;
 436
 437        /* If this packet was not filtered at input, let it pass */
 438        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 439                goto out;
 440
 441        /* At this point, we know that the frame was filtered and contains
 442         * a valid vlan id.  If the vlan id has untagged flag set,
 443         * send untagged; otherwise, send tagged.
 444         */
 445        br_vlan_get_tag(skb, &vid);
 446        v = br_vlan_find(vg, vid);
 447        /* The vlan entry must be configured at this point.  The only
 448         * exception is when the bridge is in promiscuous mode and the
 449         * packet is destined for the bridge device.  In that case,
 450         * pass the packet as is.
 451         */
 452        if (!v || !br_vlan_should_use(v)) {
 453                if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 454                        goto out;
 455                } else {
 456                        kfree_skb(skb);
 457                        return NULL;
 458                }
 459        }
 460        if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 461                stats = this_cpu_ptr(v->stats);
 462                u64_stats_update_begin(&stats->syncp);
 463                stats->tx_bytes += skb->len;
 464                stats->tx_packets++;
 465                u64_stats_update_end(&stats->syncp);
 466        }
 467
 468        /* If the skb will be sent using forwarding offload, the assumption is
 469         * that the switchdev will inject the packet into hardware together
 470         * with the bridge VLAN, so that it can be forwarded according to that
 471         * VLAN. The switchdev should deal with popping the VLAN header in
 472         * hardware on each egress port as appropriate. So only strip the VLAN
 473         * header if forwarding offload is not being used.
 474         */
 475        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
 476            !br_switchdev_frame_uses_tx_fwd_offload(skb))
 477                __vlan_hwaccel_clear_tag(skb);
 478
 479        if (p && (p->flags & BR_VLAN_TUNNEL) &&
 480            br_handle_egress_vlan_tunnel(skb, v)) {
 481                kfree_skb(skb);
 482                return NULL;
 483        }
 484out:
 485        return skb;
 486}
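/* Egress summary: frames classified into a vlan whose entry carries
 * BRIDGE_VLAN_INFO_UNTAGGED leave with the tag stripped (unless tx
 * forwarding offload defers untagging to the hardware, as explained
 * above); all other frames keep the tag they received at ingress.
 */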
 487
 488/* Called under RCU */
 489static bool __allowed_ingress(const struct net_bridge *br,
 490                              struct net_bridge_vlan_group *vg,
 491                              struct sk_buff *skb, u16 *vid,
 492                              u8 *state,
 493                              struct net_bridge_vlan **vlan)
 494{
 495        struct pcpu_sw_netstats *stats;
 496        struct net_bridge_vlan *v;
 497        bool tagged;
 498
 499        BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
 500        /* If vlan tx offload is disabled on the bridge device and the frame
 501         * was sent from a vlan device on top of the bridge device, it does
 502         * not have a HW accelerated vlan tag.
 503         */
 504        if (unlikely(!skb_vlan_tag_present(skb) &&
 505                     skb->protocol == br->vlan_proto)) {
 506                skb = skb_vlan_untag(skb);
 507                if (unlikely(!skb))
 508                        return false;
 509        }
 510
 511        if (!br_vlan_get_tag(skb, vid)) {
 512                /* Tagged frame */
 513                if (skb->vlan_proto != br->vlan_proto) {
 514                        /* Protocol-mismatch, empty out vlan_tci for new tag */
 515                        skb_push(skb, ETH_HLEN);
 516                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 517                                                        skb_vlan_tag_get(skb));
 518                        if (unlikely(!skb))
 519                                return false;
 520
 521                        skb_pull(skb, ETH_HLEN);
 522                        skb_reset_mac_len(skb);
 523                        *vid = 0;
 524                        tagged = false;
 525                } else {
 526                        tagged = true;
 527                }
 528        } else {
 529                /* Untagged frame */
 530                tagged = false;
 531        }
 532
 533        if (!*vid) {
 534                u16 pvid = br_get_pvid(vg);
 535
 536                /* Frame had a tag with VID 0 or did not have a tag.
 537                 * See if pvid is set on this port.  That tells us which
 538                 * vlan untagged or priority-tagged traffic belongs to.
 539                 */
 540                if (!pvid)
 541                        goto drop;
 542
 543                /* PVID is set on this port.  Any untagged or priority-tagged
 544                 * ingress frame is considered to belong to this vlan.
 545                 */
 546                *vid = pvid;
 547                if (likely(!tagged))
 548                        /* Untagged Frame. */
 549                        __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 550                else
 551                        /* Priority-tagged Frame.
 552                         * At this point, we know that skb->vlan_tci VID
 553                         * field was 0.
 554                         * We update only VID field and preserve PCP field.
 555                         */
 556                        skb->vlan_tci |= pvid;
 557
 558                /* if snooping and stats are disabled we can avoid the lookup */
 559                if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
 560                    !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 561                        if (*state == BR_STATE_FORWARDING) {
 562                                *state = br_vlan_get_pvid_state(vg);
 563                                return br_vlan_state_allowed(*state, true);
 564                        } else {
 565                                return true;
 566                        }
 567                }
 568        }
 569        v = br_vlan_find(vg, *vid);
 570        if (!v || !br_vlan_should_use(v))
 571                goto drop;
 572
 573        if (*state == BR_STATE_FORWARDING) {
 574                *state = br_vlan_get_state(v);
 575                if (!br_vlan_state_allowed(*state, true))
 576                        goto drop;
 577        }
 578
 579        if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 580                stats = this_cpu_ptr(v->stats);
 581                u64_stats_update_begin(&stats->syncp);
 582                stats->rx_bytes += skb->len;
 583                stats->rx_packets++;
 584                u64_stats_update_end(&stats->syncp);
 585        }
 586
 587        *vlan = v;
 588
 589        return true;
 590
 591drop:
 592        kfree_skb(skb);
 593        return false;
 594}
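/* Ingress classification examples, assuming 802.1Q and pvid 10 on the
 * port: an untagged frame gets a hw-accel tag with VID 10; a
 * priority-tagged frame (VID 0) keeps its PCP bits and has its VID set to
 * 10; a frame tagged VID 20 is accepted only if a usable vlan 20 entry
 * exists and its per-vlan state allows ingress, otherwise it is dropped.
 */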
 595
 596bool br_allowed_ingress(const struct net_bridge *br,
 597                        struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 598                        u16 *vid, u8 *state,
 599                        struct net_bridge_vlan **vlan)
 600{
 601        /* If VLAN filtering is disabled on the bridge, all packets are
 602         * permitted.
 603         */
 604        *vlan = NULL;
 605        if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
 606                BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 607                return true;
 608        }
 609
 610        return __allowed_ingress(br, vg, skb, vid, state, vlan);
 611}
 612
 613/* Called under RCU. */
 614bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 615                       const struct sk_buff *skb)
 616{
 617        const struct net_bridge_vlan *v;
 618        u16 vid;
 619
 620        /* If this packet was not filtered at input, let it pass */
 621        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 622                return true;
 623
 624        br_vlan_get_tag(skb, &vid);
 625        v = br_vlan_find(vg, vid);
 626        if (v && br_vlan_should_use(v) &&
 627            br_vlan_state_allowed(br_vlan_get_state(v), false))
 628                return true;
 629
 630        return false;
 631}
 632
 633/* Called under RCU */
 634bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 635{
 636        struct net_bridge_vlan_group *vg;
 637        struct net_bridge *br = p->br;
 638        struct net_bridge_vlan *v;
 639
 640        /* If filtering was disabled at input, let it pass. */
 641        if (!br_opt_get(br, BROPT_VLAN_ENABLED))
 642                return true;
 643
 644        vg = nbp_vlan_group_rcu(p);
 645        if (!vg || !vg->num_vlans)
 646                return false;
 647
 648        if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 649                *vid = 0;
 650
 651        if (!*vid) {
 652                *vid = br_get_pvid(vg);
 653                if (!*vid ||
 654                    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
 655                        return false;
 656
 657                return true;
 658        }
 659
 660        v = br_vlan_find(vg, *vid);
 661        if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
 662                return true;
 663
 664        return false;
 665}
 666
 667static int br_vlan_add_existing(struct net_bridge *br,
 668                                struct net_bridge_vlan_group *vg,
 669                                struct net_bridge_vlan *vlan,
 670                                u16 flags, bool *changed,
 671                                struct netlink_ext_ack *extack)
 672{
 673        int err;
 674
 675        err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
 676        if (err && err != -EOPNOTSUPP)
 677                return err;
 678
 679        if (!br_vlan_is_brentry(vlan)) {
 680                /* Trying to change flags of non-existent bridge vlan */
 681                if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
 682                        err = -EINVAL;
 683                        goto err_flags;
 684                }
 685                /* It was only kept for port vlans; now make it real */
 686                err = br_fdb_insert(br, NULL, br->dev->dev_addr,
 687                                    vlan->vid);
 688                if (err) {
 689                        br_err(br, "failed to insert local address into bridge forwarding table\n");
 690                        goto err_fdb_insert;
 691                }
 692
 693                refcount_inc(&vlan->refcnt);
 694                vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 695                vg->num_vlans++;
 696                *changed = true;
 697                br_multicast_toggle_one_vlan(vlan, true);
 698        }
 699
 700        if (__vlan_add_flags(vlan, flags))
 701                *changed = true;
 702
 703        return 0;
 704
 705err_fdb_insert:
 706err_flags:
 707        br_switchdev_port_vlan_del(br->dev, vlan->vid);
 708        return err;
 709}
 710
 711/* Must be protected by RTNL.
 712 * Must be called with vid in range from 1 to 4094 inclusive.
 713 * changed must be true only if the vlan was created or updated
 714 */
 715int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
 716                struct netlink_ext_ack *extack)
 717{
 718        struct net_bridge_vlan_group *vg;
 719        struct net_bridge_vlan *vlan;
 720        int ret;
 721
 722        ASSERT_RTNL();
 723
 724        *changed = false;
 725        vg = br_vlan_group(br);
 726        vlan = br_vlan_find(vg, vid);
 727        if (vlan)
 728                return br_vlan_add_existing(br, vg, vlan, flags, changed,
 729                                            extack);
 730
 731        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 732        if (!vlan)
 733                return -ENOMEM;
 734
 735        vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 736        if (!vlan->stats) {
 737                kfree(vlan);
 738                return -ENOMEM;
 739        }
 740        vlan->vid = vid;
 741        vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 742        vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 743        vlan->br = br;
 744        if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 745                refcount_set(&vlan->refcnt, 1);
 746        ret = __vlan_add(vlan, flags, extack);
 747        if (ret) {
 748                free_percpu(vlan->stats);
 749                kfree(vlan);
 750        } else {
 751                *changed = true;
 752        }
 753
 754        return ret;
 755}
 756
 757/* Must be protected by RTNL.
 758 * Must be called with vid in range from 1 to 4094 inclusive.
 759 */
 760int br_vlan_delete(struct net_bridge *br, u16 vid)
 761{
 762        struct net_bridge_vlan_group *vg;
 763        struct net_bridge_vlan *v;
 764
 765        ASSERT_RTNL();
 766
 767        vg = br_vlan_group(br);
 768        v = br_vlan_find(vg, vid);
 769        if (!v || !br_vlan_is_brentry(v))
 770                return -ENOENT;
 771
 772        br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 773        br_fdb_delete_by_port(br, NULL, vid, 0);
 774
 775        vlan_tunnel_info_del(vg, v);
 776
 777        return __vlan_del(v);
 778}
 779
 780void br_vlan_flush(struct net_bridge *br)
 781{
 782        struct net_bridge_vlan_group *vg;
 783
 784        ASSERT_RTNL();
 785
 786        vg = br_vlan_group(br);
 787        __vlan_flush(br, NULL, vg);
 788        RCU_INIT_POINTER(br->vlgrp, NULL);
 789        synchronize_rcu();
 790        __vlan_group_free(vg);
 791}
 792
 793struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 794{
 795        if (!vg)
 796                return NULL;
 797
 798        return br_vlan_lookup(&vg->vlan_hash, vid);
 799}
 800
 801/* Must be protected by RTNL. */
 802static void recalculate_group_addr(struct net_bridge *br)
 803{
 804        if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
 805                return;
 806
 807        spin_lock_bh(&br->lock);
 808        if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 809            br->vlan_proto == htons(ETH_P_8021Q)) {
 810                /* Bridge Group Address */
 811                br->group_addr[5] = 0x00;
 812        } else { /* vlan_enabled && ETH_P_8021AD */
 813                /* Provider Bridge Group Address */
 814                br->group_addr[5] = 0x08;
 815        }
 816        spin_unlock_bh(&br->lock);
 817}
 818
 819/* Must be protected by RTNL. */
 820void br_recalculate_fwd_mask(struct net_bridge *br)
 821{
 822        if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 823            br->vlan_proto == htons(ETH_P_8021Q))
 824                br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 825        else /* vlan_enabled && ETH_P_8021AD */
 826                br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 827                                              ~(1u << br->group_addr[5]);
 828}
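/* group_addr[5] == 0x08 selects the Provider Bridge Group Address
 * 01-80-C2-00-00-08 when 802.1ad filtering is in use; the same bit is
 * masked out of BR_GROUPFWD_8021AD above, presumably so that frames sent
 * to the bridge's own group address are kept for local processing instead
 * of being forwarded.
 */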
 829
 830int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
 831                          struct netlink_ext_ack *extack)
 832{
 833        struct switchdev_attr attr = {
 834                .orig_dev = br->dev,
 835                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 836                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 837                .u.vlan_filtering = val,
 838        };
 839        int err;
 840
 841        if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
 842                return 0;
 843
 844        br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
 845
 846        err = switchdev_port_attr_set(br->dev, &attr, extack);
 847        if (err && err != -EOPNOTSUPP) {
 848                br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
 849                return err;
 850        }
 851
 852        br_manage_promisc(br);
 853        recalculate_group_addr(br);
 854        br_recalculate_fwd_mask(br);
 855        if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
 856                br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
 857                br_multicast_toggle_vlan_snooping(br, false, NULL);
 858        }
 859
 860        return 0;
 861}
 862
 863bool br_vlan_enabled(const struct net_device *dev)
 864{
 865        struct net_bridge *br = netdev_priv(dev);
 866
 867        return br_opt_get(br, BROPT_VLAN_ENABLED);
 868}
 869EXPORT_SYMBOL_GPL(br_vlan_enabled);
 870
 871int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
 872{
 873        struct net_bridge *br = netdev_priv(dev);
 874
 875        *p_proto = ntohs(br->vlan_proto);
 876
 877        return 0;
 878}
 879EXPORT_SYMBOL_GPL(br_vlan_get_proto);
 880
 881int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
 882                        struct netlink_ext_ack *extack)
 883{
 884        struct switchdev_attr attr = {
 885                .orig_dev = br->dev,
 886                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
 887                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 888                .u.vlan_protocol = ntohs(proto),
 889        };
 890        int err = 0;
 891        struct net_bridge_port *p;
 892        struct net_bridge_vlan *vlan;
 893        struct net_bridge_vlan_group *vg;
 894        __be16 oldproto = br->vlan_proto;
 895
 896        if (br->vlan_proto == proto)
 897                return 0;
 898
 899        err = switchdev_port_attr_set(br->dev, &attr, extack);
 900        if (err && err != -EOPNOTSUPP)
 901                return err;
 902
 903        /* Add VLANs for the new proto to the device filter. */
 904        list_for_each_entry(p, &br->port_list, list) {
 905                vg = nbp_vlan_group(p);
 906                list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 907                        err = vlan_vid_add(p->dev, proto, vlan->vid);
 908                        if (err)
 909                                goto err_filt;
 910                }
 911        }
 912
 913        br->vlan_proto = proto;
 914
 915        recalculate_group_addr(br);
 916        br_recalculate_fwd_mask(br);
 917
 918        /* Delete VLANs for the old proto from the device filter. */
 919        list_for_each_entry(p, &br->port_list, list) {
 920                vg = nbp_vlan_group(p);
 921                list_for_each_entry(vlan, &vg->vlan_list, vlist)
 922                        vlan_vid_del(p->dev, oldproto, vlan->vid);
 923        }
 924
 925        return 0;
 926
 927err_filt:
 928        attr.u.vlan_protocol = ntohs(oldproto);
 929        switchdev_port_attr_set(br->dev, &attr, NULL);
 930
 931        list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 932                vlan_vid_del(p->dev, proto, vlan->vid);
 933
 934        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 935                vg = nbp_vlan_group(p);
 936                list_for_each_entry(vlan, &vg->vlan_list, vlist)
 937                        vlan_vid_del(p->dev, proto, vlan->vid);
 938        }
 939
 940        return err;
 941}
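/* The protocol switch is make-before-break: every configured port vlan is
 * first added to the port's rx filter under the new proto, only then is
 * br->vlan_proto flipped, and the old-proto filter entries are removed
 * last. On failure, the new-proto entries added so far are unwound and
 * the switchdev attribute is restored.
 */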
 942
 943int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
 944                      struct netlink_ext_ack *extack)
 945{
 946        if (!eth_type_vlan(htons(val)))
 947                return -EPROTONOSUPPORT;
 948
 949        return __br_vlan_set_proto(br, htons(val), extack);
 950}
 951
 952int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
 953{
 954        switch (val) {
 955        case 0:
 956        case 1:
 957                br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
 958                break;
 959        default:
 960                return -EINVAL;
 961        }
 962
 963        return 0;
 964}
 965
 966int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
 967{
 968        struct net_bridge_port *p;
 969
 970        /* only allow changing the option if there are no port vlans configured */
 971        list_for_each_entry(p, &br->port_list, list) {
 972                struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
 973
 974                if (vg->num_vlans)
 975                        return -EBUSY;
 976        }
 977
 978        switch (val) {
 979        case 0:
 980        case 1:
 981                br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
 982                break;
 983        default:
 984                return -EINVAL;
 985        }
 986
 987        return 0;
 988}
 989
 990static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 991{
 992        struct net_bridge_vlan *v;
 993
 994        if (vid != vg->pvid)
 995                return false;
 996
 997        v = br_vlan_lookup(&vg->vlan_hash, vid);
 998        if (v && br_vlan_should_use(v) &&
 999            (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1000                return true;
1001
1002        return false;
1003}
1004
1005static void br_vlan_disable_default_pvid(struct net_bridge *br)
1006{
1007        struct net_bridge_port *p;
1008        u16 pvid = br->default_pvid;
1009
1010        /* Disable default_pvid on all ports where it is still
1011         * configured.
1012         */
1013        if (vlan_default_pvid(br_vlan_group(br), pvid)) {
1014                if (!br_vlan_delete(br, pvid))
1015                        br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
1016        }
1017
1018        list_for_each_entry(p, &br->port_list, list) {
1019                if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
1020                    !nbp_vlan_delete(p, pvid))
1021                        br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
1022        }
1023
1024        br->default_pvid = 0;
1025}
1026
1027int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
1028                               struct netlink_ext_ack *extack)
1029{
1030        const struct net_bridge_vlan *pvent;
1031        struct net_bridge_vlan_group *vg;
1032        struct net_bridge_port *p;
1033        unsigned long *changed;
1034        bool vlchange;
1035        u16 old_pvid;
1036        int err = 0;
1037
1038        if (!pvid) {
1039                br_vlan_disable_default_pvid(br);
1040                return 0;
1041        }
1042
1043        changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
1044        if (!changed)
1045                return -ENOMEM;
1046
1047        old_pvid = br->default_pvid;
1048
1049        /* Update default_pvid config only if we do not conflict with
1050         * user configuration.
1051         */
1052        vg = br_vlan_group(br);
1053        pvent = br_vlan_find(vg, pvid);
1054        if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
1055            (!pvent || !br_vlan_should_use(pvent))) {
1056                err = br_vlan_add(br, pvid,
1057                                  BRIDGE_VLAN_INFO_PVID |
1058                                  BRIDGE_VLAN_INFO_UNTAGGED |
1059                                  BRIDGE_VLAN_INFO_BRENTRY,
1060                                  &vlchange, extack);
1061                if (err)
1062                        goto out;
1063
1064                if (br_vlan_delete(br, old_pvid))
1065                        br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
1066                br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
1067                set_bit(0, changed);
1068        }
1069
1070        list_for_each_entry(p, &br->port_list, list) {
1071                /* Update default_pvid config only if we do not conflict with
1072                 * user configuration.
1073                 */
1074                vg = nbp_vlan_group(p);
1075                if ((old_pvid &&
1076                     !vlan_default_pvid(vg, old_pvid)) ||
1077                    br_vlan_find(vg, pvid))
1078                        continue;
1079
1080                err = nbp_vlan_add(p, pvid,
1081                                   BRIDGE_VLAN_INFO_PVID |
1082                                   BRIDGE_VLAN_INFO_UNTAGGED,
1083                                   &vlchange, extack);
1084                if (err)
1085                        goto err_port;
1086                if (nbp_vlan_delete(p, old_pvid))
1087                        br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
1088                br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
1089                set_bit(p->port_no, changed);
1090        }
1091
1092        br->default_pvid = pvid;
1093
1094out:
1095        bitmap_free(changed);
1096        return err;
1097
1098err_port:
1099        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1100                if (!test_bit(p->port_no, changed))
1101                        continue;
1102
1103                if (old_pvid) {
1104                        nbp_vlan_add(p, old_pvid,
1105                                     BRIDGE_VLAN_INFO_PVID |
1106                                     BRIDGE_VLAN_INFO_UNTAGGED,
1107                                     &vlchange, NULL);
1108                        br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
1109                }
1110                nbp_vlan_delete(p, pvid);
1111                br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
1112        }
1113
1114        if (test_bit(0, changed)) {
1115                if (old_pvid) {
1116                        br_vlan_add(br, old_pvid,
1117                                    BRIDGE_VLAN_INFO_PVID |
1118                                    BRIDGE_VLAN_INFO_UNTAGGED |
1119                                    BRIDGE_VLAN_INFO_BRENTRY,
1120                                    &vlchange, NULL);
1121                        br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
1122                }
1123                br_vlan_delete(br, pvid);
1124                br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
1125        }
1126        goto out;
1127}
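/* The "changed" bitmap records which ports (bit 0 standing for the bridge
 * itself) actually had their pvid moved, so the err_port path rolls back
 * exactly those and leaves untouched ports alone.
 */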
1128
1129int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
1130                             struct netlink_ext_ack *extack)
1131{
1132        u16 pvid = val;
1133        int err = 0;
1134
1135        if (val >= VLAN_VID_MASK)
1136                return -EINVAL;
1137
1138        if (pvid == br->default_pvid)
1139                goto out;
1140
1141        /* Only allow default pvid change when filtering is disabled */
1142        if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1143                pr_info_once("Please disable vlan filtering to change default_pvid\n");
1144                err = -EPERM;
1145                goto out;
1146        }
1147        err = __br_vlan_set_default_pvid(br, pvid, extack);
1148out:
1149        return err;
1150}
1151
1152int br_vlan_init(struct net_bridge *br)
1153{
1154        struct net_bridge_vlan_group *vg;
1155        int ret = -ENOMEM;
1156
1157        vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1158        if (!vg)
1159                goto out;
1160        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1161        if (ret)
1162                goto err_rhtbl;
1163        ret = vlan_tunnel_init(vg);
1164        if (ret)
1165                goto err_tunnel_init;
1166        INIT_LIST_HEAD(&vg->vlan_list);
1167        br->vlan_proto = htons(ETH_P_8021Q);
1168        br->default_pvid = 1;
1169        rcu_assign_pointer(br->vlgrp, vg);
1170
1171out:
1172        return ret;
1173
1174err_tunnel_init:
1175        rhashtable_destroy(&vg->vlan_hash);
1176err_rhtbl:
1177        kfree(vg);
1178
1179        goto out;
1180}
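/* br_vlan_init() only sets the defaults (802.1Q, default_pvid 1); the
 * default_pvid vlan entry on the bridge itself is created later, from
 * br_vlan_bridge_event() on NETDEV_REGISTER, and on each port from
 * nbp_vlan_init() below.
 */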
1181
1182int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1183{
1184        struct switchdev_attr attr = {
1185                .orig_dev = p->br->dev,
1186                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1187                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1188                .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1189        };
1190        struct net_bridge_vlan_group *vg;
1191        int ret = -ENOMEM;
1192
1193        vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1194        if (!vg)
1195                goto out;
1196
1197        ret = switchdev_port_attr_set(p->dev, &attr, extack);
1198        if (ret && ret != -EOPNOTSUPP)
1199                goto err_vlan_enabled;
1200
1201        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1202        if (ret)
1203                goto err_rhtbl;
1204        ret = vlan_tunnel_init(vg);
1205        if (ret)
1206                goto err_tunnel_init;
1207        INIT_LIST_HEAD(&vg->vlan_list);
1208        rcu_assign_pointer(p->vlgrp, vg);
1209        if (p->br->default_pvid) {
1210                bool changed;
1211
1212                ret = nbp_vlan_add(p, p->br->default_pvid,
1213                                   BRIDGE_VLAN_INFO_PVID |
1214                                   BRIDGE_VLAN_INFO_UNTAGGED,
1215                                   &changed, extack);
1216                if (ret)
1217                        goto err_vlan_add;
1218                br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
1219        }
1220out:
1221        return ret;
1222
1223err_vlan_add:
1224        RCU_INIT_POINTER(p->vlgrp, NULL);
1225        synchronize_rcu();
1226        vlan_tunnel_deinit(vg);
1227err_tunnel_init:
1228        rhashtable_destroy(&vg->vlan_hash);
1229err_rhtbl:
1230err_vlan_enabled:
1231        kfree(vg);
1232
1233        goto out;
1234}
1235
1236/* Must be protected by RTNL.
1237 * Must be called with vid in range from 1 to 4094 inclusive.
1238 * changed must be true only if the vlan was created or updated
1239 */
1240int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1241                 bool *changed, struct netlink_ext_ack *extack)
1242{
1243        struct net_bridge_vlan *vlan;
1244        int ret;
1245
1246        ASSERT_RTNL();
1247
1248        *changed = false;
1249        vlan = br_vlan_find(nbp_vlan_group(port), vid);
1250        if (vlan) {
1251                /* Pass the flags to the hardware bridge */
1252                ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1253                if (ret && ret != -EOPNOTSUPP)
1254                        return ret;
1255                *changed = __vlan_add_flags(vlan, flags);
1256
1257                return 0;
1258        }
1259
1260        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1261        if (!vlan)
1262                return -ENOMEM;
1263
1264        vlan->vid = vid;
1265        vlan->port = port;
1266        ret = __vlan_add(vlan, flags, extack);
1267        if (ret)
1268                kfree(vlan);
1269        else
1270                *changed = true;
1271
1272        return ret;
1273}
1274
1275/* Must be protected by RTNL.
1276 * Must be called with vid in range from 1 to 4094 inclusive.
1277 */
1278int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1279{
1280        struct net_bridge_vlan *v;
1281
1282        ASSERT_RTNL();
1283
1284        v = br_vlan_find(nbp_vlan_group(port), vid);
1285        if (!v)
1286                return -ENOENT;
1287        br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1288        br_fdb_delete_by_port(port->br, port, vid, 0);
1289
1290        return __vlan_del(v);
1291}
1292
1293void nbp_vlan_flush(struct net_bridge_port *port)
1294{
1295        struct net_bridge_vlan_group *vg;
1296
1297        ASSERT_RTNL();
1298
1299        vg = nbp_vlan_group(port);
1300        __vlan_flush(port->br, port, vg);
1301        RCU_INIT_POINTER(port->vlgrp, NULL);
1302        synchronize_rcu();
1303        __vlan_group_free(vg);
1304}
1305
1306void br_vlan_get_stats(const struct net_bridge_vlan *v,
1307                       struct pcpu_sw_netstats *stats)
1308{
1309        int i;
1310
1311        memset(stats, 0, sizeof(*stats));
1312        for_each_possible_cpu(i) {
1313                u64 rxpackets, rxbytes, txpackets, txbytes;
1314                struct pcpu_sw_netstats *cpu_stats;
1315                unsigned int start;
1316
1317                cpu_stats = per_cpu_ptr(v->stats, i);
1318                do {
1319                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1320                        rxpackets = cpu_stats->rx_packets;
1321                        rxbytes = cpu_stats->rx_bytes;
1322                        txbytes = cpu_stats->tx_bytes;
1323                        txpackets = cpu_stats->tx_packets;
1324                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1325
1326                stats->rx_packets += rxpackets;
1327                stats->rx_bytes += rxbytes;
1328                stats->tx_bytes += txbytes;
1329                stats->tx_packets += txpackets;
1330        }
1331}
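/* The per-vlan counters are per-cpu; the u64_stats fetch/retry loop
 * re-reads a cpu's counters if a writer updated them concurrently, and
 * the totals are then summed over all possible cpus.
 */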
1332
1333int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1334{
1335        struct net_bridge_vlan_group *vg;
1336        struct net_bridge_port *p;
1337
1338        ASSERT_RTNL();
1339        p = br_port_get_check_rtnl(dev);
1340        if (p)
1341                vg = nbp_vlan_group(p);
1342        else if (netif_is_bridge_master(dev))
1343                vg = br_vlan_group(netdev_priv(dev));
1344        else
1345                return -EINVAL;
1346
1347        *p_pvid = br_get_pvid(vg);
1348        return 0;
1349}
1350EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1351
1352int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1353{
1354        struct net_bridge_vlan_group *vg;
1355        struct net_bridge_port *p;
1356
1357        p = br_port_get_check_rcu(dev);
1358        if (p)
1359                vg = nbp_vlan_group_rcu(p);
1360        else if (netif_is_bridge_master(dev))
1361                vg = br_vlan_group_rcu(netdev_priv(dev));
1362        else
1363                return -EINVAL;
1364
1365        *p_pvid = br_get_pvid(vg);
1366        return 0;
1367}
1368EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1369
1370void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
1371                                    struct net_device_path_ctx *ctx,
1372                                    struct net_device_path *path)
1373{
1374        struct net_bridge_vlan_group *vg;
1375        int idx = ctx->num_vlans - 1;
1376        u16 vid;
1377
1378        path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1379
1380        if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1381                return;
1382
1383        vg = br_vlan_group(br);
1384
1385        if (idx >= 0 &&
1386            ctx->vlan[idx].proto == br->vlan_proto) {
1387                vid = ctx->vlan[idx].id;
1388        } else {
1389                path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
1390                vid = br_get_pvid(vg);
1391        }
1392
1393        path->bridge.vlan_id = vid;
1394        path->bridge.vlan_proto = br->vlan_proto;
1395}
1396
1397int br_vlan_fill_forward_path_mode(struct net_bridge *br,
1398                                   struct net_bridge_port *dst,
1399                                   struct net_device_path *path)
1400{
1401        struct net_bridge_vlan_group *vg;
1402        struct net_bridge_vlan *v;
1403
1404        if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1405                return 0;
1406
1407        vg = nbp_vlan_group_rcu(dst);
1408        v = br_vlan_find(vg, path->bridge.vlan_id);
1409        if (!v || !br_vlan_should_use(v))
1410                return -EINVAL;
1411
1412        if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1413                return 0;
1414
1415        if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
1416                path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1417        else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
1418                path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
1419        else
1420                path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
1421
1422        return 0;
1423}
1424
1425int br_vlan_get_info(const struct net_device *dev, u16 vid,
1426                     struct bridge_vlan_info *p_vinfo)
1427{
1428        struct net_bridge_vlan_group *vg;
1429        struct net_bridge_vlan *v;
1430        struct net_bridge_port *p;
1431
1432        ASSERT_RTNL();
1433        p = br_port_get_check_rtnl(dev);
1434        if (p)
1435                vg = nbp_vlan_group(p);
1436        else if (netif_is_bridge_master(dev))
1437                vg = br_vlan_group(netdev_priv(dev));
1438        else
1439                return -EINVAL;
1440
1441        v = br_vlan_find(vg, vid);
1442        if (!v)
1443                return -ENOENT;
1444
1445        p_vinfo->vid = vid;
1446        p_vinfo->flags = v->flags;
1447        if (vid == br_get_pvid(vg))
1448                p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1449        return 0;
1450}
1451EXPORT_SYMBOL_GPL(br_vlan_get_info);
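/* Usage sketch (illustrative only; brport_dev, vid and the handle_*()
 * callees are placeholders): a switchdev driver may query bridge vlan
 * state for one of its ports under RTNL via the helpers exported above:
 *
 *	struct bridge_vlan_info vinfo;
 *	u16 pvid;
 *
 *	if (!br_vlan_get_pvid(brport_dev, &pvid) && pvid)
 *		handle_pvid(pvid);
 *	if (!br_vlan_get_info(brport_dev, vid, &vinfo) &&
 *	    (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
 *		handle_untagged_egress(vid);
 */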
1452
1453int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
1454                         struct bridge_vlan_info *p_vinfo)
1455{
1456        struct net_bridge_vlan_group *vg;
1457        struct net_bridge_vlan *v;
1458        struct net_bridge_port *p;
1459
1460        p = br_port_get_check_rcu(dev);
1461        if (p)
1462                vg = nbp_vlan_group_rcu(p);
1463        else if (netif_is_bridge_master(dev))
1464                vg = br_vlan_group_rcu(netdev_priv(dev));
1465        else
1466                return -EINVAL;
1467
1468        v = br_vlan_find(vg, vid);
1469        if (!v)
1470                return -ENOENT;
1471
1472        p_vinfo->vid = vid;
1473        p_vinfo->flags = v->flags;
1474        if (vid == br_get_pvid(vg))
1475                p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1476        return 0;
1477}
1478EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);
1479
1480static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1481{
1482        return is_vlan_dev(dev) &&
1483                !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1484}
1485
1486static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1487                               __always_unused struct netdev_nested_priv *priv)
1488{
1489        return br_vlan_is_bind_vlan_dev(dev);
1490}
1491
1492static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1493{
1494        int found;
1495
1496        rcu_read_lock();
1497        found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1498                                              NULL);
1499        rcu_read_unlock();
1500
1501        return !!found;
1502}
1503
1504struct br_vlan_bind_walk_data {
1505        u16 vid;
1506        struct net_device *result;
1507};
1508
1509static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1510                                          struct netdev_nested_priv *priv)
1511{
1512        struct br_vlan_bind_walk_data *data = priv->data;
1513        int found = 0;
1514
1515        if (br_vlan_is_bind_vlan_dev(dev) &&
1516            vlan_dev_priv(dev)->vlan_id == data->vid) {
1517                data->result = dev;
1518                found = 1;
1519        }
1520
1521        return found;
1522}
1523
1524static struct net_device *
1525br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1526{
1527        struct br_vlan_bind_walk_data data = {
1528                .vid = vid,
1529        };
1530        struct netdev_nested_priv priv = {
1531                .data = (void *)&data,
1532        };
1533
1534        rcu_read_lock();
1535        netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1536                                      &priv);
1537        rcu_read_unlock();
1538
1539        return data.result;
1540}
1541
1542static bool br_vlan_is_dev_up(const struct net_device *dev)
1543{
1544        return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1545}
1546
1547static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1548                                       struct net_device *vlan_dev)
1549{
1550        u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1551        struct net_bridge_vlan_group *vg;
1552        struct net_bridge_port *p;
1553        bool has_carrier = false;
1554
1555        if (!netif_carrier_ok(br->dev)) {
1556                netif_carrier_off(vlan_dev);
1557                return;
1558        }
1559
1560        list_for_each_entry(p, &br->port_list, list) {
1561                vg = nbp_vlan_group(p);
1562                if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1563                        has_carrier = true;
1564                        break;
1565                }
1566        }
1567
1568        if (has_carrier)
1569                netif_carrier_on(vlan_dev);
1570        else
1571                netif_carrier_off(vlan_dev);
1572}
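/* Carrier propagation for vlan bridge binding: an upper vlan device with
 * VLAN_FLAG_BRIDGE_BINDING gets carrier only while the bridge itself has
 * carrier and at least one bridge port that is a member of the vlan's VID
 * is up.
 */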
1573
1574static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1575{
1576        struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1577        struct net_bridge_vlan *vlan;
1578        struct net_device *vlan_dev;
1579
1580        list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1581                vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1582                                                           vlan->vid);
1583                if (vlan_dev) {
1584                        if (br_vlan_is_dev_up(p->dev)) {
1585                                if (netif_carrier_ok(p->br->dev))
1586                                        netif_carrier_on(vlan_dev);
1587                        } else {
1588                                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1589                        }
1590                }
1591        }
1592}
1593
1594static void br_vlan_upper_change(struct net_device *dev,
1595                                 struct net_device *upper_dev,
1596                                 bool linking)
1597{
1598        struct net_bridge *br = netdev_priv(dev);
1599
1600        if (!br_vlan_is_bind_vlan_dev(upper_dev))
1601                return;
1602
1603        if (linking) {
1604                br_vlan_set_vlan_dev_state(br, upper_dev);
1605                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1606        } else {
1607                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1608                              br_vlan_has_upper_bind_vlan_dev(dev));
1609        }
1610}
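
/* When a bridge-binding vlan device is linked on top of the bridge, its
 * carrier is synced immediately and BROPT_VLAN_BRIDGE_BINDING is turned on so
 * port and bridge events start updating such uppers.  On unlink the option
 * stays set only while at least one other binding upper remains.
 */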
1611
1612struct br_vlan_link_state_walk_data {
1613        struct net_bridge *br;
1614};
1615
1616static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1617                                        struct netdev_nested_priv *priv)
1618{
1619        struct br_vlan_link_state_walk_data *data = priv->data;
1620
1621        if (br_vlan_is_bind_vlan_dev(vlan_dev))
1622                br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1623
1624        return 0;
1625}
1626
1627static void br_vlan_link_state_change(struct net_device *dev,
1628                                      struct net_bridge *br)
1629{
1630        struct br_vlan_link_state_walk_data data = {
1631                .br = br
1632        };
1633        struct netdev_nested_priv priv = {
1634                .data = (void *)&data,
1635        };
1636
1637        rcu_read_lock();
1638        netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1639                                      &priv);
1640        rcu_read_unlock();
1641}
1642
1643/* Must be protected by RTNL. */
1644static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1645{
1646        struct net_device *vlan_dev;
1647
1648        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1649                return;
1650
1651        vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1652        if (vlan_dev)
1653                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1654}
1655
1656/* Must be protected by RTNL. */
1657int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1658{
1659        struct netdev_notifier_changeupper_info *info;
1660        struct net_bridge *br = netdev_priv(dev);
1661        int vlcmd = 0, ret = 0;
1662        bool changed = false;
1663
1664        switch (event) {
1665        case NETDEV_REGISTER:
1666                ret = br_vlan_add(br, br->default_pvid,
1667                                  BRIDGE_VLAN_INFO_PVID |
1668                                  BRIDGE_VLAN_INFO_UNTAGGED |
1669                                  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1670                vlcmd = RTM_NEWVLAN;
1671                break;
1672        case NETDEV_UNREGISTER:
1673                changed = !br_vlan_delete(br, br->default_pvid);
1674                vlcmd = RTM_DELVLAN;
1675                break;
1676        case NETDEV_CHANGEUPPER:
1677                info = ptr;
1678                br_vlan_upper_change(dev, info->upper_dev, info->linking);
1679                break;
1680
1681        case NETDEV_CHANGE:
1682        case NETDEV_UP:
1683                if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1684                        break;
1685                br_vlan_link_state_change(dev, br);
1686                break;
1687        }
1688        if (changed)
1689                br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
1690
1691        return ret;
1692}
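
/* Lifecycle handled above: NETDEV_REGISTER installs the bridge's own
 * default_pvid entry (pvid + untagged + brentry) and announces it with
 * RTM_NEWVLAN, NETDEV_UNREGISTER removes it and announces RTM_DELVLAN, while
 * CHANGEUPPER/CHANGE/UP only feed the vlan bridge-binding logic.
 */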
1693
1694/* Must be protected by RTNL. */
1695void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1696{
1697        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1698                return;
1699
1700        switch (event) {
1701        case NETDEV_CHANGE:
1702        case NETDEV_DOWN:
1703        case NETDEV_UP:
1704                br_vlan_set_all_vlan_dev_state(p);
1705                break;
1706        }
1707}
1708
1709static bool br_vlan_stats_fill(struct sk_buff *skb,
1710                               const struct net_bridge_vlan *v)
1711{
1712        struct pcpu_sw_netstats stats;
1713        struct nlattr *nest;
1714
1715        nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
1716        if (!nest)
1717                return false;
1718
1719        br_vlan_get_stats(v, &stats);
1720        if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
1721                              BRIDGE_VLANDB_STATS_PAD) ||
1722            nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
1723                              stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
1724            nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
1725                              BRIDGE_VLANDB_STATS_PAD) ||
1726            nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
1727                              stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
1728                goto out_err;
1729
1730        nla_nest_end(skb, nest);
1731
1732        return true;
1733
1734out_err:
1735        nla_nest_cancel(skb, nest);
1736        return false;
1737}
1738
1739/* v_opts carries the per-vlan options, which must be identical across the whole dumped range */
1740static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1741                              const struct net_bridge_vlan *v_opts,
1742                              u16 flags,
1743                              bool dump_stats)
1744{
1745        struct bridge_vlan_info info;
1746        struct nlattr *nest;
1747
1748        nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1749        if (!nest)
1750                return false;
1751
1752        memset(&info, 0, sizeof(info));
1753        info.vid = vid;
1754        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1755                info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1756        if (flags & BRIDGE_VLAN_INFO_PVID)
1757                info.flags |= BRIDGE_VLAN_INFO_PVID;
1758
1759        if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1760                goto out_err;
1761
1762        if (vid_range && vid < vid_range &&
1763            !(flags & BRIDGE_VLAN_INFO_PVID) &&
1764            nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1765                goto out_err;
1766
1767        if (v_opts) {
1768                if (!br_vlan_opts_fill(skb, v_opts))
1769                        goto out_err;
1770
1771                if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
1772                        goto out_err;
1773        }
1774
1775        nla_nest_end(skb, nest);
1776
1777        return true;
1778
1779out_err:
1780        nla_nest_cancel(skb, nest);
1781        return false;
1782}
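
/* Attribute layout produced above for a single entry (sketch):
 *
 *   BRIDGE_VLANDB_ENTRY (nested)
 *     BRIDGE_VLANDB_ENTRY_INFO   struct bridge_vlan_info (vid + flags)
 *     BRIDGE_VLANDB_ENTRY_RANGE  u16 range end, only for non-pvid ranges
 *     per-vlan options added by br_vlan_opts_fill()
 *     BRIDGE_VLANDB_ENTRY_STATS  nested counters, only when dump_stats
 */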
1783
1784static size_t rtnl_vlan_nlmsg_size(void)
1785{
1786        return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1787                + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
1788                + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
1789                + nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
1790                + br_vlan_opts_nl_size(); /* bridge vlan options */
1791}
1792
1793void br_vlan_notify(const struct net_bridge *br,
1794                    const struct net_bridge_port *p,
1795                    u16 vid, u16 vid_range,
1796                    int cmd)
1797{
1798        struct net_bridge_vlan_group *vg;
1799        struct net_bridge_vlan *v = NULL;
1800        struct br_vlan_msg *bvm;
1801        struct nlmsghdr *nlh;
1802        struct sk_buff *skb;
1803        int err = -ENOBUFS;
1804        struct net *net;
1805        u16 flags = 0;
1806        int ifindex;
1807
1808        /* right now notifications are done only with rtnl held */
1809        ASSERT_RTNL();
1810
1811        if (p) {
1812                ifindex = p->dev->ifindex;
1813                vg = nbp_vlan_group(p);
1814                net = dev_net(p->dev);
1815        } else {
1816                ifindex = br->dev->ifindex;
1817                vg = br_vlan_group(br);
1818                net = dev_net(br->dev);
1819        }
1820
1821        skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
1822        if (!skb)
1823                goto out_err;
1824
1825        err = -EMSGSIZE;
1826        nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
1827        if (!nlh)
1828                goto out_err;
1829        bvm = nlmsg_data(nlh);
1830        memset(bvm, 0, sizeof(*bvm));
1831        bvm->family = AF_BRIDGE;
1832        bvm->ifindex = ifindex;
1833
1834        switch (cmd) {
1835        case RTM_NEWVLAN:
1836                /* look the vlan up to report its flags and per-vlan options */
1837                v = br_vlan_find(vg, vid);
1838                if (!v || !br_vlan_should_use(v))
1839                        goto out_kfree;
1840
1841                flags = v->flags;
1842                if (br_get_pvid(vg) == v->vid)
1843                        flags |= BRIDGE_VLAN_INFO_PVID;
1844                break;
1845        case RTM_DELVLAN:
1846                break;
1847        default:
1848                goto out_kfree;
1849        }
1850
1851        if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
1852                goto out_err;
1853
1854        nlmsg_end(skb, nlh);
1855        rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
1856        return;
1857
1858out_err:
1859        rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
1860out_kfree:
1861        kfree_skb(skb);
1862}
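
/* Userspace can observe these notifications by joining the RTNLGRP_BRVLAN
 * multicast group on a rtnetlink socket; "bridge monitor vlan" should rely on
 * the same group.  Minimal listener sketch (illustrative only):
 *
 *   int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *   unsigned int grp = RTNLGRP_BRVLAN;
 *
 *   setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
 *   ... recvmsg() then yields RTM_NEWVLAN / RTM_DELVLAN messages
 */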
1863
1864static int br_vlan_replay_one(struct notifier_block *nb,
1865                              struct net_device *dev,
1866                              struct switchdev_obj_port_vlan *vlan,
1867                              const void *ctx, unsigned long action,
1868                              struct netlink_ext_ack *extack)
1869{
1870        struct switchdev_notifier_port_obj_info obj_info = {
1871                .info = {
1872                        .dev = dev,
1873                        .extack = extack,
1874                        .ctx = ctx,
1875                },
1876                .obj = &vlan->obj,
1877        };
1878        int err;
1879
1880        err = nb->notifier_call(nb, action, &obj_info);
1881        return notifier_to_errno(err);
1882}
1883
1884int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
1885                   const void *ctx, bool adding, struct notifier_block *nb,
1886                   struct netlink_ext_ack *extack)
1887{
1888        struct net_bridge_vlan_group *vg;
1889        struct net_bridge_vlan *v;
1890        struct net_bridge_port *p;
1891        struct net_bridge *br;
1892        unsigned long action;
1893        int err = 0;
1894        u16 pvid;
1895
1896        ASSERT_RTNL();
1897
1898        if (!nb)
1899                return 0;
1900
1901        if (!netif_is_bridge_master(br_dev))
1902                return -EINVAL;
1903
1904        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1905                return -EINVAL;
1906
1907        if (netif_is_bridge_master(dev)) {
1908                br = netdev_priv(dev);
1909                vg = br_vlan_group(br);
1910                p = NULL;
1911        } else {
1912                p = br_port_get_rtnl(dev);
1913                if (WARN_ON(!p))
1914                        return -EINVAL;
1915                vg = nbp_vlan_group(p);
1916                br = p->br;
1917        }
1918
1919        if (!vg)
1920                return 0;
1921
1922        if (adding)
1923                action = SWITCHDEV_PORT_OBJ_ADD;
1924        else
1925                action = SWITCHDEV_PORT_OBJ_DEL;
1926
1927        pvid = br_get_pvid(vg);
1928
1929        list_for_each_entry(v, &vg->vlan_list, vlist) {
1930                struct switchdev_obj_port_vlan vlan = {
1931                        .obj.orig_dev = dev,
1932                        .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1933                        .flags = br_vlan_flags(v, pvid),
1934                        .vid = v->vid,
1935                };
1936
1937                if (!br_vlan_should_use(v))
1938                        continue;
1939
1940                err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
1941                if (err)
1942                        return err;
1943        }
1944
1945        return err;
1946}
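
/* br_vlan_replay() lets a switchdev driver that starts (or stops) offloading
 * a port catch up on vlans installed before it was listening: every usable
 * vlan in the group is re-sent to @nb as a SWITCHDEV_OBJ_ID_PORT_VLAN add or
 * del, exactly as if it had just been configured.
 */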
1947
1948/* check if v_curr can enter a range ending in range_end */
1949bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1950                             const struct net_bridge_vlan *range_end)
1951{
1952        return v_curr->vid - range_end->vid == 1 &&
1953               range_end->flags == v_curr->flags &&
1954               br_vlan_opts_eq_range(v_curr, range_end);
1955}
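
/* Worked example: with vids 10, 11 and 12 carrying identical flags and
 * options, 11 can enter the range ending at 10 (11 - 10 == 1) and 12 can then
 * extend it, so the three are reported as a single 10-12 range.  A gap
 * (e.g. vid 14) or any differing flag/option starts a new range.
 */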
1956
1957static int br_vlan_dump_dev(const struct net_device *dev,
1958                            struct sk_buff *skb,
1959                            struct netlink_callback *cb,
1960                            u32 dump_flags)
1961{
1962        struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1963        bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
1964        bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
1965        struct net_bridge_vlan_group *vg;
1966        int idx = 0, s_idx = cb->args[1];
1967        struct nlmsghdr *nlh = NULL;
1968        struct net_bridge_port *p;
1969        struct br_vlan_msg *bvm;
1970        struct net_bridge *br;
1971        int err = 0;
1972        u16 pvid;
1973
1974        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1975                return -EINVAL;
1976
1977        if (netif_is_bridge_master(dev)) {
1978                br = netdev_priv(dev);
1979                vg = br_vlan_group_rcu(br);
1980                p = NULL;
1981        } else {
1982                /* global options are dumped only for bridge devices */
1983                if (dump_global)
1984                        return 0;
1985
1986                p = br_port_get_rcu(dev);
1987                if (WARN_ON(!p))
1988                        return -EINVAL;
1989                vg = nbp_vlan_group_rcu(p);
1990                br = p->br;
1991        }
1992
1993        if (!vg)
1994                return 0;
1995
1996        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1997                        RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
1998        if (!nlh)
1999                return -EMSGSIZE;
2000        bvm = nlmsg_data(nlh);
2001        memset(bvm, 0, sizeof(*bvm));
2002        bvm->family = PF_BRIDGE;
2003        bvm->ifindex = dev->ifindex;
2004        pvid = br_get_pvid(vg);
2005
2006        /* idx must stay at range's beginning until it is filled in */
2007        list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
2008                if (!dump_global && !br_vlan_should_use(v))
2009                        continue;
2010                if (idx < s_idx) {
2011                        idx++;
2012                        continue;
2013                }
2014
2015                if (!range_start) {
2016                        range_start = v;
2017                        range_end = v;
2018                        continue;
2019                }
2020
2021                if (dump_global) {
2022                        if (br_vlan_global_opts_can_enter_range(v, range_end))
2023                                goto update_end;
2024                        if (!br_vlan_global_opts_fill(skb, range_start->vid,
2025                                                      range_end->vid,
2026                                                      range_start)) {
2027                                err = -EMSGSIZE;
2028                                break;
2029                        }
2030                        /* advance number of filled vlans */
2031                        idx += range_end->vid - range_start->vid + 1;
2032
2033                        range_start = v;
2034                } else if (dump_stats || v->vid == pvid ||
2035                           !br_vlan_can_enter_range(v, range_end)) {
2036                        u16 vlan_flags = br_vlan_flags(range_start, pvid);
2037
2038                        if (!br_vlan_fill_vids(skb, range_start->vid,
2039                                               range_end->vid, range_start,
2040                                               vlan_flags, dump_stats)) {
2041                                err = -EMSGSIZE;
2042                                break;
2043                        }
2044                        /* advance number of filled vlans */
2045                        idx += range_end->vid - range_start->vid + 1;
2046
2047                        range_start = v;
2048                }
2049update_end:
2050                range_end = v;
2051        }
2052
2053        /* err will be 0 and range_start will be set in 3 cases here:
2054         * - first vlan (range_start == range_end)
2055         * - last vlan (range_start == range_end, not in range)
2056         * - last vlan range (range_start != range_end, in range)
2057         */
2058        if (!err && range_start) {
2059                if (dump_global &&
2060                    !br_vlan_global_opts_fill(skb, range_start->vid,
2061                                              range_end->vid, range_start))
2062                        err = -EMSGSIZE;
2063                else if (!dump_global &&
2064                         !br_vlan_fill_vids(skb, range_start->vid,
2065                                            range_end->vid, range_start,
2066                                            br_vlan_flags(range_start, pvid),
2067                                            dump_stats))
2068                        err = -EMSGSIZE;
2069        }
2070
2071        cb->args[1] = err ? idx : 0;
2072
2073        nlmsg_end(skb, nlh);
2074
2075        return err;
2076}
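
/* The dump above coalesces consecutive, compatible vlans into ranges and only
 * emits an entry when the current vlan cannot join the open range (always for
 * the pvid and for stats dumps).  idx counts vlans already emitted, so on
 * -EMSGSIZE cb->args[1] lets the next dump call resume in the middle of this
 * device's vlan list.
 */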
2077
2078static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
2079        [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
2080};
2081
2082static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
2083{
2084        struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
2085        int idx = 0, err = 0, s_idx = cb->args[0];
2086        struct net *net = sock_net(skb->sk);
2087        struct br_vlan_msg *bvm;
2088        struct net_device *dev;
2089        u32 dump_flags = 0;
2090
2091        err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
2092                          br_vlan_db_dump_pol, cb->extack);
2093        if (err < 0)
2094                return err;
2095
2096        bvm = nlmsg_data(cb->nlh);
2097        if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
2098                dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
2099
2100        rcu_read_lock();
2101        if (bvm->ifindex) {
2102                dev = dev_get_by_index_rcu(net, bvm->ifindex);
2103                if (!dev) {
2104                        err = -ENODEV;
2105                        goto out_err;
2106                }
2107                err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
2108                if (err && err != -EMSGSIZE)
2109                        goto out_err;
2110        } else {
2111                for_each_netdev_rcu(net, dev) {
2112                        if (idx < s_idx)
2113                                goto skip;
2114
2115                        err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
2116                        if (err == -EMSGSIZE)
2117                                break;
2118skip:
2119                        idx++;
2120                }
2121        }
2122        cb->args[0] = idx;
2123        rcu_read_unlock();
2124
2125        return skb->len;
2126
2127out_err:
2128        rcu_read_unlock();
2129
2130        return err;
2131}
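
/* A non-zero ifindex in the request restricts the dump to one bridge or port;
 * otherwise all netdevices are walked.  cb->args[0] remembers the device
 * position and cb->args[1] the per-device vlan offset, so large dumps can
 * span multiple netlink messages.
 */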
2132
2133static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
2134        [BRIDGE_VLANDB_ENTRY_INFO]      =
2135                NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
2136        [BRIDGE_VLANDB_ENTRY_RANGE]     = { .type = NLA_U16 },
2137        [BRIDGE_VLANDB_ENTRY_STATE]     = { .type = NLA_U8 },
2138        [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
2139        [BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]      = { .type = NLA_U8 },
2140};
2141
2142static int br_vlan_rtm_process_one(struct net_device *dev,
2143                                   const struct nlattr *attr,
2144                                   int cmd, struct netlink_ext_ack *extack)
2145{
2146        struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
2147        struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
2148        bool changed = false, skip_processing = false;
2149        struct net_bridge_vlan_group *vg;
2150        struct net_bridge_port *p = NULL;
2151        int err = 0, cmdmap = 0;
2152        struct net_bridge *br;
2153
2154        if (netif_is_bridge_master(dev)) {
2155                br = netdev_priv(dev);
2156                vg = br_vlan_group(br);
2157        } else {
2158                p = br_port_get_rtnl(dev);
2159                if (WARN_ON(!p))
2160                        return -ENODEV;
2161                br = p->br;
2162                vg = nbp_vlan_group(p);
2163        }
2164
2165        if (WARN_ON(!vg))
2166                return -ENODEV;
2167
2168        err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
2169                               br_vlan_db_policy, extack);
2170        if (err)
2171                return err;
2172
2173        if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
2174                NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
2175                return -EINVAL;
2176        }
2177        memset(&vrange_end, 0, sizeof(vrange_end));
2178
2179        vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
2180        if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
2181                            BRIDGE_VLAN_INFO_RANGE_END)) {
2182                NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
2183                return -EINVAL;
2184        }
2185        if (!br_vlan_valid_id(vinfo->vid, extack))
2186                return -EINVAL;
2187
2188        if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
2189                vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
2190                /* validate user-provided flags without RANGE_BEGIN */
2191                vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
2192                vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
2193
2194                /* vinfo_last is the range start, vinfo the range end */
2195                vinfo_last = vinfo;
2196                vinfo = &vrange_end;
2197
2198                if (!br_vlan_valid_id(vinfo->vid, extack) ||
2199                    !br_vlan_valid_range(vinfo, vinfo_last, extack))
2200                        return -EINVAL;
2201        }
2202
2203        switch (cmd) {
2204        case RTM_NEWVLAN:
2205                cmdmap = RTM_SETLINK;
2206                skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
2207                break;
2208        case RTM_DELVLAN:
2209                cmdmap = RTM_DELLINK;
2210                break;
2211        }
2212
2213        if (!skip_processing) {
2214                struct bridge_vlan_info *tmp_last = vinfo_last;
2215
2216                /* br_process_vlan_info may overwrite vinfo_last */
2217                err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
2218                                           &changed, extack);
2219
2220                /* notify first if anything changed */
2221                if (changed)
2222                        br_ifinfo_notify(cmdmap, br, p);
2223
2224                if (err)
2225                        return err;
2226        }
2227
2228        /* apply per-vlan options to the new or updated entries */
2229        if (cmd == RTM_NEWVLAN) {
2230                struct net_bridge_vlan *range_start, *range_end;
2231
2232                if (vinfo_last) {
2233                        range_start = br_vlan_find(vg, vinfo_last->vid);
2234                        range_end = br_vlan_find(vg, vinfo->vid);
2235                } else {
2236                        range_start = br_vlan_find(vg, vinfo->vid);
2237                        range_end = range_start;
2238                }
2239
2240                err = br_vlan_process_options(br, p, range_start, range_end,
2241                                              tb, extack);
2242        }
2243
2244        return err;
2245}
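
/* Request shape handled above (illustrative): a change to vids X..Y arrives
 * as one BRIDGE_VLANDB_ENTRY whose ENTRY_INFO.vid is X and whose ENTRY_RANGE
 * attribute is Y; RANGE_BEGIN/RANGE_END flags are synthesized so the legacy
 * br_process_vlan_info() range machinery can be reused.  With
 * BRIDGE_VLAN_INFO_ONLY_OPTS set, entry creation/deletion is skipped and only
 * the per-vlan options are applied.
 */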
2246
2247static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
2248                               struct netlink_ext_ack *extack)
2249{
2250        struct net *net = sock_net(skb->sk);
2251        struct br_vlan_msg *bvm;
2252        struct net_device *dev;
2253        struct nlattr *attr;
2254        int err, vlans = 0;
2255        int rem;
2256
2257        /* validate the header and make sure the message has attribute data */
2258        err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
2259                          extack);
2260        if (err < 0)
2261                return err;
2262
2263        bvm = nlmsg_data(nlh);
2264        dev = __dev_get_by_index(net, bvm->ifindex);
2265        if (!dev)
2266                return -ENODEV;
2267
2268        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
2269                NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
2270                return -EINVAL;
2271        }
2272
2273        nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
2274                switch (nla_type(attr)) {
2275                case BRIDGE_VLANDB_ENTRY:
2276                        err = br_vlan_rtm_process_one(dev, attr,
2277                                                      nlh->nlmsg_type,
2278                                                      extack);
2279                        break;
2280                case BRIDGE_VLANDB_GLOBAL_OPTIONS:
2281                        err = br_vlan_rtm_process_global_options(dev, attr,
2282                                                                 nlh->nlmsg_type,
2283                                                                 extack);
2284                        break;
2285                default:
2286                        continue;
2287                }
2288
2289                vlans++;
2290                if (err)
2291                        break;
2292        }
2293        if (!vlans) {
2294                NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
2295                err = -EINVAL;
2296        }
2297
2298        return err;
2299}
2300
2301void br_vlan_rtnl_init(void)
2302{
2303        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
2304                             br_vlan_rtm_dump, 0);
2305        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
2306                             br_vlan_rtm_process, NULL, 0);
2307        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
2308                             br_vlan_rtm_process, NULL, 0);
2309}
2310
2311void br_vlan_rtnl_uninit(void)
2312{
2313        rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
2314        rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
2315        rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
2316}
2317