linux/net/bridge/br_vlan.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/kernel.h>
   3#include <linux/netdevice.h>
   4#include <linux/rtnetlink.h>
   5#include <linux/slab.h>
   6#include <net/switchdev.h>
   7
   8#include "br_private.h"
   9#include "br_private_tunnel.h"
  10
  11static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
  12
  13static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  14                              const void *ptr)
  15{
  16        const struct net_bridge_vlan *vle = ptr;
  17        u16 vid = *(u16 *)arg->key;
  18
  19        return vle->vid != vid;
  20}
  21
  22static const struct rhashtable_params br_vlan_rht_params = {
  23        .head_offset = offsetof(struct net_bridge_vlan, vnode),
  24        .key_offset = offsetof(struct net_bridge_vlan, vid),
  25        .key_len = sizeof(u16),
  26        .nelem_hint = 3,
  27        .max_size = VLAN_N_VID,
  28        .obj_cmpfn = br_vlan_cmp,
  29        .automatic_shrinking = true,
  30};
  31
  32static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  33{
  34        return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  35}
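
/* Usage sketch (illustrative, assuming "vg" is a valid vlan group): with the
 * rhashtable parameters above, lookups key purely on the 16-bit VID, so
 * finding an entry is simply:
 *
 *	struct net_bridge_vlan *v = br_vlan_lookup(&vg->vlan_hash, vid);
 *
 *	if (v)
 *		...	an entry for "vid" exists in this group
 */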
  36
  37static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
  38                            const struct net_bridge_vlan *v)
  39{
  40        if (vg->pvid == v->vid)
  41                return false;
  42
  43        smp_wmb();
  44        br_vlan_set_pvid_state(vg, v->state);
  45        vg->pvid = v->vid;
  46
  47        return true;
  48}
  49
  50static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  51{
  52        if (vg->pvid != vid)
  53                return false;
  54
  55        smp_wmb();
  56        vg->pvid = 0;
  57
  58        return true;
  59}
  60
  61/* return true if anything changed, false otherwise */
  62static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  63{
  64        struct net_bridge_vlan_group *vg;
  65        u16 old_flags = v->flags;
  66        bool ret;
  67
  68        if (br_vlan_is_master(v))
  69                vg = br_vlan_group(v->br);
  70        else
  71                vg = nbp_vlan_group(v->port);
  72
  73        if (flags & BRIDGE_VLAN_INFO_PVID)
  74                ret = __vlan_add_pvid(vg, v);
  75        else
  76                ret = __vlan_delete_pvid(vg, v->vid);
  77
  78        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  79                v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  80        else
  81                v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  82
  83        return ret || !!(old_flags ^ v->flags);
  84}
  85
  86static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  87                          struct net_bridge_vlan *v, u16 flags,
  88                          struct netlink_ext_ack *extack)
  89{
  90        int err;
  91
  92        /* Try switchdev op first. In case it is not supported, fallback to
  93         * 8021q add.
  94         */
  95        err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
  96        if (err == -EOPNOTSUPP)
  97                return vlan_vid_add(dev, br->vlan_proto, v->vid);
  98        v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
  99        return err;
 100}
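
/* Note on the switchdev-first pattern above: when the switchdev add succeeds,
 * the entry is marked BR_VLFLAG_ADDED_BY_SWITCHDEV, and __vlan_vid_del()
 * below checks that flag so the 8021q vlan_vid_del() fallback is only undone
 * when it was actually used.
 */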
 101
 102static void __vlan_add_list(struct net_bridge_vlan *v)
 103{
 104        struct net_bridge_vlan_group *vg;
 105        struct list_head *headp, *hpos;
 106        struct net_bridge_vlan *vent;
 107
 108        if (br_vlan_is_master(v))
 109                vg = br_vlan_group(v->br);
 110        else
 111                vg = nbp_vlan_group(v->port);
 112
 113        headp = &vg->vlan_list;
 114        list_for_each_prev(hpos, headp) {
 115                vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 116                if (v->vid < vent->vid)
 117                        continue;
 118                else
 119                        break;
 120        }
 121        list_add_rcu(&v->vlist, hpos);
 122}
 123
 124static void __vlan_del_list(struct net_bridge_vlan *v)
 125{
 126        list_del_rcu(&v->vlist);
 127}
 128
 129static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 130                          const struct net_bridge_vlan *v)
 131{
 132        int err;
 133
 134        /* Try switchdev op first. In case it is not supported, fallback to
 135         * 8021q del.
 136         */
 137        err = br_switchdev_port_vlan_del(dev, v->vid);
 138        if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
 139                vlan_vid_del(dev, br->vlan_proto, v->vid);
 140        return err == -EOPNOTSUPP ? 0 : err;
 141}
 142
 143/* Returns a master vlan, if it didn't exist it gets created. In all cases
 144 * a reference is taken to the master vlan before returning.
 145 */
 146static struct net_bridge_vlan *
 147br_vlan_get_master(struct net_bridge *br, u16 vid,
 148                   struct netlink_ext_ack *extack)
 149{
 150        struct net_bridge_vlan_group *vg;
 151        struct net_bridge_vlan *masterv;
 152
 153        vg = br_vlan_group(br);
 154        masterv = br_vlan_find(vg, vid);
 155        if (!masterv) {
 156                bool changed;
 157
 158                /* missing global ctx, create it now */
 159                if (br_vlan_add(br, vid, 0, &changed, extack))
 160                        return NULL;
 161                masterv = br_vlan_find(vg, vid);
 162                if (WARN_ON(!masterv))
 163                        return NULL;
 164                refcount_set(&masterv->refcnt, 1);
 165                return masterv;
 166        }
 167        refcount_inc(&masterv->refcnt);
 168
 169        return masterv;
 170}
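
/* A successful br_vlan_get_master() call always returns with a reference held
 * on the master vlan; callers are expected to balance it with
 * br_vlan_put_master(), as __vlan_add() does on its error path, e.g.:
 *
 *	masterv = br_vlan_get_master(br, vid, extack);
 *	if (!masterv)
 *		return -ENOMEM;
 *	...
 *	br_vlan_put_master(masterv);
 */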
 171
 172static void br_master_vlan_rcu_free(struct rcu_head *rcu)
 173{
 174        struct net_bridge_vlan *v;
 175
 176        v = container_of(rcu, struct net_bridge_vlan, rcu);
 177        WARN_ON(!br_vlan_is_master(v));
 178        free_percpu(v->stats);
 179        v->stats = NULL;
 180        kfree(v);
 181}
 182
 183static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 184{
 185        struct net_bridge_vlan_group *vg;
 186
 187        if (!br_vlan_is_master(masterv))
 188                return;
 189
 190        vg = br_vlan_group(masterv->br);
 191        if (refcount_dec_and_test(&masterv->refcnt)) {
 192                rhashtable_remove_fast(&vg->vlan_hash,
 193                                       &masterv->vnode, br_vlan_rht_params);
 194                __vlan_del_list(masterv);
 195                call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 196        }
 197}
 198
 199static void nbp_vlan_rcu_free(struct rcu_head *rcu)
 200{
 201        struct net_bridge_vlan *v;
 202
 203        v = container_of(rcu, struct net_bridge_vlan, rcu);
 204        WARN_ON(br_vlan_is_master(v));
 205        /* if we had per-port stats configured then free them here */
 206        if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
 207                free_percpu(v->stats);
 208        v->stats = NULL;
 209        kfree(v);
 210}
 211
 212/* This is the shared VLAN add function which works for both ports and bridge
 213 * devices. There are four possible calls to this function in terms of the
 214 * vlan entry type:
 215 * 1. vlan is being added on a port (no master flags, global entry exists)
 216 * 2. vlan is being added on a bridge (both master and brentry flags)
 217 * 3. vlan is being added on a port, but a global entry didn't exist which
 218 *    is being created right now (master flag set, brentry flag unset), the
 219 *    global entry is used for global per-vlan features, but not for filtering
 220 * 4. same as 3 but with both master and brentry flags set so the entry
 221 *    will be used for filtering in both the port and the bridge
 222 */
 223static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 224                      struct netlink_ext_ack *extack)
 225{
 226        struct net_bridge_vlan *masterv = NULL;
 227        struct net_bridge_port *p = NULL;
 228        struct net_bridge_vlan_group *vg;
 229        struct net_device *dev;
 230        struct net_bridge *br;
 231        int err;
 232
 233        if (br_vlan_is_master(v)) {
 234                br = v->br;
 235                dev = br->dev;
 236                vg = br_vlan_group(br);
 237        } else {
 238                p = v->port;
 239                br = p->br;
 240                dev = p->dev;
 241                vg = nbp_vlan_group(p);
 242        }
 243
 244        if (p) {
 245                /* Add VLAN to the device filter if it is supported.
 246                 * This ensures tagged traffic enters the bridge when
 247                 * promiscuous mode is disabled by br_manage_promisc().
 248                 */
 249                err = __vlan_vid_add(dev, br, v, flags, extack);
 250                if (err)
 251                        goto out;
 252
 253                /* need to work on the master vlan too */
 254                if (flags & BRIDGE_VLAN_INFO_MASTER) {
 255                        bool changed;
 256
 257                        err = br_vlan_add(br, v->vid,
 258                                          flags | BRIDGE_VLAN_INFO_BRENTRY,
 259                                          &changed, extack);
 260                        if (err)
 261                                goto out_filt;
 262
 263                        if (changed)
 264                                br_vlan_notify(br, NULL, v->vid, 0,
 265                                               RTM_NEWVLAN);
 266                }
 267
 268                masterv = br_vlan_get_master(br, v->vid, extack);
 269                if (!masterv) {
 270                        err = -ENOMEM;
 271                        goto out_filt;
 272                }
 273                v->brvlan = masterv;
 274                if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
 275                        v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 276                        if (!v->stats) {
 277                                err = -ENOMEM;
 278                                goto out_filt;
 279                        }
 280                        v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
 281                } else {
 282                        v->stats = masterv->stats;
 283                }
 284        } else {
 285                err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 286                if (err && err != -EOPNOTSUPP)
 287                        goto out;
 288        }
 289
 290        /* Add the dev mac and count the vlan only if it's usable */
 291        if (br_vlan_should_use(v)) {
 292                err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 293                if (err) {
 294                        br_err(br, "failed to insert local address into bridge forwarding table\n");
 295                        goto out_filt;
 296                }
 297                vg->num_vlans++;
 298        }
 299
 300        /* set the state before publishing */
 301        v->state = BR_STATE_FORWARDING;
 302
 303        err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 304                                            br_vlan_rht_params);
 305        if (err)
 306                goto out_fdb_insert;
 307
 308        __vlan_add_list(v);
 309        __vlan_add_flags(v, flags);
 310
 311        if (p)
 312                nbp_vlan_set_vlan_dev_state(p, v->vid);
 313out:
 314        return err;
 315
 316out_fdb_insert:
 317        if (br_vlan_should_use(v)) {
 318                br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 319                vg->num_vlans--;
 320        }
 321
 322out_filt:
 323        if (p) {
 324                __vlan_vid_del(dev, br, v);
 325                if (masterv) {
 326                        if (v->stats && masterv->stats != v->stats)
 327                                free_percpu(v->stats);
 328                        v->stats = NULL;
 329
 330                        br_vlan_put_master(masterv);
 331                        v->brvlan = NULL;
 332                }
 333        } else {
 334                br_switchdev_port_vlan_del(dev, v->vid);
 335        }
 336
 337        goto out;
 338}
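
/* The error unwinding above mirrors the setup order: a failed rhashtable
 * insert first rolls back the local fdb entry and the num_vlans count
 * (out_fdb_insert); the port path then drops the vid filter, any per-port
 * stats and the master vlan reference (out_filt), while the bridge path only
 * has the switchdev add to undo.
 */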
 339
 340static int __vlan_del(struct net_bridge_vlan *v)
 341{
 342        struct net_bridge_vlan *masterv = v;
 343        struct net_bridge_vlan_group *vg;
 344        struct net_bridge_port *p = NULL;
 345        int err = 0;
 346
 347        if (br_vlan_is_master(v)) {
 348                vg = br_vlan_group(v->br);
 349        } else {
 350                p = v->port;
 351                vg = nbp_vlan_group(v->port);
 352                masterv = v->brvlan;
 353        }
 354
 355        __vlan_delete_pvid(vg, v->vid);
 356        if (p) {
 357                err = __vlan_vid_del(p->dev, p->br, v);
 358                if (err)
 359                        goto out;
 360        } else {
 361                err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
 362                if (err && err != -EOPNOTSUPP)
 363                        goto out;
 364                err = 0;
 365        }
 366
 367        if (br_vlan_should_use(v)) {
 368                v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 369                vg->num_vlans--;
 370        }
 371
 372        if (masterv != v) {
 373                vlan_tunnel_info_del(vg, v);
 374                rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 375                                       br_vlan_rht_params);
 376                __vlan_del_list(v);
 377                nbp_vlan_set_vlan_dev_state(p, v->vid);
 378                call_rcu(&v->rcu, nbp_vlan_rcu_free);
 379        }
 380
 381        br_vlan_put_master(masterv);
 382out:
 383        return err;
 384}
 385
 386static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 387{
 388        WARN_ON(!list_empty(&vg->vlan_list));
 389        rhashtable_destroy(&vg->vlan_hash);
 390        vlan_tunnel_deinit(vg);
 391        kfree(vg);
 392}
 393
 394static void __vlan_flush(const struct net_bridge *br,
 395                         const struct net_bridge_port *p,
 396                         struct net_bridge_vlan_group *vg)
 397{
 398        struct net_bridge_vlan *vlan, *tmp;
 399        u16 v_start = 0, v_end = 0;
 400
 401        __vlan_delete_pvid(vg, vg->pvid);
 402        list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
 403                /* take care of disjoint ranges */
 404                if (!v_start) {
 405                        v_start = vlan->vid;
 406                } else if (vlan->vid - v_end != 1) {
 407                        /* found range end, notify and start next one */
 408                        br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
 409                        v_start = vlan->vid;
 410                }
 411                v_end = vlan->vid;
 412
 413                __vlan_del(vlan);
 414        }
 415
 416        /* notify about the last/whole vlan range */
 417        if (v_start)
 418                br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
 419}
 420
 421struct sk_buff *br_handle_vlan(struct net_bridge *br,
 422                               const struct net_bridge_port *p,
 423                               struct net_bridge_vlan_group *vg,
 424                               struct sk_buff *skb)
 425{
 426        struct br_vlan_stats *stats;
 427        struct net_bridge_vlan *v;
 428        u16 vid;
 429
 430        /* If this packet was not filtered at input, let it pass */
 431        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 432                goto out;
 433
 434        /* At this point, we know that the frame was filtered and contains
 435         * a valid vlan id.  If the vlan id has the untagged flag set,
 436         * send untagged; otherwise, send tagged.
 437         */
 438        br_vlan_get_tag(skb, &vid);
 439        v = br_vlan_find(vg, vid);
 440        /* Vlan entry must be configured at this point.  The
 441         * only exception is when the bridge is set in promisc mode and the
 442         * packet is destined for the bridge device.  In this case
 443         * pass the packet as is.
 444         */
 445        if (!v || !br_vlan_should_use(v)) {
 446                if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 447                        goto out;
 448                } else {
 449                        kfree_skb(skb);
 450                        return NULL;
 451                }
 452        }
 453        if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 454                stats = this_cpu_ptr(v->stats);
 455                u64_stats_update_begin(&stats->syncp);
 456                stats->tx_bytes += skb->len;
 457                stats->tx_packets++;
 458                u64_stats_update_end(&stats->syncp);
 459        }
 460
 461        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 462                __vlan_hwaccel_clear_tag(skb);
 463
 464        if (p && (p->flags & BR_VLAN_TUNNEL) &&
 465            br_handle_egress_vlan_tunnel(skb, v)) {
 466                kfree_skb(skb);
 467                return NULL;
 468        }
 469out:
 470        return skb;
 471}
 472
 473/* Called under RCU */
 474static bool __allowed_ingress(const struct net_bridge *br,
 475                              struct net_bridge_vlan_group *vg,
 476                              struct sk_buff *skb, u16 *vid,
 477                              u8 *state)
 478{
 479        struct br_vlan_stats *stats;
 480        struct net_bridge_vlan *v;
 481        bool tagged;
 482
 483        BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
 484        /* If vlan tx offload is disabled on bridge device and frame was
 485         * sent from vlan device on the bridge device, it does not have
 486         * HW accelerated vlan tag.
 487         */
 488        if (unlikely(!skb_vlan_tag_present(skb) &&
 489                     skb->protocol == br->vlan_proto)) {
 490                skb = skb_vlan_untag(skb);
 491                if (unlikely(!skb))
 492                        return false;
 493        }
 494
 495        if (!br_vlan_get_tag(skb, vid)) {
 496                /* Tagged frame */
 497                if (skb->vlan_proto != br->vlan_proto) {
 498                        /* Protocol-mismatch, empty out vlan_tci for new tag */
 499                        skb_push(skb, ETH_HLEN);
 500                        skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 501                                                        skb_vlan_tag_get(skb));
 502                        if (unlikely(!skb))
 503                                return false;
 504
 505                        skb_pull(skb, ETH_HLEN);
 506                        skb_reset_mac_len(skb);
 507                        *vid = 0;
 508                        tagged = false;
 509                } else {
 510                        tagged = true;
 511                }
 512        } else {
 513                /* Untagged frame */
 514                tagged = false;
 515        }
 516
 517        if (!*vid) {
 518                u16 pvid = br_get_pvid(vg);
 519
 520                /* Frame had a tag with VID 0 or did not have a tag.
 521                 * See if pvid is set on this port.  That tells us which
 522                 * vlan untagged or priority-tagged traffic belongs to.
 523                 */
 524                if (!pvid)
 525                        goto drop;
 526
 527                /* PVID is set on this port.  Any untagged or priority-tagged
 528                 * ingress frame is considered to belong to this vlan.
 529                 */
 530                *vid = pvid;
 531                if (likely(!tagged))
 532                        /* Untagged Frame. */
 533                        __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 534                else
 535                        /* Priority-tagged Frame.
 536                         * At this point, we know that skb->vlan_tci VID
 537                         * field was 0.
 538                         * We update only VID field and preserve PCP field.
 539                         */
 540                        skb->vlan_tci |= pvid;
 541
 542                /* if stats are disabled we can avoid the lookup */
 543                if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 544                        if (*state == BR_STATE_FORWARDING) {
 545                                *state = br_vlan_get_pvid_state(vg);
 546                                return br_vlan_state_allowed(*state, true);
 547                        } else {
 548                                return true;
 549                        }
 550                }
 551        }
 552        v = br_vlan_find(vg, *vid);
 553        if (!v || !br_vlan_should_use(v))
 554                goto drop;
 555
 556        if (*state == BR_STATE_FORWARDING) {
 557                *state = br_vlan_get_state(v);
 558                if (!br_vlan_state_allowed(*state, true))
 559                        goto drop;
 560        }
 561
 562        if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 563                stats = this_cpu_ptr(v->stats);
 564                u64_stats_update_begin(&stats->syncp);
 565                stats->rx_bytes += skb->len;
 566                stats->rx_packets++;
 567                u64_stats_update_end(&stats->syncp);
 568        }
 569
 570        return true;
 571
 572drop:
 573        kfree_skb(skb);
 574        return false;
 575}
 576
 577bool br_allowed_ingress(const struct net_bridge *br,
 578                        struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 579                        u16 *vid, u8 *state)
 580{
 581        /* If VLAN filtering is disabled on the bridge, all packets are
 582         * permitted.
 583         */
 584        if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
 585                BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 586                return true;
 587        }
 588
 589        return __allowed_ingress(br, vg, skb, vid, state);
 590}
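
/* Descriptive summary of the ingress policy above: untagged and
 * priority-tagged frames are classified into the port's PVID (and dropped
 * when no PVID is configured), tagged frames must match a configured vlan
 * entry, and in both cases the per-vlan state is checked so frames in a
 * disallowed state are dropped.
 */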
 591
 592/* Called under RCU. */
 593bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 594                       const struct sk_buff *skb)
 595{
 596        const struct net_bridge_vlan *v;
 597        u16 vid;
 598
 599        /* If this packet was not filtered at input, let it pass */
 600        if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 601                return true;
 602
 603        br_vlan_get_tag(skb, &vid);
 604        v = br_vlan_find(vg, vid);
 605        if (v && br_vlan_should_use(v) &&
 606            br_vlan_state_allowed(br_vlan_get_state(v), false))
 607                return true;
 608
 609        return false;
 610}
 611
 612/* Called under RCU */
 613bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 614{
 615        struct net_bridge_vlan_group *vg;
 616        struct net_bridge *br = p->br;
 617        struct net_bridge_vlan *v;
 618
 619        /* If filtering was disabled at input, let it pass. */
 620        if (!br_opt_get(br, BROPT_VLAN_ENABLED))
 621                return true;
 622
 623        vg = nbp_vlan_group_rcu(p);
 624        if (!vg || !vg->num_vlans)
 625                return false;
 626
 627        if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 628                *vid = 0;
 629
 630        if (!*vid) {
 631                *vid = br_get_pvid(vg);
 632                if (!*vid ||
 633                    !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
 634                        return false;
 635
 636                return true;
 637        }
 638
 639        v = br_vlan_find(vg, *vid);
 640        if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
 641                return true;
 642
 643        return false;
 644}
 645
 646static int br_vlan_add_existing(struct net_bridge *br,
 647                                struct net_bridge_vlan_group *vg,
 648                                struct net_bridge_vlan *vlan,
 649                                u16 flags, bool *changed,
 650                                struct netlink_ext_ack *extack)
 651{
 652        int err;
 653
 654        err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
 655        if (err && err != -EOPNOTSUPP)
 656                return err;
 657
 658        if (!br_vlan_is_brentry(vlan)) {
 659                /* Trying to change flags of non-existent bridge vlan */
 660                if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
 661                        err = -EINVAL;
 662                        goto err_flags;
 663                }
 664                /* It was only kept for port vlans, now make it real */
 665                err = br_fdb_insert(br, NULL, br->dev->dev_addr,
 666                                    vlan->vid);
 667                if (err) {
 668                        br_err(br, "failed to insert local address into bridge forwarding table\n");
 669                        goto err_fdb_insert;
 670                }
 671
 672                refcount_inc(&vlan->refcnt);
 673                vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 674                vg->num_vlans++;
 675                *changed = true;
 676        }
 677
 678        if (__vlan_add_flags(vlan, flags))
 679                *changed = true;
 680
 681        return 0;
 682
 683err_fdb_insert:
 684err_flags:
 685        br_switchdev_port_vlan_del(br->dev, vlan->vid);
 686        return err;
 687}
 688
 689/* Must be protected by RTNL.
 690 * Must be called with vid in range from 1 to 4094 inclusive.
 691 * changed must be true only if the vlan was created or updated
 692 */
 693int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
 694                struct netlink_ext_ack *extack)
 695{
 696        struct net_bridge_vlan_group *vg;
 697        struct net_bridge_vlan *vlan;
 698        int ret;
 699
 700        ASSERT_RTNL();
 701
 702        *changed = false;
 703        vg = br_vlan_group(br);
 704        vlan = br_vlan_find(vg, vid);
 705        if (vlan)
 706                return br_vlan_add_existing(br, vg, vlan, flags, changed,
 707                                            extack);
 708
 709        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 710        if (!vlan)
 711                return -ENOMEM;
 712
 713        vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 714        if (!vlan->stats) {
 715                kfree(vlan);
 716                return -ENOMEM;
 717        }
 718        vlan->vid = vid;
 719        vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 720        vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 721        vlan->br = br;
 722        if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 723                refcount_set(&vlan->refcnt, 1);
 724        ret = __vlan_add(vlan, flags, extack);
 725        if (ret) {
 726                free_percpu(vlan->stats);
 727                kfree(vlan);
 728        } else {
 729                *changed = true;
 730        }
 731
 732        return ret;
 733}
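
/* Usage sketch (rtnl held, VID 100 is an illustrative value): creating a real
 * bridge vlan entry and notifying only when something changed:
 *
 *	bool changed;
 *	int err;
 *
 *	err = br_vlan_add(br, 100, BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
 *	if (!err && changed)
 *		br_vlan_notify(br, NULL, 100, 0, RTM_NEWVLAN);
 */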
 734
 735/* Must be protected by RTNL.
 736 * Must be called with vid in range from 1 to 4094 inclusive.
 737 */
 738int br_vlan_delete(struct net_bridge *br, u16 vid)
 739{
 740        struct net_bridge_vlan_group *vg;
 741        struct net_bridge_vlan *v;
 742
 743        ASSERT_RTNL();
 744
 745        vg = br_vlan_group(br);
 746        v = br_vlan_find(vg, vid);
 747        if (!v || !br_vlan_is_brentry(v))
 748                return -ENOENT;
 749
 750        br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 751        br_fdb_delete_by_port(br, NULL, vid, 0);
 752
 753        vlan_tunnel_info_del(vg, v);
 754
 755        return __vlan_del(v);
 756}
 757
 758void br_vlan_flush(struct net_bridge *br)
 759{
 760        struct net_bridge_vlan_group *vg;
 761
 762        ASSERT_RTNL();
 763
 764        vg = br_vlan_group(br);
 765        __vlan_flush(br, NULL, vg);
 766        RCU_INIT_POINTER(br->vlgrp, NULL);
 767        synchronize_rcu();
 768        __vlan_group_free(vg);
 769}
 770
 771struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 772{
 773        if (!vg)
 774                return NULL;
 775
 776        return br_vlan_lookup(&vg->vlan_hash, vid);
 777}
 778
 779/* Must be protected by RTNL. */
 780static void recalculate_group_addr(struct net_bridge *br)
 781{
 782        if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
 783                return;
 784
 785        spin_lock_bh(&br->lock);
 786        if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 787            br->vlan_proto == htons(ETH_P_8021Q)) {
 788                /* Bridge Group Address */
 789                br->group_addr[5] = 0x00;
 790        } else { /* vlan_enabled && ETH_P_8021AD */
 791                /* Provider Bridge Group Address */
 792                br->group_addr[5] = 0x08;
 793        }
 794        spin_unlock_bh(&br->lock);
 795}
 796
 797/* Must be protected by RTNL. */
 798void br_recalculate_fwd_mask(struct net_bridge *br)
 799{
 800        if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 801            br->vlan_proto == htons(ETH_P_8021Q))
 802                br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 803        else /* vlan_enabled && ETH_P_8021AD */
 804                br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 805                                              ~(1u << br->group_addr[5]);
 806}
 807
 808int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 809{
 810        struct switchdev_attr attr = {
 811                .orig_dev = br->dev,
 812                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 813                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 814                .u.vlan_filtering = val,
 815        };
 816        int err;
 817
 818        if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
 819                return 0;
 820
 821        err = switchdev_port_attr_set(br->dev, &attr);
 822        if (err && err != -EOPNOTSUPP)
 823                return err;
 824
 825        br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
 826        br_manage_promisc(br);
 827        recalculate_group_addr(br);
 828        br_recalculate_fwd_mask(br);
 829
 830        return 0;
 831}
 832
 833int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 834{
 835        return __br_vlan_filter_toggle(br, val);
 836}
 837
 838bool br_vlan_enabled(const struct net_device *dev)
 839{
 840        struct net_bridge *br = netdev_priv(dev);
 841
 842        return br_opt_get(br, BROPT_VLAN_ENABLED);
 843}
 844EXPORT_SYMBOL_GPL(br_vlan_enabled);
 845
 846int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
 847{
 848        struct net_bridge *br = netdev_priv(dev);
 849
 850        *p_proto = ntohs(br->vlan_proto);
 851
 852        return 0;
 853}
 854EXPORT_SYMBOL_GPL(br_vlan_get_proto);
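
/* The two exported helpers above are intended for other kernel users such as
 * switchdev drivers; a minimal sketch, assuming "br_dev" is a bridge
 * netdevice:
 *
 *	u16 proto;
 *
 *	if (br_vlan_enabled(br_dev) && !br_vlan_get_proto(br_dev, &proto))
 *		...	proto is ETH_P_8021Q or ETH_P_8021AD, host byte order
 */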
 855
 856int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 857{
 858        int err = 0;
 859        struct net_bridge_port *p;
 860        struct net_bridge_vlan *vlan;
 861        struct net_bridge_vlan_group *vg;
 862        __be16 oldproto;
 863
 864        if (br->vlan_proto == proto)
 865                return 0;
 866
 867        /* Add VLANs for the new proto to the device filter. */
 868        list_for_each_entry(p, &br->port_list, list) {
 869                vg = nbp_vlan_group(p);
 870                list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 871                        err = vlan_vid_add(p->dev, proto, vlan->vid);
 872                        if (err)
 873                                goto err_filt;
 874                }
 875        }
 876
 877        oldproto = br->vlan_proto;
 878        br->vlan_proto = proto;
 879
 880        recalculate_group_addr(br);
 881        br_recalculate_fwd_mask(br);
 882
 883        /* Delete VLANs for the old proto from the device filter. */
 884        list_for_each_entry(p, &br->port_list, list) {
 885                vg = nbp_vlan_group(p);
 886                list_for_each_entry(vlan, &vg->vlan_list, vlist)
 887                        vlan_vid_del(p->dev, oldproto, vlan->vid);
 888        }
 889
 890        return 0;
 891
 892err_filt:
 893        list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 894                vlan_vid_del(p->dev, proto, vlan->vid);
 895
 896        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 897                vg = nbp_vlan_group(p);
 898                list_for_each_entry(vlan, &vg->vlan_list, vlist)
 899                        vlan_vid_del(p->dev, proto, vlan->vid);
 900        }
 901
 902        return err;
 903}
 904
 905int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 906{
 907        if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 908                return -EPROTONOSUPPORT;
 909
 910        return __br_vlan_set_proto(br, htons(val));
 911}
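
/* Illustrative call: switching a bridge to 802.1ad (provider) tagging goes
 * through the helper above, which in turn updates the group address and group
 * forward mask via __br_vlan_set_proto():
 *
 *	err = br_vlan_set_proto(br, ETH_P_8021AD);
 */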
 912
 913int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
 914{
 915        switch (val) {
 916        case 0:
 917        case 1:
 918                br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
 919                break;
 920        default:
 921                return -EINVAL;
 922        }
 923
 924        return 0;
 925}
 926
 927int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
 928{
 929        struct net_bridge_port *p;
 930
 931        /* only allow changing the option if there are no port vlans configured */
 932        list_for_each_entry(p, &br->port_list, list) {
 933                struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
 934
 935                if (vg->num_vlans)
 936                        return -EBUSY;
 937        }
 938
 939        switch (val) {
 940        case 0:
 941        case 1:
 942                br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
 943                break;
 944        default:
 945                return -EINVAL;
 946        }
 947
 948        return 0;
 949}
 950
 951static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 952{
 953        struct net_bridge_vlan *v;
 954
 955        if (vid != vg->pvid)
 956                return false;
 957
 958        v = br_vlan_lookup(&vg->vlan_hash, vid);
 959        if (v && br_vlan_should_use(v) &&
 960            (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 961                return true;
 962
 963        return false;
 964}
 965
 966static void br_vlan_disable_default_pvid(struct net_bridge *br)
 967{
 968        struct net_bridge_port *p;
 969        u16 pvid = br->default_pvid;
 970
 971        /* Disable default_pvid on all ports where it is still
 972         * configured.
 973         */
 974        if (vlan_default_pvid(br_vlan_group(br), pvid)) {
 975                if (!br_vlan_delete(br, pvid))
 976                        br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
 977        }
 978
 979        list_for_each_entry(p, &br->port_list, list) {
 980                if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
 981                    !nbp_vlan_delete(p, pvid))
 982                        br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
 983        }
 984
 985        br->default_pvid = 0;
 986}
 987
 988int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
 989                               struct netlink_ext_ack *extack)
 990{
 991        const struct net_bridge_vlan *pvent;
 992        struct net_bridge_vlan_group *vg;
 993        struct net_bridge_port *p;
 994        unsigned long *changed;
 995        bool vlchange;
 996        u16 old_pvid;
 997        int err = 0;
 998
 999        if (!pvid) {
1000                br_vlan_disable_default_pvid(br);
1001                return 0;
1002        }
1003
1004        changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
1005        if (!changed)
1006                return -ENOMEM;
1007
1008        old_pvid = br->default_pvid;
1009
1010        /* Update default_pvid config only if we do not conflict with
1011         * user configuration.
1012         */
1013        vg = br_vlan_group(br);
1014        pvent = br_vlan_find(vg, pvid);
1015        if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
1016            (!pvent || !br_vlan_should_use(pvent))) {
1017                err = br_vlan_add(br, pvid,
1018                                  BRIDGE_VLAN_INFO_PVID |
1019                                  BRIDGE_VLAN_INFO_UNTAGGED |
1020                                  BRIDGE_VLAN_INFO_BRENTRY,
1021                                  &vlchange, extack);
1022                if (err)
1023                        goto out;
1024
1025                if (br_vlan_delete(br, old_pvid))
1026                        br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
1027                br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
1028                set_bit(0, changed);
1029        }
1030
1031        list_for_each_entry(p, &br->port_list, list) {
1032                /* Update default_pvid config only if we do not conflict with
1033                 * user configuration.
1034                 */
1035                vg = nbp_vlan_group(p);
1036                if ((old_pvid &&
1037                     !vlan_default_pvid(vg, old_pvid)) ||
1038                    br_vlan_find(vg, pvid))
1039                        continue;
1040
1041                err = nbp_vlan_add(p, pvid,
1042                                   BRIDGE_VLAN_INFO_PVID |
1043                                   BRIDGE_VLAN_INFO_UNTAGGED,
1044                                   &vlchange, extack);
1045                if (err)
1046                        goto err_port;
1047                if (nbp_vlan_delete(p, old_pvid))
1048                        br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
1049                br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
1050                set_bit(p->port_no, changed);
1051        }
1052
1053        br->default_pvid = pvid;
1054
1055out:
1056        bitmap_free(changed);
1057        return err;
1058
1059err_port:
1060        list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1061                if (!test_bit(p->port_no, changed))
1062                        continue;
1063
1064                if (old_pvid) {
1065                        nbp_vlan_add(p, old_pvid,
1066                                     BRIDGE_VLAN_INFO_PVID |
1067                                     BRIDGE_VLAN_INFO_UNTAGGED,
1068                                     &vlchange, NULL);
1069                        br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
1070                }
1071                nbp_vlan_delete(p, pvid);
1072                br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
1073        }
1074
1075        if (test_bit(0, changed)) {
1076                if (old_pvid) {
1077                        br_vlan_add(br, old_pvid,
1078                                    BRIDGE_VLAN_INFO_PVID |
1079                                    BRIDGE_VLAN_INFO_UNTAGGED |
1080                                    BRIDGE_VLAN_INFO_BRENTRY,
1081                                    &vlchange, NULL);
1082                        br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
1083                }
1084                br_vlan_delete(br, pvid);
1085                br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
1086        }
1087        goto out;
1088}
1089
1090int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1091{
1092        u16 pvid = val;
1093        int err = 0;
1094
1095        if (val >= VLAN_VID_MASK)
1096                return -EINVAL;
1097
1098        if (pvid == br->default_pvid)
1099                goto out;
1100
1101        /* Only allow default pvid change when filtering is disabled */
1102        if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1103                pr_info_once("Please disable vlan filtering to change default_pvid\n");
1104                err = -EPERM;
1105                goto out;
1106        }
1107        err = __br_vlan_set_default_pvid(br, pvid, NULL);
1108out:
1109        return err;
1110}
1111
1112int br_vlan_init(struct net_bridge *br)
1113{
1114        struct net_bridge_vlan_group *vg;
1115        int ret = -ENOMEM;
1116
1117        vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1118        if (!vg)
1119                goto out;
1120        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1121        if (ret)
1122                goto err_rhtbl;
1123        ret = vlan_tunnel_init(vg);
1124        if (ret)
1125                goto err_tunnel_init;
1126        INIT_LIST_HEAD(&vg->vlan_list);
1127        br->vlan_proto = htons(ETH_P_8021Q);
1128        br->default_pvid = 1;
1129        rcu_assign_pointer(br->vlgrp, vg);
1130
1131out:
1132        return ret;
1133
1134err_tunnel_init:
1135        rhashtable_destroy(&vg->vlan_hash);
1136err_rhtbl:
1137        kfree(vg);
1138
1139        goto out;
1140}
1141
1142int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1143{
1144        struct switchdev_attr attr = {
1145                .orig_dev = p->br->dev,
1146                .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1147                .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1148                .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1149        };
1150        struct net_bridge_vlan_group *vg;
1151        int ret = -ENOMEM;
1152
1153        vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1154        if (!vg)
1155                goto out;
1156
1157        ret = switchdev_port_attr_set(p->dev, &attr);
1158        if (ret && ret != -EOPNOTSUPP)
1159                goto err_vlan_enabled;
1160
1161        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1162        if (ret)
1163                goto err_rhtbl;
1164        ret = vlan_tunnel_init(vg);
1165        if (ret)
1166                goto err_tunnel_init;
1167        INIT_LIST_HEAD(&vg->vlan_list);
1168        rcu_assign_pointer(p->vlgrp, vg);
1169        if (p->br->default_pvid) {
1170                bool changed;
1171
1172                ret = nbp_vlan_add(p, p->br->default_pvid,
1173                                   BRIDGE_VLAN_INFO_PVID |
1174                                   BRIDGE_VLAN_INFO_UNTAGGED,
1175                                   &changed, extack);
1176                if (ret)
1177                        goto err_vlan_add;
1178                br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
1179        }
1180out:
1181        return ret;
1182
1183err_vlan_add:
1184        RCU_INIT_POINTER(p->vlgrp, NULL);
1185        synchronize_rcu();
1186        vlan_tunnel_deinit(vg);
1187err_tunnel_init:
1188        rhashtable_destroy(&vg->vlan_hash);
1189err_rhtbl:
1190err_vlan_enabled:
1191        kfree(vg);
1192
1193        goto out;
1194}
1195
1196/* Must be protected by RTNL.
1197 * Must be called with vid in range from 1 to 4094 inclusive.
1198 * changed must be true only if the vlan was created or updated
1199 */
1200int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1201                 bool *changed, struct netlink_ext_ack *extack)
1202{
1203        struct net_bridge_vlan *vlan;
1204        int ret;
1205
1206        ASSERT_RTNL();
1207
1208        *changed = false;
1209        vlan = br_vlan_find(nbp_vlan_group(port), vid);
1210        if (vlan) {
1211                /* Pass the flags to the hardware bridge */
1212                ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1213                if (ret && ret != -EOPNOTSUPP)
1214                        return ret;
1215                *changed = __vlan_add_flags(vlan, flags);
1216
1217                return 0;
1218        }
1219
1220        vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1221        if (!vlan)
1222                return -ENOMEM;
1223
1224        vlan->vid = vid;
1225        vlan->port = port;
1226        ret = __vlan_add(vlan, flags, extack);
1227        if (ret)
1228                kfree(vlan);
1229        else
1230                *changed = true;
1231
1232        return ret;
1233}
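
/* Usage sketch (rtnl held, VID 10 is an illustrative value): making a vlan
 * the untagged PVID of a port, the same flag combination nbp_vlan_init()
 * above uses for the default_pvid:
 *
 *	bool changed;
 *
 *	err = nbp_vlan_add(p, 10,
 *			   BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *			   &changed, extack);
 */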
1234
1235/* Must be protected by RTNL.
1236 * Must be called with vid in range from 1 to 4094 inclusive.
1237 */
1238int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1239{
1240        struct net_bridge_vlan *v;
1241
1242        ASSERT_RTNL();
1243
1244        v = br_vlan_find(nbp_vlan_group(port), vid);
1245        if (!v)
1246                return -ENOENT;
1247        br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1248        br_fdb_delete_by_port(port->br, port, vid, 0);
1249
1250        return __vlan_del(v);
1251}
1252
1253void nbp_vlan_flush(struct net_bridge_port *port)
1254{
1255        struct net_bridge_vlan_group *vg;
1256
1257        ASSERT_RTNL();
1258
1259        vg = nbp_vlan_group(port);
1260        __vlan_flush(port->br, port, vg);
1261        RCU_INIT_POINTER(port->vlgrp, NULL);
1262        synchronize_rcu();
1263        __vlan_group_free(vg);
1264}
1265
1266void br_vlan_get_stats(const struct net_bridge_vlan *v,
1267                       struct br_vlan_stats *stats)
1268{
1269        int i;
1270
1271        memset(stats, 0, sizeof(*stats));
1272        for_each_possible_cpu(i) {
1273                u64 rxpackets, rxbytes, txpackets, txbytes;
1274                struct br_vlan_stats *cpu_stats;
1275                unsigned int start;
1276
1277                cpu_stats = per_cpu_ptr(v->stats, i);
1278                do {
1279                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1280                        rxpackets = cpu_stats->rx_packets;
1281                        rxbytes = cpu_stats->rx_bytes;
1282                        txbytes = cpu_stats->tx_bytes;
1283                        txpackets = cpu_stats->tx_packets;
1284                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1285
1286                stats->rx_packets += rxpackets;
1287                stats->rx_bytes += rxbytes;
1288                stats->tx_bytes += txbytes;
1289                stats->tx_packets += txpackets;
1290        }
1291}
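
/* The u64_stats fetch/retry loop above yields a consistent 64-bit snapshot of
 * each CPU's counters even on 32-bit hosts, pairing with the
 * u64_stats_update_begin()/_end() sections in the rx/tx hot paths earlier in
 * this file.
 */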
1292
1293int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1294{
1295        struct net_bridge_vlan_group *vg;
1296        struct net_bridge_port *p;
1297
1298        ASSERT_RTNL();
1299        p = br_port_get_check_rtnl(dev);
1300        if (p)
1301                vg = nbp_vlan_group(p);
1302        else if (netif_is_bridge_master(dev))
1303                vg = br_vlan_group(netdev_priv(dev));
1304        else
1305                return -EINVAL;
1306
1307        *p_pvid = br_get_pvid(vg);
1308        return 0;
1309}
1310EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1311
1312int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1313{
1314        struct net_bridge_vlan_group *vg;
1315        struct net_bridge_port *p;
1316
1317        p = br_port_get_check_rcu(dev);
1318        if (p)
1319                vg = nbp_vlan_group_rcu(p);
1320        else if (netif_is_bridge_master(dev))
1321                vg = br_vlan_group_rcu(netdev_priv(dev));
1322        else
1323                return -EINVAL;
1324
1325        *p_pvid = br_get_pvid(vg);
1326        return 0;
1327}
1328EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
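
/* Minimal sketch of the RCU variant, assuming "dev" is a bridge or bridge
 * port device:
 *
 *	u16 pvid;
 *
 *	rcu_read_lock();
 *	if (!br_vlan_get_pvid_rcu(dev, &pvid) && pvid)
 *		...	pvid holds the current PVID
 *	rcu_read_unlock();
 */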
1329
1330int br_vlan_get_info(const struct net_device *dev, u16 vid,
1331                     struct bridge_vlan_info *p_vinfo)
1332{
1333        struct net_bridge_vlan_group *vg;
1334        struct net_bridge_vlan *v;
1335        struct net_bridge_port *p;
1336
1337        ASSERT_RTNL();
1338        p = br_port_get_check_rtnl(dev);
1339        if (p)
1340                vg = nbp_vlan_group(p);
1341        else if (netif_is_bridge_master(dev))
1342                vg = br_vlan_group(netdev_priv(dev));
1343        else
1344                return -EINVAL;
1345
1346        v = br_vlan_find(vg, vid);
1347        if (!v)
1348                return -ENOENT;
1349
1350        p_vinfo->vid = vid;
1351        p_vinfo->flags = v->flags;
1352        if (vid == br_get_pvid(vg))
1353                p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1354        return 0;
1355}
1356EXPORT_SYMBOL_GPL(br_vlan_get_info);
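
/* Usage sketch (rtnl held, "dev" is a bridge or bridge port, VID 10 is an
 * illustrative value): checking whether a vlan exists and is the PVID:
 *
 *	struct bridge_vlan_info vinfo;
 *
 *	if (!br_vlan_get_info(dev, 10, &vinfo) &&
 *	    (vinfo.flags & BRIDGE_VLAN_INFO_PVID))
 *		...	VID 10 is configured and is the PVID on this device
 */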
1357
1358static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1359{
1360        return is_vlan_dev(dev) &&
1361                !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1362}
1363
1364static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1365                               __always_unused struct netdev_nested_priv *priv)
1366{
1367        return br_vlan_is_bind_vlan_dev(dev);
1368}
1369
1370static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1371{
1372        int found;
1373
1374        rcu_read_lock();
1375        found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1376                                              NULL);
1377        rcu_read_unlock();
1378
1379        return !!found;
1380}
1381
1382struct br_vlan_bind_walk_data {
1383        u16 vid;
1384        struct net_device *result;
1385};
1386
1387static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1388                                          struct netdev_nested_priv *priv)
1389{
1390        struct br_vlan_bind_walk_data *data = priv->data;
1391        int found = 0;
1392
1393        if (br_vlan_is_bind_vlan_dev(dev) &&
1394            vlan_dev_priv(dev)->vlan_id == data->vid) {
1395                data->result = dev;
1396                found = 1;
1397        }
1398
1399        return found;
1400}
1401
1402static struct net_device *
1403br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1404{
1405        struct br_vlan_bind_walk_data data = {
1406                .vid = vid,
1407        };
1408        struct netdev_nested_priv priv = {
1409                .data = (void *)&data,
1410        };
1411
1412        rcu_read_lock();
1413        netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1414                                      &priv);
1415        rcu_read_unlock();
1416
1417        return data.result;
1418}
1419
1420static bool br_vlan_is_dev_up(const struct net_device *dev)
1421{
1422        return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1423}
1424
1425static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1426                                       struct net_device *vlan_dev)
1427{
1428        u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1429        struct net_bridge_vlan_group *vg;
1430        struct net_bridge_port *p;
1431        bool has_carrier = false;
1432
1433        if (!netif_carrier_ok(br->dev)) {
1434                netif_carrier_off(vlan_dev);
1435                return;
1436        }
1437
1438        list_for_each_entry(p, &br->port_list, list) {
1439                vg = nbp_vlan_group(p);
1440                if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1441                        has_carrier = true;
1442                        break;
1443                }
1444        }
1445
1446        if (has_carrier)
1447                netif_carrier_on(vlan_dev);
1448        else
1449                netif_carrier_off(vlan_dev);
1450}
1451
1452static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1453{
1454        struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1455        struct net_bridge_vlan *vlan;
1456        struct net_device *vlan_dev;
1457
1458        list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1459                vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1460                                                           vlan->vid);
1461                if (vlan_dev) {
1462                        if (br_vlan_is_dev_up(p->dev)) {
1463                                if (netif_carrier_ok(p->br->dev))
1464                                        netif_carrier_on(vlan_dev);
1465                        } else {
1466                                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1467                        }
1468                }
1469        }
1470}
1471
1472static void br_vlan_upper_change(struct net_device *dev,
1473                                 struct net_device *upper_dev,
1474                                 bool linking)
1475{
1476        struct net_bridge *br = netdev_priv(dev);
1477
1478        if (!br_vlan_is_bind_vlan_dev(upper_dev))
1479                return;
1480
1481        if (linking) {
1482                br_vlan_set_vlan_dev_state(br, upper_dev);
1483                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1484        } else {
1485                br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1486                              br_vlan_has_upper_bind_vlan_dev(dev));
1487        }
1488}
1489
1490struct br_vlan_link_state_walk_data {
1491        struct net_bridge *br;
1492};
1493
1494static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1495                                        struct netdev_nested_priv *priv)
1496{
1497        struct br_vlan_link_state_walk_data *data = priv->data;
1498
1499        if (br_vlan_is_bind_vlan_dev(vlan_dev))
1500                br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1501
1502        return 0;
1503}
1504
1505static void br_vlan_link_state_change(struct net_device *dev,
1506                                      struct net_bridge *br)
1507{
1508        struct br_vlan_link_state_walk_data data = {
1509                .br = br
1510        };
1511        struct netdev_nested_priv priv = {
1512                .data = (void *)&data,
1513        };
1514
1515        rcu_read_lock();
1516        netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1517                                      &priv);
1518        rcu_read_unlock();
1519}
1520
1521/* Must be protected by RTNL. */
1522static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1523{
1524        struct net_device *vlan_dev;
1525
1526        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1527                return;
1528
1529        vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1530        if (vlan_dev)
1531                br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1532}
1533
1534/* Must be protected by RTNL. */
1535int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1536{
1537        struct netdev_notifier_changeupper_info *info;
1538        struct net_bridge *br = netdev_priv(dev);
1539        int vlcmd = 0, ret = 0;
1540        bool changed = false;
1541
1542        switch (event) {
1543        case NETDEV_REGISTER:
1544                ret = br_vlan_add(br, br->default_pvid,
1545                                  BRIDGE_VLAN_INFO_PVID |
1546                                  BRIDGE_VLAN_INFO_UNTAGGED |
1547                                  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1548                vlcmd = RTM_NEWVLAN;
1549                break;
1550        case NETDEV_UNREGISTER:
1551                changed = !br_vlan_delete(br, br->default_pvid);
1552                vlcmd = RTM_DELVLAN;
1553                break;
1554        case NETDEV_CHANGEUPPER:
1555                info = ptr;
1556                br_vlan_upper_change(dev, info->upper_dev, info->linking);
1557                break;
1558
1559        case NETDEV_CHANGE:
1560        case NETDEV_UP:
1561                if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1562                        break;
1563                br_vlan_link_state_change(dev, br);
1564                break;
1565        }
1566        if (changed)
1567                br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
1568
1569        return ret;
1570}
1571
1572/* Must be protected by RTNL. */
1573void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1574{
1575        if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1576                return;
1577
1578        switch (event) {
1579        case NETDEV_CHANGE:
1580        case NETDEV_DOWN:
1581        case NETDEV_UP:
1582                br_vlan_set_all_vlan_dev_state(p);
1583                break;
1584        }
1585}
1586
1587static bool br_vlan_stats_fill(struct sk_buff *skb,
1588                               const struct net_bridge_vlan *v)
1589{
1590        struct br_vlan_stats stats;
1591        struct nlattr *nest;
1592
1593        nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
1594        if (!nest)
1595                return false;
1596
1597        br_vlan_get_stats(v, &stats);
1598        if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
1599                              BRIDGE_VLANDB_STATS_PAD) ||
1600            nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
1601                              stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
1602            nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
1603                              BRIDGE_VLANDB_STATS_PAD) ||
1604            nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
1605                              stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
1606                goto out_err;
1607
1608        nla_nest_end(skb, nest);
1609
1610        return true;
1611
1612out_err:
1613        nla_nest_cancel(skb, nest);
1614        return false;
1615}
1616
1617/* v_opts is used to dump the options which must be equal in the whole range */
1618static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1619                              const struct net_bridge_vlan *v_opts,
1620                              u16 flags,
1621                              bool dump_stats)
1622{
1623        struct bridge_vlan_info info;
1624        struct nlattr *nest;
1625
1626        nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1627        if (!nest)
1628                return false;
1629
1630        memset(&info, 0, sizeof(info));
1631        info.vid = vid;
1632        if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1633                info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1634        if (flags & BRIDGE_VLAN_INFO_PVID)
1635                info.flags |= BRIDGE_VLAN_INFO_PVID;
1636
1637        if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1638                goto out_err;
1639
1640        if (vid_range && vid < vid_range &&
1641            !(flags & BRIDGE_VLAN_INFO_PVID) &&
1642            nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1643                goto out_err;
1644
1645        if (v_opts) {
1646                if (!br_vlan_opts_fill(skb, v_opts))
1647                        goto out_err;
1648
1649                if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
1650                        goto out_err;
1651        }
1652
1653        nla_nest_end(skb, nest);
1654
1655        return true;
1656
1657out_err:
1658        nla_nest_cancel(skb, nest);
1659        return false;
1660}
1661
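    /* Size estimate for a single-vlan notification, used when allocating the
     * skb in br_vlan_notify().
     */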
1662static size_t rtnl_vlan_nlmsg_size(void)
1663{
1664        return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1665                + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
1666                + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
1667                + nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
1668                + br_vlan_opts_nl_size(); /* bridge vlan options */
1669}
1670
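    /* Send an RTM_NEWVLAN/RTM_DELVLAN notification for @vid (optionally the
     * range [@vid, @vid_range]) on the bridge (@p == NULL) or on port @p to
     * the RTNLGRP_BRVLAN group.  For RTM_NEWVLAN the vlan entry is looked up
     * so its flags and options can be included in the message.
     */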
1671void br_vlan_notify(const struct net_bridge *br,
1672                    const struct net_bridge_port *p,
1673                    u16 vid, u16 vid_range,
1674                    int cmd)
1675{
1676        struct net_bridge_vlan_group *vg;
1677        struct net_bridge_vlan *v = NULL;
1678        struct br_vlan_msg *bvm;
1679        struct nlmsghdr *nlh;
1680        struct sk_buff *skb;
1681        int err = -ENOBUFS;
1682        struct net *net;
1683        u16 flags = 0;
1684        int ifindex;
1685
1686        /* right now notifications are done only with rtnl held */
1687        ASSERT_RTNL();
1688
1689        if (p) {
1690                ifindex = p->dev->ifindex;
1691                vg = nbp_vlan_group(p);
1692                net = dev_net(p->dev);
1693        } else {
1694                ifindex = br->dev->ifindex;
1695                vg = br_vlan_group(br);
1696                net = dev_net(br->dev);
1697        }
1698
1699        skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
1700        if (!skb)
1701                goto out_err;
1702
1703        err = -EMSGSIZE;
1704        nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
1705        if (!nlh)
1706                goto out_err;
1707        bvm = nlmsg_data(nlh);
1708        memset(bvm, 0, sizeof(*bvm));
1709        bvm->family = AF_BRIDGE;
1710        bvm->ifindex = ifindex;
1711
1712        switch (cmd) {
1713        case RTM_NEWVLAN:
1714                /* need to find the vlan due to flags/options */
1715                v = br_vlan_find(vg, vid);
1716                if (!v || !br_vlan_should_use(v))
1717                        goto out_kfree;
1718
1719                flags = v->flags;
1720                if (br_get_pvid(vg) == v->vid)
1721                        flags |= BRIDGE_VLAN_INFO_PVID;
1722                break;
1723        case RTM_DELVLAN:
1724                break;
1725        default:
1726                goto out_kfree;
1727        }
1728
1729        if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
1730                goto out_err;
1731
1732        nlmsg_end(skb, nlh);
1733        rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
1734        return;
1735
1736out_err:
1737        rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
1738out_kfree:
1739        kfree_skb(skb);
1740}
1741
1742/* check if v_curr can enter a range ending in range_end */
1743bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1744                             const struct net_bridge_vlan *range_end)
1745{
1746        return v_curr->vid - range_end->vid == 1 &&
1747               range_end->flags == v_curr->flags &&
1748               br_vlan_opts_eq_range(v_curr, range_end);
1749}
1750
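    /* Fill one RTM_NEWVLAN message with the vlan entries of a bridge or port.
     * Consecutive vlans are compressed into a single range entry unless stats
     * are being dumped, the vlan is the pvid, or its flags/options differ
     * from the range being built.  On -EMSGSIZE the current position is saved
     * in cb->args[1] so a multi-part dump can resume.
     */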
1751static int br_vlan_dump_dev(const struct net_device *dev,
1752                            struct sk_buff *skb,
1753                            struct netlink_callback *cb,
1754                            u32 dump_flags)
1755{
1756        struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1757        bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
1758        struct net_bridge_vlan_group *vg;
1759        int idx = 0, s_idx = cb->args[1];
1760        struct nlmsghdr *nlh = NULL;
1761        struct net_bridge_port *p;
1762        struct br_vlan_msg *bvm;
1763        struct net_bridge *br;
1764        int err = 0;
1765        u16 pvid;
1766
1767        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1768                return -EINVAL;
1769
1770        if (netif_is_bridge_master(dev)) {
1771                br = netdev_priv(dev);
1772                vg = br_vlan_group_rcu(br);
1773                p = NULL;
1774        } else {
1775                p = br_port_get_rcu(dev);
1776                if (WARN_ON(!p))
1777                        return -EINVAL;
1778                vg = nbp_vlan_group_rcu(p);
1779                br = p->br;
1780        }
1781
1782        if (!vg)
1783                return 0;
1784
1785        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1786                        RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
1787        if (!nlh)
1788                return -EMSGSIZE;
1789        bvm = nlmsg_data(nlh);
1790        memset(bvm, 0, sizeof(*bvm));
1791        bvm->family = PF_BRIDGE;
1792        bvm->ifindex = dev->ifindex;
1793        pvid = br_get_pvid(vg);
1794
1795        /* idx must stay at range's beginning until it is filled in */
1796        list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
1797                if (!br_vlan_should_use(v))
1798                        continue;
1799                if (idx < s_idx) {
1800                        idx++;
1801                        continue;
1802                }
1803
1804                if (!range_start) {
1805                        range_start = v;
1806                        range_end = v;
1807                        continue;
1808                }
1809
1810                if (dump_stats || v->vid == pvid ||
1811                    !br_vlan_can_enter_range(v, range_end)) {
1812                        u16 vlan_flags = br_vlan_flags(range_start, pvid);
1813
1814                        if (!br_vlan_fill_vids(skb, range_start->vid,
1815                                               range_end->vid, range_start,
1816                                               vlan_flags, dump_stats)) {
1817                                err = -EMSGSIZE;
1818                                break;
1819                        }
1820                        /* advance number of filled vlans */
1821                        idx += range_end->vid - range_start->vid + 1;
1822
1823                        range_start = v;
1824                }
1825                range_end = v;
1826        }
1827
1828        /* err will be 0 and range_start will be set in 3 cases here:
1829         * - first vlan (range_start == range_end)
1830         * - last vlan (range_start == range_end, not in range)
1831         * - last vlan range (range_start != range_end, in range)
1832         */
1833        if (!err && range_start &&
1834            !br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
1835                               range_start, br_vlan_flags(range_start, pvid),
1836                               dump_stats))
1837                err = -EMSGSIZE;
1838
1839        cb->args[1] = err ? idx : 0;
1840
1841        nlmsg_end(skb, nlh);
1842
1843        return err;
1844}
1845
1846static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
1847        [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
1848};
1849
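    /* RTM_GETVLAN dump handler: dump a single device if the request carries a
     * non-zero ifindex, otherwise walk every netdev in the namespace.
     * cb->args[0] holds the device index and cb->args[1] the in-device vlan
     * position between dump continuations.
     */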
1850static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
1851{
1852        struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
1853        int idx = 0, err = 0, s_idx = cb->args[0];
1854        struct net *net = sock_net(skb->sk);
1855        struct br_vlan_msg *bvm;
1856        struct net_device *dev;
1857        u32 dump_flags = 0;
1858
1859        err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
1860                          br_vlan_db_dump_pol, cb->extack);
1861        if (err < 0)
1862                return err;
1863
1864        bvm = nlmsg_data(cb->nlh);
1865        if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
1866                dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
1867
1868        rcu_read_lock();
1869        if (bvm->ifindex) {
1870                dev = dev_get_by_index_rcu(net, bvm->ifindex);
1871                if (!dev) {
1872                        err = -ENODEV;
1873                        goto out_err;
1874                }
1875                err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
1876                if (err && err != -EMSGSIZE)
1877                        goto out_err;
1878        } else {
1879                for_each_netdev_rcu(net, dev) {
1880                        if (idx < s_idx)
1881                                goto skip;
1882
1883                        err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
1884                        if (err == -EMSGSIZE)
1885                                break;
1886skip:
1887                        idx++;
1888                }
1889        }
1890        cb->args[0] = idx;
1891        rcu_read_unlock();
1892
1893        return skb->len;
1894
1895out_err:
1896        rcu_read_unlock();
1897
1898        return err;
1899}
1900
1901static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
1902        [BRIDGE_VLANDB_ENTRY_INFO]      =
1903                NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
1904        [BRIDGE_VLANDB_ENTRY_RANGE]     = { .type = NLA_U16 },
1905        [BRIDGE_VLANDB_ENTRY_STATE]     = { .type = NLA_U8 },
1906        [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
1907};
1908
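    /* Process a single BRIDGE_VLANDB_ENTRY nest: validate the vid and the
     * optional BRIDGE_VLANDB_ENTRY_RANGE, map RTM_NEWVLAN/RTM_DELVLAN onto
     * the existing RTM_SETLINK/RTM_DELLINK vlan paths via
     * br_process_vlan_info(), and for RTM_NEWVLAN apply any per-vlan options
     * to the resulting range.
     */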
1909static int br_vlan_rtm_process_one(struct net_device *dev,
1910                                   const struct nlattr *attr,
1911                                   int cmd, struct netlink_ext_ack *extack)
1912{
1913        struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
1914        struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
1915        bool changed = false, skip_processing = false;
1916        struct net_bridge_vlan_group *vg;
1917        struct net_bridge_port *p = NULL;
1918        int err = 0, cmdmap = 0;
1919        struct net_bridge *br;
1920
1921        if (netif_is_bridge_master(dev)) {
1922                br = netdev_priv(dev);
1923                vg = br_vlan_group(br);
1924        } else {
1925                p = br_port_get_rtnl(dev);
1926                if (WARN_ON(!p))
1927                        return -ENODEV;
1928                br = p->br;
1929                vg = nbp_vlan_group(p);
1930        }
1931
1932        if (WARN_ON(!vg))
1933                return -ENODEV;
1934
1935        err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
1936                               br_vlan_db_policy, extack);
1937        if (err)
1938                return err;
1939
1940        if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
1941                NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
1942                return -EINVAL;
1943        }
1944        memset(&vrange_end, 0, sizeof(vrange_end));
1945
1946        vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
1947        if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
1948                            BRIDGE_VLAN_INFO_RANGE_END)) {
1949                NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
1950                return -EINVAL;
1951        }
1952        if (!br_vlan_valid_id(vinfo->vid, extack))
1953                return -EINVAL;
1954
1955        if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
1956                vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
1957                /* validate user-provided flags without RANGE_BEGIN */
1958                vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
1959                vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
1960
1961                /* vinfo_last is the range start, vinfo the range end */
1962                vinfo_last = vinfo;
1963                vinfo = &vrange_end;
1964
1965                if (!br_vlan_valid_id(vinfo->vid, extack) ||
1966                    !br_vlan_valid_range(vinfo, vinfo_last, extack))
1967                        return -EINVAL;
1968        }
1969
1970        switch (cmd) {
1971        case RTM_NEWVLAN:
1972                cmdmap = RTM_SETLINK;
1973                skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
1974                break;
1975        case RTM_DELVLAN:
1976                cmdmap = RTM_DELLINK;
1977                break;
1978        }
1979
1980        if (!skip_processing) {
1981                struct bridge_vlan_info *tmp_last = vinfo_last;
1982
1983                /* br_process_vlan_info may overwrite vinfo_last */
1984                err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
1985                                           &changed, extack);
1986
1987                /* notify first if anything changed */
1988                if (changed)
1989                        br_ifinfo_notify(cmdmap, br, p);
1990
1991                if (err)
1992                        return err;
1993        }
1994
1995        /* deal with options */
1996        if (cmd == RTM_NEWVLAN) {
1997                struct net_bridge_vlan *range_start, *range_end;
1998
1999                if (vinfo_last) {
2000                        range_start = br_vlan_find(vg, vinfo_last->vid);
2001                        range_end = br_vlan_find(vg, vinfo->vid);
2002                } else {
2003                        range_start = br_vlan_find(vg, vinfo->vid);
2004                        range_end = range_start;
2005                }
2006
2007                err = br_vlan_process_options(br, p, range_start, range_end,
2008                                              tb, extack);
2009        }
2010
2011        return err;
2012}
2013
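    /* Common RTM_NEWVLAN/RTM_DELVLAN request handler.  The message starts
     * with a struct br_vlan_msg selecting the bridge or bridge port and is
     * followed by one or more BRIDGE_VLANDB_ENTRY nests, each processed by
     * br_vlan_rtm_process_one(); at least one entry is required.
     */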
2014static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
2015                               struct netlink_ext_ack *extack)
2016{
2017        struct net *net = sock_net(skb->sk);
2018        struct br_vlan_msg *bvm;
2019        struct net_device *dev;
2020        struct nlattr *attr;
2021        int err, vlans = 0;
2022        int rem;
2023
2024        /* this should validate the header and check for remaining bytes */
2025        err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
2026                          extack);
2027        if (err < 0)
2028                return err;
2029
2030        bvm = nlmsg_data(nlh);
2031        dev = __dev_get_by_index(net, bvm->ifindex);
2032        if (!dev)
2033                return -ENODEV;
2034
2035        if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
2036                NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
2037                return -EINVAL;
2038        }
2039
2040        nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
2041                if (nla_type(attr) != BRIDGE_VLANDB_ENTRY)
2042                        continue;
2043
2044                vlans++;
2045                err = br_vlan_rtm_process_one(dev, attr, nlh->nlmsg_type,
2046                                              extack);
2047                if (err)
2048                        break;
2049        }
2050        if (!vlans) {
2051                NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
2052                err = -EINVAL;
2053        }
2054
2055        return err;
2056}
2057
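    /* Register the PF_BRIDGE handlers for the vlan db API: RTM_GETVLAN dumps
     * and RTM_NEWVLAN/RTM_DELVLAN modification requests.  Newer iproute2
     * "bridge vlan" commands can use this API in addition to the legacy
     * IFLA_AF_SPEC interface.
     */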
2058void br_vlan_rtnl_init(void)
2059{
2060        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
2061                             br_vlan_rtm_dump, 0);
2062        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
2063                             br_vlan_rtm_process, NULL, 0);
2064        rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
2065                             br_vlan_rtm_process, NULL, 0);
2066}
2067
2068void br_vlan_rtnl_uninit(void)
2069{
2070        rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
2071        rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
2072        rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
2073}
2074