/* linux/net/bridge/br_mdb.c */
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/err.h>
   3#include <linux/igmp.h>
   4#include <linux/kernel.h>
   5#include <linux/netdevice.h>
   6#include <linux/rculist.h>
   7#include <linux/skbuff.h>
   8#include <linux/if_ether.h>
   9#include <net/ip.h>
  10#include <net/netlink.h>
  11#include <net/switchdev.h>
  12#if IS_ENABLED(CONFIG_IPV6)
  13#include <net/ipv6.h>
  14#include <net/addrconf.h>
  15#endif
  16
  17#include "br_private.h"
  18
  19static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
  20                               struct net_device *dev)
  21{
  22        struct net_bridge *br = netdev_priv(dev);
  23        struct net_bridge_port *p;
  24        struct nlattr *nest, *port_nest;
  25
  26        if (!br->multicast_router || hlist_empty(&br->router_list))
  27                return 0;
  28
  29        nest = nla_nest_start(skb, MDBA_ROUTER);
  30        if (nest == NULL)
  31                return -EMSGSIZE;
  32
  33        hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
  34                if (!p)
  35                        continue;
  36                port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
  37                if (!port_nest)
  38                        goto fail;
  39                if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
  40                    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
  41                                br_timer_value(&p->multicast_router_timer)) ||
  42                    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
  43                               p->multicast_router)) {
  44                        nla_nest_cancel(skb, port_nest);
  45                        goto fail;
  46                }
  47                nla_nest_end(skb, port_nest);
  48        }
  49
  50        nla_nest_end(skb, nest);
  51        return 0;
  52fail:
  53        nla_nest_cancel(skb, nest);
  54        return -EMSGSIZE;
  55}
  56
  57static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
  58{
  59        e->state = flags & MDB_PG_FLAGS_PERMANENT;
  60        e->flags = 0;
  61        if (flags & MDB_PG_FLAGS_OFFLOAD)
  62                e->flags |= MDB_FLAGS_OFFLOAD;
  63}
  64
/* Convert a UAPI MDB entry's group address into the bridge's internal
 * struct br_ip representation.  Note the IPv6 copy is the plain
 * "else" branch: any protocol other than ETH_P_IP copies the ip6
 * field (entries are validated earlier by is_valid_mdb_entry()).
 */
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	if (ip->proto == htons(ETH_P_IP))
		ip->u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip->u.ip6 = entry->addr.u.ip6;
#endif
}
  77
/* br_mdb_fill_info() - dump all MDB entries of one bridge into @skb.
 *
 * Walks the bridge's MDB hash table under RCU and emits an MDBA_MDB
 * nested attribute holding one MDBA_MDB_ENTRY per group, which in turn
 * holds one MDBA_MDB_ENTRY_INFO per member port plus the group timer.
 * cb->args[1] carries the per-bridge resume index across dump passes.
 *
 * Returns 0 when the table was fully dumped, -EMSGSIZE when @skb
 * filled up (the dump resumes later from cb->args[1]).
 * Caller must hold rcu_read_lock().
 */
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			/* Skip groups already emitted by an earlier
			 * (partial) dump pass.
			 */
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			      pp = &p->next) {
				struct nlattr *nest_ent;
				struct br_mdb_entry e;

				port = p->port;
				if (!port)
					continue;

				memset(&e, 0, sizeof(e));
				e.ifindex = port->dev->ifindex;
				e.vid = p->addr.vid;
				__mdb_entry_fill_flags(&e, p->flags);
				if (p->addr.proto == htons(ETH_P_IP))
					e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
				if (p->addr.proto == htons(ETH_P_IPV6))
					e.addr.u.ip6 = p->addr.u.ip6;
#endif
				e.addr.proto = p->addr.proto;
				nest_ent = nla_nest_start(skb,
							  MDBA_MDB_ENTRY_INFO);
				if (!nest_ent) {
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				/* Entry struct goes in raw (no attr
				 * header), followed by the timer attr.
				 */
				if (nla_put_nohdr(skb, sizeof(e), &e) ||
				    nla_put_u32(skb,
						MDBA_MDB_EATTR_TIMER,
						br_timer_value(&p->timer))) {
					nla_nest_cancel(skb, nest_ent);
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				nla_nest_end(skb, nest_ent);
			}
			nla_nest_end(skb, nest2);
		skip:
			idx++;
		}
	}

out:
	/* Record the resume point and close the (possibly partial)
	 * MDBA_MDB nest; a partial message is valid for dump resume.
	 */
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}
 164
/* br_mdb_dump() - RTM_GETMDB netlink dump handler.
 *
 * Iterates every bridge device in the netns under RCU and, for each,
 * emits one NLM_F_MULTI message containing its MDB entries and router
 * port list.  cb->args[0] is the bridge resume index, cb->args[1] the
 * per-bridge entry resume index (managed by br_mdb_fill_info()).
 */
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			/* This bridge is fully dumped; reset the
			 * per-bridge resume index for the next one.
			 */
			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	/* Finalize a partially filled message so the dump can resume
	 * cleanly on the next callback invocation.
	 */
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
 214
/* Build one RTM_NEWMDB/RTM_DELMDB notification message for @entry
 * into @skb: a br_port_msg header, then MDBA_MDB > MDBA_MDB_ENTRY >
 * MDBA_MDB_ENTRY_INFO carrying the raw entry struct.  (@flags is
 * currently unused.)
 *
 * Returns 0 on success, -EMSGSIZE when @skb is too small (the
 * partially built message is cancelled).
 */
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct br_mdb_entry *entry, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	/* nlmsg_cancel() below trims the whole message anyway, so
	 * closing the nest here is only for form.
	 */
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
 253
 254static inline size_t rtnl_mdb_nlmsg_size(void)
 255{
 256        return NLMSG_ALIGN(sizeof(struct br_port_msg))
 257                + nla_total_size(sizeof(struct br_mdb_entry));
 258}
 259
/* Context handed to the deferred switchdev completion callback
 * (br_mdb_complete()): identifies which port/group pair the offload
 * add was for.  Allocated in __br_mdb_notify(), freed by the
 * callback.
 */
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};
 264
/* Completion callback for a deferred SWITCHDEV_OBJ_ID_PORT_MDB add.
 *
 * Runs once the driver has processed the offload request.  On success
 * the group is looked up again under the multicast lock (it may have
 * been removed in the meantime) and the matching port group is marked
 * MDB_PG_FLAGS_OFFLOAD.  The br_mdb_complete_info context (@priv) is
 * always freed.
 */
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
 294
/* Propagate a host-joined MDB entry to one lower device of the bridge
 * as a deferred SWITCHDEV_OBJ_ID_HOST_MDB add (RTM_NEWMDB) or delete
 * (RTM_DELMDB), so hardware also delivers the group's traffic to the
 * host.  Note: any non-IPv4 protocol takes the IPv6 mapping branch
 * (plain "else"); entries are validated upstream of this call.
 */
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct br_mdb_entry *entry, int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};

	/* Derive the multicast MAC address from the group IP. */
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}
 324
 325static void br_mdb_switchdev_host(struct net_device *dev,
 326                                  struct br_mdb_entry *entry, int type)
 327{
 328        struct net_device *lower_dev;
 329        struct list_head *iter;
 330
 331        netdev_for_each_lower_dev(dev, lower_dev, iter)
 332                br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
 333}
 334
/* __br_mdb_notify() - push an MDB change to switchdev and userspace.
 *
 * For a port entry (@p != NULL) the change is offloaded to the port's
 * device as a deferred SWITCHDEV_OBJ_ID_PORT_MDB add/del; on add, a
 * completion context is allocated so br_mdb_complete() can set the
 * offload flag once the driver finishes (and free the context).  For
 * host entries (@p == NULL) the change goes to all lower devices via
 * br_mdb_switchdev_host().  Finally an RTNLGRP_MDB netlink
 * notification is broadcast, or an sk error recorded on failure.
 */
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	/* Map the group IP onto its multicast MAC address; any
	 * non-IPv4 protocol takes the IPv6 branch.
	 */
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (p && port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			/* On immediate failure the completion will not
			 * run, so free the context here.
			 */
			if (switchdev_port_obj_add(port_dev, &mdb.obj))
				kfree(complete_info);
		}
	} else if (p && port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	if (!p)
		br_mdb_switchdev_host(dev, entry, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
 392
 393void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 394                   struct br_ip *group, int type, u8 flags)
 395{
 396        struct br_mdb_entry entry;
 397
 398        memset(&entry, 0, sizeof(entry));
 399        if (port)
 400                entry.ifindex = port->dev->ifindex;
 401        else
 402                entry.ifindex = dev->ifindex;
 403        entry.addr.proto = group->proto;
 404        entry.addr.u.ip4 = group->u.ip4;
 405#if IS_ENABLED(CONFIG_IPV6)
 406        entry.addr.u.ip6 = group->u.ip6;
 407#endif
 408        entry.vid = group->vid;
 409        __mdb_entry_fill_flags(&entry, flags);
 410        __br_mdb_notify(dev, port, &entry, type);
 411}
 412
/* Build one router-port notification message into @skb: a
 * br_port_msg header and an MDBA_ROUTER nest holding the port's
 * ifindex as MDBA_ROUTER_PORT.  (@flags is currently unused.)
 *
 * Returns 0 on success, -EMSGSIZE when @skb is too small (the
 * partially built message is cancelled).
 */
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	/* nlmsg_cancel() below trims the whole message anyway. */
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
 447
 448static inline size_t rtnl_rtr_nlmsg_size(void)
 449{
 450        return NLMSG_ALIGN(sizeof(struct br_port_msg))
 451                + nla_total_size(sizeof(__u32));
 452}
 453
/* br_rtr_notify() - notify userspace (RTNLGRP_MDB) that a port was
 * added to or removed from the bridge's multicast router port list.
 * @type is RTM_NEWMDB or RTM_DELMDB; a NULL @port is reported with
 * ifindex 0.  On allocation/build failure an sk error is recorded
 * instead.
 */
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
 479
/* Validate a UAPI MDB entry received from userspace: require a
 * non-zero port ifindex, an IPv4/IPv6 multicast group address that is
 * not link-local (IPv4) / not all-nodes (IPv6), a known entry state,
 * and a legal VLAN id.  Returns true when the entry is acceptable.
 */
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	/* VLAN_VID_MASK itself (4095) is reserved and rejected too. */
	if (entry->vid >= VLAN_VID_MASK)
		return false;

	return true;
}
 504
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request.
 *
 * Resolves the bridge device from the br_port_msg header and the MDB
 * entry from the MDBA_SET_ENTRY attribute, validating both.  On
 * success *pdev and *pentry are set; *pentry points into the request
 * skb and must not outlive it.  Returns 0 or a negative errno.
 * Caller must hold RTNL (uses __dev_get_by_index()).
 */
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL,
			  NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	/* The entry attribute must be present and exactly sized. */
	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}
 554
/* Add a static MDB port-group entry for @group on @port.
 *
 * Looks up (or creates) the group, then inserts the new port group
 * into the group's member list; -EEXIST if the port is already a
 * member.  The list is kept ordered by descending port pointer value
 * (the loop breaks at the insertion point).  MDB_TEMPORARY entries
 * get a membership-interval expiry timer armed.
 * Caller must hold br->multicast_lock.
 *
 * Returns 0, -EEXIST, -ENOMEM, or the error from
 * br_multicast_new_group().
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}
 592
/* Resolve @entry->ifindex to a port of @br and add the MDB entry
 * under the multicast lock.  Rejects a stopped bridge, disabled
 * multicast snooping, unknown devices, ports belonging to another
 * bridge, and disabled ports.
 * Caller must hold RTNL (uses __dev_get_by_index()).
 */
static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct br_mdb_entry *entry)
{
	struct br_ip ip;
	struct net_device *dev;
	struct net_bridge_port *p;
	int ret;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	dev = __dev_get_by_index(net, entry->ifindex);
	if (!dev)
		return -ENODEV;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, &ip, entry->state);
	spin_unlock_bh(&br->multicast_lock);
	return ret;
}
 619
/* RTM_NEWMDB handler: add a static MDB entry to a bridge port.
 *
 * When VLAN filtering is enabled and no VLAN was specified
 * (entry->vid == 0), the entry is installed on every VLAN configured
 * on the port; each successful add is notified individually.
 *
 * NOTE(review): if one VLAN's add fails mid-loop, entries already
 * installed for earlier VLANs are not rolled back — confirm this is
 * the intended partial-success semantic.
 */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, entry);
			if (err)
				break;
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
		}
	} else {
		err = __br_mdb_add(net, br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_NEWMDB);
	}

	return err;
}
 666
/* Delete the port-group entry matching @entry under the multicast
 * lock.  On success, @entry's flags are updated from the removed
 * group (so the caller's notification reflects its real state) and
 * the group timer is armed so a now-memberless, non-host-joined group
 * can expire immediately.
 *
 * Returns 0 if an entry was removed, -EINVAL otherwise (bridge down,
 * snooping disabled, group or port membership not found, or port
 * disabled).
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		/* Unlink under the lock; readers free via RCU grace
		 * period (br_multicast_free_pg).
		 */
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		if (!mp->ports && !mp->host_joined &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
 714
/* RTM_DELMDB handler: remove a static MDB entry from a bridge port.
 *
 * Mirrors br_mdb_add(): when VLAN filtering is enabled and no VLAN
 * was specified, the entry is removed from every VLAN configured on
 * the port; unlike the add path, a per-VLAN failure does not stop the
 * loop, so the returned error reflects only the last VLAN attempted.
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_port *p;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	pdev = __dev_get_by_index(net, entry->ifindex);
	if (!pdev)
		return -ENODEV;

	p = br_port_get_rtnl(pdev);
	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		return -EINVAL;

	vg = nbp_vlan_group(p);
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry);
			if (!err)
				__br_mdb_notify(dev, p, entry, RTM_DELMDB);
		}
	} else {
		err = __br_mdb_del(br, entry);
		if (!err)
			__br_mdb_notify(dev, p, entry, RTM_DELMDB);
	}

	return err;
}
 760
/* Register the PF_BRIDGE MDB rtnetlink handlers: dump for RTM_GETMDB,
 * doit handlers for RTM_NEWMDB/RTM_DELMDB.
 */
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}
 767
/* Unregister the PF_BRIDGE MDB rtnetlink handlers registered by
 * br_mdb_init().
 */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
 774