/*
 * net/bridge/br_mdb.c - bridge multicast database (MDB) netlink interface
 */
   1#include <linux/err.h>
   2#include <linux/igmp.h>
   3#include <linux/kernel.h>
   4#include <linux/netdevice.h>
   5#include <linux/rculist.h>
   6#include <linux/skbuff.h>
   7#include <linux/if_ether.h>
   8#include <net/ip.h>
   9#include <net/netlink.h>
  10#include <net/switchdev.h>
  11#if IS_ENABLED(CONFIG_IPV6)
  12#include <net/ipv6.h>
  13#include <net/addrconf.h>
  14#endif
  15
  16#include "br_private.h"
  17
  18static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
  19                               struct net_device *dev)
  20{
  21        struct net_bridge *br = netdev_priv(dev);
  22        struct net_bridge_port *p;
  23        struct nlattr *nest, *port_nest;
  24
  25        if (!br->multicast_router || hlist_empty(&br->router_list))
  26                return 0;
  27
  28        nest = nla_nest_start(skb, MDBA_ROUTER);
  29        if (nest == NULL)
  30                return -EMSGSIZE;
  31
  32        hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
  33                if (!p)
  34                        continue;
  35                port_nest = nla_nest_start(skb, MDBA_ROUTER_PORT);
  36                if (!port_nest)
  37                        goto fail;
  38                if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
  39                    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
  40                                br_timer_value(&p->multicast_router_timer)) ||
  41                    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
  42                               p->multicast_router)) {
  43                        nla_nest_cancel(skb, port_nest);
  44                        goto fail;
  45                }
  46                nla_nest_end(skb, port_nest);
  47        }
  48
  49        nla_nest_end(skb, nest);
  50        return 0;
  51fail:
  52        nla_nest_cancel(skb, nest);
  53        return -EMSGSIZE;
  54}
  55
  56static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
  57{
  58        e->state = flags & MDB_PG_FLAGS_PERMANENT;
  59        e->flags = 0;
  60        if (flags & MDB_PG_FLAGS_OFFLOAD)
  61                e->flags |= MDB_FLAGS_OFFLOAD;
  62}
  63
  64static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
  65{
  66        memset(ip, 0, sizeof(struct br_ip));
  67        ip->vid = entry->vid;
  68        ip->proto = entry->addr.proto;
  69        if (ip->proto == htons(ETH_P_IP))
  70                ip->u.ip4 = entry->addr.u.ip4;
  71#if IS_ENABLED(CONFIG_IPV6)
  72        else
  73                ip->u.ip6 = entry->addr.u.ip6;
  74#endif
  75}
  76
/* Fill one bridge's MDB entries into @skb as a nested MDBA_MDB attribute.
 * Called from br_mdb_dump() under rcu_read_lock().
 *
 * cb->args[1] holds the per-bridge resume index across dump invocations:
 * entries with idx < s_idx were sent in a previous pass and are skipped.
 * Returns 0 on success or -EMSGSIZE when the skb filled up; in that case
 * cb->args[1] records how far we got so the dump can resume.
 */
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_htable *mdb;
	struct nlattr *nest, *nest2;
	int i, err = 0;
	int idx = 0, s_idx = cb->args[1];

	if (br->multicast_disabled)
		return 0;

	mdb = rcu_dereference(br->mdb);
	if (!mdb)
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	/* Walk every hash bucket; mdb->ver selects the currently-live hash
	 * chain (the table keeps two for rehashing).
	 */
	for (i = 0; i < mdb->max; i++) {
		struct net_bridge_mdb_entry *mp;
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;
		struct net_bridge_port *port;

		hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
			if (idx < s_idx)
				goto skip;

			nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
			if (nest2 == NULL) {
				err = -EMSGSIZE;
				goto out;
			}

			/* One MDBA_MDB_ENTRY_INFO per port subscribed to
			 * this group.
			 */
			for (pp = &mp->ports;
			     (p = rcu_dereference(*pp)) != NULL;
			      pp = &p->next) {
				struct nlattr *nest_ent;
				struct br_mdb_entry e;

				port = p->port;
				if (!port)
					continue;

				memset(&e, 0, sizeof(e));
				e.ifindex = port->dev->ifindex;
				e.vid = p->addr.vid;
				__mdb_entry_fill_flags(&e, p->flags);
				if (p->addr.proto == htons(ETH_P_IP))
					e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
				if (p->addr.proto == htons(ETH_P_IPV6))
					e.addr.u.ip6 = p->addr.u.ip6;
#endif
				e.addr.proto = p->addr.proto;
				nest_ent = nla_nest_start(skb,
							  MDBA_MDB_ENTRY_INFO);
				if (!nest_ent) {
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				if (nla_put_nohdr(skb, sizeof(e), &e) ||
				    nla_put_u32(skb,
						MDBA_MDB_EATTR_TIMER,
						br_timer_value(&p->timer))) {
					nla_nest_cancel(skb, nest_ent);
					nla_nest_cancel(skb, nest2);
					err = -EMSGSIZE;
					goto out;
				}
				nla_nest_end(skb, nest_ent);
			}
			nla_nest_end(skb, nest2);
		skip:
			idx++;
		}
	}

out:
	/* Record resume position; the (possibly empty) MDBA_MDB nest is
	 * still closed so the message stays well-formed.
	 */
	cb->args[1] = idx;
	nla_nest_end(skb, nest);
	return err;
}
 163
/* RTM_GETMDB dump handler: emit one NLM_F_MULTI message per bridge device
 * in the namespace, each carrying that bridge's MDB entries and router
 * ports.  cb->args[0] is the device resume index, cb->args[1] the
 * per-bridge entry resume index used by br_mdb_fill_info().
 */
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	s_idx = cb->args[0];

	rcu_read_lock();

	/* Dump-consistency sequence: changes to the device list or an MDB
	 * rehash between dump passes bump this, letting userspace detect
	 * an inconsistent dump.
	 */
	/* In theory this could be wrapped to 0... */
	cb->seq = net->dev_base_seq + br_mdb_rehash_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			/* On -EMSGSIZE, finalize what we have; the dump
			 * resumes from cb->args[0]/[1] on the next call.
			 */
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	/* nlmsg_end() again on an already-ended message is harmless: it
	 * just recomputes nlmsg_len.  Needed for the partial-fill paths.
	 */
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
 213
 214static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
 215                                   struct net_device *dev,
 216                                   struct br_mdb_entry *entry, u32 pid,
 217                                   u32 seq, int type, unsigned int flags)
 218{
 219        struct nlmsghdr *nlh;
 220        struct br_port_msg *bpm;
 221        struct nlattr *nest, *nest2;
 222
 223        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
 224        if (!nlh)
 225                return -EMSGSIZE;
 226
 227        bpm = nlmsg_data(nlh);
 228        memset(bpm, 0, sizeof(*bpm));
 229        bpm->family  = AF_BRIDGE;
 230        bpm->ifindex = dev->ifindex;
 231        nest = nla_nest_start(skb, MDBA_MDB);
 232        if (nest == NULL)
 233                goto cancel;
 234        nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
 235        if (nest2 == NULL)
 236                goto end;
 237
 238        if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
 239                goto end;
 240
 241        nla_nest_end(skb, nest2);
 242        nla_nest_end(skb, nest);
 243        nlmsg_end(skb, nlh);
 244        return 0;
 245
 246end:
 247        nla_nest_end(skb, nest);
 248cancel:
 249        nlmsg_cancel(skb, nlh);
 250        return -EMSGSIZE;
 251}
 252
 253static inline size_t rtnl_mdb_nlmsg_size(void)
 254{
 255        return NLMSG_ALIGN(sizeof(struct br_port_msg))
 256                + nla_total_size(sizeof(struct br_mdb_entry));
 257}
 258
/* Context handed to the deferred switchdev completion callback
 * (br_mdb_complete()): identifies the bridge port and group address
 * whose port-group should be marked as hardware-offloaded once the
 * driver finishes the add.  Allocated in __br_mdb_notify(), freed by
 * the callback.
 */
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};
 263
/* Deferred switchdev completion callback for an MDB add.  If the
 * hardware add succeeded (@err == 0), look the group up again and set
 * MDB_PG_FLAGS_OFFLOAD on the matching port's port-group entry.  The
 * entry may have been removed in the meantime, hence the fresh lookup
 * under multicast_lock.  Always frees @priv (the br_mdb_complete_info).
 */
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
 293
/* Propagate an MDB add/delete both to switchdev hardware (deferred) and
 * to userspace via an RTNLGRP_MDB netlink notification.
 *
 * For RTM_NEWMDB the switchdev object carries a completion callback so
 * the entry can be flagged as offloaded once the driver finishes; the
 * callback owns and frees complete_info.  For RTM_DELMDB the object is
 * deleted synchronously (deferred by SWITCHDEV_F_DEFER).
 */
static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
			    struct br_mdb_entry *entry, int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = entry->vid,
	};
	struct net_device *port_dev;
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	port_dev = __dev_get_by_index(net, entry->ifindex);
	/* Switchdev wants the multicast MAC, not the IP group address. */
	if (entry->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = port_dev;
	if (port_dev && type == RTM_NEWMDB) {
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (complete_info) {
			complete_info->port = p;
			__mdb_entry_to_br_ip(entry, &complete_info->ip);
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			/* On immediate failure the callback never runs,
			 * so free the context here.
			 */
			if (switchdev_port_obj_add(port_dev, &mdb.obj))
				kfree(complete_info);
		}
	} else if (port_dev && type == RTM_DELMDB) {
		switchdev_port_obj_del(port_dev, &mdb.obj);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
 348
 349void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
 350                   struct br_ip *group, int type, u8 flags)
 351{
 352        struct br_mdb_entry entry;
 353
 354        memset(&entry, 0, sizeof(entry));
 355        entry.ifindex = port->dev->ifindex;
 356        entry.addr.proto = group->proto;
 357        entry.addr.u.ip4 = group->u.ip4;
 358#if IS_ENABLED(CONFIG_IPV6)
 359        entry.addr.u.ip6 = group->u.ip6;
 360#endif
 361        entry.vid = group->vid;
 362        __mdb_entry_fill_flags(&entry, flags);
 363        __br_mdb_notify(dev, port, &entry, type);
 364}
 365
 366static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
 367                                   struct net_device *dev,
 368                                   int ifindex, u32 pid,
 369                                   u32 seq, int type, unsigned int flags)
 370{
 371        struct br_port_msg *bpm;
 372        struct nlmsghdr *nlh;
 373        struct nlattr *nest;
 374
 375        nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
 376        if (!nlh)
 377                return -EMSGSIZE;
 378
 379        bpm = nlmsg_data(nlh);
 380        memset(bpm, 0, sizeof(*bpm));
 381        bpm->family = AF_BRIDGE;
 382        bpm->ifindex = dev->ifindex;
 383        nest = nla_nest_start(skb, MDBA_ROUTER);
 384        if (!nest)
 385                goto cancel;
 386
 387        if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
 388                goto end;
 389
 390        nla_nest_end(skb, nest);
 391        nlmsg_end(skb, nlh);
 392        return 0;
 393
 394end:
 395        nla_nest_end(skb, nest);
 396cancel:
 397        nlmsg_cancel(skb, nlh);
 398        return -EMSGSIZE;
 399}
 400
 401static inline size_t rtnl_rtr_nlmsg_size(void)
 402{
 403        return NLMSG_ALIGN(sizeof(struct br_port_msg))
 404                + nla_total_size(sizeof(__u32));
 405}
 406
 407void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
 408                   int type)
 409{
 410        struct net *net = dev_net(dev);
 411        struct sk_buff *skb;
 412        int err = -ENOBUFS;
 413        int ifindex;
 414
 415        ifindex = port ? port->dev->ifindex : 0;
 416        skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
 417        if (!skb)
 418                goto errout;
 419
 420        err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
 421        if (err < 0) {
 422                kfree_skb(skb);
 423                goto errout;
 424        }
 425
 426        rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
 427        return;
 428
 429errout:
 430        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 431}
 432
 433static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
 434{
 435        if (entry->ifindex == 0)
 436                return false;
 437
 438        if (entry->addr.proto == htons(ETH_P_IP)) {
 439                if (!ipv4_is_multicast(entry->addr.u.ip4))
 440                        return false;
 441                if (ipv4_is_local_multicast(entry->addr.u.ip4))
 442                        return false;
 443#if IS_ENABLED(CONFIG_IPV6)
 444        } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
 445                if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
 446                        return false;
 447#endif
 448        } else
 449                return false;
 450        if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
 451                return false;
 452        if (entry->vid >= VLAN_VID_MASK)
 453                return false;
 454
 455        return true;
 456}
 457
 458static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 459                        struct net_device **pdev, struct br_mdb_entry **pentry)
 460{
 461        struct net *net = sock_net(skb->sk);
 462        struct br_mdb_entry *entry;
 463        struct br_port_msg *bpm;
 464        struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
 465        struct net_device *dev;
 466        int err;
 467
 468        err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX, NULL);
 469        if (err < 0)
 470                return err;
 471
 472        bpm = nlmsg_data(nlh);
 473        if (bpm->ifindex == 0) {
 474                pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
 475                return -EINVAL;
 476        }
 477
 478        dev = __dev_get_by_index(net, bpm->ifindex);
 479        if (dev == NULL) {
 480                pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
 481                return -ENODEV;
 482        }
 483
 484        if (!(dev->priv_flags & IFF_EBRIDGE)) {
 485                pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
 486                return -EOPNOTSUPP;
 487        }
 488
 489        *pdev = dev;
 490
 491        if (!tb[MDBA_SET_ENTRY] ||
 492            nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
 493                pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
 494                return -EINVAL;
 495        }
 496
 497        entry = nla_data(tb[MDBA_SET_ENTRY]);
 498        if (!is_valid_mdb_entry(entry)) {
 499                pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
 500                return -EINVAL;
 501        }
 502
 503        *pentry = entry;
 504        return 0;
 505}
 506
/* Add @port to multicast @group on @br.  Caller holds br->multicast_lock.
 *
 * Creates the MDB entry if the group is new, then inserts a new
 * port-group at the right position in the entry's pointer-sorted port
 * list.  Temporary entries get an expiry timer armed.
 * Returns 0, -EEXIST if the port is already a member, or -ENOMEM.
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	unsigned long now = jiffies;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* Find the insertion point: the list is kept sorted by the
	 * port pointer value (descending); stop at the first smaller
	 * pointer so *pp is where the new entry belongs.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state, NULL);
	if (unlikely(!p))
		return -ENOMEM;
	rcu_assign_pointer(*pp, p);
	if (state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);

	return 0;
}
 544
 545static int __br_mdb_add(struct net *net, struct net_bridge *br,
 546                        struct br_mdb_entry *entry)
 547{
 548        struct br_ip ip;
 549        struct net_device *dev;
 550        struct net_bridge_port *p;
 551        int ret;
 552
 553        if (!netif_running(br->dev) || br->multicast_disabled)
 554                return -EINVAL;
 555
 556        dev = __dev_get_by_index(net, entry->ifindex);
 557        if (!dev)
 558                return -ENODEV;
 559
 560        p = br_port_get_rtnl(dev);
 561        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 562                return -EINVAL;
 563
 564        __mdb_entry_to_br_ip(entry, &ip);
 565
 566        spin_lock_bh(&br->multicast_lock);
 567        ret = br_mdb_add_group(br, p, &ip, entry->state);
 568        spin_unlock_bh(&br->multicast_lock);
 569        return ret;
 570}
 571
 572static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 573{
 574        struct net *net = sock_net(skb->sk);
 575        struct net_bridge_vlan_group *vg;
 576        struct net_device *dev, *pdev;
 577        struct br_mdb_entry *entry;
 578        struct net_bridge_port *p;
 579        struct net_bridge_vlan *v;
 580        struct net_bridge *br;
 581        int err;
 582
 583        err = br_mdb_parse(skb, nlh, &dev, &entry);
 584        if (err < 0)
 585                return err;
 586
 587        br = netdev_priv(dev);
 588
 589        /* If vlan filtering is enabled and VLAN is not specified
 590         * install mdb entry on all vlans configured on the port.
 591         */
 592        pdev = __dev_get_by_index(net, entry->ifindex);
 593        if (!pdev)
 594                return -ENODEV;
 595
 596        p = br_port_get_rtnl(pdev);
 597        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 598                return -EINVAL;
 599
 600        vg = nbp_vlan_group(p);
 601        if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
 602                list_for_each_entry(v, &vg->vlan_list, vlist) {
 603                        entry->vid = v->vid;
 604                        err = __br_mdb_add(net, br, entry);
 605                        if (err)
 606                                break;
 607                        __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 608                }
 609        } else {
 610                err = __br_mdb_add(net, br, entry);
 611                if (!err)
 612                        __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
 613        }
 614
 615        return err;
 616}
 617
/* Remove the port identified by entry->ifindex from the group described
 * by @entry.  Takes br->multicast_lock itself.
 *
 * On success the port-group is unlinked RCU-safely and freed after a
 * grace period, entry's flags are refreshed from the removed entry (so
 * the caller's notification reports the real state), and the MDB
 * entry's own expiry timer is fired if no members remain.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip);

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		__mdb_entry_fill_flags(entry, p->flags);
		/* Unlink first, then tear down: readers under RCU may
		 * still see the old entry until the grace period ends.
		 */
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		/* Last member gone: let the group entry itself expire. */
		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
 665
 666static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
 667{
 668        struct net *net = sock_net(skb->sk);
 669        struct net_bridge_vlan_group *vg;
 670        struct net_device *dev, *pdev;
 671        struct br_mdb_entry *entry;
 672        struct net_bridge_port *p;
 673        struct net_bridge_vlan *v;
 674        struct net_bridge *br;
 675        int err;
 676
 677        err = br_mdb_parse(skb, nlh, &dev, &entry);
 678        if (err < 0)
 679                return err;
 680
 681        br = netdev_priv(dev);
 682
 683        /* If vlan filtering is enabled and VLAN is not specified
 684         * delete mdb entry on all vlans configured on the port.
 685         */
 686        pdev = __dev_get_by_index(net, entry->ifindex);
 687        if (!pdev)
 688                return -ENODEV;
 689
 690        p = br_port_get_rtnl(pdev);
 691        if (!p || p->br != br || p->state == BR_STATE_DISABLED)
 692                return -EINVAL;
 693
 694        vg = nbp_vlan_group(p);
 695        if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
 696                list_for_each_entry(v, &vg->vlan_list, vlist) {
 697                        entry->vid = v->vid;
 698                        err = __br_mdb_del(br, entry);
 699                        if (!err)
 700                                __br_mdb_notify(dev, p, entry, RTM_DELMDB);
 701                }
 702        } else {
 703                err = __br_mdb_del(br, entry);
 704                if (!err)
 705                        __br_mdb_notify(dev, p, entry, RTM_DELMDB);
 706        }
 707
 708        return err;
 709}
 710
/* Register the PF_BRIDGE MDB rtnetlink handlers: dump for GET,
 * doit handlers for NEW/DEL.  Called from bridge module init.
 */
void br_mdb_init(void)
{
	rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
	rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
	rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}
 717
/* Unregister the handlers installed by br_mdb_init(); called on
 * bridge module teardown.
 */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
 724